Update README.md
README.md CHANGED
````diff
@@ -36,7 +36,7 @@ when compared to LIFT, SIFT and ORB.*
 Here is a quick example of using the model to detect interest points in an image:
 
 ```python
-from transformers import AutoImageProcessor,
+from transformers import AutoImageProcessor, SuperPointForKeypointDetection
 import torch
 from PIL import Image
 import requests
@@ -44,8 +44,8 @@ import requests
 url = "http://images.cocodataset.org/val2017/000000039769.jpg"
 image = Image.open(requests.get(url, stream=True).raw)
 
-processor = AutoImageProcessor.from_pretrained("
-model =
+processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
+model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
 
 inputs = processor(image, return_tensors="pt")
 outputs = model(**inputs)
@@ -57,7 +57,7 @@ You can also feed multiple images to the model. Due to the nature of SuperPoint,
 you will need to use the mask attribute to retrieve the respective information :
 
 ```python
-from transformers import AutoImageProcessor,
+from transformers import AutoImageProcessor, SuperPointForKeypointDetection
 import torch
 from PIL import Image
 import requests
@@ -69,8 +69,8 @@ image_2 = Image.open(requests.get(url_image_2, stream=True).raw)
 
 images = [image_1, image_2]
 
-processor = AutoImageProcessor.from_pretrained("
-model =
+processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
+model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
 
 inputs = processor(images, return_tensors="pt")
 outputs = model(**inputs)
````
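Both updated snippets stop at the forward pass. As a rough illustration of what comes next for the single-image example, here is a minimal sketch of reading the detections, assuming the model output exposes `keypoints`, `scores`, and `descriptors` tensors; those field names are an assumption based on the model's purpose, not something this diff confirms:

```python
# Hedged sketch: inspecting the output of the single-image example above.
# Assumes `outputs` carries batched `keypoints`, `scores`, and `descriptors`
# tensors indexed by image; field names are assumptions, not from this diff.
keypoints = outputs.keypoints[0]      # (num_keypoints, 2) keypoint coordinates
scores = outputs.scores[0]            # one confidence score per keypoint
descriptors = outputs.descriptors[0]  # one descriptor vector per keypoint

print(f"Detected {keypoints.shape[0]} interest points")
```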
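For the batched example, the README text says the mask attribute is needed to retrieve per-image results: since each image can yield a different number of keypoints, the batch is presumably padded, with the mask marking which entries are real detections. A sketch under that assumption:

```python
# Hedged sketch: splitting padded batch outputs with the mask attribute,
# assuming outputs.mask[i] holds 1 for real keypoints and 0 for padding
# (an assumption about the output layout, not confirmed by this diff).
for i in range(len(images)):
    valid = outputs.mask[i].bool()               # drop the padded entries
    keypoints = outputs.keypoints[i][valid]      # keypoints for image i
    scores = outputs.scores[i][valid]            # their confidence scores
    descriptors = outputs.descriptors[i][valid]  # their descriptor vectors
    print(f"image {i}: {keypoints.shape[0]} keypoints")
```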