Update app.py
app.py CHANGED

@@ -13,7 +13,7 @@ from diffusers import StableDiffusionPipeline
 from diffusers import DDIMScheduler
 from transformers import AutoProcessor, BlipForConditionalGeneration
 from share_btn import community_icon_html, loading_icon_html, share_js
-import spaces
+# import spaces
 
 # load pipelines
 sd_model_id = "stabilityai/stable-diffusion-2-1-base"
@@ -24,7 +24,7 @@ sem_pipe = SemanticStableDiffusionPipeline.from_pretrained(sd_model_id, torch_dt
 blip_processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
 blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base",torch_dtype=torch.float16).to(device)
 
-@spaces.GPU
+# @spaces.GPU
 ## IMAGE CPATIONING ##
 def caption_image(input_image):
     inputs = blip_processor(images=input_image, return_tensors="pt").to(device, torch.float16)