remove multi decoder
app.py CHANGED
@@ -13,16 +13,13 @@ import uuid
 device = "cuda" if torch.cuda.is_available() else "cpu"
 dtype = torch.float16
 
-MULTI_DECODER = (
-    torch.cuda.get_device_properties(0).total_memory < 18 * 1024 * 1024 * 1024
-)
-LOW_MEMORY = os.getenv("LOW_MEMORY", not MULTI_DECODER) == "1"
+LOW_MEMORY = os.getenv("LOW_MEMORY", "0") == "1"
 
 print(f"device: {device}")
 print(f"dtype: {dtype}")
-print(f"multi decoder: {MULTI_DECODER}")
 print(f"low memory: {LOW_MEMORY}")
 
+
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=dtype)
 pipe = DiffusionPipeline.from_pretrained(
     "stabilityai/stable-diffusion-xl-base-1.0",
@@ -33,6 +30,7 @@ pipe = DiffusionPipeline.from_pretrained(
     use_safetensors=True,
     vae=vae,
 )
+
 pipe = pipe.to(device)
 
 
@@ -96,7 +94,7 @@ def predict(
         cosine_scale_2=1,
         cosine_scale_3=1,
         sigma=0.8,
-        multi_decoder=MULTI_DECODER,
+        multi_decoder=False,
         show_image=False,
         lowvram=LOW_MEMORY,
     )
@@ -127,7 +125,7 @@ with gr.Blocks(css=css) as demo:
 [DemoFusion](https://ruoyidu.github.io/demofusion/demofusion.html) enables higher-resolution image generation.
 You can upload an initial image and prompt to generate an enhanced version.
 [Duplicate Space](https://huggingface.co/spaces/radames/Enhance-This-DemoFusion-SDXL?duplicate=true) to avoid the queue.
-GPU Time Comparison: T4: - A10G: ~175s A100: RTX 4090: ~48.1s
+GPU Time Comparison: T4: ~276s - A10G: ~175s A100: ~43.5s RTX 4090: ~48.1s
 
 <small>
 <b>Notes</b> The author advises against the term "super resolution" because it's more like image-to-image generation than enhancement, but it's still a lot of fun!
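
For context on the change itself: the removed block enabled DemoFusion's tiled multi-decoder automatically on GPUs reporting less than 18 GiB of total memory (18 * 1024 * 1024 * 1024 bytes), while the new code hard-codes `multi_decoder=False` and keeps a single explicit switch, the `LOW_MEMORY` environment variable, which feeds the pipeline's `lowvram` flag. Below is a minimal sketch contrasting the two behaviors, using the names from the diff plus a small `GIB` helper; it is an illustration under those assumptions, not the Space's exact code:

```python
import os
import torch

GIB = 1024 * 1024 * 1024  # bytes per gibibyte (helper added for readability)


def old_flags() -> tuple[bool, bool]:
    """Pre-commit behavior (needs a CUDA device): tiled decoding switched
    on automatically for GPUs with under 18 GiB of total memory."""
    multi_decoder = (
        torch.cuda.get_device_properties(0).total_memory < 18 * GIB
    )
    # Quirk this commit removes: os.getenv's default here is a bool, and a
    # bool never equals the string "1", so LOW_MEMORY was True only when
    # the env var was literally "1" -- the derived default had no effect.
    low_memory = os.getenv("LOW_MEMORY", not multi_decoder) == "1"
    return multi_decoder, low_memory


def new_flags() -> bool:
    """Post-commit behavior: no GPU probing, one explicit opt-in."""
    return os.getenv("LOW_MEMORY", "0") == "1"
```

With the new scheme, anyone duplicating the Space can opt into low-memory mode by setting `LOW_MEMORY=1` in the Space's environment variables (or in the shell before launching `app.py`).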
|