import os
import json
import copy
import time
import random
import logging
import numpy as np
from typing import Any, Dict, List, Optional, Union
import torch
from PIL import Image
import gradio as gr
import spaces
from diffusers import DiffusionPipeline
from huggingface_hub import (
hf_hub_download,
HfFileSystem,
ModelCard,
snapshot_download)
from diffusers.utils import load_image
import requests
from urllib.parse import urlparse
import tempfile
import shutil
import uuid
import zipfile
# Helper functions
def save_image(img):
unique_name = str(uuid.uuid4()) + ".png"
img.save(unique_name)
return unique_name
MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 2048
# Load Qwen/Qwen-Image pipeline
dtype = torch.bfloat16
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load Qwen model
pipe = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=dtype).to(device)
# Aspect ratios
aspect_ratios = {
"1:1": (1328, 1328),
"16:9": (1664, 928),
"9:16": (928, 1664),
"4:3": (1472, 1140),
"3:4": (1140, 1472)
}
loras = [
# Sample Qwen-compatible LoRAs
{
"image": "https://huggingface.co/prithivMLmods/Qwen-Image-Studio-Realism/resolve/main/images/2.png",
"title": "Studio Realism",
"repo": "prithivMLmods/Qwen-Image-Studio-Realism",
"weights": "qwen-studio-realism.safetensors",
"trigger_word": "Studio Realism"
},
{
"image": "https://huggingface.co/prithivMLmods/Qwen-Image-Sketch-Smudge/resolve/main/images/1.png",
"title": "Sketch Smudge",
"repo": "prithivMLmods/Qwen-Image-Sketch-Smudge",
"weights": "qwen-sketch-smudge.safetensors",
"trigger_word": "Sketch Smudge"
},
{
"image": "https://huggingface.co/prithivMLmods/Qwen-Image-Anime-LoRA/resolve/main/images/1.png",
"title": "Qwen Anime",
"repo": "prithivMLmods/Qwen-Image-Anime-LoRA",
"weights": "qwen-anime.safetensors",
"trigger_word": "Qwen Anime"
},
{
"image": "https://huggingface.co/prithivMLmods/Qwen-Image-Synthetic-Face/resolve/main/images/2.png",
"title": "Synthetic Face",
"repo": "prithivMLmods/Qwen-Image-Synthetic-Face",
"weights": "qwen-synthetic-face.safetensors",
"trigger_word": "Synthetic Face"
},
{
"image": "https://huggingface.co/prithivMLmods/Qwen-Image-Fragmented-Portraiture/resolve/main/images/3.png",
"title": "Fragmented Portraiture",
"repo": "prithivMLmods/Qwen-Image-Fragmented-Portraiture",
"weights": "qwen-fragmented-portraiture.safetensors",
"trigger_word": "Fragmented Portraiture"
},
]
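# Resolve a user-supplied LoRA reference (repo id, Hugging Face repo/blob/resolve URL,
# or direct .safetensors link) and load it into the pipeline as the "default" adapter.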
def load_lora_opt(pipe, lora_input):
lora_input = lora_input.strip()
if not lora_input:
return
# If it's just an ID like "author/model"
if "/" in lora_input and not lora_input.startswith("http"):
pipe.load_lora_weights(lora_input, adapter_name="default")
return
if lora_input.startswith("http"):
url = lora_input
# Repo page (no blob/resolve)
if "huggingface.co" in url and "/blob/" not in url and "/resolve/" not in url:
repo_id = urlparse(url).path.strip("/")
pipe.load_lora_weights(repo_id, adapter_name="default")
return
# Blob link → convert to resolve link
if "/blob/" in url:
url = url.replace("/blob/", "/resolve/")
# Download direct file
tmp_dir = tempfile.mkdtemp()
local_path = os.path.join(tmp_dir, os.path.basename(urlparse(url).path))
try:
print(f"Downloading LoRA from {url}...")
resp = requests.get(url, stream=True)
resp.raise_for_status()
with open(local_path, "wb") as f:
for chunk in resp.iter_content(chunk_size=8192):
f.write(chunk)
print(f"Saved LoRA to {local_path}")
pipe.load_lora_weights(local_path, adapter_name="default")
finally:
shutil.rmtree(tmp_dir, ignore_errors=True)
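# Example inputs accepted by load_lora_opt above (illustrative, built from the sample LoRA list):
#   "prithivMLmods/Qwen-Image-Sketch-Smudge"                          (repo id)
#   "https://huggingface.co/prithivMLmods/Qwen-Image-Sketch-Smudge"   (repo page)
#   "https://huggingface.co/prithivMLmods/Qwen-Image-Sketch-Smudge/blob/main/qwen-sketch-smudge.safetensors"  (blob link, converted to a resolve link)

# Look up an "author/model" repo via the Hugging Face API and return
# (title, repo id, safetensors filename, trigger word, preview image URL).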
def get_huggingface_safetensors(link):
split_link = link.split("/")
if len(split_link) == 2:
try:
response = requests.get(f"https://huggingface.co/api/models/{link}")
response.raise_for_status()
model_info = response.json()
            # Check that the repo is tagged as a Qwen(-Image) model
            tags = model_info.get("tags", [])
            if not any("qwen" in tag.lower() for tag in tags):
                raise Exception("Not a Qwen LoRA model!")
# Get image if available
image_url = None
if "cardData" in model_info and "widget" in model_info["cardData"]:
if len(model_info["cardData"]["widget"]) > 0:
image_url = model_info["cardData"]["widget"][0].get("output", {}).get("url", None)
# Try to find safetensors file
safetensors_name = None
try:
model_files = requests.get(f"https://huggingface.co/api/models/{link}/tree/main").json()
for file in model_files:
if file.get("path", "").endswith(".safetensors"):
safetensors_name = file["path"]
break
            except Exception:
                pass
            # The trigger word, when published, is stored in the card metadata as "instance_prompt"
            trigger_word = model_info.get("cardData", {}).get("instance_prompt", "")
            return split_link[1], link, safetensors_name, trigger_word, image_url
except Exception as e:
print(f"Error getting model info: {e}")
raise Exception(f"Failed to get model info: {e}")
return None, None, None, None, None
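# Normalize a custom LoRA entry (full URL or bare repo id) before querying the Hub API.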
def check_custom_model(link):
    if link.startswith("https://"):
        if link.startswith("https://huggingface.co") or link.startswith("https://www.huggingface.co"):
            link_split = link.split("huggingface.co/")
            return get_huggingface_safetensors(link_split[1])
        raise Exception("Only Hugging Face links are supported")
    else:
        return get_huggingface_safetensors(link)
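# Validate a custom LoRA, render its info card, and append it to the gallery list.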
def add_custom_lora(custom_lora):
global loras
if custom_lora:
try:
title, repo, path, trigger_word, image = check_custom_model(custom_lora)
if not title:
raise Exception("Invalid LoRA model")
print(f"Loaded custom LoRA: {repo}")
card = f'''
<div class="custom_lora_card">
<span>Loaded custom LoRA:</span>
<div class="card_internal">
<img src="{image}" />
<div>
<h3>{title}</h3>
<small>{"Using: <code><b>"+trigger_word+"</code></b> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}<br></small>
</div>
</div>
</div>
'''
existing_item_index = next((index for (index, item) in enumerate(loras) if item['repo'] == repo), None)
            if existing_item_index is None:
new_item = {
"image": image,
"title": title,
"repo": repo,
"weights": path,
"trigger_word": trigger_word
}
existing_item_index = len(loras)
loras.append(new_item)
return gr.update(visible=True, value=card), gr.update(visible=True), gr.Gallery(selected_index=None), f"Custom: {path}", existing_item_index, trigger_word
except Exception as e:
gr.Warning(f"Invalid LoRA: either you entered an invalid link, or a non-Qwen LoRA")
return gr.update(visible=True, value=f"Invalid LoRA: either you entered an invalid link, a non-Qwen LoRA"), gr.update(visible=False), gr.update(), "", None, ""
else:
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
def remove_custom_lora():
return gr.update(visible=False), gr.update(visible=False), gr.update(), "", None, ""
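# Gallery selection handler: update the prompt placeholder, the selected-LoRA info,
# and (when the LoRA specifies one) the preferred width/height.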
def update_selection(evt: gr.SelectData, width, height):
selected_lora = loras[evt.index]
new_placeholder = f"Type a prompt for {selected_lora['title']}"
lora_repo = selected_lora["repo"]
updated_text = f"### Selected: [{lora_repo}](https://huggingface.co/{lora_repo}) ✅"
# Update aspect ratio based on LoRA if it has aspect info
if "aspect" in selected_lora:
if selected_lora["aspect"] == "portrait":
width = 928
height = 1664
elif selected_lora["aspect"] == "landscape":
width = 1664
height = 928
else:
width = 1328
height = 1328
return (
gr.update(placeholder=new_placeholder),
updated_text,
evt.index,
width,
height,
)
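# Core generation: optionally attach a LoRA, run the Qwen-Image pipeline,
# save the images as PNGs, and optionally bundle them into a ZIP.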
@spaces.GPU(duration=120)
def generate_qwen(
prompt: str,
negative_prompt: str = "",
seed: int = 0,
width: int = 1024,
height: int = 1024,
guidance_scale: float = 4.0,
randomize_seed: bool = False,
num_inference_steps: int = 50,
num_images: int = 1,
zip_images: bool = False,
lora_input: str = "",
lora_scale: float = 1.0,
progress=gr.Progress(track_tqdm=True),
):
if randomize_seed:
seed = random.randint(0, MAX_SEED)
generator = torch.Generator(device).manual_seed(seed)
start_time = time.time()
    # Clear any existing LoRA adapters (get_list_adapters() returns {component: [adapter names]})
    current_adapters = pipe.get_list_adapters()
    adapter_names = {name for names in current_adapters.values() for name in names}
    if adapter_names:
        pipe.delete_adapters(list(adapter_names))
    pipe.disable_lora()
use_lora = False
if lora_input and lora_input.strip() != "":
load_lora_opt(pipe, lora_input)
pipe.set_adapters(["default"], adapter_weights=[lora_scale])
use_lora = True
images = pipe(
prompt=prompt,
negative_prompt=negative_prompt if negative_prompt else "",
height=height,
width=width,
guidance_scale=guidance_scale,
num_inference_steps=num_inference_steps,
num_images_per_prompt=num_images,
generator=generator,
output_type="pil",
).images
end_time = time.time()
duration = end_time - start_time
image_paths = [save_image(img) for img in images]
zip_path = None
if zip_images:
zip_name = str(uuid.uuid4()) + ".zip"
with zipfile.ZipFile(zip_name, 'w') as zipf:
for i, img_path in enumerate(image_paths):
zipf.write(img_path, arcname=f"Img_{i}.png")
zip_path = zip_name
    # Clean up adapters after generation
    current_adapters = pipe.get_list_adapters()
    adapter_names = {name for names in current_adapters.values() for name in names}
    if adapter_names:
        pipe.delete_adapters(list(adapter_names))
    pipe.disable_lora()
return image_paths, seed, f"{duration:.2f}", zip_path
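# Gradio entry point: prepend the selected LoRA's trigger word to the prompt,
# then delegate to generate_qwen with the chosen settings.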
@spaces.GPU(duration=120)
def run_lora(
prompt: str,
negative_prompt: str,
use_negative_prompt: bool,
seed: int,
width: int,
height: int,
guidance_scale: float,
randomize_seed: bool,
num_inference_steps: int,
num_images: int,
zip_images: bool,
selected_index: int,
lora_scale: float,
progress=gr.Progress(track_tqdm=True),
):
if selected_index is None:
raise gr.Error("You must select a LoRA before proceeding.🧨")
selected_lora = loras[selected_index]
lora_repo = selected_lora["repo"]
trigger_word = selected_lora["trigger_word"]
if trigger_word:
prompt_mash = f"{trigger_word} {prompt}"
else:
prompt_mash = prompt
final_negative_prompt = negative_prompt if use_negative_prompt else ""
if randomize_seed:
seed = random.randint(0, MAX_SEED)
return generate_qwen(
prompt=prompt_mash,
negative_prompt=final_negative_prompt,
seed=seed,
width=width,
height=height,
guidance_scale=guidance_scale,
randomize_seed=False, # Already handled
num_inference_steps=num_inference_steps,
num_images=num_images,
zip_images=zip_images,
lora_input=lora_repo,
lora_scale=lora_scale,
progress=progress,
)
css = '''
#gen_btn{height: 100%}
#gen_column{align-self: stretch}
#title{text-align: center}
#title h1{font-size: 3em; display:inline-flex; align-items:center}
#title img{width: 100px; margin-right: 0.5em}
#gallery .grid-wrap{height: 10vh}
#lora_list{background: var(--block-background-fill);padding: 0 1em .3em; font-size: 90%}
.card_internal{display: flex;height: 100px;margin-top: .5em}
.card_internal img{margin-right: 1em}
.styler{--form-gap-width: 0px !important}
#progress{height:30px}
#progress .generating{display:none}
.progress-container {width: 100%;height: 30px;background-color: #f0f0f0;border-radius: 15px;overflow: hidden;margin-bottom: 20px}
.progress-bar {height: 100%;background-color: #4f46e5;width: calc(var(--current) / var(--total) * 100%);transition: width 0.5s ease-in-out}
'''
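# Build the Gradio UI: LoRA gallery, prompt controls, advanced settings, and outputs.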
with gr.Blocks(theme="bethecloud/storj_theme", css=css, delete_cache=(120, 120)) as app:
title = gr.HTML("""<h1>Qwen Image LoRA DLC ❤️‍🔥</h1>""", elem_id="title")
selected_index = gr.State(None)
with gr.Row():
with gr.Column(scale=3):
prompt = gr.Textbox(label="Prompt", lines=1, placeholder="✦︎ Choose the LoRA and type the prompt")
with gr.Column(scale=1, elem_id="gen_column"):
generate_button = gr.Button("Generate", variant="primary", elem_id="gen_btn")
with gr.Row():
with gr.Column():
selected_info = gr.Markdown("")
gallery = gr.Gallery(
[(item["image"], item["title"]) for item in loras],
label="Qwen LoRA DLC's",
allow_preview=False,
columns=3,
elem_id="gallery",
show_share_button=False
)
with gr.Group():
custom_lora = gr.Textbox(label="Enter Custom LoRA", placeholder="prithivMLmods/Qwen-Image-Sketch-Smudge")
gr.Markdown("[Check the list of Qwen LoRA's](https://huggingface.co/models?other=base_model:adapter:Qwen/Qwen-Image)", elem_id="lora_list")
custom_lora_info = gr.HTML(visible=False)
custom_lora_button = gr.Button("Remove custom LoRA", visible=False)
with gr.Column():
result = gr.Gallery(label="Generated Images", columns=1, show_label=False, preview=True)
with gr.Row():
aspect_ratio = gr.Dropdown(
label="Aspect Ratio",
choices=list(aspect_ratios.keys()),
value="1:1",
)
with gr.Row():
steps = gr.Slider(label="Steps", minimum=1, maximum=100, step=1, value=48)
with gr.Row():
with gr.Accordion("Advanced Settings", open=False):
with gr.Row():
use_negative_prompt = gr.Checkbox(
label="Use negative prompt",
value=True,
)
negative_prompt = gr.Text(
label="Negative prompt",
max_lines=1,
placeholder="Enter a negative prompt",
value="text, watermark, copyright, blurry, low resolution",
)
                    with gr.Row():
                        # Steps is controlled by the slider defined above, next to the aspect ratio selector
                        cfg_scale = gr.Slider(label="CFG Scale", minimum=1, maximum=20, step=0.5, value=4.0)
with gr.Row():
width = gr.Slider(label="Width", minimum=512, maximum=2048, step=64, value=1328)
height = gr.Slider(label="Height", minimum=512, maximum=2048, step=64, value=1328)
with gr.Row():
num_images = gr.Slider(label="Number of Images", minimum=1, maximum=5, step=1, value=1)
zip_images = gr.Checkbox(label="Zip generated images", value=False)
with gr.Row():
randomize_seed = gr.Checkbox(True, label="Randomize seed")
seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0, randomize=True)
lora_scale = gr.Slider(label="LoRA Scale", minimum=0, maximum=2, step=0.01, value=1.0)
# Output information
with gr.Row():
seed_display = gr.Textbox(label="Seed used", interactive=False)
generation_time = gr.Textbox(label="Generation time (seconds)", interactive=False)
zip_file = gr.File(label="Download ZIP")
# Update aspect ratio
def set_dimensions(ar):
w, h = aspect_ratios[ar]
return gr.update(value=w), gr.update(value=h)
aspect_ratio.change(
fn=set_dimensions,
inputs=aspect_ratio,
outputs=[width, height]
)
# Negative prompt visibility
use_negative_prompt.change(
fn=lambda x: gr.update(visible=x),
inputs=use_negative_prompt,
outputs=negative_prompt
)
gallery.select(
update_selection,
inputs=[width, height],
outputs=[prompt, selected_info, selected_index, width, height]
)
custom_lora.input(
add_custom_lora,
inputs=[custom_lora],
outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
)
custom_lora_button.click(
remove_custom_lora,
outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
)
gr.on(
triggers=[generate_button.click, prompt.submit],
fn=run_lora,
inputs=[
prompt,
negative_prompt,
use_negative_prompt,
seed,
width,
height,
            cfg_scale,  # maps to run_lora's guidance_scale parameter
randomize_seed,
steps,
num_images,
zip_images,
selected_index,
lora_scale,
],
outputs=[result, seed_display, generation_time, zip_file]
)
app.queue()
app.launch(share=False, ssr_mode=False, show_error=True)