# Fibo-local / app.py
import gradio as gr
import random
import os
import spaces
import torch
import json
import numpy as np
from diffusers import BriaFiboPipeline
from diffusers.modular_pipelines import ModularPipeline
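
# FIBO consumes a structured JSON prompt rather than raw free text: a VLM
# modular pipeline (briaai/FIBO-VLM-prompt-to-JSON) first converts the user's
# prompt to JSON, and BriaFiboPipeline then renders an image from that JSON.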
# resolutions=[
# "832 1248",
# "896 1152",
# "960 1088",
# "1024 1024",
# "1088 960",
# "1152 896",
# "1216 832",
# "1280 800",
# "1344 768",
# ]
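# (Each pair above appears to be "width height"; the sliders below span the
# same ranges.)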
MAX_SEED = np.iinfo(np.int32).max
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.set_grad_enabled(False)
# Stage 1: free-text prompt -> structured JSON prompt
vlm_pipe = ModularPipeline.from_pretrained(
    "briaai/FIBO-VLM-prompt-to-JSON", trust_remote_code=True
).to(device)

# Stage 2: JSON prompt -> image
pipe = BriaFiboPipeline.from_pretrained(
    "briaai/FIBO",
    trust_remote_code=True,
    torch_dtype=dtype,
).to(device)

def handle_json(text):
    # Validate that the textbox content parses as JSON; used by the
    # (currently disabled) live-preview handlers near the bottom of the file.
    try:
        json.loads(text)
        return text
    except json.JSONDecodeError:
        return "Error"
@spaces.GPU(duration=100)
def infer(
    prompt,
    negative_prompt="",
    seed=42,
    randomize_seed=False,
    width=1024,
    height=1024,
    guidance_scale=5.0,
    num_inference_steps=50,
):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    # Seed the sampler so a fixed seed gives reproducible images
    generator = torch.Generator(device=device).manual_seed(seed)
    with torch.inference_mode():
        # 1. Convert the free-text prompt into FIBO's structured JSON prompt
        output = vlm_pipe(prompt=prompt)
        json_prompt = output.values["json_prompt"]
        # 2. Render the image from the JSON prompt
        image = pipe(
            prompt=json_prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=num_inference_steps,
            width=width,
            height=height,
            guidance_scale=guidance_scale,
            generator=generator,
        ).images[0]
    return image, json_prompt
css = """
#col-container{
margin: 0 auto;
max-width: 768px;
}
"""

with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("## FIBO")
        with gr.Group():
            with gr.Column():
                with gr.Row():
                    prompt_in = gr.Textbox(label="Prompt")
                    prompt_in_json = gr.JSON(label="JSON prompt")
                submit_btn = gr.Button("Generate")
        result = gr.Image(label="Output")
        with gr.Accordion("Advanced Settings", open=False):
            with gr.Row():
                seed = gr.Slider(
                    label="Seed",
                    minimum=0,
                    maximum=MAX_SEED,
                    step=1,
                    value=0,
                )
                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=1.0,
                    maximum=10.0,
                    step=0.1,
                    value=5.0,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=60,
                    step=1,
                    value=50,
                )
                height = gr.Slider(
                    label="Height",
                    minimum=768,
                    maximum=1248,
                    step=32,
                    value=1024,
                )
                width = gr.Slider(
                    label="Width",
                    minimum=832,
                    maximum=1344,
                    step=64,
                    value=1024,
                )
            with gr.Row():
                negative_prompt = gr.Textbox(label="Negative prompt", value=json.dumps(''))
                negative_prompt_json = gr.JSON(label="JSON negative prompt", value=json.dumps(''))
    # Live JSON validation of the prompt boxes (disabled for now):
    # prompt_in.change(handle_json, inputs=prompt_in, outputs=prompt_in_json)
    # negative_prompt.change(handle_json, inputs=negative_prompt, outputs=negative_prompt_json)

    submit_btn.click(
        fn=infer,
        inputs=[
            prompt_in,
            negative_prompt,
            seed,
            randomize_seed,
            width,
            height,
            guidance_scale,
            num_inference_steps,
        ],
        outputs=[result, prompt_in_json],
    )

demo.queue().launch()
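
# Note: built for Hugging Face ZeroGPU Spaces. The @spaces.GPU decorator is a
# no-op outside Spaces, so the app should also run on a local CUDA machine.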