lucakempkes committed
Commit 0c8edfe · Parent(s): 19fb882

support text to image

Files changed (1):
1. app.py (+80 -56)
app.py CHANGED
@@ -5,7 +5,6 @@ import gradio as gr
 import tempfile
 import spaces
 import numpy as np
-from PIL import Image
 import random
 
 MODEL_ID = "FastVideo/FastWan2.2-TI2V-5B-FullAttn-Diffusers"
@@ -63,20 +62,23 @@ def handle_image_upload_for_dims_wan(uploaded_pil_image, current_h_val, current_
     except Exception as e:
         gr.Warning("Error attempting to calculate new dimensions")
         return gr.update(value=DEFAULT_H_SLIDER_VALUE), gr.update(value=DEFAULT_W_SLIDER_VALUE)
-
-def get_duration(input_image, prompt, height, width,
-                 negative_prompt, duration_seconds,
-                 guidance_scale, steps,
-                 seed, randomize_seed,
-                 progress):
+
+def get_duration_video(input_image, prompt, height, width,
+                       negative_prompt, duration_seconds,
+                       guidance_scale, steps,
+                       seed, randomize_seed,
+                       progress):
     if steps > 4 and duration_seconds > 4:
         return 90
     elif steps > 4 or duration_seconds > 4:
         return 75
     else:
         return 60
+
+def get_duration_image(prompt, height, width, negative_prompt, guidance_scale, steps, seed, randomize_seed, progress):
+    return 30 if steps > 4 else 20
 
-@spaces.GPU(duration=get_duration)
+@spaces.GPU(duration=get_duration_video)
 def generate_video(input_image, prompt, height, width, negative_prompt=default_negative_prompt, duration_seconds=2, guidance_scale=0, steps=4, seed=44, randomize_seed=False, progress=gr.Progress(track_tqdm=True)):
     target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
     target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
@@ -108,58 +110,80 @@ def generate_video(input_image, prompt, height, width, negative_prompt=default_n
     export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
     return video_path, current_seed
 
+@spaces.GPU(duration=get_duration_image)
+def generate_image(prompt, height, width, negative_prompt=default_negative_prompt, guidance_scale=0, steps=4, seed=44, randomize_seed=False, progress=gr.Progress(track_tqdm=True)):
+    """Generates a single image using the text-to-video pipeline by requesting only one frame."""
+    target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
+    target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
+
+    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
+
+    with torch.inference_mode():
+        output_frame = text_to_video_pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            height=target_h,
+            width=target_w,
+            num_frames=1,
+            guidance_scale=float(guidance_scale),
+            num_inference_steps=int(steps),
+            generator=torch.Generator(device="cuda").manual_seed(current_seed)
+        ).frames[0][0]
+
+    return output_frame, current_seed
+
 with gr.Blocks() as demo:
     gr.Markdown("# Fast Wan 2.2 TI2V 5B Demo")
     gr.Markdown("""This Demo is using [FastWan2.2-TI2V-5B](https://huggingface.co/FastVideo/FastWan2.2-TI2V-5B-FullAttn-Diffusers) which is fine-tuned with Sparse-distill method which allows wan to generate high quality videos in 3-5 steps.""")
 
-    with gr.Row():
-        with gr.Column():
-            input_image_component = gr.Image(type="pil", label="Input Image (optional, auto-resized to target H/W)")
-            prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
-            duration_seconds_input = gr.Slider(minimum=round(MIN_FRAMES_MODEL/FIXED_FPS,1), maximum=round(MAX_FRAMES_MODEL/FIXED_FPS,1), step=0.1, value=2, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
-
-            with gr.Accordion("Advanced Settings", open=False):
-                negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
-                seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
-                randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
-                with gr.Row():
-                    height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label=f"Output Height (multiple of {MOD_VALUE})")
-                    width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label=f"Output Width (multiple of {MOD_VALUE})")
-                steps_slider = gr.Slider(minimum=1, maximum=8, step=1, value=4, label="Inference Steps")
-                guidance_scale_input = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=0.0, label="Guidance Scale")
-            generate_button = gr.Button("Generate Video", variant="primary")
-        with gr.Column():
-            video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
-
-    input_image_component.upload(
-        fn=handle_image_upload_for_dims_wan,
-        inputs=[input_image_component, height_input, width_input],
-        outputs=[height_input, width_input]
-    )
-
-    input_image_component.clear(
-        fn=handle_image_upload_for_dims_wan,
-        inputs=[input_image_component, height_input, width_input],
-        outputs=[height_input, width_input]
-    )
-
-    ui_inputs = [
-        input_image_component, prompt_input, height_input, width_input,
-        negative_prompt_input, duration_seconds_input,
-        guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox
-    ]
-    generate_button.click(fn=generate_video, inputs=ui_inputs, outputs=[video_output, seed_input])
-
-    gr.Examples(
-        examples=[
-            [None, "A person eating spaghetti", 1024, 720],
-            ["cat.png", "The cat removes the glasses from its eyes.", 1088, 800],
-            [None, "a penguin playfully dancing in the snow, Antarctica", 1024, 720],
-            ["peng.png", "a penguin running towards camera joyfully, Antarctica", 896, 512],
-        ],
-
-        inputs=[input_image_component, prompt_input, height_input, width_input], outputs=[video_output, seed_input], fn=generate_video, cache_examples="lazy"
-    )
+    with gr.Tabs():
+        with gr.TabItem("Text/Image-to-Video"):
+            with gr.Row():
+                with gr.Column():
+                    input_image_component = gr.Image(type="pil", label="Input Image (optional, auto-resized to target H/W)")
+                    prompt_input = gr.Textbox(label="Prompt", value=default_prompt_i2v)
+                    duration_seconds_input = gr.Slider(minimum=round(MIN_FRAMES_MODEL/FIXED_FPS,1), maximum=round(MAX_FRAMES_MODEL/FIXED_FPS,1), step=0.1, value=2, label="Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
+
+                    with gr.Accordion("Advanced Settings", open=False):
+                        negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
+                        seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
+                        randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
+                        with gr.Row():
+                            height_input = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label=f"Output Height (multiple of {MOD_VALUE})")
+                            width_input = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label=f"Output Width (multiple of {MOD_VALUE})")
+                        steps_slider = gr.Slider(minimum=1, maximum=8, step=1, value=4, label="Inference Steps")
+                        guidance_scale_input = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=0.0, label="Guidance Scale")
+
+                    generate_button = gr.Button("Generate Video", variant="primary")
+
+                with gr.Column():
+                    video_output = gr.Video(label="Generated Video", autoplay=True, interactive=False)
+
+            input_image_component.upload(fn=handle_image_upload_for_dims_wan, inputs=[input_image_component, height_input, width_input], outputs=[height_input, width_input])
+            input_image_component.clear(fn=handle_image_upload_for_dims_wan, inputs=[input_image_component, height_input, width_input], outputs=[height_input, width_input])
+
+            ui_inputs_video = [input_image_component, prompt_input, height_input, width_input, negative_prompt_input, duration_seconds_input, guidance_scale_input, steps_slider, seed_input, randomize_seed_checkbox]
+            generate_button.click(fn=generate_video, inputs=ui_inputs_video, outputs=[video_output, seed_input])
+
+        with gr.TabItem("Text-to-Image"):
+            with gr.Row():
+                with gr.Column():
+                    prompt_input_img = gr.Textbox(label="Prompt", value="A majestic lion in the savanna, cinematic lighting, 4k, high detail")
+                    with gr.Accordion("Advanced Settings", open=False):
+                        negative_prompt_input_img = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
+                        seed_input_img = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42, interactive=True)
+                        randomize_seed_checkbox_img = gr.Checkbox(label="Randomize seed", value=True, interactive=True)
+                        with gr.Row():
+                            height_input_img = gr.Slider(minimum=SLIDER_MIN_H, maximum=SLIDER_MAX_H, step=MOD_VALUE, value=DEFAULT_H_SLIDER_VALUE, label=f"Output Height (multiple of {MOD_VALUE})")
+                            width_input_img = gr.Slider(minimum=SLIDER_MIN_W, maximum=SLIDER_MAX_W, step=MOD_VALUE, value=DEFAULT_W_SLIDER_VALUE, label=f"Output Width (multiple of {MOD_VALUE})")
+                        steps_slider_img = gr.Slider(minimum=1, maximum=8, step=1, value=4, label="Inference Steps")
+                        guidance_scale_input_img = gr.Slider(minimum=0.0, maximum=5.0, step=0.01, value=0.0, label="Guidance Scale")
+                    generate_button_img = gr.Button("Generate Image", variant="primary")
+                with gr.Column():
+                    image_output = gr.Image(label="Generated Image", interactive=False)
+
+            ui_inputs_img = [prompt_input_img, height_input_img, width_input_img, negative_prompt_input_img, guidance_scale_input_img, steps_slider_img, seed_input_img, randomize_seed_checkbox_img]
+            generate_button_img.click(fn=generate_image, inputs=ui_inputs_img, outputs=[image_output, seed_input_img])
 
 if __name__ == "__main__":
     demo.queue().launch()
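
Note: both hunks call a module-level text_to_video_pipe that is defined outside the changed line ranges, so it does not appear in this diff. As context only, a minimal sketch of how such a pipeline is typically constructed with Diffusers; the WanPipeline class and bfloat16 dtype are assumptions here, since the actual loading code is not part of this commit:

# Context-only sketch; not part of this commit. Assumes the Diffusers
# WanPipeline class and bfloat16 weights; the real setup code for
# text_to_video_pipe lives outside the changed hunks.
import torch
from diffusers import WanPipeline

MODEL_ID = "FastVideo/FastWan2.2-TI2V-5B-FullAttn-Diffusers"

# A single pipeline can serve both tabs: generate_video requests many
# frames, while generate_image passes num_frames=1 and returns the
# lone frame as a still image.
text_to_video_pipe = WanPipeline.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16
).to("cuda")

As the hunks above show, spaces.GPU also accepts a callable for duration, which is how get_duration_video and get_duration_image size the ZeroGPU allocation from the request parameters before each call.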