Create app.py
app.py CHANGED
@@ -1,4 +1,3 @@
-import gradio as gr
 import os
 from huggingface_hub import InferenceClient
 import tempfile
@@ -6,9 +5,6 @@ import shutil
 from pathlib import Path
 
 # Initialize the client
-client = InferenceClient(
-    provider="fal-ai",
-    api_key=os.environ.get("HF_TOKEN"),
     bill_to="huggingface",
 )
 
@@ -25,6 +21,7 @@ def text_to_video(prompt, duration=5, aspect_ratio="16:9", resolution="720p", pr
         video = client.text_to_video(
             prompt,
             model="akhaliq/veo3.1-fast",
+
         )
 
         # Save the video to a temporary file
@@ -38,6 +35,7 @@ def text_to_video(prompt, duration=5, aspect_ratio="16:9", resolution="720p", pr
         return None, f"❌ Error generating video: {str(e)}"
 
 def image_to_video(image, prompt, duration=5, aspect_ratio="16:9", resolution="720p", profile: gr.OAuthProfile | None = None):
+
     """Generate video from image and prompt"""
     try:
         if profile is None:
@@ -54,6 +52,8 @@ def image_to_video(image, prompt, duration=5, aspect_ratio="16:9", resolution="7
             # If image is a file path
             with open(image, "rb") as image_file:
                 input_image = image_file.read()
+
+
         else:
             # If image is already bytes or similar
             import io
@@ -76,6 +76,7 @@ def image_to_video(image, prompt, duration=5, aspect_ratio="16:9", resolution="7
             input_image,
             prompt=prompt,
             model="akhaliq/veo3.1-fast-image-to-video",
+
         )
 
         # Save the video to a temporary file
@@ -152,6 +153,15 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="AI Video Generator
     with gr.Tab("📝 Text to Video", id=0):
         gr.Markdown("### Transform your text descriptions into dynamic videos")
 
+
+
+
+
+
+
+
+
+
     with gr.Row():
         with gr.Column(scale=1):
             text_prompt = gr.Textbox(
@@ -197,6 +207,12 @@ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(), title="AI Video Generator
     with gr.Tab("🖼️ Image to Video", id=1):
         gr.Markdown("### Bring your static images to life with motion")
 
+
+
+
+
+
+
     with gr.Row():
         with gr.Column(scale=1):
             image_input = gr.Image(
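
For readers following the diff: the app calls InferenceClient.text_to_video and then writes the result to a temporary file (the "# Save the video to a temporary file" step). Below is a minimal sketch of that flow, assuming text_to_video returns the clip as raw bytes (as the temp-file handling here suggests) and that the client keeps the provider/api_key configuration shown in the removed lines above; generate_and_save is a hypothetical helper name, not part of the app.

import os
import tempfile
from huggingface_hub import InferenceClient

# Sketch only: mirrors the client setup removed in this diff (provider/api_key);
# the bill_to argument from the app is omitted here for brevity.
client = InferenceClient(
    provider="fal-ai",
    api_key=os.environ.get("HF_TOKEN"),
)

def generate_and_save(prompt: str) -> str:
    # text_to_video returns the generated clip as raw video bytes.
    video_bytes = client.text_to_video(
        prompt,
        model="akhaliq/veo3.1-fast",
    )
    # Persist the bytes so Gradio can serve the file from disk.
    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmp:
        tmp.write(video_bytes)
        return tmp.name

The image_to_video path in the diff appears to follow the same pattern, except the input image is read into bytes first and passed as the first argument to the client call.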