Spaces:
Running on Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -45,7 +45,8 @@ MAX_NUM_FRAMES = 257
|
|
| 45 |
FPS = 30.0
|
| 46 |
MIN_DIM_SLIDER = 256
|
| 47 |
TARGET_FIXED_SIDE = 768
|
| 48 |
-
|
|
|
|
| 49 |
|
| 50 |
class calculateDuration:
|
| 51 |
def __init__(self, activity_name=""):
|
|
@@ -110,9 +111,6 @@ def get_huggingface_safetensors_for_ltx(link): # Renamed for clarity
|
|
| 110 |
try:
|
| 111 |
list_of_files = fs.ls(link, detail=False)
|
| 112 |
safetensors_name = None
|
| 113 |
-
# Simplified logic: find first .safetensors, or prioritize specific names if needed
|
| 114 |
-
# For LoRAs, usually there's one main .safetensors file.
|
| 115 |
-
# The complex step-based selection from app(2) might be overkill unless LTX LoRAs follow that pattern.
|
| 116 |
|
| 117 |
# Prioritize files common for LoRAs
|
| 118 |
common_lora_filenames = ["lora.safetensors", "pytorch_lora_weights.safetensors"]
|
|
@@ -311,13 +309,13 @@ def get_duration(prompt, negative_prompt, image, video, height, width, mode, ste
|
|
| 311 |
frames_to_use, seed, randomize_seed, guidance_scale, duration_input, improve_texture,
|
| 312 |
# New LoRA params
|
| 313 |
selected_lora_index, lora_scale_value,
|
| 314 |
-
progress):
|
| 315 |
if duration_input > 7:
|
| 316 |
-
return
|
| 317 |
else:
|
| 318 |
-
return
|
| 319 |
|
| 320 |
-
@spaces.GPU(duration=get_duration)
|
| 321 |
def generate(prompt,
|
| 322 |
negative_prompt,
|
| 323 |
image,
|
|
@@ -339,16 +337,13 @@ def generate(prompt,
|
|
| 339 |
progress=gr.Progress(track_tqdm=True)):
|
| 340 |
|
| 341 |
effective_prompt = prompt
|
|
|
|
|
|
|
| 342 |
|
| 343 |
# --- LoRA Handling ---
|
| 344 |
# Unload any existing LoRAs from main pipes first to prevent conflicts
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
try:
|
| 348 |
-
pipe.unload_lora_weights()
|
| 349 |
-
print("Previous LoRAs unloaded if any.")
|
| 350 |
-
except Exception as e:
|
| 351 |
-
print(f"Note: Could not unload LoRAs (maybe none were loaded): {e}")
|
| 352 |
|
| 353 |
if selected_lora_index is not None and 0 <= selected_lora_index < len(loras):
|
| 354 |
selected_lora_data = loras[selected_lora_index]
|
|
@@ -356,7 +351,30 @@ def generate(prompt,
|
|
| 356 |
lora_weights_name = selected_lora_data.get("weights", None)
|
| 357 |
lora_trigger = selected_lora_data.get("trigger_word", "")
|
| 358 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 359 |
print(f"Selected LoRA: {selected_lora_data['title']} from {lora_repo_id}")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 360 |
if lora_trigger:
|
| 361 |
print(f"Applying trigger word: {lora_trigger}")
|
| 362 |
|
|
@@ -364,25 +382,7 @@ def generate(prompt,
|
|
| 364 |
effective_prompt = f"{lora_trigger} {prompt}"
|
| 365 |
else: # Default to append or if not specified
|
| 366 |
effective_prompt = f"{prompt} {lora_trigger}"
|
| 367 |
-
|
| 368 |
-
with calculateDuration(f"Loading LoRA weights for {selected_lora_data['title']}"):
|
| 369 |
-
try:
|
| 370 |
-
# Load into main generation pipe
|
| 371 |
-
pipe.load_lora_weights(
|
| 372 |
-
lora_repo_id,
|
| 373 |
-
weight_name=lora_weights_name,
|
| 374 |
-
adapter_name="active_lora" # Use a consistent adapter name
|
| 375 |
-
)
|
| 376 |
-
pipe.set_adapters(["active_lora"], adapter_weights=[lora_scale_value])
|
| 377 |
-
print(f"LoRA loaded into main pipe with scale {lora_scale_value}")
|
| 378 |
|
| 379 |
-
except Exception as e:
|
| 380 |
-
gr.Warning(f"Failed to load LoRA '{selected_lora_data['title']}': {e}. Proceeding without LoRA.")
|
| 381 |
-
print(f"Error loading LoRA: {e}")
|
| 382 |
-
# Ensure pipes are clean if loading failed mid-way (though unload_lora_weights should handle this)
|
| 383 |
-
try:
|
| 384 |
-
pipe.unload_lora_weights()
|
| 385 |
-
except: pass # Ignore errors here
|
| 386 |
else:
|
| 387 |
print("No LoRA selected or invalid index.")
|
| 388 |
# --- End LoRA Handling ---
|
|
@@ -431,7 +431,7 @@ def generate(prompt,
|
|
| 431 |
timesteps_first_pass = None
|
| 432 |
timesteps_second_pass = None
|
| 433 |
|
| 434 |
-
with calculateDuration("
|
| 435 |
latents = pipe(
|
| 436 |
conditions=condition1,
|
| 437 |
prompt=effective_prompt, # Use prompt with trigger word
|
|
|
|
| 45 |
FPS = 30.0
|
| 46 |
MIN_DIM_SLIDER = 256
|
| 47 |
TARGET_FIXED_SIDE = 768
|
| 48 |
+
last_lora = ""
|
| 49 |
+
last_fused=False
|
| 50 |
|
| 51 |
class calculateDuration:
|
| 52 |
def __init__(self, activity_name=""):
|
|
|
|
| 111 |
try:
|
| 112 |
list_of_files = fs.ls(link, detail=False)
|
| 113 |
safetensors_name = None
|
|
|
|
|
|
|
|
|
|
| 114 |
|
| 115 |
# Prioritize files common for LoRAs
|
| 116 |
common_lora_filenames = ["lora.safetensors", "pytorch_lora_weights.safetensors"]
|
|
|
|
| 309 |
frames_to_use, seed, randomize_seed, guidance_scale, duration_input, improve_texture,
|
| 310 |
# New LoRA params
|
| 311 |
selected_lora_index, lora_scale_value,
|
| 312 |
+
progress):
|
| 313 |
if duration_input > 7:
|
| 314 |
+
return 95
|
| 315 |
else:
|
| 316 |
+
return 85
|
| 317 |
|
| 318 |
+
@spaces.GPU(duration=get_duration)
|
| 319 |
def generate(prompt,
|
| 320 |
negative_prompt,
|
| 321 |
image,
|
|
|
|
| 337 |
progress=gr.Progress(track_tqdm=True)):
|
| 338 |
|
| 339 |
effective_prompt = prompt
|
| 340 |
+
global last_fused, last_lora
|
| 341 |
+
|
| 342 |
|
| 343 |
# --- LoRA Handling ---
|
| 344 |
# Unload any existing LoRAs from main pipes first to prevent conflicts
|
| 345 |
+
|
| 346 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 347 |
|
| 348 |
if selected_lora_index is not None and 0 <= selected_lora_index < len(loras):
|
| 349 |
selected_lora_data = loras[selected_lora_index]
|
|
|
|
| 351 |
lora_weights_name = selected_lora_data.get("weights", None)
|
| 352 |
lora_trigger = selected_lora_data.get("trigger_word", "")
|
| 353 |
|
| 354 |
+
print("Last LoRA: ", last_lora)
|
| 355 |
+
print("Current LoRA: ", lora_repo_id)
|
| 356 |
+
print("Last fused: ", last_fused)
|
| 357 |
+
|
| 358 |
print(f"Selected LoRA: {selected_lora_data['title']} from {lora_repo_id}")
|
| 359 |
+
|
| 360 |
+
if last_lora != lora_repo_id:
|
| 361 |
+
if(last_fused):
|
| 362 |
+
with calculateDuration("Unloading previous LoRAs"):
|
| 363 |
+
pipe.unfuse_lora()
|
| 364 |
+
pipe.unload_lora_weights()
|
| 365 |
+
print("Previous LoRAs unloaded if any.")
|
| 366 |
+
|
| 367 |
+
with calculateDuration(f"Loading LoRA weights for {selected_lora_data['title']}"):
|
| 368 |
+
pipe.load_lora_weights(
|
| 369 |
+
lora_repo_id,
|
| 370 |
+
weight_name=lora_weights_name,
|
| 371 |
+
)
|
| 372 |
+
#pipe.set_adapters(["active_lora"], adapter_weights=[lora_scale_value])
|
| 373 |
+
pipe.fuse_lora(lora_scale=lora_scale_value)
|
| 374 |
+
print(f"LoRA loaded into main pipe with scale {lora_scale_value}")
|
| 375 |
+
last_fused = True
|
| 376 |
+
last_lora = lora_repo_id
|
| 377 |
+
|
| 378 |
if lora_trigger:
|
| 379 |
print(f"Applying trigger word: {lora_trigger}")
|
| 380 |
|
|
|
|
| 382 |
effective_prompt = f"{lora_trigger} {prompt}"
|
| 383 |
else: # Default to append or if not specified
|
| 384 |
effective_prompt = f"{prompt} {lora_trigger}"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 385 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 386 |
else:
|
| 387 |
print("No LoRA selected or invalid index.")
|
| 388 |
# --- End LoRA Handling ---
|
|
|
|
| 431 |
timesteps_first_pass = None
|
| 432 |
timesteps_second_pass = None
|
| 433 |
|
| 434 |
+
with calculateDuration("video generation"):
|
| 435 |
latents = pipe(
|
| 436 |
conditions=condition1,
|
| 437 |
prompt=effective_prompt, # Use prompt with trigger word
|