Spaces:
Runtime error
Runtime error
Update ltx_video/pipelines/pipeline_ltx_video.py
Browse files
ltx_video/pipelines/pipeline_ltx_video.py
CHANGED
|
@@ -25,6 +25,9 @@ from transformers import (
|
|
| 25 |
AutoTokenizer,
|
| 26 |
)
|
| 27 |
|
|
|
|
|
|
|
|
|
|
| 28 |
from ltx_video.models.autoencoders.causal_video_autoencoder import (
|
| 29 |
CausalVideoAutoencoder,
|
| 30 |
)
|
|
@@ -50,6 +53,12 @@ from ltx_video.models.autoencoders.vae_encode import (
|
|
| 50 |
|
| 51 |
|
| 52 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
def log_memory_snapshot(event_name: str):
|
| 54 |
"""
|
| 55 |
Exibe um log do consumo de memória VRAM da GPU atual no exato momento da chamada.
|
|
@@ -62,8 +71,11 @@ def log_memory_snapshot(event_name: str):
|
|
| 62 |
reserved = torch.cuda.memory_reserved(device) / (1024 ** 2)
|
| 63 |
total = torch.cuda.get_device_properties(device).total_memory / (1024 ** 2)
|
| 64 |
|
| 65 |
-
#
|
| 66 |
-
|
|
|
|
|
|
|
|
|
|
| 67 |
f"馃 MEMORY LOG [{event_name.ljust(35)}]: "
|
| 68 |
f"Alocada: {allocated:.2f} MB | "
|
| 69 |
f"Reservada: {reserved:.2f} MB | "
|
|
@@ -71,7 +83,6 @@ def log_memory_snapshot(event_name: str):
|
|
| 71 |
f"({(reserved/total)*100:.1f}%)"
|
| 72 |
)
|
| 73 |
|
| 74 |
-
|
| 75 |
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 76 |
|
| 77 |
|
|
|
|
| 25 |
AutoTokenizer,
|
| 26 |
)
|
| 27 |
|
| 28 |
+
|
| 29 |
+
import logging
|
| 30 |
+
|
| 31 |
from ltx_video.models.autoencoders.causal_video_autoencoder import (
|
| 32 |
CausalVideoAutoencoder,
|
| 33 |
)
|
|
|
|
| 53 |
|
| 54 |
|
| 55 |
|
| 56 |
+
import logging # Garanta que a importação padrão esteja no topo do arquivo
|
| 57 |
+
import torch # Já deve estar lá
|
| 58 |
+
|
| 59 |
+
# A linha abaixo já existe no seu arquivo e está correta
|
| 60 |
+
logger = logging.get_logger(__name__)
|
| 61 |
+
|
| 62 |
def log_memory_snapshot(event_name: str):
|
| 63 |
"""
|
| 64 |
Exibe um log do consumo de memória VRAM da GPU atual no exato momento da chamada.
|
|
|
|
| 71 |
reserved = torch.cuda.memory_reserved(device) / (1024 ** 2)
|
| 72 |
total = torch.cuda.get_device_properties(device).total_memory / (1024 ** 2)
|
| 73 |
|
| 74 |
+
# =========================================================================
|
| 75 |
+
# ========================== ALTERAÇÃO PRINCIPAL ==========================
|
| 76 |
+
# Substitua 'print' por 'logger.info' ou 'logger.debug'
|
| 77 |
+
# =========================================================================
|
| 78 |
+
logger.info(
|
| 79 |
f"馃 MEMORY LOG [{event_name.ljust(35)}]: "
|
| 80 |
f"Alocada: {allocated:.2f} MB | "
|
| 81 |
f"Reservada: {reserved:.2f} MB | "
|
|
|
|
| 83 |
f"({(reserved/total)*100:.1f}%)"
|
| 84 |
)
|
| 85 |
|
|
|
|
| 86 |
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
|
| 87 |
|
| 88 |
|