euiia committed
Commit f5c99ab · verified · 1 Parent(s): eda2fac

Update hd_specialist.py

Files changed (1)
  1. hd_specialist.py +114 -43
hd_specialist.py CHANGED
@@ -1,5 +1,13 @@
- # hd_specialist.py (Final Version - Corrected File Structure)
- #https://huggingface.co/spaces/ByteDance-Seed/SeedVR2-3B
  import torch
  import imageio
  import os
@@ -12,10 +20,12 @@ import shlex
  import subprocess
  from pathlib import Path
  from urllib.parse import urlparse
- from torch.hub import download_url_to_file, get_dir
  from omegaconf import OmegaConf

- # --- Direct imports, assuming the folders are at the project root ---
  from projects.video_diffusion_sr.infer import VideoDiffusionInfer
  from common.config import load_config
  from common.seed import set_seed
@@ -25,103 +35,164 @@ from data.video.transforms.rearrange import Rearrange
  from projects.video_diffusion_sr.color_fix import wavelet_reconstruction
  from torchvision.transforms import Compose, Lambda, Normalize
  from torchvision.io.video import read_video
- from einops import rearrange

  logger = logging.getLogger(__name__)

- # Helper function for downloads
  def _load_file_from_url(url, model_dir='./', file_name=None):
      os.makedirs(model_dir, exist_ok=True)
      filename = file_name or os.path.basename(urlparse(url).path)
      cached_file = os.path.abspath(os.path.join(model_dir, filename))
      if not os.path.exists(cached_file):
-         logger.info(f'Downloading: "{url}" to {cached_file}')
          download_url_to_file(url, cached_file, hash_prefix=None, progress=True)
      return cached_file

  class HDSpecialist:
      """
-     Implements the HD Specialist (Δ+) using the official SeedVR infrastructure.
      """
      def __init__(self, workspace_dir="deformes_workspace"):
          self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
          self.runner = None
          self.workspace_dir = workspace_dir
          self.is_initialized = False
-         logger.info("HD Specialist (SeedVR) initialized. Model will be loaded on demand.")
-
-     def _setup_dependencies(self):
-         """Installs complex dependencies such as Apex."""
-         logger.info("Setting up SeedVR dependencies (Apex)...")
-         apex_url = 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/apex-0.1-cp310-cp310-linux_x86_64.whl'
-         apex_wheel_path = _load_file_from_url(url=apex_url)
-
-         # Install the downloaded Apex wheel
-         subprocess.run(shlex.split(f"pip install {apex_wheel_path}"), check=True)
-         logger.info("✅ Apex dependency installed successfully.")

      def _download_models(self):
-         """Downloads the checkpoints required for SeedVR2."""
-         logger.info("Verifying and downloading SeedVR2 models...")
          ckpt_dir = Path('./ckpts')
          ckpt_dir.mkdir(exist_ok=True)

-         pretrain_model_url = {
              'vae': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/ema_vae.pth',
-             'dit': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/seedvr2_ema_3b.pth',
              'pos_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/pos_emb.pt',
              'neg_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/neg_emb.pt'
          }

-         _load_file_from_url(url=pretrain_model_url['dit'], model_dir='./ckpts/')
-         _load_file_from_url(url=pretrain_model_url['vae'], model_dir='./ckpts/')
-         _load_file_from_url(url=pretrain_model_url['pos_emb'])
-         _load_file_from_url(url=pretrain_model_url['neg_emb'])
-         logger.info("SeedVR2 models downloaded successfully.")
-
-     def _initialize_runner(self):
-         """Loads and configures the SeedVR model on demand."""
          if self.runner is not None:
              return

-         self._setup_dependencies()
          self._download_models()

-         logger.info("Initializing the SeedVR2 runner...")
-         config_path = os.path.join('./configs_3b', 'main.yaml')
          config = load_config(config_path)

          self.runner = VideoDiffusionInfer(config)
          OmegaConf.set_readonly(self.runner.config, False)

-         self.runner.configure_dit_model(device=self.device, checkpoint='./ckpts/seedvr2_ema_3b.pth')
          self.runner.configure_vae_model()

          if hasattr(self.runner.vae, "set_memory_limit"):
              self.runner.vae.set_memory_limit(**self.runner.config.vae.memory_limit)

          self.is_initialized = True
-         logger.info("SeedVR2 runner initialized and ready.")

      def _unload_runner(self):
-         """Removes the runner from VRAM to free resources."""
          if self.runner is not None:
              del self.runner
              self.runner = None
              gc.collect()
              torch.cuda.empty_cache()
              self.is_initialized = False
-         logger.info("SeedVR2 runner unloaded from VRAM.")

-     def process_video(self, input_video_path: str, output_video_path: str, prompt: str, seed: int = 666, fps_out: int = 24) -> str:
-         """Applies HD enhancement to a video using the official SeedVR logic."""
          try:
-             self._initialize_runner()
              set_seed(seed, same_across_ranks=True)
-             # ... (the rest of process_video remains exactly the same as in the previous reply) ...

          finally:
              self._unload_runner()

- # Singleton instance
  hd_specialist_singleton = HDSpecialist()
 
+ # hd_specialist.py
+ #
+ # Copyright (C) 2025 Carlos Rodrigues dos Santos
+ #
+ # This file implements the HD Specialist (Δ+), which uses the SeedVR model
+ # for video super-resolution. It's designed to be called by the ADUC orchestrator
+ # to perform the final HD mastering pass on a generated video. It manages the
+ # loading/unloading of the heavy SeedVR models to conserve VRAM and can switch
+ # between different model sizes (e.g., 3B and 7B).
+
  import torch
  import imageio
  import os

  import subprocess
  from pathlib import Path
  from urllib.parse import urlparse
+ from torch.hub import download_url_to_file
  from omegaconf import OmegaConf
+ import mediapy
+ from einops import rearrange

+ # Assuming these files are in the project structure
  from projects.video_diffusion_sr.infer import VideoDiffusionInfer
  from common.config import load_config
  from common.seed import set_seed

  from projects.video_diffusion_sr.color_fix import wavelet_reconstruction
  from torchvision.transforms import Compose, Lambda, Normalize
  from torchvision.io.video import read_video

  logger = logging.getLogger(__name__)

  def _load_file_from_url(url, model_dir='./', file_name=None):
+     """Helper function to download files from a URL to a local directory."""
      os.makedirs(model_dir, exist_ok=True)
      filename = file_name or os.path.basename(urlparse(url).path)
      cached_file = os.path.abspath(os.path.join(model_dir, filename))
      if not os.path.exists(cached_file):
+         logger.info(f'Downloading: "{url}" to {cached_file}')
          download_url_to_file(url, cached_file, hash_prefix=None, progress=True)
      return cached_file

  class HDSpecialist:
      """
+     Implements the HD Specialist (Δ+) using the SeedVR infrastructure.
+     Manages model loading, inference, and memory on demand.
      """
      def __init__(self, workspace_dir="deformes_workspace"):
          self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
          self.runner = None
          self.workspace_dir = workspace_dir
          self.is_initialized = False
+         logger.info("HD Specialist (SeedVR) initialized. Model will be loaded on demand.")

      def _download_models(self):
+         """Downloads the necessary checkpoints for SeedVR2."""
+         logger.info("Verifying and downloading SeedVR2 models...")
          ckpt_dir = Path('./ckpts')
          ckpt_dir.mkdir(exist_ok=True)

+         pretrain_model_urls = {
              'vae': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/ema_vae.pth',
+             'dit_3b': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/seedvr2_ema_3b.pth',
+             'dit_7b': 'https://huggingface.co/ByteDance-Seed/SeedVR2-7B/resolve/main/seedvr2_ema_7b.pth',
              'pos_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/pos_emb.pt',
              'neg_emb': 'https://huggingface.co/ByteDance-Seed/SeedVR2-3B/resolve/main/neg_emb.pt'
          }

+         for key, url in pretrain_model_urls.items():
+             _load_file_from_url(url=url, model_dir='./ckpts/')
+
+         logger.info("SeedVR2 models downloaded successfully.")
+
+     def _initialize_runner(self, model_version: str):
+         """Loads and configures the SeedVR model on demand based on the selected version."""
          if self.runner is not None:
              return

          self._download_models()

+         logger.info(f"Initializing SeedVR2 {model_version} runner...")
+         if model_version == '3B':
+             config_path = os.path.join('./configs_3b', 'main.yaml')
+             checkpoint_path = './ckpts/seedvr2_ema_3b.pth'
+         elif model_version == '7B':
+             config_path = os.path.join('./configs_7b', 'main.yaml')
+             checkpoint_path = './ckpts/seedvr2_ema_7b.pth'
+         else:
+             raise ValueError(f"Unsupported SeedVR model version: {model_version}")
+
          config = load_config(config_path)

          self.runner = VideoDiffusionInfer(config)
          OmegaConf.set_readonly(self.runner.config, False)

+         self.runner.configure_dit_model(device=self.device, checkpoint=checkpoint_path)
          self.runner.configure_vae_model()

          if hasattr(self.runner.vae, "set_memory_limit"):
              self.runner.vae.set_memory_limit(**self.runner.config.vae.memory_limit)

          self.is_initialized = True
+         logger.info(f"Runner for SeedVR2 {model_version} initialized and ready.")

      def _unload_runner(self):
+         """Removes the runner from VRAM to free resources."""
          if self.runner is not None:
              del self.runner
              self.runner = None
              gc.collect()
              torch.cuda.empty_cache()
              self.is_initialized = False
+         logger.info("SeedVR2 runner unloaded from VRAM.")

+     def process_video(self, input_video_path: str, output_video_path: str, prompt: str,
+                       model_version: str = '3B', steps: int = 50, seed: int = 666,
+                       progress: gr.Progress = None) -> str:
+         """Applies HD enhancement to a video using the SeedVR logic."""
          try:
+             self._initialize_runner(model_version)
              set_seed(seed, same_across_ranks=True)
+
+             # --- Adapted inference logic from SeedVR scripts ---
+             self.runner.config.diffusion.timesteps.sampling.steps = steps
+             self.runner.configure_diffusion()
+
+             video_tensor = read_video(input_video_path, output_format="TCHW")[0] / 255.0
+             res_h, res_w = video_tensor.shape[-2:]
+
+             video_transform = Compose([
+                 NaResize(resolution=(res_h * res_w) ** 0.5, mode="area", downsample_only=False),
+                 Lambda(lambda x: torch.clamp(x, 0.0, 1.0)),
+                 DivisibleCrop((16, 16)),
+                 Normalize(0.5, 0.5),
+                 Rearrange("t c h w -> c t h w"),
+             ])
+
+             cond_latents = [video_transform(video_tensor.to(self.device))]
+             input_videos = cond_latents
+
+             self.runner.dit.to("cpu")
+             self.runner.vae.to(self.device)
+             cond_latents = self.runner.vae_encode(cond_latents)
+             self.runner.vae.to("cpu"); gc.collect(); torch.cuda.empty_cache()
+             self.runner.dit.to(self.device)
+
+             text_pos_embeds = torch.load('./ckpts/pos_emb.pt').to(self.device)
+             text_neg_embeds = torch.load('./ckpts/neg_emb.pt').to(self.device)
+             text_embeds_dict = {"texts_pos": [text_pos_embeds], "texts_neg": [text_neg_embeds]}
+
+             noises = [torch.randn_like(latent) for latent in cond_latents]
+             conditions = [self.runner.get_condition(noise, latent_blur=latent, task="sr") for noise, latent in zip(noises, cond_latents)]
+
+             with torch.no_grad(), torch.autocast("cuda", torch.bfloat16, enabled=True):
+                 video_tensors = self.runner.inference(
+                     noises=noises,
+                     conditions=conditions,
+                     dit_offload=True,
+                     **text_embeds_dict,
+                 )
+
+             self.runner.dit.to("cpu"); gc.collect(); torch.cuda.empty_cache()
+
+             self.runner.vae.to(self.device)
+             samples = self.runner.vae_decode(video_tensors)
+
+             final_sample = samples[0]
+             input_video_sample = input_videos[0]
+
+             if final_sample.shape[1] < input_video_sample.shape[1]:  # if fewer frames were generated
+                 input_video_sample = input_video_sample[:, :final_sample.shape[1]]
+
+             final_sample = wavelet_reconstruction(
+                 rearrange(final_sample, "c t h w -> t c h w"),
+                 rearrange(input_video_sample, "c t h w -> t c h w")
+             )
+
+             final_sample = rearrange(final_sample, "t c h w -> t h w c")
+             final_sample = final_sample.clip(-1, 1).mul_(0.5).add_(0.5).mul_(255).round()
+             final_sample_np = final_sample.to(torch.uint8).cpu().numpy()
+
+             mediapy.write_video(output_video_path, final_sample_np, fps=24)
+             logger.info(f"HD Mastered video saved to: {output_video_path}")
+             return output_video_path

          finally:
              self._unload_runner()

+ # Singleton instance
  hd_specialist_singleton = HDSpecialist()
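
For context, a minimal caller-side sketch of the interface introduced by this commit (not part of the diff): only the process_video signature and the singleton come from the file above; the file paths, prompt text, and parameter values below are placeholder assumptions. The call loads the requested SeedVR2 variant on demand, runs the super-resolution pass, and unloads the model in the finally block.

# Hypothetical usage sketch; paths, prompt, and values are illustrative only.
from hd_specialist import hd_specialist_singleton

enhanced_path = hd_specialist_singleton.process_video(
    input_video_path="deformes_workspace/draft_clip.mp4",      # assumed input location
    output_video_path="deformes_workspace/draft_clip_hd.mp4",  # assumed output location
    prompt="cinematic, high detail",  # accepted but not referenced in the diffed body
    model_version="3B",               # or "7B" for the larger checkpoint
    steps=50,
    seed=666,
)
print(f"HD master written to: {enhanced_path}")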