# app.py
#
# Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
#
# Version 6.0.0 (Clean, Interactive & Consolidated UI) - Final

import gradio as gr
import yaml
import logging
import os
import sys
import shutil
import time
import json

# --- 1. FRAMEWORK IMPORT AND CONFIGURATION ---
import aduc_framework
from aduc_framework.types import PreProductionParams, ProductionParams

# Cinematic theme configuration
cinematic_theme = gr.themes.Base(
    primary_hue=gr.themes.colors.indigo,
    secondary_hue=gr.themes.colors.purple,
    neutral_hue=gr.themes.colors.slate,
    font=(gr.themes.GoogleFont("Inter"), "ui-sans-serif", "system-ui", "sans-serif"),
).set(
    body_background_fill="#111827",
    body_text_color="#E5E7EB",
    button_primary_background_fill="linear-gradient(90deg, #4F46E5, #8B5CF6)",
    button_primary_text_color="#FFFFFF",
    button_secondary_background_fill="#374151",
    button_secondary_border_color="#4B5563",
    button_secondary_text_color="#E5E7EB",
    block_background_fill="#1F2937",
    block_border_width="1px",
    block_border_color="#374151",
    block_label_background_fill="#374151",
    block_label_text_color="#E5E7EB",
    block_title_text_color="#FFFFFF",
    input_background_fill="#374151",
    input_border_color="#4B5563",
    input_placeholder_color="#9CA3AF",
)

# Logging configuration
LOG_FILE_PATH = "aduc_log.txt"
if os.path.exists(LOG_FILE_PATH):
    os.remove(LOG_FILE_PATH)

log_format = '%(asctime)s - %(levelname)s - [%(name)s:%(funcName)s] - %(message)s'
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
root_logger.handlers.clear()

stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(logging.Formatter(log_format))
root_logger.addHandler(stream_handler)

file_handler = logging.FileHandler(LOG_FILE_PATH, mode='w', encoding='utf-8')
file_handler.setFormatter(logging.Formatter(log_format))
root_logger.addHandler(file_handler)

logger = logging.getLogger(__name__)

# Aduc Framework initialization
try:
    with open("config.yaml", 'r') as f:
        config = yaml.safe_load(f)
    WORKSPACE_DIR = config['application']['workspace_dir']
    aduc = aduc_framework.create_aduc_instance(workspace_dir=WORKSPACE_DIR)
    logger.info("Interface Gradio inicializada e conectada ao Aduc Framework.")
except Exception as e:
    logger.critical(f"ERRO CRÍTICO durante a inicialização: {e}", exc_info=True)
    with gr.Blocks() as demo:
        gr.Markdown("# ERRO CRÍTICO NA INICIALIZAÇÃO")
        gr.Markdown("Não foi possível iniciar o Aduc Framework. Verifique os logs para mais detalhes.")
        gr.Textbox(value=str(e), label="Detalhes do Erro", lines=10)
    demo.launch()
    exit()
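# NOTE: only `application.workspace_dir` is read from config.yaml above. A minimal
# config file (assumed shape; adjust names and paths to your own setup) would be:
#
#   application:
#     workspace_dir: "workspace"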
# --- 2. WRAPPER FUNCTIONS (UI <-> FRAMEWORK) ---

def run_pre_production_wrapper(prompt, num_keyframes, ref_files, resolution_str, duration_per_fragment, progress=gr.Progress()):
    if not ref_files:
        raise gr.Error("Por favor, forneça pelo menos uma imagem de referência.")
    target_resolution = int(resolution_str.split('x')[0])
    # gr.File(type="filepath") may hand back plain path strings or tempfile-like
    # objects depending on the Gradio version; accept both.
    ref_paths = [
        aduc.process_image_for_story(getattr(f, "name", f), target_resolution, f"ref_processed_{i}.png")
        for i, f in enumerate(ref_files)
    ]
    params = PreProductionParams(
        prompt=prompt,
        num_keyframes=int(num_keyframes),
        ref_paths=ref_paths,
        resolution=target_resolution,
        duration_per_fragment=duration_per_fragment,
    )
    final_result = {}
    for update in aduc.task_pre_production(params, progress):
        yield {
            generation_state_holder: update.get("updated_state", gr.skip()),
            storyboard_output: update.get("storyboard", gr.skip()),
            keyframe_gallery: gr.update(value=update.get("final_keyframes", [])),
        }
        final_result = update
    yield {
        generation_state_holder: final_result.get("updated_state"),
        step3_accordion: gr.update(visible=True, open=True),
    }


def run_original_production_wrapper(current_state_dict, trim_percent, handler_strength, dest_strength, guidance_scale, stg_scale, steps, progress=gr.Progress()):
    yield {final_video_output: gr.update(value=None, visible=True, label="🎬 Produzindo seu filme...")}
    production_params = ProductionParams(
        trim_percent=int(trim_percent),
        handler_strength=handler_strength,
        destination_convergence_strength=dest_strength,
        guidance_scale=guidance_scale,
        stg_scale=stg_scale,
        inference_steps=int(steps),
    )
    final_video_path, latent_paths, updated_state = aduc.task_produce_original_movie(
        params=production_params, progress_callback=progress
    )
    yield {
        final_video_output: gr.update(value=final_video_path, label="✅ Filme Original Master"),
        step4_accordion: gr.update(visible=True, open=True),
        original_latents_paths_state: latent_paths,
        current_source_video_state: final_video_path,
        generation_state_holder: updated_state.model_dump(),
    }


def run_upscaler_wrapper(source_video, latent_paths, chunk_size, progress=gr.Progress()):
    if not source_video or not latent_paths:
        raise gr.Error("Fonte de vídeo ou latentes originais não encontrados para o Upscaler.")
    yield {final_video_output: gr.update(label="Pós-Produção: Upscaler Latente...")}
    final_path = source_video
    for update in aduc.task_run_latent_upscaler(latent_paths, int(chunk_size), progress):
        if "final_path" in update:
            final_path = update['final_path']
    yield {
        final_video_output: gr.update(value=final_path, label="✅ Upscale Latente Concluído"),
        current_source_video_state: final_path,
    }


def run_hd_wrapper(source_video, steps, global_prompt, progress=gr.Progress()):
    if not source_video:
        raise gr.Error("Fonte de vídeo não encontrada para a Masterização HD.")
    yield {final_video_output: gr.update(label="Pós-Produção: Masterização HD...")}
    final_path = source_video
    for update in aduc.task_run_hd_mastering(source_video, int(steps), global_prompt, progress):
        if "final_path" in update:
            final_path = update['final_path']
    yield {
        final_video_output: gr.update(value=final_path, label="✅ Masterização HD Concluída"),
        current_source_video_state: final_path,
    }
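# Audio generation note: when the dedicated audio prompt is left empty, the wrapper
# below falls back to the film's global prompt, so the soundtrack always has a description.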
def run_audio_wrapper(source_video, audio_prompt, global_prompt, progress=gr.Progress()):
    if not source_video:
        raise gr.Error("Fonte de vídeo não encontrada para a Geração de Áudio.")
    yield {final_video_output: gr.update(label="Pós-Produção: Geração de Áudio...")}
    final_audio_prompt = audio_prompt if audio_prompt and audio_prompt.strip() else global_prompt
    final_path = source_video
    for update in aduc.task_run_audio_generation(source_video, final_audio_prompt, progress):
        if "final_path" in update:
            final_path = update['final_path']
    yield {final_video_output: gr.update(value=final_path, label="✅ Filme Final com Áudio")}


def get_log_content():
    try:
        with open(LOG_FILE_PATH, "r", encoding="utf-8") as f:
            return f.read()
    except FileNotFoundError:
        return "Arquivo de log ainda não criado."


# --- 3. UI DEFINITION ---
with gr.Blocks(theme=cinematic_theme, css="style.css") as demo:
    generation_state_holder = gr.State(value={})
    original_latents_paths_state = gr.State(value=[])
    current_source_video_state = gr.State(value=None)
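    # The three State holders above carry the session through the pipeline:
    # `generation_state_holder` mirrors the framework's generation state (shown as
    # JSON below), `original_latents_paths_state` feeds the latent upscaler, and
    # `current_source_video_state` chains each post-production step to the output
    # of the previous one.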
    gr.Markdown("Crie um filme completo com vídeo e áudio, orquestrado por uma equipe de IAs especialistas.")
") with gr.Accordion("Etapa 1: Roteiro e Cenas-Chave (Pré-Produção)", open=True) as step1_accordion: prompt_input = gr.Textbox(label="Ideia Geral do Filme", value="Um leão majestoso caminha pela savana, senta-se e ruge para o sol poente.") with gr.Row(): lang_selector = gr.Radio(["🇧🇷", "🇺🇸", "🇨🇳"], value="🇧🇷", label="Idioma / Language") resolution_selector = gr.Radio(["512x512", "768x768", "1024x1024"], value="512x512", label="Resolução Base") ref_image_input = gr.File(label="Grupo de Imagens do Usuário", file_count="multiple", file_types=["image"], type="filepath") with gr.Row(): num_keyframes_slider = gr.Slider(minimum=2, maximum=42, value=4, step=2, label="Número de Cenas-Chave (Par)") duration_per_fragment_slider = gr.Slider(label="Duração de cada Clipe (s)", minimum=2.0, maximum=10.0, value=4.0, step=0.1) storyboard_and_keyframes_button = gr.Button("Gerar Roteiro e Keyframes", variant="primary") with gr.Accordion("Etapa 2: Produção do Vídeo Original", open=False, visible=False) as step3_accordion: trim_percent_slider = gr.Slider(minimum=10, maximum=90, value=50, step=5, label="Poda Causal (%)") handler_strength = gr.Slider(label="Força do Déjà-Vu", minimum=0.0, maximum=1.0, value=0.5, step=0.05) dest_strength = gr.Slider(label="Força da Âncora Final", minimum=0.0, maximum=1.0, value=0.75, step=0.05) guidance_scale_slider = gr.Slider(minimum=1.0, maximum=10.0, value=2.0, step=0.1, label="Escala de Orientação") stg_scale_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.025, step=0.005, label="Escala STG") inference_steps_slider = gr.Slider(minimum=10, maximum=50, value=20, step=1, label="Passos de Inferência") produce_original_button = gr.Button("🎬 Produzir Vídeo Original", variant="primary") with gr.Accordion("Etapa 3: Pós-Produção (Opcional)", open=False, visible=False) as step4_accordion: gr.Markdown("Aplique melhorias ao filme. Cada etapa usa o resultado da anterior como fonte.") with gr.Accordion("A. Upscaler Latente 2x", open=True): upscaler_chunk_size_slider = gr.Slider(minimum=1, maximum=10, value=2, step=1, label="Fragmentos por Lote") run_upscaler_button = gr.Button("Executar Upscaler Latente", variant="secondary") with gr.Accordion("B. Masterização HD (SeedVR)", open=True): hd_steps_slider = gr.Slider(minimum=20, maximum=150, value=100, step=5, label="Passos de Inferência HD") run_hd_button = gr.Button("Executar Masterização HD", variant="secondary") with gr.Accordion("C. Geração de Áudio", open=True): audio_prompt_input = gr.Textbox(label="Prompt de Áudio Detalhado (Opcional)", lines=2, placeholder="Descreva os sons, efeitos e música.") run_audio_button = gr.Button("Gerar Áudio", variant="secondary") final_video_output = gr.Video(label="Filme Final (Resultado da Última Etapa)", visible=False, interactive=False) with gr.Accordion("Grupo das Keyframes", open=False) as keyframes_accordion: keyframe_gallery = gr.Gallery(label="Keyframes Gerados", visible=True, object_fit="contain", height="auto", type="filepath") with gr.Accordion("🧬 DNA Digital da Geração (JSON)", open=False) as data_accordion: storyboard_output = gr.JSON(label="Roteiro Gerado (Storyboard)") generation_data_output = gr.JSON(label="Estado de Geração Completo") with gr.Accordion("📝 Log de Geração (Detalhado)", open=False) as log_accordion: log_display = gr.Textbox(label="Log da Sessão", lines=20, interactive=False, autoscroll=True) update_log_button = gr.Button("Atualizar Log") # --- 4. 
    # --- 4. EVENT CONNECTIONS ---
    storyboard_and_keyframes_button.click(
        fn=run_pre_production_wrapper,
        inputs=[prompt_input, num_keyframes_slider, ref_image_input, resolution_selector, duration_per_fragment_slider],
        outputs=[generation_state_holder, storyboard_output, keyframe_gallery, step3_accordion],
    )
    produce_original_button.click(
        fn=run_original_production_wrapper,
        inputs=[generation_state_holder, trim_percent_slider, handler_strength, dest_strength, guidance_scale_slider, stg_scale_slider, inference_steps_slider],
        outputs=[final_video_output, step4_accordion, original_latents_paths_state, current_source_video_state, generation_state_holder],
    )
    run_upscaler_button.click(
        fn=run_upscaler_wrapper,
        inputs=[current_source_video_state, original_latents_paths_state, upscaler_chunk_size_slider],
        outputs=[final_video_output, current_source_video_state],
    )
    run_hd_button.click(
        fn=run_hd_wrapper,
        inputs=[current_source_video_state, hd_steps_slider, prompt_input],
        outputs=[final_video_output, current_source_video_state],
    )
    run_audio_button.click(
        fn=run_audio_wrapper,
        inputs=[current_source_video_state, audio_prompt_input, prompt_input],
        outputs=[final_video_output],
    )
    generation_state_holder.change(fn=lambda state: state, inputs=generation_state_holder, outputs=generation_data_output)
    update_log_button.click(fn=get_log_content, inputs=[], outputs=[log_display])

# --- 5. APPLICATION STARTUP ---
if __name__ == "__main__":
    if os.path.exists(WORKSPACE_DIR):
        shutil.rmtree(WORKSPACE_DIR)
    os.makedirs(WORKSPACE_DIR)
    logger.info("Aplicação Gradio iniciada. Lançando interface...")
    demo.queue().launch()