# app.py
#
# Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
#
# Version: 2.3.0
#
# Contact:
# Carlos Rodrigues dos Santos
# carlex22@gmail.com
#
# Related Repositories and Projects:
# GitHub: https://github.com/carlex22/Aduc-sdr
# YouTube (Results): https://m.youtube.com/channel/UC3EgoJi_Fv7yuDpvfYNtoIQ
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
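#
# The UI built below walks through the ADUC-SDR pipeline in stages:
#   Step 1 - generate a storyboard and keyframes from a prompt and/or reference photos.
#   Step 3 - produce the original "master" video from the keyframes, with the LTX
#            causality and sampling controls exposed as sliders.
#   Step 4 - optional post-production: latent upscaling, HD mastering (3B/7B models),
#            and audio generation.
# A log accordion exposes the run log via get_log_content.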
    subtitle_md = gr.Markdown(f"{default_lang.get('app_subtitle')}")

    with gr.Row():
        lang_selector = gr.Radio(["🇧🇷", "🇺🇸", "🇨🇳"], value="🇧🇷", label=default_lang.get('lang_selector_label'))
        resolution_selector = gr.Radio(["480x480", "720x720", "960x960"], value="480x480", label="Base Resolution")

    # Step 1: storyboard and keyframe generation.
    with gr.Accordion(default_lang.get('step1_accordion'), open=True) as step1_accordion:
        prompt_input = gr.Textbox(label=default_lang.get('prompt_label'), value="A majestic lion walks across the savanna, sits down, and then roars at the setting sun.")
        ref_image_input = gr.File(label=default_lang.get('ref_images_label'), file_count="multiple", file_types=["image"])
        with gr.Row():
            num_keyframes_slider = gr.Slider(minimum=3, maximum=42, value=5, step=1, label=default_lang.get('keyframes_label'))
            duration_per_fragment_slider = gr.Slider(label=default_lang.get('duration_label'), info=default_lang.get('duration_info'), minimum=2.0, maximum=10.0, value=4.0, step=0.1)
        with gr.Row():
            storyboard_and_keyframes_button = gr.Button(default_lang.get('storyboard_and_keyframes_button'), variant="primary")
            storyboard_from_photos_button = gr.Button(default_lang.get('storyboard_from_photos_button'), variant="secondary")
        step1_mode_b_info_md = gr.Markdown(f"*{default_lang.get('step1_mode_b_info')}*")
        storyboard_output = gr.JSON(label=default_lang.get('storyboard_output_label'))
        keyframe_gallery = gr.Gallery(label=default_lang.get('keyframes_gallery_label'), visible=True, object_fit="contain", height="auto", type="filepath")

    # Step 3: original (master) video production with the LTX pipeline controls.
    with gr.Accordion(default_lang.get('step3_accordion'), open=False, visible=False) as step3_accordion:
        step3_description_md = gr.Markdown(default_lang.get('step3_description'))
        with gr.Accordion(default_lang.get('ltx_advanced_options'), open=False) as ltx_advanced_options_accordion:
            with gr.Accordion(default_lang.get('causality_controls_title'), open=True) as causality_accordion:
                trim_percent_slider = gr.Slider(minimum=10, maximum=90, value=50, step=5, label=default_lang.get('trim_percent_label'), info=default_lang.get('trim_percent_info'))
                with gr.Row():
                    forca_guia_slider = gr.Slider(label=default_lang.get('forca_guia_label'), minimum=0.0, maximum=1.0, value=0.5, step=0.05, info=default_lang.get('forca_guia_info'))
                    convergencia_destino_slider = gr.Slider(label=default_lang.get('convergencia_final_label'), minimum=0.0, maximum=1.0, value=0.75, step=0.05, info=default_lang.get('convergencia_final_info'))
            with gr.Accordion(default_lang.get('ltx_pipeline_options'), open=True) as ltx_pipeline_accordion:
                with gr.Row():
                    guidance_scale_slider = gr.Slider(minimum=1.0, maximum=10.0, value=2.0, step=0.1, label=default_lang.get('guidance_scale_label'), info=default_lang.get('guidance_scale_info'))
                    stg_scale_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.025, step=0.005, label=default_lang.get('stg_scale_label'), info=default_lang.get('stg_scale_info'))
                inference_steps_slider = gr.Slider(minimum=10, maximum=50, value=20, step=1, label=default_lang.get('steps_label'), info=default_lang.get('steps_info'))
        produce_original_button = gr.Button(default_lang.get('produce_original_button'), variant="primary")
        original_video_output = gr.Video(label="Original Master Video", visible=False, interactive=False)

    # Step 4: post-production (upscaler, HD mastering, audio).
    with gr.Accordion(default_lang.get('step4_accordion'), open=False, visible=False) as step4_accordion:
        step4_description_md = gr.Markdown(default_lang.get('step4_description'))
        with gr.Accordion(default_lang.get('sub_step_a_upscaler'), open=True) as sub_step_a_accordion:
            upscaler_description_md = gr.Markdown(default_lang.get('upscaler_description'))
            with gr.Accordion(default_lang.get('upscaler_options'), open=False) as upscaler_options_accordion:
                upscaler_chunk_size_slider = gr.Slider(minimum=1, maximum=10, value=2, step=1, label=default_lang.get('upscaler_chunk_size_label'), info=default_lang.get('upscaler_chunk_size_info'))
            run_upscaler_button = gr.Button(default_lang.get('run_upscaler_button'), variant="secondary")
            upscaler_video_output = gr.Video(label="Upscaled Video", visible=False, interactive=False)
        with gr.Accordion(default_lang.get('sub_step_b_hd'), open=True) as sub_step_b_accordion:
            hd_description_md = gr.Markdown(default_lang.get('hd_description'))
            with gr.Accordion(default_lang.get('hd_options'), open=False) as hd_options_accordion:
                hd_model_radio = gr.Radio(["3B", "7B"], value="7B", label=default_lang.get('hd_model_label'))
                hd_steps_slider = gr.Slider(minimum=20, maximum=150, value=100, step=5, label=default_lang.get('hd_steps_label'), info=default_lang.get('hd_steps_info'))
            run_hd_button = gr.Button(default_lang.get('run_hd_button'), variant="secondary")
            hd_video_output = gr.Video(label="HD Mastered Video", visible=False, interactive=False)
        with gr.Accordion(default_lang.get('sub_step_c_audio'), open=True) as sub_step_c_accordion:
            audio_description_md = gr.Markdown(default_lang.get('audio_description'))
            with gr.Accordion(default_lang.get('audio_options'), open=False) as audio_options_accordion:
                audio_prompt_input = gr.Textbox(label=default_lang.get('audio_prompt_label'), info=default_lang.get('audio_prompt_info'), lines=3)
            run_audio_button = gr.Button(default_lang.get('run_audio_button'), variant="secondary")
            audio_video_output = gr.Video(label="Video with Audio", visible=False, interactive=False)

    final_video_output = gr.Video(label=default_lang.get('final_video_label'), visible=False, interactive=False)

    with gr.Accordion(default_lang.get('log_accordion_label'), open=False) as log_accordion:
        log_display = gr.Textbox(label=default_lang.get('log_display_label'), lines=20, interactive=False, autoscroll=True)
        update_log_button = gr.Button(default_lang.get('update_log_button'))

    # --- 4. UI EVENT CONNECTIONS ---
    # Components whose labels/values are retranslated when the language selector changes.
    all_ui_components = [
        title_md, subtitle_md, lang_selector, step1_accordion, prompt_input, ref_image_input,
        num_keyframes_slider, duration_per_fragment_slider, storyboard_and_keyframes_button,
        storyboard_from_photos_button, step1_mode_b_info_md, storyboard_output, keyframe_gallery,
        step3_accordion, step3_description_md, produce_original_button, ltx_advanced_options_accordion,
        causality_accordion, trim_percent_slider, forca_guia_slider, convergencia_destino_slider,
        ltx_pipeline_accordion, guidance_scale_slider, stg_scale_slider, inference_steps_slider,
        step4_accordion, step4_description_md, sub_step_a_accordion, upscaler_description_md,
        upscaler_options_accordion, upscaler_chunk_size_slider, run_upscaler_button,
        sub_step_b_accordion, hd_description_md, hd_options_accordion, hd_model_radio,
        hd_steps_slider, run_hd_button, sub_step_c_accordion, audio_description_md,
        audio_options_accordion, audio_prompt_input, run_audio_button, final_video_output,
        log_accordion, log_display, update_log_button,
    ]

    def create_lang_update_fn():
        def update_lang(lang_emoji):
            # Map the flag emoji shown in the selector to an i18n language code.
            lang_code_map = {"🇧🇷": "pt", "🇺🇸": "en", "🇨🇳": "zh"}
            lang_code = lang_code_map.get(lang_emoji, "en")
            lang_map = i18n.get(lang_code, i18n.get('en', {}))
            # One gr.update per component in all_ui_components, in the same order.
            return [
                gr.update(value=f"# {lang_map.get('app_title')}"),  # title_md (the 'app_title' key is an assumption)
                gr.update(value=f"{lang_map.get('app_subtitle')}"),
                gr.update(label=lang_map.get('lang_selector_label')),
                gr.update(label=lang_map.get('step1_accordion')),
                gr.update(label=lang_map.get('prompt_label')),
                gr.update(label=lang_map.get('ref_images_label')),
                gr.update(label=lang_map.get('keyframes_label')),
                gr.update(label=lang_map.get('duration_label'), info=lang_map.get('duration_info')),
                gr.update(value=lang_map.get('storyboard_and_keyframes_button')),
                gr.update(value=lang_map.get('storyboard_from_photos_button')),
                gr.update(value=f"*{lang_map.get('step1_mode_b_info')}*"),
                gr.update(label=lang_map.get('storyboard_output_label')),
                gr.update(label=lang_map.get('keyframes_gallery_label')),
                gr.update(label=lang_map.get('step3_accordion')),
                gr.update(value=lang_map.get('step3_description')),
                gr.update(value=lang_map.get('produce_original_button')),
                gr.update(label=lang_map.get('ltx_advanced_options')),
                gr.update(label=lang_map.get('causality_controls_title')),
                gr.update(label=lang_map.get('trim_percent_label'), info=lang_map.get('trim_percent_info')),
                gr.update(label=lang_map.get('forca_guia_label'), info=lang_map.get('forca_guia_info')),
                gr.update(label=lang_map.get('convergencia_final_label'), info=lang_map.get('convergencia_final_info')),
                gr.update(label=lang_map.get('ltx_pipeline_options')),
                gr.update(label=lang_map.get('guidance_scale_label'), info=lang_map.get('guidance_scale_info')),
                gr.update(label=lang_map.get('stg_scale_label'), info=lang_map.get('stg_scale_info')),
                gr.update(label=lang_map.get('steps_label'), info=lang_map.get('steps_info')),
                gr.update(label=lang_map.get('step4_accordion')),
                gr.update(value=lang_map.get('step4_description')),
                gr.update(label=lang_map.get('sub_step_a_upscaler')),
                gr.update(value=lang_map.get('upscaler_description')),
                gr.update(label=lang_map.get('upscaler_options')),
                gr.update(label=lang_map.get('upscaler_chunk_size_label'), info=lang_map.get('upscaler_chunk_size_info')),
                gr.update(value=lang_map.get('run_upscaler_button')),
                gr.update(label=lang_map.get('sub_step_b_hd')),
                gr.update(value=lang_map.get('hd_description')),
                gr.update(label=lang_map.get('hd_options')),
                gr.update(label=lang_map.get('hd_model_label')),
                gr.update(label=lang_map.get('hd_steps_label'), info=lang_map.get('hd_steps_info')),
                gr.update(value=lang_map.get('run_hd_button')),
                gr.update(label=lang_map.get('sub_step_c_audio')),
                gr.update(value=lang_map.get('audio_description')),
                gr.update(label=lang_map.get('audio_options')),
                gr.update(label=lang_map.get('audio_prompt_label'), info=lang_map.get('audio_prompt_info')),
                gr.update(value=lang_map.get('run_audio_button')),
                gr.update(label=lang_map.get('final_video_label')),
                gr.update(label=lang_map.get('log_accordion_label')),
                gr.update(label=lang_map.get('log_display_label')),
                gr.update(value=lang_map.get('update_log_button')),
            ]
        return update_lang

    lang_selector.change(fn=create_lang_update_fn(), inputs=lang_selector, outputs=all_ui_components)
    storyboard_and_keyframes_button.click(fn=run_pre_production_wrapper, inputs=[prompt_input, num_keyframes_slider, ref_image_input, resolution_selector, duration_per_fragment_slider], outputs=[storyboard_output, keyframe_gallery, step3_accordion])
    storyboard_from_photos_button.click(fn=run_pre_production_photo_wrapper, inputs=[prompt_input, num_keyframes_slider, ref_image_input], outputs=[storyboard_output, keyframe_gallery, step3_accordion])
    # Post-production chain: the original production writes its latent and video paths
    # into the *_state holders; the upscaler consumes original_latents_paths_state,
    # while the HD and audio stages read current_source_video_state, which each
    # completed stage updates in turn.
    produce_original_button.click(fn=run_original_production_wrapper, inputs=[keyframe_gallery, prompt_input, duration_per_fragment_slider, trim_percent_slider, forca_guia_slider, convergencia_destino_slider, guidance_scale_slider, stg_scale_slider, inference_steps_slider, resolution_selector], outputs=[original_video_output, final_video_output, step4_accordion, original_latents_paths_state, original_video_path_state, current_source_video_state])
    run_upscaler_button.click(fn=run_upscaler_wrapper, inputs=[original_latents_paths_state, upscaler_chunk_size_slider], outputs=[upscaler_video_output, final_video_output, upscaled_video_path_state, current_source_video_state])
    run_hd_button.click(fn=run_hd_wrapper, inputs=[current_source_video_state, hd_model_radio, hd_steps_slider, prompt_input], outputs=[hd_video_output, final_video_output, hd_video_path_state, current_source_video_state])
    run_audio_button.click(fn=run_audio_wrapper, inputs=[current_source_video_state, audio_prompt_input, prompt_input], outputs=[audio_video_output, final_video_output])
    update_log_button.click(fn=get_log_content, inputs=[], outputs=[log_display])

# --- 5. APPLICATION LAUNCH ---
if __name__ == "__main__":
    # Start from a clean workspace on every launch.
    if os.path.exists(WORKSPACE_DIR):
        logger.info(f"Clearing previous workspace at: {WORKSPACE_DIR}")
        shutil.rmtree(WORKSPACE_DIR)
    os.makedirs(WORKSPACE_DIR)
    logger.info("Application started. Launching Gradio interface...")
    demo.queue().launch()