|
|
|
|
|
|
|
|
|
|
|
import gradio as gr |
|
|
import os |
|
|
import sys |
|
|
import traceback |
|
|
from pathlib import Path |
|
|
import torch |
|
|
import numpy as np |
|
|
from PIL import Image |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from api.ltx_video_complete import VideoService |
|
|
|
|
|
# Single module-level service instance shared by all UI callbacks below.
video_service = VideoService()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def create_initial_state():
    """Return a fresh pipeline state with every artifact slot set to None.

    The state dict tracks the file paths produced by each stage
    (base video/latents, refined video/latents) plus the seed used.
    """
    slots = (
        "low_res_video",
        "low_res_latents",
        "refined_video_ltx",
        "refined_latents_ltx",
        "used_seed",
    )
    return dict.fromkeys(slots)
|
|
|
|
|
|
|
|
|
|
|
def run_generate_low(prompt, neg_prompt, start_img, height, width, duration, cfg, seed, randomize_seed, progress=gr.Progress(track_tqdm=True)):
    """Stage 1: generate a low-resolution base video.

    Args:
        prompt: Text prompt describing the video.
        neg_prompt: Negative prompt.
        start_img: Optional filepath of a conditioning start image.
        height: Output height in pixels.
        width: Output width in pixels.
        duration: Clip length in seconds.
        cfg: Classifier-free guidance scale.
        seed: Seed used when ``randomize_seed`` is False.
        randomize_seed: When True, pass ``None`` so the service draws a seed.
        progress: Gradio progress tracker (mirrors tqdm from the service).

    Returns:
        Tuple of (video_path, new_state, gr.update(visible=True)) — the base
        video path, a fresh pipeline state dict, and an update that reveals
        the post-production group.

    Raises:
        gr.Error: If the underlying generation call fails.
    """
    print("UI: Chamando generate_low")
    try:
        # Pass a conditioning image only when the user supplied one.
        image_filepaths = [start_img] if start_img else []

        # None tells the service to pick a random seed.
        used_seed = None if randomize_seed else seed

        video_path, tensor_path, final_seed = video_service.generate_low_resolution(
            prompt=prompt, negative_prompt=neg_prompt,
            height=height, width=width, duration_secs=duration,
            guidance_scale=cfg, seed=used_seed,
            image_filepaths=image_filepaths
        )

        # Fresh state: downstream refinement slots are reset because any
        # previous refinement belonged to the previous base video.
        new_state = {
            "low_res_video": video_path,
            "low_res_latents": tensor_path,
            "refined_video_ltx": None,
            "refined_latents_ltx": None,
            "used_seed": final_seed
        }

        return video_path, new_state, gr.update(visible=True)

    except Exception as e:
        # Same error-handling pattern as run_ltx_refinement: log the full
        # traceback server-side and surface a readable error in the UI.
        print(f"[ERRO na UI] Falha durante a geração base: {e}")
        traceback.print_exc()
        raise gr.Error(f"Falha na Geração: {e}")
|
|
|
|
|
def run_ltx_refinement(state, prompt, neg_prompt, cfg, progress=gr.Progress(track_tqdm=True)):
    """Stage 2: apply the secondary LTX texture-refinement pass.

    Reuses the Stage-1 latents and seed stored in ``state`` together with the
    current prompt/negative-prompt/CFG values, stores the refined artifacts
    back into ``state``, and returns the refined video path plus the state.

    Raises:
        gr.Error: If the refinement call fails (traceback is logged first).
    """
    print("UI: Chamando a função ponte 'apply_secondary_refinement'")

    try:
        refined_video, refined_latents = video_service.apply_secondary_refinement(
            latents_path=state["low_res_latents"],
            prompt=prompt,
            negative_prompt=neg_prompt,
            guidance_scale=cfg,
            seed=state["used_seed"],
        )
    except Exception as err:
        # Log the full traceback server-side, then surface a UI-friendly error.
        print(f"[ERRO na UI] Falha durante o refinamento secundário: {err}")
        traceback.print_exc()
        raise gr.Error(f"Falha no Refinamento: {err}")

    # Record the refined artifacts so later stages can pick them up.
    state["refined_video_ltx"] = refined_video
    state["refined_latents_ltx"] = refined_latents
    return refined_video, state
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# UI layout: Stage 1 (base generation) on top, Stage 2 (post-production tabs)
# below. The Stage-2 group starts hidden and is revealed by run_generate_low.
# ---------------------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# LTX Video - Geração e Pós-Produção por Etapas")

    # Per-session pipeline state: artifact paths + seed (see create_initial_state).
    app_state = gr.State(value=create_initial_state())

    with gr.Row():
        # Left column: Stage-1 generation inputs.
        with gr.Column(scale=1):
            gr.Markdown("### Etapa 1: Configurações de Geração")
            prompt_input = gr.Textbox(label="Prompt", value="A majestic dragon flying over a medieval castle", lines=3)
            # Hidden from the user but still wired into both stages as input.
            neg_prompt_input = gr.Textbox(visible=False, label="Negative Prompt", value="worst quality, blurry, low quality, jittery", lines=2)
            start_image = gr.Image(label="Imagem de Início (Opcional)", type="filepath", sources=["upload", "clipboard"])

            with gr.Accordion("Parâmetros Avançados", open=False):
                # step=32: resolutions are kept multiples of 32 — presumably a
                # model constraint; confirm against VideoService.
                height_input = gr.Slider(label="Height", value=512, step=32, minimum=256, maximum=1024)
                width_input = gr.Slider(label="Width", value=704, step=32, minimum=256, maximum=1024)
                duration_input = gr.Slider(label="Duração (s)", value=4, step=1, minimum=1, maximum=10)
                cfg_input = gr.Slider(label="Guidance Scale (CFG)", value=3.0, step=0.1, minimum=1.0, maximum=10.0)
                seed_input = gr.Number(label="Seed", value=42, precision=0)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)

            generate_low_btn = gr.Button("1. Gerar Vídeo Base (Low-Res)", variant="primary")

        # Right column: Stage-1 output preview.
        with gr.Column(scale=1):
            gr.Markdown("### Vídeo Base Gerado")
            low_res_video_output = gr.Video(label="O resultado da Etapa 1 aparecerá aqui", interactive=False)

    # Stage-2 group — hidden until Stage 1 succeeds (run_generate_low returns
    # gr.update(visible=True) targeting this group).
    with gr.Group(visible=False) as post_prod_group:
        gr.Markdown("<hr style='margin-top: 20px; margin-bottom: 20px;'>")
        gr.Markdown("## Etapa 2: Pós-Produção")
        gr.Markdown("Use o vídeo gerado acima como entrada para as ferramentas abaixo. **O prompt e a CFG da Etapa 1 serão reutilizados.**")

        with gr.Tabs():
            # Tab 1: LTX texture refinement (implemented).
            with gr.TabItem("🚀 Upscaler Textura (LTX)"):
                with gr.Row():
                    with gr.Column(scale=1):
                        gr.Markdown("### Parâmetros de Refinamento")
                        gr.Markdown("Esta etapa reutiliza o prompt, o prompt negativo e a CFG da Etapa 1 para manter a consistência.")
                        ltx_refine_btn = gr.Button("Aplicar Refinamento de Textura LTX", variant="primary")
                    with gr.Column(scale=1):
                        gr.Markdown("### Resultado do Refinamento")
                        ltx_refined_video_output = gr.Video(label="Vídeo com Textura Refinada (LTX)", interactive=False)

            # Tab 2: SeedVR upscaler — placeholder, no controls wired yet.
            with gr.TabItem("✨ Upscaler SeedVR"):
                gr.Markdown("### Resultado do Upscaling")

            # Tab 3: audio — placeholder for a future feature.
            with gr.TabItem("🔊 Áudio (MM-Audio)"):
                gr.Markdown("*(Funcionalidade futura para adicionar som aos vídeos)*")

    # Stage-1 wiring: outputs also replace app_state and reveal post_prod_group.
    generate_low_btn.click(
        fn=run_generate_low,
        inputs=[prompt_input, neg_prompt_input, start_image, height_input, width_input, duration_input, cfg_input, seed_input, randomize_seed],
        outputs=[low_res_video_output, app_state, post_prod_group]
    )

    # Stage-2 wiring: reuses Stage-1 prompt/negative-prompt/CFG components.
    ltx_refine_btn.click(
        fn=run_ltx_refinement,
        inputs=[app_state, prompt_input, neg_prompt_input, cfg_input],
        outputs=[ltx_refined_video_output, app_state]
    )
|
|
|
|
|
|
|
|
|
|
|
if __name__ == "__main__":
    # queue() is required so gr.Progress/long-running jobs work; 0.0.0.0
    # exposes the server on all interfaces (container-friendly).
    demo.queue().launch(server_name="0.0.0.0", server_port=7860, debug=True, show_error=True)
|
|
|