prithivMLmods committed on
Commit
e8a4265
·
verified ·
1 Parent(s): c362b5b

update app

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -138,7 +138,7 @@ css = """
138
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
139
  print(f"Using Main Device: {device}")
140
 
141
- QWEN_VL_MODEL_ID = "Qwen/Qwen2.5-VL-3B-Instruct"
142
  print(f"Loading OCR Model: {QWEN_VL_MODEL_ID}...")
143
 
144
  qwen_processor = AutoProcessor.from_pretrained(QWEN_VL_MODEL_ID, trust_remote_code=True)
@@ -335,7 +335,7 @@ example_image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
335
 
336
  with gr.Blocks() as demo:
337
  gr.Markdown("# **Vision-to-VibeVoice-en**", elem_id="main-title")
338
- gr.Markdown("Perform vision-to-audio inference with [Qwen2.5VL](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct) + [VibeVoice-Realtime-0.5B](https://huggingface.co/microsoft/VibeVoice-Realtime-0.5B).")
339
  with gr.Row():
340
  with gr.Column(scale=1):
341
  gr.Markdown("### 1. Vision Input")
 
138
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
139
  print(f"Using Main Device: {device}")
140
 
141
+ QWEN_VL_MODEL_ID = "Qwen/Qwen2.5-VL-7B-Instruct"
142
  print(f"Loading OCR Model: {QWEN_VL_MODEL_ID}...")
143
 
144
  qwen_processor = AutoProcessor.from_pretrained(QWEN_VL_MODEL_ID, trust_remote_code=True)
 
335
 
336
  with gr.Blocks() as demo:
337
  gr.Markdown("# **Vision-to-VibeVoice-en**", elem_id="main-title")
338
+ gr.Markdown("Perform vision-to-audio inference with [Qwen2.5VL](https://huggingface.co/Qwen/Qwen2.5-VL-7B-Instruct) + [VibeVoice-Realtime-0.5B](https://huggingface.co/microsoft/VibeVoice-Realtime-0.5B).")
339
  with gr.Row():
340
  with gr.Column(scale=1):
341
  gr.Markdown("### 1. Vision Input")