Vladyslav Humennyy committed
Commit: e892bca
Parent(s): 0154070
Fix commits
app.py CHANGED

@@ -14,7 +14,11 @@ import torch
 from transformers import AutoModelForCausalLM, AutoProcessor, AutoTokenizer, TextIteratorStreamer
 from analytics import AnalyticsLogger
 from kernels import get_kernel
-from typing import Any
+from typing import Any, Optional, Dict
+
+from PIL import Image
+import base64
+import io
 
 #vllm_flash_attn3 = get_kernel("kernels-community/vllm-flash-attn3")
 
@@ -214,11 +218,6 @@ def bot(
     # Use processor if images are present
     if processor is not None and has_images:
         try:
-            # Processor expects messages with PIL images
-            from PIL import Image
-            import base64
-            import io
-
             processor_history = []
             for msg in history:
                 role = msg.get("role", "user")
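For context, a minimal sketch of how the now module-level PIL/base64/io imports are typically used when building processor_history from chat messages: base64-encoded image payloads in each message are decoded into PIL images before being handed to the processor. The message shape (a "content" list with "image_url" entries) and the helper names decode_base64_image and build_processor_history are assumptions for illustration only; they are not taken from this commit.

# Hypothetical sketch (not from this commit): decode base64 image payloads
# in chat messages into PIL images, which the processor expects.
import base64
import io
from typing import Any, Dict, List

from PIL import Image


def decode_base64_image(data_url: str) -> Image.Image:
    # Accept either a bare base64 string or a "data:image/...;base64,..." URL.
    if "," in data_url:
        data_url = data_url.split(",", 1)[1]
    return Image.open(io.BytesIO(base64.b64decode(data_url))).convert("RGB")


def build_processor_history(history: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    # Processor expects messages with PIL images rather than base64 strings.
    processor_history = []
    for msg in history:
        role = msg.get("role", "user")
        content = msg.get("content", "")
        if isinstance(content, list):
            parts = []
            for part in content:
                if part.get("type") == "image_url":
                    image = decode_base64_image(part["image_url"]["url"])
                    parts.append({"type": "image", "image": image})
                else:
                    parts.append(part)
            processor_history.append({"role": role, "content": parts})
        else:
            processor_history.append({"role": role, "content": content})
    return processor_history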