Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -9,7 +9,7 @@ import gradio as gr
 import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
 
-MODEL_ID = "le-llm/lapa-v0.1-
+MODEL_ID = "le-llm/lapa-v0.1-reasoning-only"
 
 
 
@@ -49,7 +49,7 @@ def respond(
         add_generation_prompt=True,
         enable_thinking=True,
     )
-    input_text += "<think>"
+    input_text += "<think>"  # TODO: remove short term fix
     print(input_text)
     inputs = tokenizer(input_text, return_tensors="pt").to(model.device)  # .to(device)
 
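For context, the following is a minimal sketch (not the Space's actual app.py) of how the changed lines typically sit inside a streaming Gradio chat handler built on transformers. Only MODEL_ID, the apply_chat_template arguments (add_generation_prompt, enable_thinking), the appended "<think>" prefix, the tokenizer(...).to(model.device) call, and the TextIteratorStreamer import come from the diff; the model loading options, threading, generation arguments, and gr.ChatInterface wiring are assumptions for illustration.

# Minimal sketch of how the changed lines fit into a streaming chat handler.
# Assumed, not taken from the diff: model loading options, threading,
# generation arguments, and the gr.ChatInterface wiring.
import threading

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

MODEL_ID = "le-llm/lapa-v0.1-reasoning-only"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto"
)


def respond(message, history):
    # history is ignored in this sketch; a real handler would fold it into messages.
    messages = [{"role": "user", "content": message}]
    input_text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=True,
    )
    input_text += "<think>"  # TODO: remove short term fix (pre-opens the reasoning block)
    inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

    # Stream tokens back to the UI as they are generated.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    thread = threading.Thread(
        target=model.generate,
        kwargs=dict(**inputs, streamer=streamer, max_new_tokens=1024),
    )
    thread.start()

    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial


demo = gr.ChatInterface(respond)
demo.launch()

Under these assumptions, appending "<think>" after the chat template forces the model to continue from an already-opened reasoning block, which matches the TODO note that this is a short-term fix rather than the intended long-term behaviour.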