fix: inputs problem
app.py CHANGED
```diff
@@ -26,16 +26,18 @@ def inference(prompt: str, max_tokens: int = 256) -> str:
         tokenize = False,
         add_generation_prompt = True, # Must add for generation
     )
+    inputs = tokenizer(text, return_tensors="pt").to("cuda")
 
     outputs = model.generate(
-        **
-        max_new_tokens
-        temperature
-
+        **inputs,
+        max_new_tokens=max_tokens,
+        temperature=0.7,
+        top_p=0.8,
+        top_k=20,
     )
 
     response = tokenizer.decode(
-        outputs[0][inputs["input_ids"].shape[1]:],
+        outputs[0][inputs["input_ids"].shape[1]:],
         skip_special_tokens=True
     )
     return response
```
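For context, here is a minimal runnable sketch of the `inference` function after this fix. Only the lines visible in the diff are confirmed; the imports, the model/tokenizer loading, the `MODEL_ID` name, and the chat-message structure around `apply_chat_template` are assumptions added to make the sketch self-contained (the 0.7/0.8/20 sampling values match Qwen's recommended settings, so a Qwen chat model is assumed):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the Space's actual model is not shown in the diff; the 0.7/0.8/20
# sampling values are Qwen's recommended defaults, so a small Qwen chat model
# is used here as a placeholder.
MODEL_ID = "Qwen/Qwen2.5-0.5B-Instruct"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID, torch_dtype=torch.float16
).to("cuda")

def inference(prompt: str, max_tokens: int = 256) -> str:
    # Render the chat template to a plain string (tokenize=False) and append
    # the assistant header so the model knows a reply should follow.
    text = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt}],
        tokenize=False,
        add_generation_prompt=True,  # Must add for generation
    )
    # The fix: tokenize the rendered string before calling generate().
    inputs = tokenizer(text, return_tensors="pt").to("cuda")

    outputs = model.generate(
        **inputs,
        max_new_tokens=max_tokens,
        temperature=0.7,  # sampling params only apply when do_sample=True
        top_p=0.8,
        top_k=20,
    )

    # generate() returns prompt + completion in one sequence; decode only
    # the newly generated tokens.
    response = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[1]:],
        skip_special_tokens=True,
    )
    return response
```

The fix itself is the `inputs = tokenizer(...)` line: judging from the removed lines, the previous revision's `generate()` call was left incomplete and never tokenized the rendered chat template. The slice `outputs[0][inputs["input_ids"].shape[1]:]` drops the echoed prompt tokens, since `generate` returns prompt plus completion as one sequence. Note that `temperature`, `top_p`, and `top_k` only take effect when sampling is enabled (`do_sample=True`, often preset in the model's `generation_config`).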