Update app.py
app.py CHANGED

@@ -16,15 +16,14 @@ else:
 @spaces.GPU
 def generate_response(passage: str, question: str) -> str:
     # Prepare the input text by combining the passage and question
-
-
-
-    outputs = model.generate(**inputs, max_new_tokens=150)
-
-    # Decode only the generated part, skipping the prompt input
-    # generated_tokens = outputs[0][inputs.input_ids.shape[-1]:] # Ignore input tokens in the output
-    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
+    chat = [{"role": "user", "content": f"Passage: {passage}\nQuestion: {question}"}]
+
+    prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+    inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
+    response = model.generate(input_ids=inputs.to(olmo.device), max_new_tokens=100)
+
+    response = tokenizer.batch_decode(response, skip_special_tokens=True)[0]
 
     return response
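For reference, here is a minimal, self-contained sketch of the flow the new hunk implements, assuming a Hugging Face transformers causal LM and its tokenizer. The checkpoint name, device handling, and variable names below are illustrative only; in the committed code the device is read from an olmo object, presumably the model loaded earlier in app.py outside this hunk.

# Sketch only: checkpoint name and device handling are assumptions, not the
# Space's actual configuration (the committed code uses `olmo.device`).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "allenai/OLMo-7B-Instruct-hf"  # illustrative chat-tuned checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.to("cuda" if torch.cuda.is_available() else "cpu")

def generate_response(passage: str, question: str) -> str:
    # Single-turn chat message combining the passage and the question
    chat = [{"role": "user", "content": f"Passage: {passage}\nQuestion: {question}"}]

    # Render the chat into the model's expected prompt format and tokenize it
    prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")

    # Generate on the model's device; max_new_tokens mirrors the committed value
    output_ids = model.generate(input_ids=inputs.to(model.device), max_new_tokens=100)

    # Decode the whole sequence (prompt + completion) back to text
    return tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]

Note that decoding the full output returns the prompt together with the completion; the commented-out line removed by this commit (generated_tokens = outputs[0][inputs.input_ids.shape[-1]:]) is the usual way to strip the prompt tokens when only the generated answer is wanted.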