Fix toxicchat bug
app.py CHANGED
@@ -102,7 +102,6 @@ def predict_toxicity(text, model, tokenizer, device, model_name):
         outputs = model.generate(**inputs)
 
         prediction = tokenizer.decode(outputs[0], skip_special_tokens=True).strip().lower()
-        prediction = "Toxic" if prediction == "positive" else "Not Toxic"
     else:
         inputs = tokenizer(text, return_tensors="pt", truncation=True, max_length=128, padding="max_length").to(device)
 
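The removed line remapped the decoded model output to "Toxic"/"Not Toxic" via an exact match against "positive", which presumably mislabeled any other decoded string as "Not Toxic"; after this change the decoded label itself is kept as the prediction. Below is a minimal sketch of the generate-based branch after the fix, assuming a seq2seq checkpoint loaded with Hugging Face transformers; the function name, the t5-small checkpoint, and the torch.no_grad wrapper are illustrative assumptions, not taken from app.py.

# Minimal sketch of the generate-based prediction path after this change.
# Checkpoint and function names are hypothetical; only the tokenize/generate/
# decode steps mirror the diff above.
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

def predict_toxicity_seq2seq(text: str, model, tokenizer, device: str) -> str:
    # Tokenize with the same settings used in the surrounding code.
    inputs = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        max_length=128,
        padding="max_length",
    ).to(device)
    with torch.no_grad():
        outputs = model.generate(**inputs)
    # Keep the decoded label as-is; the old remapping silently turned any
    # label other than "positive" into "Not Toxic".
    return tokenizer.decode(outputs[0], skip_special_tokens=True).strip().lower()

if __name__ == "__main__":
    device = "cuda" if torch.cuda.is_available() else "cpu"
    name = "t5-small"  # hypothetical checkpoint for illustration only
    tokenizer = AutoTokenizer.from_pretrained(name)
    model = AutoModelForSeq2SeqLM.from_pretrained(name).to(device)
    print(predict_toxicity_seq2seq("you are awful", model, tokenizer, device))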