Added code to map outputs back to the labels
#4
by
nadaaaita
- opened
README.md
CHANGED
|
@@ -78,4 +78,20 @@ tokenizer = AutoTokenizer.from_pretrained("reachosen/autotrain-sdohv7-3701198597
|
|
| 78 |
inputs = tokenizer("The Patient is homeless", return_tensors="pt")
|
| 79 |
|
| 80 |
outputs = model(**inputs)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
```
|
|
|
|
| 78 |
inputs = tokenizer("The Patient is homeless", return_tensors="pt")
|
| 79 |
|
| 80 |
outputs = model(**inputs)
|
| 81 |
+
|
| 82 |
+
# Extract logits from the outputs
|
| 83 |
+
logits = outputs.logits
|
| 84 |
+
|
| 85 |
+
# Apply softmax to get probabilities
|
| 86 |
+
probabilities = F.softmax(logits, dim=1)
|
| 87 |
+
|
| 88 |
+
# Get the id-to-label class mapping from the model's config
|
| 89 |
+
configs_fp = "path/to/model/files/" + "config.json"
|
| 90 |
+
with open(configs_fp, 'r') as configs_file:
|
| 91 |
+
class_mapping = json.load(configs_file)['id2label']
|
| 92 |
+
|
| 93 |
+
# Create the class_probs dictionary
|
| 94 |
+
class_probs = {class_mapping[str(i)]: probabilities[0][i].item() for i in range(len(probabilities[0]))}
|
| 95 |
+
|
| 96 |
+
|
| 97 |
```
|