taka-yamakoshi committed
Commit · e2ecd0a
Parent(s): 9096322

debug

app.py CHANGED
@@ -135,7 +135,8 @@ def separate_options(option_locs):
 
 def mask_out(input_ids,pron_locs,option_locs,mask_id):
     assert np.all(np.diff(pron_locs)==1)
-
+    # note annotations are shifted by 1 because special tokens were omitted
+    return input_ids[:pron_locs[0]+1] + [mask_id for _ in range(len(option_locs))] + input_ids[pron_locs[-1]+2:]
 
 if __name__=='__main__':
     wide_setup()
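For reference, a minimal sketch of how the reworked mask_out appears to behave, with made-up token ids and the assumption (taken from the new comment) that pron_locs and option_locs index the sentence without special tokens, so a leading [CLS]-style token shifts everything by one:

import numpy as np

def mask_out(input_ids, pron_locs, option_locs, mask_id):
    assert np.all(np.diff(pron_locs) == 1)
    # annotations are shifted by 1 because special tokens were omitted
    return input_ids[:pron_locs[0]+1] + [mask_id for _ in range(len(option_locs))] + input_ids[pron_locs[-1]+2:]

# toy example with made-up ids: 2 = [CLS], 3 = [SEP], 4 = [MASK]
input_ids = [2, 11, 12, 13, 14, 3]   # [CLS] w0 w1 w2 w3 [SEP]
pron_locs = [1]                      # the pronoun is w1 (0-indexed, specials omitted)
option_locs = [0, 1]                 # the candidate option spans two tokens
print(mask_out(input_ids, pron_locs, option_locs, mask_id=4))
# -> [2, 11, 4, 4, 13, 14, 3]: the pronoun is replaced by one mask per option token

Replacing the pronoun with as many masks as the option has tokens appears to be what lets the later hunks score each candidate token by token at those masked positions.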
@@ -216,10 +217,10 @@ if __name__=='__main__':
     st.write(' '.join([tokenizer.decode([token]) for token in token_ids]))
 
     if st.session_state['page_status'] == 'finish_debug':
-        option_1_tokens_1 = np.array(input_ids_dict['sent_1'])[np.array(option_1_locs['sent_1'])]
-        option_1_tokens_2 = np.array(input_ids_dict['sent_2'])[np.array(option_1_locs['sent_2'])]
-        option_2_tokens_1 = np.array(input_ids_dict['sent_1'])[np.array(option_2_locs['sent_1'])]
-        option_2_tokens_2 = np.array(input_ids_dict['sent_2'])[np.array(option_2_locs['sent_2'])]
+        option_1_tokens_1 = np.array(input_ids_dict['sent_1'])[np.array(option_1_locs['sent_1'])+1]
+        option_1_tokens_2 = np.array(input_ids_dict['sent_2'])[np.array(option_1_locs['sent_2'])+1]
+        option_2_tokens_1 = np.array(input_ids_dict['sent_1'])[np.array(option_2_locs['sent_1'])+1]
+        option_2_tokens_2 = np.array(input_ids_dict['sent_2'])[np.array(option_2_locs['sent_2'])+1]
         assert np.all(option_1_tokens_1==option_1_tokens_2) and np.all(option_2_tokens_1==option_2_tokens_2)
         option_1_tokens = option_1_tokens_1
         option_2_tokens = option_2_tokens_1
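A small illustration of the off-by-one this hunk fixes, again with made-up ids, assuming input_ids_dict holds ids that include a leading special token while the *_locs annotations are 0-indexed over the bare sentence:

import numpy as np

input_ids = np.array([2, 11, 12, 13, 3])  # [CLS] w0 w1 w2 [SEP], made-up ids
option_1_locs = [1, 2]                    # annotation points at w1 w2 (specials omitted)

print(input_ids[np.array(option_1_locs)])      # [11 12] -- one position too early
print(input_ids[np.array(option_1_locs) + 1])  # [12 13] -- the intended option tokens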
@@ -233,6 +234,8 @@ if __name__=='__main__':
         ])
         outputs = SkeletonAlbertForMaskedLM(model,input_ids,interventions=interventions)
         logprobs = F.log_softmax(outputs['logits'], dim = -1)
+        logprobs_1, logprobs_2 = logprobs[:num_heads], logprobs[num_heads:]
+        evals_1 = [logprobs_1[:,pron_locs[0]+1+i,token] for i,token in enumerate(option_1_tokens)]
 
 
         preds_0 = [torch.multinomial(torch.exp(probs), num_samples=1).squeeze(dim=-1) for probs in logprobs[0][1:-1]]
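The two added lines split the stacked log-probabilities back into the two sentences and read off, head by head, the log-probability of each option token at its masked position. A self-contained sketch with made-up shapes; the (2*num_heads, seq_len, vocab) layout and the example values of pron_locs and option_1_tokens are assumptions, not taken from the rest of app.py:

import torch
import torch.nn.functional as F

num_heads, seq_len, vocab_size = 2, 7, 30000         # made-up sizes
logits = torch.randn(2 * num_heads, seq_len, vocab_size)
logprobs = F.log_softmax(logits, dim=-1)

# split the stacked batch into the two sentences, as in the diff
logprobs_1, logprobs_2 = logprobs[:num_heads], logprobs[num_heads:]

pron_locs = [1]              # hypothetical pronoun location (specials omitted)
option_1_tokens = [12, 13]   # hypothetical option token ids

# pron_locs[0]+1 is the first masked position once the leading special token is counted;
# each entry is the per-head log-probability of that option token, shape (num_heads,)
evals_1 = [logprobs_1[:, pron_locs[0] + 1 + i, token]
           for i, token in enumerate(option_1_tokens)]
print([e.shape for e in evals_1])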