{
  "best_metric": 0.7751432587577413,
| "best_model_checkpoint": "2-en-de-**1-wikispan-*unsup-ensemble-last-64-768-6*-64-768-3e-5-8600/checkpoint-1800**-64-128-3e-5-2600/checkpoint-2501", | |
| "epoch": 1.0, | |
| "global_step": 2501, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.04, | |
| "learning_rate": 1.5e-05, | |
| "loss": 33.9859, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.08, | |
| "learning_rate": 3e-05, | |
| "loss": 21.6865, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.12, | |
| "learning_rate": 2.875e-05, | |
| "loss": 20.6193, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.16, | |
| "learning_rate": 2.75e-05, | |
| "loss": 17.9514, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.2, | |
| "learning_rate": 2.625e-05, | |
| "loss": 17.5616, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.24, | |
| "learning_rate": 2.5e-05, | |
| "loss": 16.6805, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.28, | |
| "learning_rate": 2.3749999999999998e-05, | |
| "loss": 15.423, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.32, | |
| "learning_rate": 2.25e-05, | |
| "loss": 14.8845, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.36, | |
| "learning_rate": 2.125e-05, | |
| "loss": 14.5805, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.4, | |
| "learning_rate": 1.9999999999999998e-05, | |
| "loss": 14.6016, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.44, | |
| "learning_rate": 1.8750000000000002e-05, | |
| "loss": 13.951, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.48, | |
| "learning_rate": 1.7500000000000002e-05, | |
| "loss": 12.9799, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.52, | |
| "learning_rate": 1.625e-05, | |
| "loss": 12.8452, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 0.56, | |
| "learning_rate": 1.5e-05, | |
| "loss": 13.2246, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.6, | |
| "learning_rate": 1.375e-05, | |
| "loss": 12.2712, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.64, | |
| "learning_rate": 1.25e-05, | |
| "loss": 11.9556, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 0.68, | |
| "learning_rate": 1.125e-05, | |
| "loss": 11.5849, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 0.72, | |
| "learning_rate": 9.999999999999999e-06, | |
| "loss": 11.5427, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 0.76, | |
| "learning_rate": 8.750000000000001e-06, | |
| "loss": 11.3689, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 0.8, | |
| "learning_rate": 7.5e-06, | |
| "loss": 10.4619, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 0.84, | |
| "learning_rate": 6.25e-06, | |
| "loss": 11.2686, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 0.88, | |
| "learning_rate": 4.9999999999999996e-06, | |
| "loss": 10.772, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 0.92, | |
| "learning_rate": 3.75e-06, | |
| "loss": 11.0352, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 0.96, | |
| "learning_rate": 2.4999999999999998e-06, | |
| "loss": 10.7418, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 1.0, | |
| "learning_rate": 1.2499999999999999e-06, | |
| "loss": 10.8549, | |
| "step": 2500 | |
| } | |
| ], | |
| "max_steps": 2600, | |
| "num_train_epochs": 2, | |
| "total_flos": 165708798492672.0, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |