{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.3427004797806717,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 0.9619973443448544,
      "epoch": 0.006854009595613434,
      "grad_norm": 0.8562721610069275,
      "learning_rate": 0.0001995887594242632,
      "loss": 0.7973,
      "mean_token_accuracy": 0.7519877135753632,
      "num_tokens": 15771.0,
      "step": 10
    },
    {
      "entropy": 0.7034977793693542,
      "epoch": 0.013708019191226868,
      "grad_norm": 0.5451128482818604,
      "learning_rate": 0.0001991318254512223,
      "loss": 0.5986,
      "mean_token_accuracy": 0.8309322476387024,
      "num_tokens": 33062.0,
      "step": 20
    },
    {
      "entropy": 0.6603402759879827,
      "epoch": 0.0205620287868403,
      "grad_norm": 0.5171676278114319,
      "learning_rate": 0.00019867489147818142,
      "loss": 0.633,
      "mean_token_accuracy": 0.8433935061097145,
      "num_tokens": 48936.0,
      "step": 30
    },
    {
      "entropy": 0.6830728624016047,
      "epoch": 0.027416038382453736,
      "grad_norm": 0.4969835877418518,
      "learning_rate": 0.0001982179575051405,
      "loss": 0.6773,
      "mean_token_accuracy": 0.8266744241118431,
      "num_tokens": 61449.0,
      "step": 40
    },
    {
      "entropy": 0.5286078054457903,
      "epoch": 0.03427004797806717,
      "grad_norm": 0.44698312878608704,
      "learning_rate": 0.00019776102353209963,
      "loss": 0.5558,
      "mean_token_accuracy": 0.8533428102731705,
      "num_tokens": 77104.0,
      "step": 50
    },
    {
      "entropy": 0.5590948283672332,
      "epoch": 0.0411240575736806,
      "grad_norm": 0.38724300265312195,
      "learning_rate": 0.00019730408955905872,
      "loss": 0.5771,
      "mean_token_accuracy": 0.8514142513275147,
      "num_tokens": 91558.0,
      "step": 60
    },
    {
      "entropy": 0.599293502047658,
      "epoch": 0.047978067169294036,
      "grad_norm": 0.5922872424125671,
      "learning_rate": 0.00019684715558601783,
      "loss": 0.5309,
      "mean_token_accuracy": 0.851950392127037,
      "num_tokens": 105756.0,
      "step": 70
    },
    {
      "entropy": 0.6024694256484509,
      "epoch": 0.05483207676490747,
      "grad_norm": 0.5078150629997253,
      "learning_rate": 0.00019639022161297692,
      "loss": 0.6727,
      "mean_token_accuracy": 0.8480887472629547,
      "num_tokens": 120163.0,
      "step": 80
    },
    {
      "entropy": 0.5648054199293255,
      "epoch": 0.061686086360520906,
      "grad_norm": 0.29077377915382385,
      "learning_rate": 0.00019593328763993604,
      "loss": 0.5509,
      "mean_token_accuracy": 0.8548661589622497,
      "num_tokens": 138293.0,
      "step": 90
    },
    {
      "entropy": 0.5659369576722384,
      "epoch": 0.06854009595613433,
      "grad_norm": 0.3394547700881958,
      "learning_rate": 0.00019547635366689515,
      "loss": 0.5729,
      "mean_token_accuracy": 0.8460367009043693,
      "num_tokens": 157530.0,
      "step": 100
    },
    {
      "entropy": 0.6021960902959108,
      "epoch": 0.07539410555174778,
      "grad_norm": 0.42912933230400085,
      "learning_rate": 0.00019501941969385424,
      "loss": 0.5465,
      "mean_token_accuracy": 0.851969163119793,
      "num_tokens": 170607.0,
      "step": 110
    },
    {
      "entropy": 0.610968679189682,
      "epoch": 0.0822481151473612,
      "grad_norm": 0.3759806752204895,
      "learning_rate": 0.00019456248572081335,
      "loss": 0.632,
      "mean_token_accuracy": 0.83554507791996,
      "num_tokens": 185672.0,
      "step": 120
    },
    {
      "entropy": 0.5211818940937519,
      "epoch": 0.08910212474297464,
      "grad_norm": 0.503212034702301,
      "learning_rate": 0.00019410555174777244,
      "loss": 0.547,
      "mean_token_accuracy": 0.8610908895730972,
      "num_tokens": 200482.0,
      "step": 130
    },
    {
      "entropy": 0.4536220826208591,
      "epoch": 0.09595613433858807,
      "grad_norm": 0.7268697619438171,
      "learning_rate": 0.00019364861777473156,
      "loss": 0.4726,
      "mean_token_accuracy": 0.870763523876667,
      "num_tokens": 216537.0,
      "step": 140
    },
    {
      "entropy": 0.5031640276312828,
      "epoch": 0.10281014393420151,
      "grad_norm": 0.33594396710395813,
      "learning_rate": 0.00019319168380169065,
      "loss": 0.5923,
      "mean_token_accuracy": 0.8628711074590683,
      "num_tokens": 232400.0,
      "step": 150
    },
    {
      "entropy": 0.6555169004946947,
      "epoch": 0.10966415352981494,
      "grad_norm": 0.5894250869750977,
      "learning_rate": 0.00019273474982864976,
      "loss": 0.5634,
      "mean_token_accuracy": 0.838917362689972,
      "num_tokens": 244273.0,
      "step": 160
    },
    {
      "entropy": 0.6185528110712767,
      "epoch": 0.11651816312542837,
      "grad_norm": 0.5221670269966125,
      "learning_rate": 0.00019227781585560888,
      "loss": 0.6818,
      "mean_token_accuracy": 0.8385803163051605,
      "num_tokens": 262927.0,
      "step": 170
    },
    {
      "entropy": 0.43512718714773657,
      "epoch": 0.12337217272104181,
      "grad_norm": 0.4728280007839203,
      "learning_rate": 0.00019182088188256796,
      "loss": 0.5178,
      "mean_token_accuracy": 0.8747259676456451,
      "num_tokens": 271716.0,
      "step": 180
    },
    {
      "entropy": 0.6649946108460426,
      "epoch": 0.13022618231665525,
      "grad_norm": 0.47320684790611267,
      "learning_rate": 0.00019136394790952708,
      "loss": 0.6651,
      "mean_token_accuracy": 0.8223798260092735,
      "num_tokens": 287518.0,
      "step": 190
    },
    {
      "entropy": 0.44864910580217837,
      "epoch": 0.13708019191226867,
      "grad_norm": 0.4356485903263092,
      "learning_rate": 0.00019090701393648617,
      "loss": 0.4631,
      "mean_token_accuracy": 0.8835410609841347,
      "num_tokens": 306299.0,
      "step": 200
    },
    {
      "entropy": 0.5033049076795578,
      "epoch": 0.1439342015078821,
      "grad_norm": 0.48287737369537354,
      "learning_rate": 0.00019045007996344528,
      "loss": 0.5933,
      "mean_token_accuracy": 0.8669374987483025,
      "num_tokens": 321955.0,
      "step": 210
    },
    {
      "entropy": 0.43575111888349055,
      "epoch": 0.15078821110349555,
      "grad_norm": 0.5973707437515259,
      "learning_rate": 0.0001899931459904044,
      "loss": 0.4856,
      "mean_token_accuracy": 0.8797904253005981,
      "num_tokens": 332265.0,
      "step": 220
    },
    {
      "entropy": 0.6193726476281881,
      "epoch": 0.157642220699109,
      "grad_norm": 0.28756600618362427,
      "learning_rate": 0.0001895362120173635,
      "loss": 0.6056,
      "mean_token_accuracy": 0.8307039767503739,
      "num_tokens": 346377.0,
      "step": 230
    },
    {
      "entropy": 0.5753613166511059,
      "epoch": 0.1644962302947224,
      "grad_norm": 0.4320402145385742,
      "learning_rate": 0.0001890792780443226,
      "loss": 0.5834,
      "mean_token_accuracy": 0.8549783885478973,
      "num_tokens": 362964.0,
      "step": 240
    },
    {
      "entropy": 0.5963706407696009,
      "epoch": 0.17135023989033585,
      "grad_norm": 0.4648321866989136,
      "learning_rate": 0.0001886223440712817,
      "loss": 0.6272,
      "mean_token_accuracy": 0.8493530780076981,
      "num_tokens": 375717.0,
      "step": 250
    },
    {
      "entropy": 0.5467700261622668,
      "epoch": 0.1782042494859493,
      "grad_norm": 0.3487129211425781,
      "learning_rate": 0.00018816541009824083,
      "loss": 0.5449,
      "mean_token_accuracy": 0.8526464059948922,
      "num_tokens": 394586.0,
      "step": 260
    },
    {
      "entropy": 0.4246529323980212,
      "epoch": 0.1850582590815627,
      "grad_norm": 0.7286052703857422,
      "learning_rate": 0.00018770847612519992,
      "loss": 0.45,
      "mean_token_accuracy": 0.8814342901110649,
      "num_tokens": 411636.0,
      "step": 270
    },
    {
      "entropy": 0.5389048531651497,
      "epoch": 0.19191226867717615,
      "grad_norm": 0.3287123441696167,
      "learning_rate": 0.00018725154215215904,
      "loss": 0.5138,
      "mean_token_accuracy": 0.8506704963743686,
      "num_tokens": 427077.0,
      "step": 280
    },
    {
      "entropy": 0.5297756217420101,
      "epoch": 0.1987662782727896,
      "grad_norm": 0.5151430368423462,
      "learning_rate": 0.00018679460817911812,
      "loss": 0.5953,
      "mean_token_accuracy": 0.8586609676480293,
      "num_tokens": 442257.0,
      "step": 290
    },
    {
      "entropy": 0.5314730744808912,
      "epoch": 0.20562028786840303,
      "grad_norm": 0.9860548377037048,
      "learning_rate": 0.00018633767420607724,
      "loss": 0.5143,
      "mean_token_accuracy": 0.8650062039494515,
      "num_tokens": 458093.0,
      "step": 300
    },
    {
      "entropy": 0.5666845880448819,
      "epoch": 0.21247429746401644,
      "grad_norm": 0.8684160113334656,
      "learning_rate": 0.00018588074023303635,
      "loss": 0.5487,
      "mean_token_accuracy": 0.8509581357240676,
      "num_tokens": 471306.0,
      "step": 310
    },
    {
      "entropy": 0.5297997735440731,
      "epoch": 0.21932830705962988,
      "grad_norm": 0.3815328776836395,
      "learning_rate": 0.00018542380625999544,
      "loss": 0.6052,
      "mean_token_accuracy": 0.8568633005023003,
      "num_tokens": 488461.0,
      "step": 320
    },
    {
      "entropy": 0.5316725082695484,
      "epoch": 0.22618231665524333,
      "grad_norm": 0.5312303900718689,
      "learning_rate": 0.00018496687228695456,
      "loss": 0.6035,
      "mean_token_accuracy": 0.858753177523613,
      "num_tokens": 503665.0,
      "step": 330
    },
    {
      "entropy": 0.6088610142469406,
      "epoch": 0.23303632625085674,
      "grad_norm": 0.40660324692726135,
      "learning_rate": 0.00018450993831391365,
      "loss": 0.6232,
      "mean_token_accuracy": 0.8444906592369079,
      "num_tokens": 521925.0,
      "step": 340
    },
    {
      "entropy": 0.6339217025786639,
      "epoch": 0.23989033584647018,
      "grad_norm": 0.5640454888343811,
      "learning_rate": 0.00018405300434087276,
      "loss": 0.6188,
      "mean_token_accuracy": 0.8382566079497338,
      "num_tokens": 535970.0,
      "step": 350
    },
    {
      "entropy": 0.5411492632701993,
      "epoch": 0.24674434544208362,
      "grad_norm": 0.42631176114082336,
      "learning_rate": 0.00018359607036783185,
      "loss": 0.5528,
      "mean_token_accuracy": 0.8523587495088577,
      "num_tokens": 551676.0,
      "step": 360
    },
    {
      "entropy": 0.5561750333756208,
      "epoch": 0.25359835503769707,
      "grad_norm": 0.5579405426979065,
      "learning_rate": 0.00018313913639479097,
      "loss": 0.5793,
      "mean_token_accuracy": 0.8607801914215087,
      "num_tokens": 568488.0,
      "step": 370
    },
    {
      "entropy": 0.5319446712732315,
      "epoch": 0.2604523646333105,
      "grad_norm": 0.8342606425285339,
      "learning_rate": 0.00018268220242175008,
      "loss": 0.4994,
      "mean_token_accuracy": 0.863979734480381,
      "num_tokens": 582963.0,
      "step": 380
    },
    {
      "entropy": 0.5910112973302603,
      "epoch": 0.2673063742289239,
      "grad_norm": 0.5433372259140015,
      "learning_rate": 0.00018222526844870917,
      "loss": 0.6669,
      "mean_token_accuracy": 0.8433835208415985,
      "num_tokens": 598471.0,
      "step": 390
    },
    {
      "entropy": 0.46995992250740526,
      "epoch": 0.27416038382453733,
      "grad_norm": 0.26409879326820374,
      "learning_rate": 0.00018176833447566828,
      "loss": 0.5199,
      "mean_token_accuracy": 0.87328050583601,
      "num_tokens": 614036.0,
      "step": 400
    },
    {
      "entropy": 0.5400116696953774,
      "epoch": 0.2810143934201508,
      "grad_norm": 0.3498149514198303,
      "learning_rate": 0.00018131140050262737,
      "loss": 0.5902,
      "mean_token_accuracy": 0.8512750566005707,
      "num_tokens": 630937.0,
      "step": 410
    },
    {
      "entropy": 0.45603593066334724,
      "epoch": 0.2878684030157642,
      "grad_norm": 0.6973631978034973,
      "learning_rate": 0.0001808544665295865,
      "loss": 0.484,
      "mean_token_accuracy": 0.8728810593485832,
      "num_tokens": 642492.0,
      "step": 420
    },
    {
      "entropy": 0.5664497867226601,
      "epoch": 0.29472241261137766,
      "grad_norm": 0.4047413170337677,
      "learning_rate": 0.0001803975325565456,
      "loss": 0.5107,
      "mean_token_accuracy": 0.8518401965498924,
      "num_tokens": 656785.0,
      "step": 430
    },
    {
      "entropy": 0.5749023761600256,
      "epoch": 0.3015764222069911,
      "grad_norm": 0.5084949135780334,
      "learning_rate": 0.0001799405985835047,
      "loss": 0.5558,
      "mean_token_accuracy": 0.8492432355880737,
      "num_tokens": 671870.0,
      "step": 440
    },
    {
      "entropy": 0.4889295015484095,
      "epoch": 0.30843043180260454,
      "grad_norm": 0.42546579241752625,
      "learning_rate": 0.0001794836646104638,
      "loss": 0.5416,
      "mean_token_accuracy": 0.8649413183331489,
      "num_tokens": 685980.0,
      "step": 450
    },
    {
      "entropy": 0.5743775438517332,
      "epoch": 0.315284441398218,
      "grad_norm": 0.3708641231060028,
      "learning_rate": 0.0001790267306374229,
      "loss": 0.5976,
      "mean_token_accuracy": 0.8467541456222534,
      "num_tokens": 699287.0,
      "step": 460
    },
    {
      "entropy": 0.5913191799074411,
      "epoch": 0.32213845099383137,
      "grad_norm": 0.37332257628440857,
      "learning_rate": 0.000178569796664382,
      "loss": 0.5695,
      "mean_token_accuracy": 0.8441656738519668,
      "num_tokens": 714803.0,
      "step": 470
    },
    {
      "entropy": 0.45778534524142744,
      "epoch": 0.3289924605894448,
      "grad_norm": 0.5047005414962769,
      "learning_rate": 0.0001781128626913411,
      "loss": 0.4778,
      "mean_token_accuracy": 0.8752694010734559,
      "num_tokens": 732120.0,
      "step": 480
    },
    {
      "entropy": 0.5643713362514973,
      "epoch": 0.33584647018505825,
      "grad_norm": 0.4013417065143585,
      "learning_rate": 0.0001776559287183002,
      "loss": 0.5366,
      "mean_token_accuracy": 0.8520827397704125,
      "num_tokens": 745974.0,
      "step": 490
    },
    {
      "entropy": 0.4815288335084915,
      "epoch": 0.3427004797806717,
      "grad_norm": 0.3859888017177582,
      "learning_rate": 0.00017719899474525933,
      "loss": 0.5521,
      "mean_token_accuracy": 0.8687581121921539,
      "num_tokens": 759499.0,
      "step": 500
    }
  ],
  "logging_steps": 10,
  "max_steps": 4377,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6055358125599744.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}