{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9969230769230769,
  "eval_steps": 100,
  "global_step": 243,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0041025641025641026,
      "grad_norm": 88.41062031234637,
      "learning_rate": 8e-08,
      "loss": 6.7986,
      "step": 1
    },
    {
      "epoch": 0.020512820512820513,
      "grad_norm": 81.24895205230344,
      "learning_rate": 4e-07,
      "loss": 6.2457,
      "step": 5
    },
    {
      "epoch": 0.041025641025641026,
      "grad_norm": 39.9401979208905,
      "learning_rate": 8e-07,
      "loss": 4.2654,
      "step": 10
    },
    {
      "epoch": 0.06153846153846154,
      "grad_norm": 4.582726549940067,
      "learning_rate": 1.2e-06,
      "loss": 1.5329,
      "step": 15
    },
    {
      "epoch": 0.08205128205128205,
      "grad_norm": 1.3581398857248763,
      "learning_rate": 1.6e-06,
      "loss": 1.0931,
      "step": 20
    },
    {
      "epoch": 0.10256410256410256,
      "grad_norm": 5.5355901284220215,
      "learning_rate": 2e-06,
      "loss": 1.1385,
      "step": 25
    },
    {
      "epoch": 0.12307692307692308,
      "grad_norm": 1.9322177956430964,
      "learning_rate": 1.9974051702905273e-06,
      "loss": 1.0739,
      "step": 30
    },
    {
      "epoch": 0.14358974358974358,
      "grad_norm": 4.079114329852253,
      "learning_rate": 1.9896341474445524e-06,
      "loss": 1.0427,
      "step": 35
    },
    {
      "epoch": 0.1641025641025641,
      "grad_norm": 2.8750759874209098,
      "learning_rate": 1.976727260423982e-06,
      "loss": 1.0043,
      "step": 40
    },
    {
      "epoch": 0.18461538461538463,
      "grad_norm": 2.067325776643061,
      "learning_rate": 1.9587514915766123e-06,
      "loss": 1.0347,
      "step": 45
    },
    {
      "epoch": 0.20512820512820512,
      "grad_norm": 1.6077537019128674,
      "learning_rate": 1.935800129020554e-06,
      "loss": 0.9891,
      "step": 50
    },
    {
      "epoch": 0.22564102564102564,
      "grad_norm": 2.390881051535591,
      "learning_rate": 1.907992282510675e-06,
      "loss": 0.9456,
      "step": 55
    },
    {
      "epoch": 0.24615384615384617,
      "grad_norm": 1.4707572779498144,
      "learning_rate": 1.8754722652995345e-06,
      "loss": 0.9588,
      "step": 60
    },
    {
      "epoch": 0.26666666666666666,
      "grad_norm": 2.005066867381637,
      "learning_rate": 1.8384088452007577e-06,
      "loss": 0.8989,
      "step": 65
    },
    {
      "epoch": 0.28717948717948716,
      "grad_norm": 1.6368463754047495,
      "learning_rate": 1.7969943687415575e-06,
      "loss": 0.92,
      "step": 70
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 1.7094124448833328,
      "learning_rate": 1.751443762949772e-06,
      "loss": 0.9332,
      "step": 75
    },
    {
      "epoch": 0.3282051282051282,
      "grad_norm": 1.7316171677913168,
      "learning_rate": 1.7019934199557866e-06,
      "loss": 0.908,
      "step": 80
    },
    {
      "epoch": 0.3487179487179487,
      "grad_norm": 2.4068816500475063,
      "learning_rate": 1.6488999701978903e-06,
      "loss": 0.8519,
      "step": 85
    },
    {
      "epoch": 0.36923076923076925,
      "grad_norm": 1.4465939975024569,
      "learning_rate": 1.5924389505977035e-06,
      "loss": 0.9329,
      "step": 90
    },
    {
      "epoch": 0.38974358974358975,
      "grad_norm": 1.4314193126021653,
      "learning_rate": 1.5329033746173974e-06,
      "loss": 0.8849,
      "step": 95
    },
    {
      "epoch": 0.41025641025641024,
      "grad_norm": 1.8802864112299025,
      "learning_rate": 1.4706022116196205e-06,
      "loss": 0.8645,
      "step": 100
    },
    {
      "epoch": 0.4307692307692308,
      "grad_norm": 1.2998891675379092,
      "learning_rate": 1.4058587834217354e-06,
      "loss": 0.8719,
      "step": 105
    },
    {
      "epoch": 0.4512820512820513,
      "grad_norm": 1.467435624285604,
      "learning_rate": 1.3390090863657047e-06,
      "loss": 0.8924,
      "step": 110
    },
    {
      "epoch": 0.4717948717948718,
      "grad_norm": 1.4308609683013642,
      "learning_rate": 1.2704000476115078e-06,
      "loss": 0.8371,
      "step": 115
    },
    {
      "epoch": 0.49230769230769234,
      "grad_norm": 1.528592018119071,
      "learning_rate": 1.200387724703341e-06,
      "loss": 0.8612,
      "step": 120
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 1.3356249687391901,
      "learning_rate": 1.1293354577522264e-06,
      "loss": 0.8612,
      "step": 125
    },
    {
      "epoch": 0.5333333333333333,
      "grad_norm": 1.4723763010858135,
      "learning_rate": 1.0576119838245842e-06,
      "loss": 0.8445,
      "step": 130
    },
    {
      "epoch": 0.5538461538461539,
      "grad_norm": 1.1948328880288306,
      "learning_rate": 9.85589523322443e-07,
      "loss": 0.8221,
      "step": 135
    },
    {
      "epoch": 0.5743589743589743,
      "grad_norm": 1.2177832347997841,
      "learning_rate": 9.136418482863228e-07,
      "loss": 0.8834,
      "step": 140
    },
    {
      "epoch": 0.5948717948717949,
      "grad_norm": 1.820351894065908,
      "learning_rate": 8.42142342645646e-07,
      "loss": 0.8318,
      "step": 145
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 1.3069098183076444,
      "learning_rate": 7.714620644833109e-07,
      "loss": 0.8637,
      "step": 150
    },
    {
      "epoch": 0.6358974358974359,
      "grad_norm": 1.3226533500861553,
      "learning_rate": 7.019678203706163e-07,
      "loss": 0.8703,
      "step": 155
    },
    {
      "epoch": 0.6564102564102564,
      "grad_norm": 1.2863098084588132,
      "learning_rate": 6.340202617660841e-07,
      "loss": 0.8177,
      "step": 160
    },
    {
      "epoch": 0.676923076923077,
      "grad_norm": 1.1621535781739931,
      "learning_rate": 5.679720133572206e-07,
      "loss": 0.8611,
      "step": 165
    },
    {
      "epoch": 0.6974358974358974,
      "grad_norm": 1.6047585071587278,
      "learning_rate": 5.041658430584852e-07,
      "loss": 0.7952,
      "step": 170
    },
    {
      "epoch": 0.717948717948718,
      "grad_norm": 1.3524486934293127,
      "learning_rate": 4.429328831625565e-07,
      "loss": 0.8393,
      "step": 175
    },
    {
      "epoch": 0.7384615384615385,
      "grad_norm": 1.2525525637041273,
      "learning_rate": 3.8459091187650726e-07,
      "loss": 0.775,
      "step": 180
    },
    {
      "epoch": 0.7589743589743589,
      "grad_norm": 1.406080966982348,
      "learning_rate": 3.294427041611425e-07,
      "loss": 0.8396,
      "step": 185
    },
    {
      "epoch": 0.7794871794871795,
      "grad_norm": 1.2577422552540243,
      "learning_rate": 2.777744604320705e-07,
      "loss": 0.8438,
      "step": 190
    },
    {
      "epoch": 0.8,
      "grad_norm": 1.0634890683989555,
      "learning_rate": 2.2985432127701941e-07,
      "loss": 0.8266,
      "step": 195
    },
    {
      "epoch": 0.8205128205128205,
      "grad_norm": 1.3048223495687348,
      "learning_rate": 1.8593097589751316e-07,
      "loss": 0.8273,
      "step": 200
    },
    {
      "epoch": 0.841025641025641,
      "grad_norm": 1.2899768892839738,
      "learning_rate": 1.4623237149661137e-07,
      "loss": 0.775,
      "step": 205
    },
    {
      "epoch": 0.8615384615384616,
      "grad_norm": 1.657304058453659,
      "learning_rate": 1.1096453031056264e-07,
      "loss": 0.7838,
      "step": 210
    },
    {
      "epoch": 0.882051282051282,
      "grad_norm": 1.349663867325565,
      "learning_rate": 8.031048042356392e-08,
      "loss": 0.8106,
      "step": 215
    },
    {
      "epoch": 0.9025641025641026,
      "grad_norm": 1.7314606495399814,
      "learning_rate": 5.442930591433992e-08,
      "loss": 0.7853,
      "step": 220
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 1.3003917785727415,
      "learning_rate": 3.345532126395578e-08,
      "loss": 0.7901,
      "step": 225
    },
    {
      "epoch": 0.9435897435897436,
      "grad_norm": 1.7220623940198039,
      "learning_rate": 1.7497374309405344e-08,
      "loss": 0.8636,
      "step": 230
    },
    {
      "epoch": 0.9641025641025641,
      "grad_norm": 1.8177723954646483,
      "learning_rate": 6.6382813604083375e-09,
      "loss": 0.8136,
      "step": 235
    },
    {
      "epoch": 0.9846153846153847,
      "grad_norm": 1.1764075811793078,
      "learning_rate": 9.343974109685682e-10,
      "loss": 0.8408,
      "step": 240
    },
    {
      "epoch": 0.9969230769230769,
      "step": 243,
      "total_flos": 33729568407552.0,
      "train_loss": 1.0789150033958654,
      "train_runtime": 67356.065,
      "train_samples_per_second": 0.463,
      "train_steps_per_second": 0.004
    }
  ],
  "logging_steps": 5,
  "max_steps": 243,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 33729568407552.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}