{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.5787781350482315,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.012861736334405145,
      "grad_norm": 1.0649385452270508,
      "learning_rate": 0.0001,
      "loss": 2.6309,
      "step": 1
    },
    {
      "epoch": 0.02572347266881029,
      "grad_norm": 0.9491050243377686,
      "learning_rate": 9.949748743718594e-05,
      "loss": 2.6373,
      "step": 2
    },
    {
      "epoch": 0.03858520900321544,
      "grad_norm": 0.9376218318939209,
      "learning_rate": 9.899497487437186e-05,
      "loss": 2.7692,
      "step": 3
    },
    {
      "epoch": 0.05144694533762058,
      "grad_norm": 1.0216950178146362,
      "learning_rate": 9.84924623115578e-05,
      "loss": 2.5093,
      "step": 4
    },
    {
      "epoch": 0.06430868167202572,
      "grad_norm": 1.054303526878357,
      "learning_rate": 9.798994974874372e-05,
      "loss": 2.371,
      "step": 5
    },
    {
      "epoch": 0.07717041800643087,
      "grad_norm": 1.059228777885437,
      "learning_rate": 9.748743718592965e-05,
      "loss": 2.2294,
      "step": 6
    },
    {
      "epoch": 0.09003215434083602,
      "grad_norm": 1.0482760667800903,
      "learning_rate": 9.698492462311559e-05,
      "loss": 2.0812,
      "step": 7
    },
    {
      "epoch": 0.10289389067524116,
      "grad_norm": 1.2738276720046997,
      "learning_rate": 9.64824120603015e-05,
      "loss": 2.0018,
      "step": 8
    },
    {
      "epoch": 0.1157556270096463,
      "grad_norm": 1.0511283874511719,
      "learning_rate": 9.597989949748745e-05,
      "loss": 1.7588,
      "step": 9
    },
    {
      "epoch": 0.12861736334405144,
      "grad_norm": 0.9217624068260193,
      "learning_rate": 9.547738693467337e-05,
      "loss": 1.7429,
      "step": 10
    },
    {
      "epoch": 0.1414790996784566,
      "grad_norm": 0.685434103012085,
      "learning_rate": 9.49748743718593e-05,
      "loss": 1.9808,
      "step": 11
    },
    {
      "epoch": 0.15434083601286175,
      "grad_norm": 0.8728280067443848,
      "learning_rate": 9.447236180904523e-05,
      "loss": 1.5599,
      "step": 12
    },
    {
      "epoch": 0.16720257234726688,
      "grad_norm": 0.6496291160583496,
      "learning_rate": 9.396984924623115e-05,
      "loss": 1.5217,
      "step": 13
    },
    {
      "epoch": 0.18006430868167203,
      "grad_norm": 0.632135808467865,
      "learning_rate": 9.34673366834171e-05,
      "loss": 1.4064,
      "step": 14
    },
    {
      "epoch": 0.19292604501607716,
      "grad_norm": 0.6891590356826782,
      "learning_rate": 9.296482412060302e-05,
      "loss": 1.5262,
      "step": 15
    },
    {
      "epoch": 0.2057877813504823,
      "grad_norm": 0.6528868079185486,
      "learning_rate": 9.246231155778895e-05,
      "loss": 1.4092,
      "step": 16
    },
    {
      "epoch": 0.21864951768488747,
      "grad_norm": 0.7315002083778381,
      "learning_rate": 9.195979899497488e-05,
      "loss": 1.533,
      "step": 17
    },
    {
      "epoch": 0.2315112540192926,
      "grad_norm": 0.6680942177772522,
      "learning_rate": 9.14572864321608e-05,
      "loss": 1.3878,
      "step": 18
    },
    {
      "epoch": 0.24437299035369775,
      "grad_norm": 0.5954132676124573,
      "learning_rate": 9.095477386934675e-05,
      "loss": 1.4235,
      "step": 19
    },
    {
      "epoch": 0.2572347266881029,
      "grad_norm": 0.7016865611076355,
      "learning_rate": 9.045226130653267e-05,
      "loss": 1.349,
      "step": 20
    },
    {
      "epoch": 0.27009646302250806,
      "grad_norm": 0.6608514189720154,
      "learning_rate": 8.99497487437186e-05,
      "loss": 1.3343,
      "step": 21
    },
    {
      "epoch": 0.2829581993569132,
      "grad_norm": 0.7280902862548828,
      "learning_rate": 8.944723618090453e-05,
      "loss": 1.3994,
      "step": 22
    },
    {
      "epoch": 0.2958199356913183,
      "grad_norm": 0.7174026966094971,
      "learning_rate": 8.894472361809045e-05,
      "loss": 1.4069,
      "step": 23
    },
    {
      "epoch": 0.3086816720257235,
      "grad_norm": 0.7075796723365784,
      "learning_rate": 8.84422110552764e-05,
      "loss": 1.3257,
      "step": 24
    },
    {
      "epoch": 0.3215434083601286,
      "grad_norm": 0.7438945174217224,
      "learning_rate": 8.793969849246232e-05,
      "loss": 1.3734,
      "step": 25
    },
    {
      "epoch": 0.33440514469453375,
      "grad_norm": 0.669459342956543,
      "learning_rate": 8.743718592964825e-05,
      "loss": 1.3843,
      "step": 26
    },
    {
      "epoch": 0.34726688102893893,
      "grad_norm": 0.7187721729278564,
      "learning_rate": 8.693467336683418e-05,
      "loss": 1.2812,
      "step": 27
    },
    {
      "epoch": 0.36012861736334406,
      "grad_norm": 0.7727590799331665,
      "learning_rate": 8.64321608040201e-05,
      "loss": 1.3305,
      "step": 28
    },
    {
      "epoch": 0.3729903536977492,
      "grad_norm": 0.7686368227005005,
      "learning_rate": 8.592964824120603e-05,
      "loss": 1.4395,
      "step": 29
    },
    {
      "epoch": 0.3858520900321543,
      "grad_norm": 0.8188142776489258,
      "learning_rate": 8.542713567839196e-05,
      "loss": 1.3322,
      "step": 30
    },
    {
      "epoch": 0.3987138263665595,
      "grad_norm": 0.7664804458618164,
      "learning_rate": 8.49246231155779e-05,
      "loss": 1.3569,
      "step": 31
    },
    {
      "epoch": 0.4115755627009646,
      "grad_norm": 0.7511367797851562,
      "learning_rate": 8.442211055276383e-05,
      "loss": 1.2811,
      "step": 32
    },
    {
      "epoch": 0.42443729903536975,
      "grad_norm": 0.8608831167221069,
      "learning_rate": 8.391959798994975e-05,
      "loss": 1.2244,
      "step": 33
    },
    {
      "epoch": 0.43729903536977494,
      "grad_norm": 0.8403910398483276,
      "learning_rate": 8.341708542713568e-05,
      "loss": 1.4243,
      "step": 34
    },
    {
      "epoch": 0.45016077170418006,
      "grad_norm": 0.8469493985176086,
      "learning_rate": 8.291457286432161e-05,
      "loss": 1.2308,
      "step": 35
    },
    {
      "epoch": 0.4630225080385852,
      "grad_norm": 0.8314189314842224,
      "learning_rate": 8.241206030150754e-05,
      "loss": 1.3045,
      "step": 36
    },
    {
      "epoch": 0.4758842443729904,
      "grad_norm": 0.8816382884979248,
      "learning_rate": 8.190954773869348e-05,
      "loss": 1.3571,
      "step": 37
    },
    {
      "epoch": 0.4887459807073955,
      "grad_norm": 0.8717392086982727,
      "learning_rate": 8.14070351758794e-05,
      "loss": 1.3506,
      "step": 38
    },
    {
      "epoch": 0.5016077170418006,
      "grad_norm": 0.8880780935287476,
      "learning_rate": 8.090452261306533e-05,
      "loss": 1.2376,
      "step": 39
    },
    {
      "epoch": 0.5144694533762058,
      "grad_norm": 0.7529111504554749,
      "learning_rate": 8.040201005025126e-05,
      "loss": 1.3724,
      "step": 40
    },
    {
      "epoch": 0.5273311897106109,
      "grad_norm": 1.0163393020629883,
      "learning_rate": 7.989949748743719e-05,
      "loss": 1.2542,
      "step": 41
    },
    {
      "epoch": 0.5401929260450161,
      "grad_norm": 0.651192307472229,
      "learning_rate": 7.939698492462313e-05,
      "loss": 1.5108,
      "step": 42
    },
    {
      "epoch": 0.5530546623794212,
      "grad_norm": 0.8402130007743835,
      "learning_rate": 7.889447236180904e-05,
      "loss": 1.1996,
      "step": 43
    },
    {
      "epoch": 0.5659163987138264,
      "grad_norm": 0.7727481722831726,
      "learning_rate": 7.839195979899498e-05,
      "loss": 1.2289,
      "step": 44
    },
    {
      "epoch": 0.5787781350482315,
      "grad_norm": 0.6933633685112,
      "learning_rate": 7.788944723618091e-05,
      "loss": 1.1982,
      "step": 45
    },
    {
      "epoch": 0.5916398713826366,
      "grad_norm": 0.5633453130722046,
      "learning_rate": 7.738693467336684e-05,
      "loss": 1.4112,
      "step": 46
    },
    {
      "epoch": 0.6045016077170418,
      "grad_norm": 0.7134105563163757,
      "learning_rate": 7.688442211055277e-05,
      "loss": 1.2367,
      "step": 47
    },
    {
      "epoch": 0.617363344051447,
      "grad_norm": 0.6546292304992676,
      "learning_rate": 7.638190954773869e-05,
      "loss": 1.1962,
      "step": 48
    },
    {
      "epoch": 0.6302250803858521,
      "grad_norm": 0.7516946196556091,
      "learning_rate": 7.587939698492463e-05,
      "loss": 1.1747,
      "step": 49
    },
    {
      "epoch": 0.6430868167202572,
      "grad_norm": 0.718727171421051,
      "learning_rate": 7.537688442211056e-05,
      "loss": 1.3876,
      "step": 50
    },
    {
      "epoch": 0.6559485530546624,
      "grad_norm": 0.6199938058853149,
      "learning_rate": 7.487437185929649e-05,
      "loss": 1.2323,
      "step": 51
    },
    {
      "epoch": 0.6688102893890675,
      "grad_norm": 0.7272217869758606,
      "learning_rate": 7.437185929648241e-05,
      "loss": 1.3287,
      "step": 52
    },
    {
      "epoch": 0.6816720257234726,
      "grad_norm": 0.6393375992774963,
      "learning_rate": 7.386934673366834e-05,
      "loss": 1.3454,
      "step": 53
    },
    {
      "epoch": 0.6945337620578779,
      "grad_norm": 0.6419456005096436,
      "learning_rate": 7.336683417085427e-05,
      "loss": 1.1051,
      "step": 54
    },
    {
      "epoch": 0.707395498392283,
      "grad_norm": 0.7256225943565369,
      "learning_rate": 7.28643216080402e-05,
      "loss": 1.2236,
      "step": 55
    },
    {
      "epoch": 0.7202572347266881,
      "grad_norm": 0.6486414670944214,
      "learning_rate": 7.236180904522614e-05,
      "loss": 1.3265,
      "step": 56
    },
    {
      "epoch": 0.7331189710610932,
      "grad_norm": 0.6492426991462708,
      "learning_rate": 7.185929648241206e-05,
      "loss": 1.3362,
      "step": 57
    },
    {
      "epoch": 0.7459807073954984,
      "grad_norm": 0.7237244248390198,
      "learning_rate": 7.135678391959799e-05,
      "loss": 1.1568,
      "step": 58
    },
    {
      "epoch": 0.7588424437299035,
      "grad_norm": 0.7400479912757874,
      "learning_rate": 7.085427135678392e-05,
      "loss": 1.2116,
      "step": 59
    },
    {
      "epoch": 0.7717041800643086,
      "grad_norm": 0.7453139424324036,
      "learning_rate": 7.035175879396985e-05,
      "loss": 1.293,
      "step": 60
    },
    {
      "epoch": 0.7845659163987139,
      "grad_norm": 0.7200148105621338,
      "learning_rate": 6.984924623115579e-05,
      "loss": 1.1902,
      "step": 61
    },
    {
      "epoch": 0.797427652733119,
      "grad_norm": 0.753768801689148,
      "learning_rate": 6.93467336683417e-05,
      "loss": 1.218,
      "step": 62
    },
    {
      "epoch": 0.8102893890675241,
      "grad_norm": 0.7190577387809753,
      "learning_rate": 6.884422110552764e-05,
      "loss": 1.2068,
      "step": 63
    },
    {
      "epoch": 0.8231511254019293,
      "grad_norm": 0.7753349542617798,
      "learning_rate": 6.834170854271357e-05,
      "loss": 1.3279,
      "step": 64
    },
    {
      "epoch": 0.8360128617363344,
      "grad_norm": 0.7965415120124817,
      "learning_rate": 6.78391959798995e-05,
      "loss": 1.0254,
      "step": 65
    },
    {
      "epoch": 0.8488745980707395,
      "grad_norm": 0.8631449937820435,
      "learning_rate": 6.733668341708544e-05,
      "loss": 1.2029,
      "step": 66
    },
    {
      "epoch": 0.8617363344051447,
      "grad_norm": 0.7210382223129272,
      "learning_rate": 6.683417085427135e-05,
      "loss": 1.1602,
      "step": 67
    },
    {
      "epoch": 0.8745980707395499,
      "grad_norm": 0.6991996765136719,
      "learning_rate": 6.633165829145729e-05,
      "loss": 1.2122,
      "step": 68
    },
    {
      "epoch": 0.887459807073955,
      "grad_norm": 0.6861876249313354,
      "learning_rate": 6.582914572864322e-05,
      "loss": 1.2715,
      "step": 69
    },
    {
      "epoch": 0.9003215434083601,
      "grad_norm": 0.7111251354217529,
      "learning_rate": 6.532663316582915e-05,
      "loss": 1.1894,
      "step": 70
    },
    {
      "epoch": 0.9131832797427653,
      "grad_norm": 0.7575141787528992,
      "learning_rate": 6.482412060301508e-05,
      "loss": 1.2182,
      "step": 71
    },
    {
      "epoch": 0.9260450160771704,
      "grad_norm": 0.6892881393432617,
      "learning_rate": 6.4321608040201e-05,
      "loss": 1.1138,
      "step": 72
    },
    {
      "epoch": 0.9389067524115756,
      "grad_norm": 0.7526578903198242,
      "learning_rate": 6.381909547738694e-05,
      "loss": 1.1088,
      "step": 73
    },
    {
      "epoch": 0.9517684887459807,
      "grad_norm": 0.6483733654022217,
      "learning_rate": 6.331658291457287e-05,
      "loss": 1.2254,
      "step": 74
    },
    {
      "epoch": 0.9646302250803859,
      "grad_norm": 0.7623192071914673,
      "learning_rate": 6.28140703517588e-05,
      "loss": 1.2961,
      "step": 75
    },
    {
      "epoch": 0.977491961414791,
      "grad_norm": 0.7486171126365662,
      "learning_rate": 6.231155778894473e-05,
      "loss": 1.1917,
      "step": 76
    },
    {
      "epoch": 0.9903536977491961,
      "grad_norm": 0.6872939467430115,
      "learning_rate": 6.180904522613065e-05,
      "loss": 1.147,
      "step": 77
    },
    {
      "epoch": 1.0064308681672025,
      "grad_norm": 1.3864794969558716,
      "learning_rate": 6.130653266331658e-05,
      "loss": 1.746,
      "step": 78
    },
    {
      "epoch": 1.0192926045016077,
      "grad_norm": 0.5814358592033386,
      "learning_rate": 6.080402010050251e-05,
      "loss": 1.0255,
      "step": 79
    },
    {
      "epoch": 1.0321543408360128,
      "grad_norm": 0.6417332887649536,
      "learning_rate": 6.030150753768844e-05,
      "loss": 1.1216,
      "step": 80
    },
    {
      "epoch": 1.045016077170418,
      "grad_norm": 0.648380696773529,
      "learning_rate": 5.979899497487438e-05,
      "loss": 1.1417,
      "step": 81
    },
    {
      "epoch": 1.0578778135048232,
      "grad_norm": 0.6810972094535828,
      "learning_rate": 5.929648241206031e-05,
      "loss": 1.1271,
      "step": 82
    },
    {
      "epoch": 1.0707395498392283,
      "grad_norm": 0.6431122422218323,
      "learning_rate": 5.879396984924623e-05,
      "loss": 1.0729,
      "step": 83
    },
    {
      "epoch": 1.0836012861736335,
      "grad_norm": 0.7915307283401489,
      "learning_rate": 5.829145728643216e-05,
      "loss": 1.1238,
      "step": 84
    },
    {
      "epoch": 1.0964630225080385,
      "grad_norm": 0.7646147012710571,
      "learning_rate": 5.778894472361809e-05,
      "loss": 1.1225,
      "step": 85
    },
    {
      "epoch": 1.1093247588424437,
      "grad_norm": 0.7343133687973022,
      "learning_rate": 5.728643216080403e-05,
      "loss": 1.1414,
      "step": 86
    },
    {
      "epoch": 1.122186495176849,
      "grad_norm": 0.6509206891059875,
      "learning_rate": 5.6783919597989955e-05,
      "loss": 1.0904,
      "step": 87
    },
    {
      "epoch": 1.135048231511254,
      "grad_norm": 0.6493409276008606,
      "learning_rate": 5.628140703517588e-05,
      "loss": 1.1185,
      "step": 88
    },
    {
      "epoch": 1.1479099678456592,
      "grad_norm": 0.6985325813293457,
      "learning_rate": 5.577889447236181e-05,
      "loss": 1.1248,
      "step": 89
    },
    {
      "epoch": 1.1607717041800643,
      "grad_norm": 0.7278170585632324,
      "learning_rate": 5.527638190954774e-05,
      "loss": 1.1493,
      "step": 90
    },
    {
      "epoch": 1.1736334405144695,
      "grad_norm": 0.7814955115318298,
      "learning_rate": 5.477386934673368e-05,
      "loss": 1.8804,
      "step": 91
    },
    {
      "epoch": 1.1864951768488745,
      "grad_norm": 0.6044358015060425,
      "learning_rate": 5.4271356783919604e-05,
      "loss": 0.9426,
      "step": 92
    },
    {
      "epoch": 1.1993569131832797,
      "grad_norm": 0.6450907588005066,
      "learning_rate": 5.376884422110553e-05,
      "loss": 0.9643,
      "step": 93
    },
    {
      "epoch": 1.212218649517685,
      "grad_norm": 0.845541775226593,
      "learning_rate": 5.3266331658291455e-05,
      "loss": 1.2485,
      "step": 94
    },
    {
      "epoch": 1.22508038585209,
      "grad_norm": 0.653166651725769,
      "learning_rate": 5.276381909547739e-05,
      "loss": 0.9115,
      "step": 95
    },
    {
      "epoch": 1.2379421221864952,
      "grad_norm": 0.8042985796928406,
      "learning_rate": 5.226130653266332e-05,
      "loss": 1.2722,
      "step": 96
    },
    {
      "epoch": 1.2508038585209003,
      "grad_norm": 0.7926493287086487,
      "learning_rate": 5.175879396984925e-05,
      "loss": 1.1146,
      "step": 97
    },
    {
      "epoch": 1.2636655948553055,
      "grad_norm": 0.7699697017669678,
      "learning_rate": 5.125628140703518e-05,
      "loss": 1.114,
      "step": 98
    },
    {
      "epoch": 1.2765273311897105,
      "grad_norm": 0.7615493535995483,
      "learning_rate": 5.0753768844221104e-05,
      "loss": 1.1514,
      "step": 99
    },
    {
      "epoch": 1.2893890675241158,
      "grad_norm": 0.7418377995491028,
      "learning_rate": 5.0251256281407036e-05,
      "loss": 1.0847,
      "step": 100
    },
    {
      "epoch": 1.302250803858521,
      "grad_norm": 0.7438411712646484,
      "learning_rate": 4.974874371859297e-05,
      "loss": 1.2096,
      "step": 101
    },
    {
      "epoch": 1.315112540192926,
      "grad_norm": 0.7060010433197021,
      "learning_rate": 4.92462311557789e-05,
      "loss": 1.1058,
      "step": 102
    },
    {
      "epoch": 1.3279742765273312,
      "grad_norm": 0.574945330619812,
      "learning_rate": 4.874371859296483e-05,
      "loss": 0.9299,
      "step": 103
    },
    {
      "epoch": 1.3408360128617363,
      "grad_norm": 0.7485166788101196,
      "learning_rate": 4.824120603015075e-05,
      "loss": 1.5913,
      "step": 104
    },
    {
      "epoch": 1.3536977491961415,
      "grad_norm": 0.7644198536872864,
      "learning_rate": 4.7738693467336685e-05,
      "loss": 1.1538,
      "step": 105
    },
    {
      "epoch": 1.3665594855305465,
      "grad_norm": 0.7649131417274475,
      "learning_rate": 4.723618090452262e-05,
      "loss": 1.04,
      "step": 106
    },
    {
      "epoch": 1.3794212218649518,
      "grad_norm": 0.7314745783805847,
      "learning_rate": 4.673366834170855e-05,
      "loss": 0.9756,
      "step": 107
    },
    {
      "epoch": 1.392282958199357,
      "grad_norm": 0.7615334391593933,
      "learning_rate": 4.6231155778894475e-05,
      "loss": 1.053,
      "step": 108
    },
    {
      "epoch": 1.405144694533762,
      "grad_norm": 0.7232903242111206,
      "learning_rate": 4.57286432160804e-05,
      "loss": 0.9716,
      "step": 109
    },
    {
      "epoch": 1.4180064308681672,
      "grad_norm": 0.8094847798347473,
      "learning_rate": 4.522613065326633e-05,
      "loss": 1.0367,
      "step": 110
    },
    {
      "epoch": 1.4308681672025725,
      "grad_norm": 0.950025200843811,
      "learning_rate": 4.4723618090452266e-05,
      "loss": 1.1011,
      "step": 111
    },
    {
      "epoch": 1.4437299035369775,
      "grad_norm": 0.7514937520027161,
      "learning_rate": 4.42211055276382e-05,
      "loss": 1.0202,
      "step": 112
    },
    {
      "epoch": 1.4565916398713825,
      "grad_norm": 0.836333692073822,
      "learning_rate": 4.3718592964824124e-05,
      "loss": 1.1794,
      "step": 113
    },
    {
      "epoch": 1.4694533762057878,
      "grad_norm": 0.8447477221488953,
      "learning_rate": 4.321608040201005e-05,
      "loss": 1.0942,
      "step": 114
    },
    {
      "epoch": 1.482315112540193,
      "grad_norm": 0.8204519748687744,
      "learning_rate": 4.271356783919598e-05,
      "loss": 1.3409,
      "step": 115
    },
    {
      "epoch": 1.495176848874598,
      "grad_norm": 0.730801522731781,
      "learning_rate": 4.2211055276381914e-05,
      "loss": 0.9899,
      "step": 116
    },
    {
      "epoch": 1.5080385852090032,
      "grad_norm": 0.879811704158783,
      "learning_rate": 4.170854271356784e-05,
      "loss": 1.1081,
      "step": 117
    },
    {
      "epoch": 1.5209003215434085,
      "grad_norm": 0.7798628807067871,
      "learning_rate": 4.120603015075377e-05,
      "loss": 1.0908,
      "step": 118
    },
    {
      "epoch": 1.5337620578778135,
      "grad_norm": 0.870912492275238,
      "learning_rate": 4.07035175879397e-05,
      "loss": 1.2096,
      "step": 119
    },
    {
      "epoch": 1.5466237942122185,
      "grad_norm": 0.7109503149986267,
      "learning_rate": 4.020100502512563e-05,
      "loss": 1.0298,
      "step": 120
    },
    {
      "epoch": 1.5594855305466238,
      "grad_norm": 0.8116230964660645,
      "learning_rate": 3.969849246231156e-05,
      "loss": 1.0723,
      "step": 121
    },
    {
      "epoch": 1.572347266881029,
      "grad_norm": 0.7052260637283325,
      "learning_rate": 3.919597989949749e-05,
      "loss": 1.0198,
      "step": 122
    },
    {
      "epoch": 1.585209003215434,
      "grad_norm": 0.8413100838661194,
      "learning_rate": 3.869346733668342e-05,
      "loss": 1.0381,
      "step": 123
    },
    {
      "epoch": 1.5980707395498392,
      "grad_norm": 0.8870115876197815,
      "learning_rate": 3.8190954773869346e-05,
      "loss": 1.5338,
      "step": 124
    },
    {
      "epoch": 1.6109324758842445,
      "grad_norm": 0.6777501702308655,
      "learning_rate": 3.768844221105528e-05,
      "loss": 0.8878,
      "step": 125
    },
    {
      "epoch": 1.6237942122186495,
      "grad_norm": 0.7898180484771729,
      "learning_rate": 3.7185929648241204e-05,
      "loss": 0.961,
      "step": 126
    },
    {
      "epoch": 1.6366559485530545,
      "grad_norm": 0.7508682012557983,
      "learning_rate": 3.668341708542714e-05,
      "loss": 0.9697,
      "step": 127
    },
    {
      "epoch": 1.6495176848874598,
      "grad_norm": 0.956596314907074,
      "learning_rate": 3.618090452261307e-05,
      "loss": 1.2065,
      "step": 128
    },
    {
      "epoch": 1.662379421221865,
      "grad_norm": 0.8160056471824646,
      "learning_rate": 3.5678391959798995e-05,
      "loss": 1.0585,
      "step": 129
    },
    {
      "epoch": 1.67524115755627,
      "grad_norm": 0.9093283414840698,
      "learning_rate": 3.517587939698493e-05,
      "loss": 1.008,
      "step": 130
    },
    {
      "epoch": 1.6881028938906752,
      "grad_norm": 0.8321830630302429,
      "learning_rate": 3.467336683417085e-05,
      "loss": 1.1026,
      "step": 131
    },
    {
      "epoch": 1.7009646302250805,
      "grad_norm": 0.7878942489624023,
      "learning_rate": 3.4170854271356785e-05,
      "loss": 1.0497,
      "step": 132
    },
    {
      "epoch": 1.7138263665594855,
      "grad_norm": 0.88408362865448,
      "learning_rate": 3.366834170854272e-05,
      "loss": 1.3551,
      "step": 133
    },
    {
      "epoch": 1.7266881028938905,
      "grad_norm": 0.7452175617218018,
      "learning_rate": 3.3165829145728643e-05,
      "loss": 0.9679,
      "step": 134
    },
    {
      "epoch": 1.739549839228296,
      "grad_norm": 0.8924412727355957,
      "learning_rate": 3.2663316582914576e-05,
      "loss": 1.0265,
      "step": 135
    },
    {
      "epoch": 1.752411575562701,
      "grad_norm": 0.9149259924888611,
      "learning_rate": 3.21608040201005e-05,
      "loss": 0.9405,
      "step": 136
    },
    {
      "epoch": 1.765273311897106,
      "grad_norm": 0.935562252998352,
      "learning_rate": 3.1658291457286434e-05,
      "loss": 1.1063,
      "step": 137
    },
    {
      "epoch": 1.7781350482315113,
      "grad_norm": 0.8658350110054016,
      "learning_rate": 3.1155778894472366e-05,
      "loss": 1.0239,
      "step": 138
    },
    {
      "epoch": 1.7909967845659165,
      "grad_norm": 0.9327634572982788,
      "learning_rate": 3.065326633165829e-05,
      "loss": 1.1195,
      "step": 139
    },
    {
      "epoch": 1.8038585209003215,
      "grad_norm": 0.8411900997161865,
      "learning_rate": 3.015075376884422e-05,
      "loss": 0.8957,
      "step": 140
    },
    {
      "epoch": 1.8167202572347267,
      "grad_norm": 0.9331545829772949,
      "learning_rate": 2.9648241206030153e-05,
      "loss": 1.1823,
      "step": 141
    },
    {
      "epoch": 1.829581993569132,
      "grad_norm": 0.9505443572998047,
      "learning_rate": 2.914572864321608e-05,
      "loss": 1.1083,
      "step": 142
    },
    {
      "epoch": 1.842443729903537,
      "grad_norm": 0.7980955243110657,
      "learning_rate": 2.8643216080402015e-05,
      "loss": 1.074,
      "step": 143
    },
    {
      "epoch": 1.855305466237942,
      "grad_norm": 0.9247193336486816,
      "learning_rate": 2.814070351758794e-05,
      "loss": 1.2129,
      "step": 144
    },
    {
      "epoch": 1.8681672025723473,
      "grad_norm": 0.887328028678894,
      "learning_rate": 2.763819095477387e-05,
      "loss": 1.0688,
      "step": 145
    },
    {
      "epoch": 1.8810289389067525,
      "grad_norm": 0.7812901735305786,
      "learning_rate": 2.7135678391959802e-05,
      "loss": 0.9337,
      "step": 146
    },
    {
      "epoch": 1.8938906752411575,
      "grad_norm": 0.8578721880912781,
      "learning_rate": 2.6633165829145728e-05,
      "loss": 1.2328,
      "step": 147
    },
    {
      "epoch": 1.9067524115755627,
      "grad_norm": 0.9639273285865784,
      "learning_rate": 2.613065326633166e-05,
      "loss": 1.0618,
      "step": 148
    },
    {
      "epoch": 1.919614147909968,
      "grad_norm": 0.7975261807441711,
      "learning_rate": 2.562814070351759e-05,
      "loss": 0.9403,
      "step": 149
    },
    {
      "epoch": 1.932475884244373,
      "grad_norm": 0.9210904240608215,
      "learning_rate": 2.5125628140703518e-05,
      "loss": 1.1497,
      "step": 150
    },
    {
      "epoch": 1.945337620578778,
      "grad_norm": 0.7670096755027771,
      "learning_rate": 2.462311557788945e-05,
      "loss": 1.048,
      "step": 151
    },
    {
      "epoch": 1.9581993569131833,
      "grad_norm": 0.9265581965446472,
      "learning_rate": 2.4120603015075376e-05,
      "loss": 1.1377,
      "step": 152
    },
    {
      "epoch": 1.9710610932475885,
      "grad_norm": 0.853067934513092,
      "learning_rate": 2.361809045226131e-05,
      "loss": 1.1952,
      "step": 153
    },
    {
      "epoch": 1.9839228295819935,
      "grad_norm": 0.8661015629768372,
      "learning_rate": 2.3115577889447238e-05,
      "loss": 1.094,
      "step": 154
    },
    {
      "epoch": 1.9967845659163987,
      "grad_norm": 1.4559237957000732,
      "learning_rate": 2.2613065326633167e-05,
      "loss": 1.576,
      "step": 155
    },
    {
      "epoch": 2.012861736334405,
      "grad_norm": 0.8805817365646362,
      "learning_rate": 2.21105527638191e-05,
      "loss": 1.0219,
      "step": 156
    },
    {
      "epoch": 2.0257234726688105,
      "grad_norm": 0.7943904399871826,
      "learning_rate": 2.1608040201005025e-05,
      "loss": 1.0843,
      "step": 157
    },
    {
      "epoch": 2.0385852090032155,
      "grad_norm": 0.8749082684516907,
      "learning_rate": 2.1105527638190957e-05,
      "loss": 1.0128,
      "step": 158
    },
    {
      "epoch": 2.0514469453376205,
      "grad_norm": 0.80024653673172,
      "learning_rate": 2.0603015075376886e-05,
      "loss": 1.0139,
      "step": 159
    },
    {
      "epoch": 2.0643086816720255,
      "grad_norm": 0.8480293154716492,
      "learning_rate": 2.0100502512562815e-05,
      "loss": 0.9936,
      "step": 160
    },
    {
      "epoch": 2.077170418006431,
      "grad_norm": 0.800495445728302,
      "learning_rate": 1.9597989949748744e-05,
      "loss": 1.0542,
      "step": 161
    },
    {
      "epoch": 2.090032154340836,
      "grad_norm": 0.8782421350479126,
      "learning_rate": 1.9095477386934673e-05,
      "loss": 1.0353,
      "step": 162
    },
    {
      "epoch": 2.102893890675241,
      "grad_norm": 0.8080796599388123,
      "learning_rate": 1.8592964824120602e-05,
      "loss": 0.9628,
      "step": 163
    },
    {
      "epoch": 2.1157556270096465,
      "grad_norm": 0.7773626446723938,
      "learning_rate": 1.8090452261306535e-05,
      "loss": 0.9945,
      "step": 164
    },
    {
      "epoch": 2.1286173633440515,
      "grad_norm": 0.7648970484733582,
      "learning_rate": 1.7587939698492464e-05,
      "loss": 1.0015,
      "step": 165
    },
    {
      "epoch": 2.1414790996784565,
      "grad_norm": 0.8139113187789917,
      "learning_rate": 1.7085427135678393e-05,
      "loss": 1.0132,
      "step": 166
    },
    {
      "epoch": 2.154340836012862,
      "grad_norm": 0.8528121113777161,
      "learning_rate": 1.6582914572864322e-05,
      "loss": 1.004,
      "step": 167
    },
    {
      "epoch": 2.167202572347267,
      "grad_norm": 0.7888818383216858,
      "learning_rate": 1.608040201005025e-05,
      "loss": 0.9956,
      "step": 168
    },
    {
      "epoch": 2.180064308681672,
      "grad_norm": 0.8569130301475525,
      "learning_rate": 1.5577889447236183e-05,
      "loss": 0.9705,
      "step": 169
    },
    {
      "epoch": 2.192926045016077,
      "grad_norm": 0.7740685939788818,
      "learning_rate": 1.507537688442211e-05,
      "loss": 1.4226,
      "step": 170
    },
    {
      "epoch": 2.2057877813504825,
      "grad_norm": 0.8912209272384644,
      "learning_rate": 1.457286432160804e-05,
      "loss": 1.0053,
      "step": 171
    },
    {
      "epoch": 2.2186495176848875,
      "grad_norm": 0.9198904633522034,
      "learning_rate": 1.407035175879397e-05,
      "loss": 1.0057,
      "step": 172
    },
    {
      "epoch": 2.2315112540192925,
      "grad_norm": 0.8956185579299927,
      "learning_rate": 1.3567839195979901e-05,
      "loss": 0.9636,
      "step": 173
    },
    {
      "epoch": 2.244372990353698,
      "grad_norm": 0.7728644609451294,
      "learning_rate": 1.306532663316583e-05,
      "loss": 1.1026,
      "step": 174
    },
    {
      "epoch": 2.257234726688103,
      "grad_norm": 0.8695865273475647,
      "learning_rate": 1.2562814070351759e-05,
      "loss": 1.0474,
      "step": 175
    },
    {
      "epoch": 2.270096463022508,
      "grad_norm": 0.8445467352867126,
      "learning_rate": 1.2060301507537688e-05,
      "loss": 1.1477,
      "step": 176
    },
    {
      "epoch": 2.282958199356913,
      "grad_norm": 0.8170430660247803,
      "learning_rate": 1.1557788944723619e-05,
      "loss": 1.0361,
      "step": 177
    },
    {
      "epoch": 2.2958199356913185,
      "grad_norm": 0.9592326283454895,
      "learning_rate": 1.105527638190955e-05,
      "loss": 1.0354,
      "step": 178
    },
    {
      "epoch": 2.3086816720257235,
      "grad_norm": 0.9984281659126282,
      "learning_rate": 1.0552763819095479e-05,
      "loss": 0.977,
      "step": 179
    },
    {
      "epoch": 2.3215434083601285,
      "grad_norm": 0.8793332576751709,
      "learning_rate": 1.0050251256281408e-05,
      "loss": 0.9957,
      "step": 180
    },
    {
      "epoch": 2.334405144694534,
      "grad_norm": 0.8469133973121643,
      "learning_rate": 9.547738693467337e-06,
      "loss": 0.981,
      "step": 181
    },
    {
      "epoch": 2.347266881028939,
      "grad_norm": 0.8359603881835938,
      "learning_rate": 9.045226130653267e-06,
      "loss": 0.921,
      "step": 182
    },
    {
      "epoch": 2.360128617363344,
      "grad_norm": 0.9214036464691162,
      "learning_rate": 8.542713567839196e-06,
      "loss": 1.0061,
      "step": 183
    },
    {
      "epoch": 2.372990353697749,
      "grad_norm": 0.8119861483573914,
      "learning_rate": 8.040201005025125e-06,
      "loss": 1.0287,
      "step": 184
    },
    {
      "epoch": 2.3858520900321545,
      "grad_norm": 0.8458288908004761,
      "learning_rate": 7.537688442211055e-06,
      "loss": 0.9324,
      "step": 185
    },
    {
      "epoch": 2.3987138263665595,
      "grad_norm": 0.9167879223823547,
      "learning_rate": 7.035175879396985e-06,
      "loss": 1.0395,
      "step": 186
    },
    {
      "epoch": 2.4115755627009645,
      "grad_norm": 0.9315198659896851,
      "learning_rate": 6.532663316582915e-06,
      "loss": 0.9878,
      "step": 187
    },
    {
      "epoch": 2.42443729903537,
      "grad_norm": 0.8909194469451904,
      "learning_rate": 6.030150753768844e-06,
      "loss": 1.0509,
      "step": 188
    },
    {
      "epoch": 2.437299035369775,
      "grad_norm": 0.8457067012786865,
      "learning_rate": 5.527638190954775e-06,
      "loss": 0.9853,
      "step": 189
    },
    {
      "epoch": 2.45016077170418,
      "grad_norm": 0.823922872543335,
      "learning_rate": 5.025125628140704e-06,
      "loss": 1.0355,
      "step": 190
    },
    {
      "epoch": 2.463022508038585,
      "grad_norm": 0.7183964252471924,
      "learning_rate": 4.522613065326634e-06,
      "loss": 1.2571,
      "step": 191
    },
    {
      "epoch": 2.4758842443729905,
      "grad_norm": 0.8723146915435791,
      "learning_rate": 4.020100502512563e-06,
      "loss": 1.0361,
      "step": 192
    },
    {
      "epoch": 2.4887459807073955,
      "grad_norm": 0.8409279584884644,
      "learning_rate": 3.5175879396984926e-06,
      "loss": 0.9046,
      "step": 193
    },
    {
      "epoch": 2.5016077170418005,
      "grad_norm": 0.9092235565185547,
      "learning_rate": 3.015075376884422e-06,
      "loss": 1.0147,
      "step": 194
    },
    {
      "epoch": 2.514469453376206,
      "grad_norm": 0.849392831325531,
      "learning_rate": 2.512562814070352e-06,
      "loss": 0.8241,
      "step": 195
    },
    {
      "epoch": 2.527331189710611,
      "grad_norm": 0.9866675734519958,
      "learning_rate": 2.0100502512562813e-06,
      "loss": 0.9559,
      "step": 196
    },
    {
      "epoch": 2.540192926045016,
      "grad_norm": 0.8281249403953552,
      "learning_rate": 1.507537688442211e-06,
      "loss": 0.9611,
      "step": 197
    },
    {
      "epoch": 2.553054662379421,
      "grad_norm": 0.834632158279419,
      "learning_rate": 1.0050251256281407e-06,
      "loss": 0.9536,
      "step": 198
    },
    {
      "epoch": 2.5659163987138265,
      "grad_norm": 0.8354447484016418,
      "learning_rate": 5.025125628140703e-07,
      "loss": 0.9748,
      "step": 199
    },
    {
      "epoch": 2.5787781350482315,
      "grad_norm": 0.8496025800704956,
      "learning_rate": 0.0,
      "loss": 0.8724,
      "step": 200
    }
  ],
  "logging_steps": 1,
  "max_steps": 200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.0719132584574976e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}