{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.7684295008623486,
"eval_steps": 10000.0,
"global_step": 90000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 8.538105565137207e-06,
"grad_norm": 6.5811052322387695,
"learning_rate": 1.7073587160662455e-08,
"loss": 0.9425575733184814,
"memory(GiB)": 4.21,
"step": 1,
"train_speed(iter/s)": 0.107952
},
{
"epoch": 0.004269052782568603,
"grad_norm": 0.775414764881134,
"learning_rate": 8.536793580331227e-06,
"loss": 0.2293025076030968,
"memory(GiB)": 5.71,
"step": 500,
"train_speed(iter/s)": 0.938128
},
{
"epoch": 0.008538105565137207,
"grad_norm": 0.5124035477638245,
"learning_rate": 1.7073587160662455e-05,
"loss": 0.05007815933227539,
"memory(GiB)": 5.71,
"step": 1000,
"train_speed(iter/s)": 0.946051
},
{
"epoch": 0.012807158347705811,
"grad_norm": 0.34229570627212524,
"learning_rate": 2.5610380740993682e-05,
"loss": 0.036965576171875,
"memory(GiB)": 5.71,
"step": 1500,
"train_speed(iter/s)": 0.980261
},
{
"epoch": 0.017076211130274414,
"grad_norm": 0.29022017121315,
"learning_rate": 3.414717432132491e-05,
"loss": 0.030718547821044922,
"memory(GiB)": 5.71,
"step": 2000,
"train_speed(iter/s)": 1.013563
},
{
"epoch": 0.02134526391284302,
"grad_norm": 0.2701531946659088,
"learning_rate": 4.268396790165614e-05,
"loss": 0.026950979232788087,
"memory(GiB)": 5.71,
"step": 2500,
"train_speed(iter/s)": 1.038614
},
{
"epoch": 0.025614316695411622,
"grad_norm": 0.1944938600063324,
"learning_rate": 5.1220761481987364e-05,
"loss": 0.025618917465209962,
"memory(GiB)": 5.71,
"step": 3000,
"train_speed(iter/s)": 1.081032
},
{
"epoch": 0.029883369477980225,
"grad_norm": 0.15646104514598846,
"learning_rate": 5.9757555062318595e-05,
"loss": 0.0239105281829834,
"memory(GiB)": 5.71,
"step": 3500,
"train_speed(iter/s)": 1.023247
},
{
"epoch": 0.03415242226054883,
"grad_norm": 0.12373720854520798,
"learning_rate": 6.829434864264982e-05,
"loss": 0.021082000732421877,
"memory(GiB)": 5.71,
"step": 4000,
"train_speed(iter/s)": 0.971265
},
{
"epoch": 0.03842147504311743,
"grad_norm": 0.11245039105415344,
"learning_rate": 7.683114222298106e-05,
"loss": 0.020011337280273437,
"memory(GiB)": 5.71,
"step": 4500,
"train_speed(iter/s)": 0.934224
},
{
"epoch": 0.04269052782568604,
"grad_norm": 0.12453507632017136,
"learning_rate": 8.536793580331228e-05,
"loss": 0.019226245880126953,
"memory(GiB)": 5.71,
"step": 5000,
"train_speed(iter/s)": 0.906199
},
{
"epoch": 0.04695958060825464,
"grad_norm": 0.08364573866128922,
"learning_rate": 9.39047293836435e-05,
"loss": 0.018570756912231444,
"memory(GiB)": 5.71,
"step": 5500,
"train_speed(iter/s)": 0.887629
},
{
"epoch": 0.051228633390823244,
"grad_norm": 0.08422534167766571,
"learning_rate": 9.999959243761552e-05,
"loss": 0.018008022308349608,
"memory(GiB)": 5.71,
"step": 6000,
"train_speed(iter/s)": 0.882382
},
{
"epoch": 0.05549768617339185,
"grad_norm": 0.0722428560256958,
"learning_rate": 9.999175989726746e-05,
"loss": 0.016920005798339844,
"memory(GiB)": 5.71,
"step": 6500,
"train_speed(iter/s)": 0.887128
},
{
"epoch": 0.05976673895596045,
"grad_norm": 0.0696873590350151,
"learning_rate": 9.997396381339795e-05,
"loss": 0.016275758743286132,
"memory(GiB)": 5.71,
"step": 7000,
"train_speed(iter/s)": 0.896874
},
{
"epoch": 0.06403579173852905,
"grad_norm": 0.0715385377407074,
"learning_rate": 9.994620773283261e-05,
"loss": 0.03211669921875,
"memory(GiB)": 5.71,
"step": 7500,
"train_speed(iter/s)": 0.906916
},
{
"epoch": 0.06830484452109765,
"grad_norm": 0.07314470410346985,
"learning_rate": 9.990849718746144e-05,
"loss": 0.014735573768615723,
"memory(GiB)": 5.71,
"step": 8000,
"train_speed(iter/s)": 0.915054
},
{
"epoch": 0.07257389730366626,
"grad_norm": 0.0624830424785614,
"learning_rate": 9.986083969313632e-05,
"loss": 0.014247347831726074,
"memory(GiB)": 5.71,
"step": 8500,
"train_speed(iter/s)": 0.923566
},
{
"epoch": 0.07684295008623486,
"grad_norm": 0.054315660148859024,
"learning_rate": 9.980324474817292e-05,
"loss": 0.014038642883300782,
"memory(GiB)": 5.71,
"step": 9000,
"train_speed(iter/s)": 0.931957
},
{
"epoch": 0.08111200286880348,
"grad_norm": 0.04966143146157265,
"learning_rate": 9.973572383145782e-05,
"loss": 0.013697422027587891,
"memory(GiB)": 5.71,
"step": 9500,
"train_speed(iter/s)": 0.939331
},
{
"epoch": 0.08538105565137208,
"grad_norm": 0.055357128381729126,
"learning_rate": 9.965829040016061e-05,
"loss": 0.013380534172058105,
"memory(GiB)": 5.71,
"step": 10000,
"train_speed(iter/s)": 0.946434
},
{
"epoch": 0.08965010843394068,
"grad_norm": 0.05234465003013611,
"learning_rate": 9.957095988705193e-05,
"loss": 0.013177488327026367,
"memory(GiB)": 5.71,
"step": 10500,
"train_speed(iter/s)": 0.960222
},
{
"epoch": 0.09391916121650928,
"grad_norm": 0.053024690598249435,
"learning_rate": 9.947374969742755e-05,
"loss": 0.013030742645263672,
"memory(GiB)": 5.71,
"step": 11000,
"train_speed(iter/s)": 0.973308
},
{
"epoch": 0.09818821399907789,
"grad_norm": 0.04746083542704582,
"learning_rate": 9.936667920563951e-05,
"loss": 0.012802671432495118,
"memory(GiB)": 5.71,
"step": 11500,
"train_speed(iter/s)": 0.985248
},
{
"epoch": 0.10245726678164649,
"grad_norm": 0.045355405658483505,
"learning_rate": 9.924976975123472e-05,
"loss": 0.012366548538208007,
"memory(GiB)": 5.71,
"step": 12000,
"train_speed(iter/s)": 0.996787
},
{
"epoch": 0.10672631956421509,
"grad_norm": 0.0430605448782444,
"learning_rate": 9.912304463470185e-05,
"loss": 0.018324718475341797,
"memory(GiB)": 5.71,
"step": 12500,
"train_speed(iter/s)": 1.007619
},
{
"epoch": 0.1109953723467837,
"grad_norm": 0.03760723024606705,
"learning_rate": 9.89865291128276e-05,
"loss": 0.011288459777832032,
"memory(GiB)": 5.71,
"step": 13000,
"train_speed(iter/s)": 1.017831
},
{
"epoch": 0.1152644251293523,
"grad_norm": 0.04123725742101669,
"learning_rate": 9.884025039366274e-05,
"loss": 0.011067386627197265,
"memory(GiB)": 5.71,
"step": 13500,
"train_speed(iter/s)": 1.027479
},
{
"epoch": 0.1195334779119209,
"grad_norm": 0.046554189175367355,
"learning_rate": 9.868423763109962e-05,
"loss": 0.010972289085388184,
"memory(GiB)": 5.71,
"step": 14000,
"train_speed(iter/s)": 1.0366
},
{
"epoch": 0.1238025306944895,
"grad_norm": 0.04529291018843651,
"learning_rate": 9.851852191906155e-05,
"loss": 0.010918002128601074,
"memory(GiB)": 5.71,
"step": 14500,
"train_speed(iter/s)": 1.045276
},
{
"epoch": 0.1280715834770581,
"grad_norm": 0.039391227066516876,
"learning_rate": 9.834313628530574e-05,
"loss": 0.010901000022888184,
"memory(GiB)": 5.71,
"step": 15000,
"train_speed(iter/s)": 1.053467
},
{
"epoch": 0.13234063625962672,
"grad_norm": 0.050082143396139145,
"learning_rate": 9.81581156848408e-05,
"loss": 0.010822629928588868,
"memory(GiB)": 5.71,
"step": 15500,
"train_speed(iter/s)": 1.061046
},
{
"epoch": 0.1366096890421953,
"grad_norm": 0.04181528836488724,
"learning_rate": 9.79634969929599e-05,
"loss": 0.01061639404296875,
"memory(GiB)": 5.71,
"step": 16000,
"train_speed(iter/s)": 1.068454
},
{
"epoch": 0.14087874182476393,
"grad_norm": 0.037736013531684875,
"learning_rate": 9.775931899789159e-05,
"loss": 0.01050139808654785,
"memory(GiB)": 5.71,
"step": 16500,
"train_speed(iter/s)": 1.075513
},
{
"epoch": 0.14514779460733251,
"grad_norm": 0.03446267917752266,
"learning_rate": 9.754562239306902e-05,
"loss": 0.010301560401916503,
"memory(GiB)": 5.71,
"step": 17000,
"train_speed(iter/s)": 1.08225
},
{
"epoch": 0.14941684738990113,
"grad_norm": 0.035218626260757446,
"learning_rate": 9.732244976901965e-05,
"loss": 0.010299022674560548,
"memory(GiB)": 5.71,
"step": 17500,
"train_speed(iter/s)": 1.088656
},
{
"epoch": 0.15368590017246972,
"grad_norm": 0.034140028059482574,
"learning_rate": 9.708984560487677e-05,
"loss": 0.010085094451904296,
"memory(GiB)": 5.71,
"step": 18000,
"train_speed(iter/s)": 1.094769
},
{
"epoch": 0.15795495295503834,
"grad_norm": 0.040227197110652924,
"learning_rate": 9.684785625951468e-05,
"loss": 0.009981593132019044,
"memory(GiB)": 5.71,
"step": 18500,
"train_speed(iter/s)": 1.100633
},
{
"epoch": 0.16222400573760695,
"grad_norm": 0.03376320004463196,
"learning_rate": 9.659652996230917e-05,
"loss": 0.009874713897705079,
"memory(GiB)": 5.71,
"step": 19000,
"train_speed(iter/s)": 1.106243
},
{
"epoch": 0.16649305852017554,
"grad_norm": 0.03337237238883972,
"learning_rate": 9.633591680352522e-05,
"loss": 0.009621439933776855,
"memory(GiB)": 5.71,
"step": 19500,
"train_speed(iter/s)": 1.111407
},
{
"epoch": 0.17076211130274416,
"grad_norm": 0.03156784921884537,
"learning_rate": 9.606606872433384e-05,
"loss": 0.01175856876373291,
"memory(GiB)": 5.73,
"step": 20000,
"train_speed(iter/s)": 1.116578
},
{
"epoch": 0.17503116408531275,
"grad_norm": 0.06789804250001907,
"learning_rate": 9.578703950645998e-05,
"loss": 0.008876850128173828,
"memory(GiB)": 5.73,
"step": 20500,
"train_speed(iter/s)": 1.121386
},
{
"epoch": 0.17930021686788136,
"grad_norm": 0.029559865593910217,
"learning_rate": 9.549888476146366e-05,
"loss": 0.008808825492858887,
"memory(GiB)": 5.73,
"step": 21000,
"train_speed(iter/s)": 1.126144
},
{
"epoch": 0.18356926965044995,
"grad_norm": 0.027149997651576996,
"learning_rate": 9.52016619196564e-05,
"loss": 0.008746042251586914,
"memory(GiB)": 5.73,
"step": 21500,
"train_speed(iter/s)": 1.130706
},
{
"epoch": 0.18783832243301857,
"grad_norm": 0.031270887702703476,
"learning_rate": 9.489543021865507e-05,
"loss": 0.008727970123291016,
"memory(GiB)": 5.73,
"step": 22000,
"train_speed(iter/s)": 1.135093
},
{
"epoch": 0.19210737521558716,
"grad_norm": 0.03134565427899361,
"learning_rate": 9.458025069157563e-05,
"loss": 0.008822738647460937,
"memory(GiB)": 5.73,
"step": 22500,
"train_speed(iter/s)": 1.139318
},
{
"epoch": 0.19637642799815577,
"grad_norm": 0.02970048598945141,
"learning_rate": 9.425618615486908e-05,
"loss": 0.008724775314331055,
"memory(GiB)": 5.73,
"step": 23000,
"train_speed(iter/s)": 1.143306
},
{
"epoch": 0.20064548078072436,
"grad_norm": 0.037413984537124634,
"learning_rate": 9.392330119580186e-05,
"loss": 0.008617961883544922,
"memory(GiB)": 5.73,
"step": 23500,
"train_speed(iter/s)": 1.147167
},
{
"epoch": 0.20491453356329298,
"grad_norm": 0.031085532158613205,
"learning_rate": 9.358166215958333e-05,
"loss": 0.008613507270812988,
"memory(GiB)": 5.73,
"step": 24000,
"train_speed(iter/s)": 1.150974
},
{
"epoch": 0.2091835863458616,
"grad_norm": 0.030068758875131607,
"learning_rate": 9.323133713614297e-05,
"loss": 0.008516620635986329,
"memory(GiB)": 5.73,
"step": 24500,
"train_speed(iter/s)": 1.154635
},
{
"epoch": 0.21345263912843018,
"grad_norm": 0.030049536377191544,
"learning_rate": 9.287239594655976e-05,
"loss": 0.00915114688873291,
"memory(GiB)": 5.73,
"step": 25000,
"train_speed(iter/s)": 1.158172
},
{
"epoch": 0.2177216919109988,
"grad_norm": 0.03236347809433937,
"learning_rate": 9.250491012914668e-05,
"loss": 0.008387946128845214,
"memory(GiB)": 5.73,
"step": 25500,
"train_speed(iter/s)": 1.161598
},
{
"epoch": 0.2219907446935674,
"grad_norm": 0.028227701783180237,
"learning_rate": 9.212895292519276e-05,
"loss": 0.008091423034667969,
"memory(GiB)": 5.73,
"step": 26000,
"train_speed(iter/s)": 1.164914
},
{
"epoch": 0.226259797476136,
"grad_norm": 0.026642831042408943,
"learning_rate": 9.17445992643658e-05,
"loss": 0.008073025703430176,
"memory(GiB)": 5.73,
"step": 26500,
"train_speed(iter/s)": 1.168129
},
{
"epoch": 0.2305288502587046,
"grad_norm": 0.029216019436717033,
"learning_rate": 9.135192574977873e-05,
"loss": 0.008088951110839843,
"memory(GiB)": 5.73,
"step": 27000,
"train_speed(iter/s)": 1.169593
},
{
"epoch": 0.2347979030412732,
"grad_norm": 0.02682262659072876,
"learning_rate": 9.09510106427222e-05,
"loss": 0.007971211433410645,
"memory(GiB)": 5.73,
"step": 27500,
"train_speed(iter/s)": 1.172618
},
{
"epoch": 0.2390669558238418,
"grad_norm": 0.027296727523207664,
"learning_rate": 9.054193384706688e-05,
"loss": 0.007928550243377686,
"memory(GiB)": 5.73,
"step": 28000,
"train_speed(iter/s)": 1.165921
},
{
"epoch": 0.24333600860641041,
"grad_norm": 0.03066374734044075,
"learning_rate": 9.012477689333834e-05,
"loss": 0.007805256366729736,
"memory(GiB)": 5.73,
"step": 28500,
"train_speed(iter/s)": 1.155767
},
{
"epoch": 0.247605061388979,
"grad_norm": 0.027467776089906693,
"learning_rate": 8.96996229224676e-05,
"loss": 0.007825798034667968,
"memory(GiB)": 5.73,
"step": 29000,
"train_speed(iter/s)": 1.147611
},
{
"epoch": 0.2518741141715476,
"grad_norm": 0.028940001502633095,
"learning_rate": 8.926655666922102e-05,
"loss": 0.007748476028442383,
"memory(GiB)": 5.73,
"step": 29500,
"train_speed(iter/s)": 1.139656
},
{
"epoch": 0.2561431669541162,
"grad_norm": 0.0281364805996418,
"learning_rate": 8.882566444531216e-05,
"loss": 0.007644564628601074,
"memory(GiB)": 5.73,
"step": 30000,
"train_speed(iter/s)": 1.131607
},
{
"epoch": 0.2604122197366848,
"grad_norm": 0.03272758424282074,
"learning_rate": 8.837703412219962e-05,
"loss": 0.007614383697509766,
"memory(GiB)": 5.73,
"step": 30500,
"train_speed(iter/s)": 1.123907
},
{
"epoch": 0.26468127251925344,
"grad_norm": 0.033637482672929764,
"learning_rate": 8.7920755113574e-05,
"loss": 0.007485725402832031,
"memory(GiB)": 5.73,
"step": 31000,
"train_speed(iter/s)": 1.118652
},
{
"epoch": 0.26895032530182206,
"grad_norm": 0.028677962720394135,
"learning_rate": 8.745691835753724e-05,
"loss": 0.007466458320617676,
"memory(GiB)": 5.73,
"step": 31500,
"train_speed(iter/s)": 1.114283
},
{
"epoch": 0.2732193780843906,
"grad_norm": 0.026501238346099854,
"learning_rate": 8.698561629847851e-05,
"loss": 0.00739455795288086,
"memory(GiB)": 5.73,
"step": 32000,
"train_speed(iter/s)": 1.110807
},
{
"epoch": 0.27748843086695923,
"grad_norm": 0.03185174614191055,
"learning_rate": 8.650694286864957e-05,
"loss": 0.007317279815673828,
"memory(GiB)": 5.73,
"step": 32500,
"train_speed(iter/s)": 1.109657
},
{
"epoch": 0.28175748364952785,
"grad_norm": 0.031619079411029816,
"learning_rate": 8.602099346944379e-05,
"loss": 0.007236574649810791,
"memory(GiB)": 5.73,
"step": 33000,
"train_speed(iter/s)": 1.109762
},
{
"epoch": 0.28602653643209647,
"grad_norm": 0.028590602800250053,
"learning_rate": 8.552786495238226e-05,
"loss": 0.00712824535369873,
"memory(GiB)": 5.73,
"step": 33500,
"train_speed(iter/s)": 1.10959
},
{
"epoch": 0.29029558921466503,
"grad_norm": 0.026914609596133232,
"learning_rate": 8.502765559981091e-05,
"loss": 0.007133237838745117,
"memory(GiB)": 5.73,
"step": 34000,
"train_speed(iter/s)": 1.108437
},
{
"epoch": 0.29456464199723364,
"grad_norm": 0.03186658397316933,
"learning_rate": 8.452046510531258e-05,
"loss": 0.00705194091796875,
"memory(GiB)": 5.73,
"step": 34500,
"train_speed(iter/s)": 1.111376
},
{
"epoch": 0.29883369477980226,
"grad_norm": 0.022866345942020416,
"learning_rate": 8.400639455383754e-05,
"loss": 0.006991560935974121,
"memory(GiB)": 5.73,
"step": 35000,
"train_speed(iter/s)": 1.103171
},
{
"epoch": 0.3031027475623709,
"grad_norm": 0.027075253427028656,
"learning_rate": 8.348554640155709e-05,
"loss": 0.006916217803955078,
"memory(GiB)": 5.73,
"step": 35500,
"train_speed(iter/s)": 1.094784
},
{
"epoch": 0.30737180034493944,
"grad_norm": 0.027598075568675995,
"learning_rate": 8.295802445544345e-05,
"loss": 0.0068712844848632815,
"memory(GiB)": 5.73,
"step": 36000,
"train_speed(iter/s)": 1.087037
},
{
"epoch": 0.31164085312750806,
"grad_norm": 0.022508256137371063,
"learning_rate": 8.242393385258083e-05,
"loss": 0.006878099918365479,
"memory(GiB)": 5.73,
"step": 36500,
"train_speed(iter/s)": 1.080097
},
{
"epoch": 0.31590990591007667,
"grad_norm": 0.02388549968600273,
"learning_rate": 8.188338103921109e-05,
"loss": 0.006974416732788086,
"memory(GiB)": 5.73,
"step": 37000,
"train_speed(iter/s)": 1.073446
},
{
"epoch": 0.3201789586926453,
"grad_norm": 0.025459734722971916,
"learning_rate": 8.13364737495187e-05,
"loss": 0.0067239184379577635,
"memory(GiB)": 5.73,
"step": 37500,
"train_speed(iter/s)": 1.067642
},
{
"epoch": 0.3244480114752139,
"grad_norm": 0.023768454790115356,
"learning_rate": 8.078332098415881e-05,
"loss": 0.006635515213012695,
"memory(GiB)": 5.73,
"step": 38000,
"train_speed(iter/s)": 1.067385
},
{
"epoch": 0.32871706425778247,
"grad_norm": 0.028179064393043518,
"learning_rate": 8.022403298853317e-05,
"loss": 0.00661515998840332,
"memory(GiB)": 5.73,
"step": 38500,
"train_speed(iter/s)": 1.067485
},
{
"epoch": 0.3329861170403511,
"grad_norm": 0.026124022901058197,
"learning_rate": 7.965872123081765e-05,
"loss": 0.006523737907409668,
"memory(GiB)": 5.73,
"step": 39000,
"train_speed(iter/s)": 1.067501
},
{
"epoch": 0.3372551698229197,
"grad_norm": 0.02668868564069271,
"learning_rate": 7.908749837974632e-05,
"loss": 0.006474626541137695,
"memory(GiB)": 5.73,
"step": 39500,
"train_speed(iter/s)": 1.067703
},
{
"epoch": 0.3415242226054883,
"grad_norm": 0.02441154234111309,
"learning_rate": 7.851047828215611e-05,
"loss": 0.006419078826904297,
"memory(GiB)": 5.73,
"step": 40000,
"train_speed(iter/s)": 1.06808
},
{
"epoch": 0.3457932753880569,
"grad_norm": 0.0254750307649374,
"learning_rate": 7.792777594029674e-05,
"loss": 0.006350691795349121,
"memory(GiB)": 5.73,
"step": 40500,
"train_speed(iter/s)": 1.068683
},
{
"epoch": 0.3500623281706255,
"grad_norm": 0.027933409437537193,
"learning_rate": 7.73395074889103e-05,
"loss": 0.006355803966522217,
"memory(GiB)": 5.73,
"step": 41000,
"train_speed(iter/s)": 1.064808
},
{
"epoch": 0.3543313809531941,
"grad_norm": 0.022553391754627228,
"learning_rate": 7.67457901720852e-05,
"loss": 0.006336944103240967,
"memory(GiB)": 5.73,
"step": 41500,
"train_speed(iter/s)": 1.060284
},
{
"epoch": 0.3586004337357627,
"grad_norm": 0.027581321075558662,
"learning_rate": 7.614674231988903e-05,
"loss": 0.00619974422454834,
"memory(GiB)": 5.73,
"step": 42000,
"train_speed(iter/s)": 1.055658
},
{
"epoch": 0.3628694865183313,
"grad_norm": 0.02141967974603176,
"learning_rate": 7.554248332478485e-05,
"loss": 0.006249521732330322,
"memory(GiB)": 5.73,
"step": 42500,
"train_speed(iter/s)": 1.051341
},
{
"epoch": 0.3671385393008999,
"grad_norm": 0.025843387469649315,
"learning_rate": 7.49331336178358e-05,
"loss": 0.006162589550018311,
"memory(GiB)": 5.73,
"step": 43000,
"train_speed(iter/s)": 1.046866
},
{
"epoch": 0.3714075920834685,
"grad_norm": 0.02431940846145153,
"learning_rate": 7.431881464470293e-05,
"loss": 0.0060729503631591795,
"memory(GiB)": 5.73,
"step": 43500,
"train_speed(iter/s)": 1.042554
},
{
"epoch": 0.37567664486603713,
"grad_norm": 0.0244905948638916,
"learning_rate": 7.369964884144047e-05,
"loss": 0.006033665180206299,
"memory(GiB)": 5.73,
"step": 44000,
"train_speed(iter/s)": 1.041578
},
{
"epoch": 0.37994569764860575,
"grad_norm": 0.02309691719710827,
"learning_rate": 7.307575961009385e-05,
"loss": 0.006005731582641602,
"memory(GiB)": 5.73,
"step": 44500,
"train_speed(iter/s)": 1.041875
},
{
"epoch": 0.3842147504311743,
"grad_norm": 0.023321352899074554,
"learning_rate": 7.24472712941053e-05,
"loss": 0.005931224346160889,
"memory(GiB)": 5.73,
"step": 45000,
"train_speed(iter/s)": 1.044453
},
{
"epoch": 0.38848380321374293,
"grad_norm": 0.024199847131967545,
"learning_rate": 7.181430915353171e-05,
"loss": 0.0059114408493041995,
"memory(GiB)": 5.73,
"step": 45500,
"train_speed(iter/s)": 1.047147
},
{
"epoch": 0.39275285599631155,
"grad_norm": 0.02660815231502056,
"learning_rate": 7.117699934007987e-05,
"loss": 0.005867915630340576,
"memory(GiB)": 5.73,
"step": 46000,
"train_speed(iter/s)": 1.049798
},
{
"epoch": 0.39702190877888016,
"grad_norm": 0.02538706362247467,
"learning_rate": 7.053546887196391e-05,
"loss": 0.005895719528198242,
"memory(GiB)": 5.73,
"step": 46500,
"train_speed(iter/s)": 1.052318
},
{
"epoch": 0.4012909615614487,
"grad_norm": 0.023992260918021202,
"learning_rate": 6.988984560859009e-05,
"loss": 0.005823767662048339,
"memory(GiB)": 5.73,
"step": 47000,
"train_speed(iter/s)": 1.054874
},
{
"epoch": 0.40556001434401734,
"grad_norm": 0.024961460381746292,
"learning_rate": 6.924025822507398e-05,
"loss": 0.005796549797058105,
"memory(GiB)": 5.73,
"step": 47500,
"train_speed(iter/s)": 1.057392
},
{
"epoch": 0.40982906712658596,
"grad_norm": 0.026839323341846466,
"learning_rate": 6.858683618659509e-05,
"loss": 0.0057229394912719726,
"memory(GiB)": 5.73,
"step": 48000,
"train_speed(iter/s)": 1.059871
},
{
"epoch": 0.41409811990915457,
"grad_norm": 0.026930488646030426,
"learning_rate": 6.792970972259381e-05,
"loss": 0.005688785552978515,
"memory(GiB)": 5.73,
"step": 48500,
"train_speed(iter/s)": 1.062309
},
{
"epoch": 0.4183671726917232,
"grad_norm": 0.024773526936769485,
"learning_rate": 6.726900980081639e-05,
"loss": 0.005612356185913086,
"memory(GiB)": 5.73,
"step": 49000,
"train_speed(iter/s)": 1.06471
},
{
"epoch": 0.42263622547429175,
"grad_norm": 0.025835830718278885,
"learning_rate": 6.660486810121244e-05,
"loss": 0.005570381164550781,
"memory(GiB)": 5.73,
"step": 49500,
"train_speed(iter/s)": 1.067072
},
{
"epoch": 0.42690527825686037,
"grad_norm": 0.028116557747125626,
"learning_rate": 6.593741698969073e-05,
"loss": 0.005553098201751709,
"memory(GiB)": 5.73,
"step": 50000,
"train_speed(iter/s)": 1.069395
},
{
"epoch": 0.431174331039429,
"grad_norm": 0.026658741757273674,
"learning_rate": 6.526678949173808e-05,
"loss": 0.005453477859497071,
"memory(GiB)": 5.73,
"step": 50500,
"train_speed(iter/s)": 1.065559
},
{
"epoch": 0.4354433838219976,
"grad_norm": 0.02522198110818863,
"learning_rate": 6.459311926590695e-05,
"loss": 0.005405562877655029,
"memory(GiB)": 5.73,
"step": 51000,
"train_speed(iter/s)": 1.061202
},
{
"epoch": 0.43971243660456616,
"grad_norm": 0.019938671961426735,
"learning_rate": 6.391654057717676e-05,
"loss": 0.005375346183776855,
"memory(GiB)": 5.73,
"step": 51500,
"train_speed(iter/s)": 1.05697
},
{
"epoch": 0.4439814893871348,
"grad_norm": 0.02449255809187889,
"learning_rate": 6.32371882701944e-05,
"loss": 0.00538975715637207,
"memory(GiB)": 5.73,
"step": 52000,
"train_speed(iter/s)": 1.053086
},
{
"epoch": 0.4482505421697034,
"grad_norm": 0.027349578216671944,
"learning_rate": 6.25551977423992e-05,
"loss": 0.005338613510131836,
"memory(GiB)": 5.73,
"step": 52500,
"train_speed(iter/s)": 1.049655
},
{
"epoch": 0.452519594952272,
"grad_norm": 0.02677008882164955,
"learning_rate": 6.187070491703767e-05,
"loss": 0.005392338752746582,
"memory(GiB)": 5.73,
"step": 53000,
"train_speed(iter/s)": 1.046434
},
{
"epoch": 0.45678864773484057,
"grad_norm": 0.021387379616498947,
"learning_rate": 6.118384621607356e-05,
"loss": 0.0052757196426391605,
"memory(GiB)": 5.73,
"step": 53500,
"train_speed(iter/s)": 1.043484
},
{
"epoch": 0.4610577005174092,
"grad_norm": 0.021920237690210342,
"learning_rate": 6.0494758532998397e-05,
"loss": 0.0052754092216491695,
"memory(GiB)": 5.73,
"step": 54000,
"train_speed(iter/s)": 1.040652
},
{
"epoch": 0.4653267532999778,
"grad_norm": 0.02255011908710003,
"learning_rate": 5.980357920554813e-05,
"loss": 0.005176177024841308,
"memory(GiB)": 5.73,
"step": 54500,
"train_speed(iter/s)": 1.03761
},
{
"epoch": 0.4695958060825464,
"grad_norm": 0.023933693766593933,
"learning_rate": 5.91104459883312e-05,
"loss": 0.00518220043182373,
"memory(GiB)": 5.73,
"step": 55000,
"train_speed(iter/s)": 1.03464
},
{
"epoch": 0.47386485886511504,
"grad_norm": 0.02640974149107933,
"learning_rate": 5.8415497025373545e-05,
"loss": 0.0051289405822753905,
"memory(GiB)": 5.73,
"step": 55500,
"train_speed(iter/s)": 1.032562
},
{
"epoch": 0.4781339116476836,
"grad_norm": 0.027417296543717384,
"learning_rate": 5.771887082258598e-05,
"loss": 0.005091516494750976,
"memory(GiB)": 5.73,
"step": 56000,
"train_speed(iter/s)": 1.031309
},
{
"epoch": 0.4824029644302522,
"grad_norm": 0.02626318484544754,
"learning_rate": 5.7020706220159446e-05,
"loss": 0.005014698505401611,
"memory(GiB)": 5.73,
"step": 56500,
"train_speed(iter/s)": 1.030475
},
{
"epoch": 0.48667201721282083,
"grad_norm": 0.022486470639705658,
"learning_rate": 5.6321142364893655e-05,
"loss": 0.00502289867401123,
"memory(GiB)": 5.73,
"step": 57000,
"train_speed(iter/s)": 1.029867
},
{
"epoch": 0.49094106999538945,
"grad_norm": 0.024762239307165146,
"learning_rate": 5.562031868246459e-05,
"loss": 0.004976710319519043,
"memory(GiB)": 5.73,
"step": 57500,
"train_speed(iter/s)": 1.029133
},
{
"epoch": 0.495210122777958,
"grad_norm": 0.02197747305035591,
"learning_rate": 5.49183748496365e-05,
"loss": 0.004930309295654297,
"memory(GiB)": 5.73,
"step": 58000,
"train_speed(iter/s)": 1.03126
},
{
"epoch": 0.4994791755605266,
"grad_norm": 0.017993444576859474,
"learning_rate": 5.421545076642376e-05,
"loss": 0.004885564804077149,
"memory(GiB)": 5.73,
"step": 58500,
"train_speed(iter/s)": 1.033407
},
{
"epoch": 0.5037482283430952,
"grad_norm": 0.023290056735277176,
"learning_rate": 5.351168652820825e-05,
"loss": 0.004815481662750244,
"memory(GiB)": 5.73,
"step": 59000,
"train_speed(iter/s)": 1.035534
},
{
"epoch": 0.5080172811256638,
"grad_norm": 0.02278745174407959,
"learning_rate": 5.2807222397817946e-05,
"loss": 0.0048018951416015625,
"memory(GiB)": 5.73,
"step": 59500,
"train_speed(iter/s)": 1.037635
},
{
"epoch": 0.5122863339082324,
"grad_norm": 0.01709616929292679,
"learning_rate": 5.210219877757185e-05,
"loss": 0.004790943622589111,
"memory(GiB)": 5.73,
"step": 60000,
"train_speed(iter/s)": 1.039708
},
{
"epoch": 0.516555386690801,
"grad_norm": 0.024141253903508186,
"learning_rate": 5.139675618129741e-05,
"loss": 0.0047971105575561526,
"memory(GiB)": 5.73,
"step": 60500,
"train_speed(iter/s)": 1.041714
},
{
"epoch": 0.5208244394733696,
"grad_norm": 0.025403697043657303,
"learning_rate": 5.069103520632558e-05,
"loss": 0.0046922645568847655,
"memory(GiB)": 5.73,
"step": 61000,
"train_speed(iter/s)": 1.043735
},
{
"epoch": 0.5250934922559383,
"grad_norm": 0.023161958903074265,
"learning_rate": 4.998517650546916e-05,
"loss": 0.0046929998397827145,
"memory(GiB)": 5.73,
"step": 61500,
"train_speed(iter/s)": 1.045718
},
{
"epoch": 0.5293625450385069,
"grad_norm": 0.022052627056837082,
"learning_rate": 4.927932075899032e-05,
"loss": 0.004638696193695068,
"memory(GiB)": 5.73,
"step": 62000,
"train_speed(iter/s)": 1.043334
},
{
"epoch": 0.5336315978210755,
"grad_norm": 0.023017114028334618,
"learning_rate": 4.857360864656229e-05,
"loss": 0.004680471420288086,
"memory(GiB)": 5.73,
"step": 62500,
"train_speed(iter/s)": 1.041039
},
{
"epoch": 0.5379006506036441,
"grad_norm": 0.023162037134170532,
"learning_rate": 4.7868180819231614e-05,
"loss": 0.004635006904602051,
"memory(GiB)": 5.73,
"step": 63000,
"train_speed(iter/s)": 1.039508
},
{
"epoch": 0.5421697033862126,
"grad_norm": 0.02154356613755226,
"learning_rate": 4.7163177871385713e-05,
"loss": 0.004594725131988525,
"memory(GiB)": 5.73,
"step": 63500,
"train_speed(iter/s)": 1.037869
},
{
"epoch": 0.5464387561687812,
"grad_norm": 0.024489399045705795,
"learning_rate": 4.6458740312731915e-05,
"loss": 0.004505970001220703,
"memory(GiB)": 5.73,
"step": 64000,
"train_speed(iter/s)": 1.036001
},
{
"epoch": 0.5507078089513499,
"grad_norm": 0.02230563387274742,
"learning_rate": 4.575500854029343e-05,
"loss": 0.004512208938598633,
"memory(GiB)": 5.73,
"step": 64500,
"train_speed(iter/s)": 1.034042
},
{
"epoch": 0.5549768617339185,
"grad_norm": 0.024222563952207565,
"learning_rate": 4.5052122810427655e-05,
"loss": 0.004453976154327393,
"memory(GiB)": 5.73,
"step": 65000,
"train_speed(iter/s)": 1.033383
},
{
"epoch": 0.5592459145164871,
"grad_norm": 0.024108612909913063,
"learning_rate": 4.435022321087251e-05,
"loss": 0.004433969497680664,
"memory(GiB)": 5.73,
"step": 65500,
"train_speed(iter/s)": 1.031504
},
{
"epoch": 0.5635149672990557,
"grad_norm": 0.02404128573834896,
"learning_rate": 4.3649449632826524e-05,
"loss": 0.004369840621948242,
"memory(GiB)": 5.73,
"step": 66000,
"train_speed(iter/s)": 1.029515
},
{
"epoch": 0.5677840200816243,
"grad_norm": 0.02122694067656994,
"learning_rate": 4.294994174306796e-05,
"loss": 0.00436569881439209,
"memory(GiB)": 5.73,
"step": 66500,
"train_speed(iter/s)": 1.027863
},
{
"epoch": 0.5720530728641929,
"grad_norm": 0.02231895923614502,
"learning_rate": 4.2251838956118646e-05,
"loss": 0.004324491500854492,
"memory(GiB)": 5.73,
"step": 67000,
"train_speed(iter/s)": 1.026447
},
{
"epoch": 0.5763221256467616,
"grad_norm": 0.01995609700679779,
"learning_rate": 4.1555280406458243e-05,
"loss": 0.004273086071014404,
"memory(GiB)": 5.73,
"step": 67500,
"train_speed(iter/s)": 1.025095
},
{
"epoch": 0.5805911784293301,
"grad_norm": 0.023028602823615074,
"learning_rate": 4.086040492079418e-05,
"loss": 0.004247576713562012,
"memory(GiB)": 5.73,
"step": 68000,
"train_speed(iter/s)": 1.024105
},
{
"epoch": 0.5848602312118987,
"grad_norm": 0.02473682351410389,
"learning_rate": 4.016735099039299e-05,
"loss": 0.004212839603424072,
"memory(GiB)": 5.73,
"step": 68500,
"train_speed(iter/s)": 1.025833
},
{
"epoch": 0.5891292839944673,
"grad_norm": 0.02591153420507908,
"learning_rate": 3.947625674347842e-05,
"loss": 0.004188227653503418,
"memory(GiB)": 5.73,
"step": 69000,
"train_speed(iter/s)": 1.027676
},
{
"epoch": 0.5933983367770359,
"grad_norm": 0.022372225299477577,
"learning_rate": 3.878725991770206e-05,
"loss": 0.00420154619216919,
"memory(GiB)": 5.73,
"step": 69500,
"train_speed(iter/s)": 1.029494
},
{
"epoch": 0.5976673895596045,
"grad_norm": 0.020848704501986504,
"learning_rate": 3.810049783269169e-05,
"loss": 0.004149648189544677,
"memory(GiB)": 5.73,
"step": 70000,
"train_speed(iter/s)": 1.031265
},
{
"epoch": 0.6019364423421731,
"grad_norm": 0.020646043121814728,
"learning_rate": 3.7416107362682874e-05,
"loss": 0.004120903968811035,
"memory(GiB)": 5.73,
"step": 70500,
"train_speed(iter/s)": 1.03288
},
{
"epoch": 0.6062054951247418,
"grad_norm": 0.02318960428237915,
"learning_rate": 3.673422490923957e-05,
"loss": 0.004070096492767334,
"memory(GiB)": 5.73,
"step": 71000,
"train_speed(iter/s)": 1.031156
},
{
"epoch": 0.6104745479073104,
"grad_norm": 0.01929691806435585,
"learning_rate": 3.605498637406871e-05,
"loss": 0.0040385212898254395,
"memory(GiB)": 5.73,
"step": 71500,
"train_speed(iter/s)": 1.029013
},
{
"epoch": 0.6147436006898789,
"grad_norm": 0.0221713837236166,
"learning_rate": 3.5378527131934415e-05,
"loss": 0.004040939807891846,
"memory(GiB)": 5.73,
"step": 72000,
"train_speed(iter/s)": 1.027147
},
{
"epoch": 0.6190126534724475,
"grad_norm": 0.026295281946659088,
"learning_rate": 3.470498200367745e-05,
"loss": 0.003968184471130371,
"memory(GiB)": 5.73,
"step": 72500,
"train_speed(iter/s)": 1.025599
},
{
"epoch": 0.6232817062550161,
"grad_norm": 0.022878218442201614,
"learning_rate": 3.403448522934484e-05,
"loss": 0.00394676160812378,
"memory(GiB)": 5.73,
"step": 73000,
"train_speed(iter/s)": 1.024666
},
{
"epoch": 0.6275507590375847,
"grad_norm": 0.017653649672865868,
"learning_rate": 3.3367170441435326e-05,
"loss": 0.003906076669692993,
"memory(GiB)": 5.73,
"step": 73500,
"train_speed(iter/s)": 1.023811
},
{
"epoch": 0.6318198118201533,
"grad_norm": 0.021848097443580627,
"learning_rate": 3.270317063826594e-05,
"loss": 0.0038814377784729005,
"memory(GiB)": 5.73,
"step": 74000,
"train_speed(iter/s)": 1.023855
},
{
"epoch": 0.636088864602722,
"grad_norm": 0.022029753774404526,
"learning_rate": 3.204261815746496e-05,
"loss": 0.003879170894622803,
"memory(GiB)": 5.73,
"step": 74500,
"train_speed(iter/s)": 1.024885
},
{
"epoch": 0.6403579173852906,
"grad_norm": 0.02403407171368599,
"learning_rate": 3.1385644649596445e-05,
"loss": 0.003841569900512695,
"memory(GiB)": 5.73,
"step": 75000,
"train_speed(iter/s)": 1.026583
},
{
"epoch": 0.6446269701678592,
"grad_norm": 0.023609979078173637,
"learning_rate": 3.073238105192191e-05,
"loss": 0.0038005766868591307,
"memory(GiB)": 5.73,
"step": 75500,
"train_speed(iter/s)": 1.028269
},
{
"epoch": 0.6488960229504278,
"grad_norm": 0.01760837249457836,
"learning_rate": 3.008295756230397e-05,
"loss": 0.0037522752285003664,
"memory(GiB)": 5.73,
"step": 76000,
"train_speed(iter/s)": 1.029938
},
{
"epoch": 0.6531650757329963,
"grad_norm": 0.021126747131347656,
"learning_rate": 2.943750361325739e-05,
"loss": 0.003741382837295532,
"memory(GiB)": 5.73,
"step": 76500,
"train_speed(iter/s)": 1.03159
},
{
"epoch": 0.6574341285155649,
"grad_norm": 0.02278253622353077,
"learning_rate": 2.879614784615281e-05,
"loss": 0.0037315216064453126,
"memory(GiB)": 5.73,
"step": 77000,
"train_speed(iter/s)": 1.033224
},
{
"epoch": 0.6617031812981335,
"grad_norm": 0.023599898442626,
"learning_rate": 2.8159018085577936e-05,
"loss": 0.0037167372703552247,
"memory(GiB)": 5.73,
"step": 77500,
"train_speed(iter/s)": 1.034809
},
{
"epoch": 0.6659722340807022,
"grad_norm": 0.02341049537062645,
"learning_rate": 2.752624131386169e-05,
"loss": 0.0036745924949645997,
"memory(GiB)": 5.73,
"step": 78000,
"train_speed(iter/s)": 1.036412
},
{
"epoch": 0.6702412868632708,
"grad_norm": 0.021224385127425194,
"learning_rate": 2.68979436457661e-05,
"loss": 0.0036270735263824465,
"memory(GiB)": 5.73,
"step": 78500,
"train_speed(iter/s)": 1.035171
},
{
"epoch": 0.6745103396458394,
"grad_norm": 0.02113701030611992,
"learning_rate": 2.6274250303351277e-05,
"loss": 0.003653192758560181,
"memory(GiB)": 5.73,
"step": 79000,
"train_speed(iter/s)": 1.033322
},
{
"epoch": 0.678779392428408,
"grad_norm": 0.02273395285010338,
"learning_rate": 2.5655285591018053e-05,
"loss": 0.003600950241088867,
"memory(GiB)": 5.73,
"step": 79500,
"train_speed(iter/s)": 1.03175
},
{
"epoch": 0.6830484452109766,
"grad_norm": 0.01843477226793766,
"learning_rate": 2.5041172870733688e-05,
"loss": 0.003576310634613037,
"memory(GiB)": 5.73,
"step": 80000,
"train_speed(iter/s)": 1.030732
},
{
"epoch": 0.6873174979935452,
"grad_norm": 0.021858269348740578,
"learning_rate": 2.4432034537445504e-05,
"loss": 0.0035532989501953125,
"memory(GiB)": 5.73,
"step": 80500,
"train_speed(iter/s)": 1.030234
},
{
"epoch": 0.6915865507761138,
"grad_norm": 0.02257091924548149,
"learning_rate": 2.3827991994686855e-05,
"loss": 0.0034713072776794435,
"memory(GiB)": 5.73,
"step": 81000,
"train_speed(iter/s)": 1.029758
},
{
"epoch": 0.6958556035586824,
"grad_norm": 0.02086802013218403,
"learning_rate": 2.3229165630381254e-05,
"loss": 0.0035013933181762694,
"memory(GiB)": 5.73,
"step": 81500,
"train_speed(iter/s)": 1.029855
},
{
"epoch": 0.700124656341251,
"grad_norm": 0.02159390039741993,
"learning_rate": 2.263567479284836e-05,
"loss": 0.0034512946605682374,
"memory(GiB)": 5.73,
"step": 82000,
"train_speed(iter/s)": 1.031396
},
{
"epoch": 0.7043937091238196,
"grad_norm": 0.02238837257027626,
"learning_rate": 2.2047637767017594e-05,
"loss": 0.0034342200756073,
"memory(GiB)": 5.73,
"step": 82500,
"train_speed(iter/s)": 1.029892
},
{
"epoch": 0.7086627619063882,
"grad_norm": 0.022957606241106987,
"learning_rate": 2.1465171750853386e-05,
"loss": 0.003412749528884888,
"memory(GiB)": 5.73,
"step": 83000,
"train_speed(iter/s)": 1.02831
},
{
"epoch": 0.7129318146889568,
"grad_norm": 0.019689923152327538,
"learning_rate": 2.0888392831997238e-05,
"loss": 0.00341141414642334,
"memory(GiB)": 5.73,
"step": 83500,
"train_speed(iter/s)": 1.026956
},
{
"epoch": 0.7172008674715254,
"grad_norm": 0.023503178730607033,
"learning_rate": 2.03174159646311e-05,
"loss": 0.0033840060234069822,
"memory(GiB)": 5.73,
"step": 84000,
"train_speed(iter/s)": 1.025661
},
{
"epoch": 0.7214699202540941,
"grad_norm": 0.02037668041884899,
"learning_rate": 1.9752354946566354e-05,
"loss": 0.0033505113124847412,
"memory(GiB)": 5.73,
"step": 84500,
"train_speed(iter/s)": 1.02442
},
{
"epoch": 0.7257389730366626,
"grad_norm": 0.025251047685742378,
"learning_rate": 1.9193322396563785e-05,
"loss": 0.0033303892612457277,
"memory(GiB)": 5.73,
"step": 85000,
"train_speed(iter/s)": 1.023539
},
{
"epoch": 0.7300080258192312,
"grad_norm": 0.01954658329486847,
"learning_rate": 1.8640429731887998e-05,
"loss": 0.003283708333969116,
"memory(GiB)": 5.73,
"step": 85500,
"train_speed(iter/s)": 1.023297
},
{
"epoch": 0.7342770786017998,
"grad_norm": 0.0188963171094656,
"learning_rate": 1.809378714610167e-05,
"loss": 0.003271867275238037,
"memory(GiB)": 5.73,
"step": 86000,
"train_speed(iter/s)": 1.023267
},
{
"epoch": 0.7385461313843684,
"grad_norm": 0.02359418198466301,
"learning_rate": 1.7553503587103505e-05,
"loss": 0.0032482266426086424,
"memory(GiB)": 5.73,
"step": 86500,
"train_speed(iter/s)": 1.023221
},
{
"epoch": 0.742815184166937,
"grad_norm": 0.020105060189962387,
"learning_rate": 1.701968673541458e-05,
"loss": 0.003266146183013916,
"memory(GiB)": 5.73,
"step": 87000,
"train_speed(iter/s)": 1.023208
},
{
"epoch": 0.7470842369495057,
"grad_norm": 0.018053073436021805,
"learning_rate": 1.649244298271714e-05,
"loss": 0.003204747676849365,
"memory(GiB)": 5.73,
"step": 87500,
"train_speed(iter/s)": 1.023206
},
{
"epoch": 0.7513532897320743,
"grad_norm": 0.021992964670062065,
"learning_rate": 1.5971877410650354e-05,
"loss": 0.0031999170780181883,
"memory(GiB)": 5.73,
"step": 88000,
"train_speed(iter/s)": 1.023363
},
{
"epoch": 0.7556223425146429,
"grad_norm": 0.022893013432621956,
"learning_rate": 1.545809376986727e-05,
"loss": 0.0031597645282745363,
"memory(GiB)": 5.73,
"step": 88500,
"train_speed(iter/s)": 1.023915
},
{
"epoch": 0.7598913952972115,
"grad_norm": 0.021105078980326653,
"learning_rate": 1.4951194459356693e-05,
"loss": 0.003171279191970825,
"memory(GiB)": 5.73,
"step": 89000,
"train_speed(iter/s)": 1.024754
},
{
"epoch": 0.76416044807978,
"grad_norm": 0.01919909007847309,
"learning_rate": 1.445128050603493e-05,
"loss": 0.0031237168312072752,
"memory(GiB)": 5.73,
"step": 89500,
"train_speed(iter/s)": 1.026165
},
{
"epoch": 0.7684295008623486,
"grad_norm": 0.02016974799335003,
"learning_rate": 1.39584515446106e-05,
"loss": 0.003100724697113037,
"memory(GiB)": 5.73,
"step": 90000,
"train_speed(iter/s)": 1.027578
}
],
"logging_steps": 500,
"max_steps": 117122,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 10000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.069758781932123e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}