{
"best_global_step": 1700,
"best_metric": 0.011158278211951256,
"best_model_checkpoint": "/output/x3d_finetuned_model/checkpoint-1500",
"epoch": 3.0,
"eval_steps": 100,
"global_step": 1782,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016842105263157894,
"grad_norm": 0.2378891110420227,
"learning_rate": 1.8e-05,
"loss": 0.8901,
"step": 10
},
{
"epoch": 0.03368421052631579,
"grad_norm": 0.18149839341640472,
"learning_rate": 3.8e-05,
"loss": 0.7969,
"step": 20
},
{
"epoch": 0.05052631578947368,
"grad_norm": 0.1667049527168274,
"learning_rate": 5.8e-05,
"loss": 0.6762,
"step": 30
},
{
"epoch": 0.06736842105263158,
"grad_norm": 0.15869387984275818,
"learning_rate": 7.800000000000001e-05,
"loss": 0.5698,
"step": 40
},
{
"epoch": 0.08421052631578947,
"grad_norm": 0.16361284255981445,
"learning_rate": 9.8e-05,
"loss": 0.5225,
"step": 50
},
{
"epoch": 0.10105263157894737,
"grad_norm": 0.14269448816776276,
"learning_rate": 0.000118,
"loss": 0.465,
"step": 60
},
{
"epoch": 0.11789473684210526,
"grad_norm": 0.18330006301403046,
"learning_rate": 0.000138,
"loss": 0.4562,
"step": 70
},
{
"epoch": 0.13473684210526315,
"grad_norm": 0.20621822774410248,
"learning_rate": 0.00015800000000000002,
"loss": 0.4291,
"step": 80
},
{
"epoch": 0.15157894736842106,
"grad_norm": 0.21223391592502594,
"learning_rate": 0.00017800000000000002,
"loss": 0.3939,
"step": 90
},
{
"epoch": 0.16842105263157894,
"grad_norm": 0.19762997329235077,
"learning_rate": 0.00019800000000000002,
"loss": 0.3753,
"step": 100
},
{
"epoch": 0.16842105263157894,
"eval_loss": 0.35540345311164856,
"eval_runtime": 224.1519,
"eval_samples_per_second": 2.231,
"eval_steps_per_second": 2.231,
"step": 100
},
{
"epoch": 0.18526315789473685,
"grad_norm": 0.18955057859420776,
"learning_rate": 0.00019998587161376442,
"loss": 0.3407,
"step": 110
},
{
"epoch": 0.20210526315789473,
"grad_norm": 0.24190108478069305,
"learning_rate": 0.00019993703787315803,
"loss": 0.346,
"step": 120
},
{
"epoch": 0.21894736842105264,
"grad_norm": 0.1914621889591217,
"learning_rate": 0.00019985334138511237,
"loss": 0.3288,
"step": 130
},
{
"epoch": 0.23578947368421052,
"grad_norm": 0.1650117188692093,
"learning_rate": 0.00019973481134690592,
"loss": 0.2942,
"step": 140
},
{
"epoch": 0.25263157894736843,
"grad_norm": 0.22298051416873932,
"learning_rate": 0.00019958148910740063,
"loss": 0.2904,
"step": 150
},
{
"epoch": 0.2694736842105263,
"grad_norm": 0.24592551589012146,
"learning_rate": 0.0001993934281526176,
"loss": 0.2918,
"step": 160
},
{
"epoch": 0.2863157894736842,
"grad_norm": 0.19984672963619232,
"learning_rate": 0.00019917069408707848,
"loss": 0.2461,
"step": 170
},
{
"epoch": 0.3031578947368421,
"grad_norm": 0.22812137007713318,
"learning_rate": 0.00019891336461091966,
"loss": 0.2577,
"step": 180
},
{
"epoch": 0.32,
"grad_norm": 0.21963553130626678,
"learning_rate": 0.0001986215294927868,
"loss": 0.2267,
"step": 190
},
{
"epoch": 0.3368421052631579,
"grad_norm": 0.24446611106395721,
"learning_rate": 0.00019829529053851919,
"loss": 0.2412,
"step": 200
},
{
"epoch": 0.3368421052631579,
"eval_loss": 0.23451316356658936,
"eval_runtime": 224.1089,
"eval_samples_per_second": 2.231,
"eval_steps_per_second": 2.231,
"step": 200
},
{
"epoch": 0.35368421052631577,
"grad_norm": 0.19283448159694672,
"learning_rate": 0.00019793476155563507,
"loss": 0.2324,
"step": 210
},
{
"epoch": 0.3705263157894737,
"grad_norm": 0.2276701033115387,
"learning_rate": 0.00019754006831363042,
"loss": 0.2055,
"step": 220
},
{
"epoch": 0.3873684210526316,
"grad_norm": 0.28741592168807983,
"learning_rate": 0.00019711134850010432,
"loss": 0.2068,
"step": 230
},
{
"epoch": 0.40421052631578946,
"grad_norm": 0.2498358190059662,
"learning_rate": 0.00019664875167272735,
"loss": 0.1945,
"step": 240
},
{
"epoch": 0.42105263157894735,
"grad_norm": 0.21750546991825104,
"learning_rate": 0.00019615243920706853,
"loss": 0.1811,
"step": 250
},
{
"epoch": 0.4378947368421053,
"grad_norm": 0.22553850710391998,
"learning_rate": 0.00019562258424030016,
"loss": 0.18,
"step": 260
},
{
"epoch": 0.45473684210526316,
"grad_norm": 0.2156210094690323,
"learning_rate": 0.00019505937161079927,
"loss": 0.1623,
"step": 270
},
{
"epoch": 0.47157894736842104,
"grad_norm": 0.3091757595539093,
"learning_rate": 0.0001944629977936673,
"loss": 0.1635,
"step": 280
},
{
"epoch": 0.4884210526315789,
"grad_norm": 0.23857639729976654,
"learning_rate": 0.0001938336708321904,
"loss": 0.1458,
"step": 290
},
{
"epoch": 0.5052631578947369,
"grad_norm": 0.2157812863588333,
"learning_rate": 0.0001931716102652641,
"loss": 0.1602,
"step": 300
},
{
"epoch": 0.5052631578947369,
"eval_loss": 0.14944693446159363,
"eval_runtime": 224.1903,
"eval_samples_per_second": 2.23,
"eval_steps_per_second": 2.23,
"step": 300
},
{
"epoch": 0.5221052631578947,
"grad_norm": 0.3185126483440399,
"learning_rate": 0.00019247704705080773,
"loss": 0.1531,
"step": 310
},
{
"epoch": 0.5389473684210526,
"grad_norm": 0.2601878345012665,
"learning_rate": 0.00019175022348519554,
"loss": 0.1403,
"step": 320
},
{
"epoch": 0.5557894736842105,
"grad_norm": 0.22638513147830963,
"learning_rate": 0.00019099139311873215,
"loss": 0.1176,
"step": 330
},
{
"epoch": 0.5726315789473684,
"grad_norm": 0.2953122854232788,
"learning_rate": 0.00019020082066720245,
"loss": 0.1192,
"step": 340
},
{
"epoch": 0.5894736842105263,
"grad_norm": 0.300016313791275,
"learning_rate": 0.00018937878191952606,
"loss": 0.1225,
"step": 350
},
{
"epoch": 0.6063157894736843,
"grad_norm": 0.21774296462535858,
"learning_rate": 0.0001885255636415494,
"loss": 0.1051,
"step": 360
},
{
"epoch": 0.6231578947368421,
"grad_norm": 0.19208350777626038,
"learning_rate": 0.00018764146347600793,
"loss": 0.0987,
"step": 370
},
{
"epoch": 0.64,
"grad_norm": 0.2341982126235962,
"learning_rate": 0.00018672678983869463,
"loss": 0.1071,
"step": 380
},
{
"epoch": 0.6568421052631579,
"grad_norm": 0.22858911752700806,
"learning_rate": 0.00018578186181086997,
"loss": 0.0983,
"step": 390
},
{
"epoch": 0.6736842105263158,
"grad_norm": 0.238422691822052,
"learning_rate": 0.0001848070090279512,
"loss": 0.0997,
"step": 400
},
{
"epoch": 0.6736842105263158,
"eval_loss": 0.09719416499137878,
"eval_runtime": 224.0873,
"eval_samples_per_second": 2.231,
"eval_steps_per_second": 2.231,
"step": 400
},
{
"epoch": 0.6905263157894737,
"grad_norm": 0.23865894973278046,
"learning_rate": 0.00018380257156452013,
"loss": 0.0894,
"step": 410
},
{
"epoch": 0.7073684210526315,
"grad_norm": 0.21594248712062836,
"learning_rate": 0.00018276889981568906,
"loss": 0.0871,
"step": 420
},
{
"epoch": 0.7242105263157895,
"grad_norm": 0.26310935616493225,
"learning_rate": 0.0001817063543748664,
"loss": 0.0933,
"step": 430
},
{
"epoch": 0.7410526315789474,
"grad_norm": 0.20604920387268066,
"learning_rate": 0.00018061530590796475,
"loss": 0.0832,
"step": 440
},
{
"epoch": 0.7578947368421053,
"grad_norm": 0.2668643593788147,
"learning_rate": 0.0001794961350240951,
"loss": 0.0822,
"step": 450
},
{
"epoch": 0.7747368421052632,
"grad_norm": 0.1784186065196991,
"learning_rate": 0.00017834923214279268,
"loss": 0.0783,
"step": 460
},
{
"epoch": 0.791578947368421,
"grad_norm": 0.2419017106294632,
"learning_rate": 0.00017717499735781983,
"loss": 0.0727,
"step": 470
},
{
"epoch": 0.8084210526315789,
"grad_norm": 0.25795748829841614,
"learning_rate": 0.00017597384029759475,
"loss": 0.0765,
"step": 480
},
{
"epoch": 0.8252631578947368,
"grad_norm": 0.25549963116645813,
"learning_rate": 0.00017474617998229373,
"loss": 0.0678,
"step": 490
},
{
"epoch": 0.8421052631578947,
"grad_norm": 0.3272872865200043,
"learning_rate": 0.00017349244467767708,
"loss": 0.0741,
"step": 500
},
{
"epoch": 0.8421052631578947,
"eval_loss": 0.0688847005367279,
"eval_runtime": 224.164,
"eval_samples_per_second": 2.231,
"eval_steps_per_second": 2.231,
"step": 500
},
{
"epoch": 0.8589473684210527,
"grad_norm": 0.20569807291030884,
"learning_rate": 0.0001722130717456901,
"loss": 0.0648,
"step": 510
},
{
"epoch": 0.8757894736842106,
"grad_norm": 0.2218085080385208,
"learning_rate": 0.00017090850749189022,
"loss": 0.0659,
"step": 520
},
{
"epoch": 0.8926315789473684,
"grad_norm": 0.18904639780521393,
"learning_rate": 0.00016957920700975504,
"loss": 0.0636,
"step": 530
},
{
"epoch": 0.9094736842105263,
"grad_norm": 0.24751749634742737,
"learning_rate": 0.00016822563402192397,
"loss": 0.0612,
"step": 540
},
{
"epoch": 0.9263157894736842,
"grad_norm": 0.19475597143173218,
"learning_rate": 0.00016684826071843004,
"loss": 0.065,
"step": 550
},
{
"epoch": 0.9431578947368421,
"grad_norm": 0.2451489120721817,
"learning_rate": 0.00016544756759197778,
"loss": 0.0588,
"step": 560
},
{
"epoch": 0.96,
"grad_norm": 0.16222433745861053,
"learning_rate": 0.0001640240432703247,
"loss": 0.0585,
"step": 570
},
{
"epoch": 0.9768421052631578,
"grad_norm": 0.13785243034362793,
"learning_rate": 0.0001625781843458249,
"loss": 0.0606,
"step": 580
},
{
"epoch": 0.9936842105263158,
"grad_norm": 0.1904134750366211,
"learning_rate": 0.00016111049520219428,
"loss": 0.0491,
"step": 590
},
{
"epoch": 1.0101052631578948,
"grad_norm": 0.19465769827365875,
"learning_rate": 0.00015962148783855767,
"loss": 0.0535,
"step": 600
},
{
"epoch": 1.0101052631578948,
"eval_loss": 0.053230032324790955,
"eval_runtime": 224.1426,
"eval_samples_per_second": 2.231,
"eval_steps_per_second": 2.231,
"step": 600
},
{
"epoch": 1.0269473684210526,
"grad_norm": 0.19065628945827484,
"learning_rate": 0.00015811168169083955,
"loss": 0.05,
"step": 610
},
{
"epoch": 1.0437894736842106,
"grad_norm": 0.20831789076328278,
"learning_rate": 0.00015658160345056023,
"loss": 0.0493,
"step": 620
},
{
"epoch": 1.0606315789473684,
"grad_norm": 0.12762245535850525,
"learning_rate": 0.000155031786881101,
"loss": 0.0461,
"step": 630
},
{
"epoch": 1.0774736842105264,
"grad_norm": 0.14034190773963928,
"learning_rate": 0.0001534627726315023,
"loss": 0.0437,
"step": 640
},
{
"epoch": 1.0943157894736841,
"grad_norm": 0.21654851734638214,
"learning_rate": 0.00015187510804786012,
"loss": 0.0417,
"step": 650
},
{
"epoch": 1.1111578947368421,
"grad_norm": 0.21853309869766235,
"learning_rate": 0.0001502693469823855,
"loss": 0.0416,
"step": 660
},
{
"epoch": 1.1280000000000001,
"grad_norm": 0.1370641142129898,
"learning_rate": 0.00014864604960019504,
"loss": 0.0425,
"step": 670
},
{
"epoch": 1.1448421052631579,
"grad_norm": 0.2183496206998825,
"learning_rate": 0.00014700578218389892,
"loss": 0.0392,
"step": 680
},
{
"epoch": 1.1616842105263159,
"grad_norm": 0.17953278124332428,
"learning_rate": 0.00014534911693605464,
"loss": 0.0407,
"step": 690
},
{
"epoch": 1.1785263157894736,
"grad_norm": 0.17893248796463013,
"learning_rate": 0.00014367663177955605,
"loss": 0.04,
"step": 700
},
{
"epoch": 1.1785263157894736,
"eval_loss": 0.04435127228498459,
"eval_runtime": 224.0979,
"eval_samples_per_second": 2.231,
"eval_steps_per_second": 2.231,
"step": 700
},
{
"epoch": 1.1953684210526316,
"grad_norm": 0.15027707815170288,
"learning_rate": 0.00014198891015602646,
"loss": 0.042,
"step": 710
},
{
"epoch": 1.2122105263157894,
"grad_norm": 0.2271127700805664,
"learning_rate": 0.00014028654082228682,
"loss": 0.0413,
"step": 720
},
{
"epoch": 1.2290526315789474,
"grad_norm": 0.16139471530914307,
"learning_rate": 0.00013857011764496942,
"loss": 0.0415,
"step": 730
},
{
"epoch": 1.2458947368421052,
"grad_norm": 0.17120322585105896,
"learning_rate": 0.0001368402393933495,
"loss": 0.0444,
"step": 740
},
{
"epoch": 1.2627368421052632,
"grad_norm": 0.12668217718601227,
"learning_rate": 0.00013509750953046606,
"loss": 0.0359,
"step": 750
},
{
"epoch": 1.279578947368421,
"grad_norm": 0.1568315625190735,
"learning_rate": 0.00013334253600260563,
"loss": 0.0375,
"step": 760
},
{
"epoch": 1.296421052631579,
"grad_norm": 0.1669214367866516,
"learning_rate": 0.00013157593102722205,
"loss": 0.0402,
"step": 770
},
{
"epoch": 1.313263157894737,
"grad_norm": 0.1692507565021515,
"learning_rate": 0.00012979831087936598,
"loss": 0.0353,
"step": 780
},
{
"epoch": 1.3301052631578947,
"grad_norm": 0.16156762838363647,
"learning_rate": 0.00012801029567669926,
"loss": 0.0328,
"step": 790
},
{
"epoch": 1.3469473684210527,
"grad_norm": 0.14826713502407074,
"learning_rate": 0.00012621250916316864,
"loss": 0.0342,
"step": 800
},
{
"epoch": 1.3469473684210527,
"eval_loss": 0.036476925015449524,
"eval_runtime": 224.0704,
"eval_samples_per_second": 2.231,
"eval_steps_per_second": 2.231,
"step": 800
},
{
"epoch": 1.3637894736842107,
"grad_norm": 0.10823637247085571,
"learning_rate": 0.00012440557849141448,
"loss": 0.032,
"step": 810
},
{
"epoch": 1.3806315789473684,
"grad_norm": 0.1409875601530075,
"learning_rate": 0.0001225901340039903,
"loss": 0.0359,
"step": 820
},
{
"epoch": 1.3974736842105262,
"grad_norm": 0.174799844622612,
"learning_rate": 0.00012076680901346981,
"loss": 0.034,
"step": 830
},
{
"epoch": 1.4143157894736842,
"grad_norm": 0.14141526818275452,
"learning_rate": 0.00011893623958151732,
"loss": 0.0283,
"step": 840
},
{
"epoch": 1.4311578947368422,
"grad_norm": 0.24268820881843567,
"learning_rate": 0.00011709906429699977,
"loss": 0.0333,
"step": 850
},
{
"epoch": 1.448,
"grad_norm": 0.11979827284812927,
"learning_rate": 0.00011525592405321666,
"loss": 0.0286,
"step": 860
},
{
"epoch": 1.464842105263158,
"grad_norm": 0.12912440299987793,
"learning_rate": 0.00011340746182432633,
"loss": 0.0291,
"step": 870
},
{
"epoch": 1.4816842105263157,
"grad_norm": 0.11993803083896637,
"learning_rate": 0.00011155432244104627,
"loss": 0.0241,
"step": 880
},
{
"epoch": 1.4985263157894737,
"grad_norm": 0.18345041573047638,
"learning_rate": 0.00010969715236570586,
"loss": 0.0293,
"step": 890
},
{
"epoch": 1.5153684210526315,
"grad_norm": 0.1686432659626007,
"learning_rate": 0.00010783659946672964,
"loss": 0.0304,
"step": 900
},
{
"epoch": 1.5153684210526315,
"eval_loss": 0.031156836077570915,
"eval_runtime": 224.1681,
"eval_samples_per_second": 2.23,
"eval_steps_per_second": 2.23,
"step": 900
},
{
"epoch": 1.5322105263157895,
"grad_norm": 0.10962788015604019,
"learning_rate": 0.00010597331279263057,
"loss": 0.0283,
"step": 910
},
{
"epoch": 1.5490526315789475,
"grad_norm": 0.1417897343635559,
"learning_rate": 0.00010410794234559107,
"loss": 0.0271,
"step": 920
},
{
"epoch": 1.5658947368421052,
"grad_norm": 0.1917513608932495,
"learning_rate": 0.00010224113885471183,
"loss": 0.0248,
"step": 930
},
{
"epoch": 1.582736842105263,
"grad_norm": 0.1399783492088318,
"learning_rate": 0.00010037355354900661,
"loss": 0.0239,
"step": 940
},
{
"epoch": 1.5995789473684212,
"grad_norm": 0.11135450005531311,
"learning_rate": 9.850583793022319e-05,
"loss": 0.0244,
"step": 950
},
{
"epoch": 1.616421052631579,
"grad_norm": 0.11133313179016113,
"learning_rate": 9.663864354556858e-05,
"loss": 0.0283,
"step": 960
},
{
"epoch": 1.6332631578947368,
"grad_norm": 0.1533219963312149,
"learning_rate": 9.477262176041883e-05,
"loss": 0.0266,
"step": 970
},
{
"epoch": 1.6501052631578947,
"grad_norm": 0.13194741308689117,
"learning_rate": 9.290842353109195e-05,
"loss": 0.0265,
"step": 980
},
{
"epoch": 1.6669473684210527,
"grad_norm": 0.11617199331521988,
"learning_rate": 9.104669917776383e-05,
"loss": 0.0225,
"step": 990
},
{
"epoch": 1.6837894736842105,
"grad_norm": 0.11771562695503235,
"learning_rate": 8.918809815760585e-05,
"loss": 0.0233,
"step": 1000
},
{
"epoch": 1.6837894736842105,
"eval_loss": 0.02681094780564308,
"eval_runtime": 224.15,
"eval_samples_per_second": 2.231,
"eval_steps_per_second": 2.231,
"step": 1000
},
{
"epoch": 1.7006315789473683,
"grad_norm": 0.1440223902463913,
"learning_rate": 8.733326883822356e-05,
"loss": 0.0252,
"step": 1010
},
{
"epoch": 1.7174736842105263,
"grad_norm": 0.15115946531295776,
"learning_rate": 8.548285827147568e-05,
"loss": 0.0244,
"step": 1020
},
{
"epoch": 1.7343157894736843,
"grad_norm": 0.10699623823165894,
"learning_rate": 8.363751196775172e-05,
"loss": 0.0216,
"step": 1030
},
{
"epoch": 1.751157894736842,
"grad_norm": 0.10448223352432251,
"learning_rate": 8.17978736707877e-05,
"loss": 0.0227,
"step": 1040
},
{
"epoch": 1.768,
"grad_norm": 0.12821614742279053,
"learning_rate": 7.996458513309787e-05,
"loss": 0.0198,
"step": 1050
},
{
"epoch": 1.784842105263158,
"grad_norm": 0.1228765919804573,
"learning_rate": 7.813828589210138e-05,
"loss": 0.023,
"step": 1060
},
{
"epoch": 1.8016842105263158,
"grad_norm": 0.1160237044095993,
"learning_rate": 7.631961304702124e-05,
"loss": 0.0235,
"step": 1070
},
{
"epoch": 1.8185263157894735,
"grad_norm": 0.097357377409935,
"learning_rate": 7.450920103663443e-05,
"loss": 0.0219,
"step": 1080
},
{
"epoch": 1.8353684210526315,
"grad_norm": 0.169428288936615,
"learning_rate": 7.270768141794942e-05,
"loss": 0.0207,
"step": 1090
},
{
"epoch": 1.8522105263157895,
"grad_norm": 0.12443847209215164,
"learning_rate": 7.091568264588952e-05,
"loss": 0.0195,
"step": 1100
},
{
"epoch": 1.8522105263157895,
"eval_loss": 0.02066013775765896,
"eval_runtime": 224.1852,
"eval_samples_per_second": 2.23,
"eval_steps_per_second": 2.23,
"step": 1100
},
{
"epoch": 1.8690526315789473,
"grad_norm": 0.11752050369977951,
"learning_rate": 6.913382985405814e-05,
"loss": 0.0186,
"step": 1110
},
{
"epoch": 1.8858947368421053,
"grad_norm": 0.12359823286533356,
"learning_rate": 6.736274463666283e-05,
"loss": 0.0193,
"step": 1120
},
{
"epoch": 1.9027368421052633,
"grad_norm": 0.10313016176223755,
"learning_rate": 6.560304483167368e-05,
"loss": 0.0191,
"step": 1130
},
{
"epoch": 1.919578947368421,
"grad_norm": 0.12194198369979858,
"learning_rate": 6.385534430529275e-05,
"loss": 0.0189,
"step": 1140
},
{
"epoch": 1.9364210526315788,
"grad_norm": 0.1172875314950943,
"learning_rate": 6.212025273780815e-05,
"loss": 0.0155,
"step": 1150
},
{
"epoch": 1.9532631578947368,
"grad_norm": 0.09735918790102005,
"learning_rate": 6.039837541090913e-05,
"loss": 0.0169,
"step": 1160
},
{
"epoch": 1.9701052631578948,
"grad_norm": 0.09256832301616669,
"learning_rate": 5.869031299653518e-05,
"loss": 0.0174,
"step": 1170
},
{
"epoch": 1.9869473684210526,
"grad_norm": 0.10335566103458405,
"learning_rate": 5.699666134733349e-05,
"loss": 0.0171,
"step": 1180
},
{
"epoch": 2.0033684210526315,
"grad_norm": 0.10647857934236526,
"learning_rate": 5.5318011288797237e-05,
"loss": 0.0158,
"step": 1190
},
{
"epoch": 2.0202105263157897,
"grad_norm": 0.12479748576879501,
"learning_rate": 5.365494841315808e-05,
"loss": 0.0136,
"step": 1200
},
{
"epoch": 2.0202105263157897,
"eval_loss": 0.017488960176706314,
"eval_runtime": 224.1495,
"eval_samples_per_second": 2.231,
"eval_steps_per_second": 2.231,
"step": 1200
},
{
"epoch": 2.0370526315789474,
"grad_norm": 0.09349500387907028,
"learning_rate": 5.2008052875103886e-05,
"loss": 0.0137,
"step": 1210
},
{
"epoch": 2.053894736842105,
"grad_norm": 0.06680870801210403,
"learning_rate": 5.037789918939335e-05,
"loss": 0.0126,
"step": 1220
},
{
"epoch": 2.070736842105263,
"grad_norm": 0.09055491536855698,
"learning_rate": 4.876505603043826e-05,
"loss": 0.0128,
"step": 1230
},
{
"epoch": 2.087578947368421,
"grad_norm": 0.12399008870124817,
"learning_rate": 4.717008603392318e-05,
"loss": 0.0131,
"step": 1240
},
{
"epoch": 2.104421052631579,
"grad_norm": 0.09043820202350616,
"learning_rate": 4.559354560053135e-05,
"loss": 0.0118,
"step": 1250
},
{
"epoch": 2.1212631578947367,
"grad_norm": 0.07968771457672119,
"learning_rate": 4.4035984701846234e-05,
"loss": 0.0122,
"step": 1260
},
{
"epoch": 2.138105263157895,
"grad_norm": 0.07125072926282883,
"learning_rate": 4.2497946688495184e-05,
"loss": 0.0117,
"step": 1270
},
{
"epoch": 2.1549473684210527,
"grad_norm": 0.07947332412004471,
"learning_rate": 4.097996810060346e-05,
"loss": 0.0112,
"step": 1280
},
{
"epoch": 2.1717894736842105,
"grad_norm": 0.06901393830776215,
"learning_rate": 3.948257848062351e-05,
"loss": 0.0119,
"step": 1290
},
{
"epoch": 2.1886315789473683,
"grad_norm": 0.07422488927841187,
"learning_rate": 3.8006300188605705e-05,
"loss": 0.0111,
"step": 1300
},
{
"epoch": 2.1886315789473683,
"eval_loss": 0.015334118157625198,
"eval_runtime": 224.1161,
"eval_samples_per_second": 2.231,
"eval_steps_per_second": 2.231,
"step": 1300
},
{
"epoch": 2.2054736842105265,
"grad_norm": 0.10787979513406754,
"learning_rate": 3.655164821997451e-05,
"loss": 0.0123,
"step": 1310
},
{
"epoch": 2.2223157894736842,
"grad_norm": 0.07865067571401596,
"learning_rate": 3.5119130025874034e-05,
"loss": 0.0107,
"step": 1320
},
{
"epoch": 2.239157894736842,
"grad_norm": 0.07730992138385773,
"learning_rate": 3.3709245336144777e-05,
"loss": 0.0112,
"step": 1330
},
{
"epoch": 2.2560000000000002,
"grad_norm": 0.0723709836602211,
"learning_rate": 3.232248598499459e-05,
"loss": 0.0114,
"step": 1340
},
{
"epoch": 2.272842105263158,
"grad_norm": 0.08348905295133591,
"learning_rate": 3.09593357394236e-05,
"loss": 0.01,
"step": 1350
},
{
"epoch": 2.2896842105263158,
"grad_norm": 0.07650898396968842,
"learning_rate": 2.9620270130463667e-05,
"loss": 0.0109,
"step": 1360
},
{
"epoch": 2.3065263157894735,
"grad_norm": 0.06670452654361725,
"learning_rate": 2.8305756287290484e-05,
"loss": 0.0114,
"step": 1370
},
{
"epoch": 2.3233684210526317,
"grad_norm": 0.07205783575773239,
"learning_rate": 2.7016252774267192e-05,
"loss": 0.0109,
"step": 1380
},
{
"epoch": 2.3402105263157895,
"grad_norm": 0.07499977201223373,
"learning_rate": 2.575220943097526e-05,
"loss": 0.0113,
"step": 1390
},
{
"epoch": 2.3570526315789473,
"grad_norm": 0.07749755680561066,
"learning_rate": 2.4514067215289503e-05,
"loss": 0.0102,
"step": 1400
},
{
"epoch": 2.3570526315789473,
"eval_loss": 0.013460199348628521,
"eval_runtime": 224.1893,
"eval_samples_per_second": 2.23,
"eval_steps_per_second": 2.23,
"step": 1400
},
{
"epoch": 2.3738947368421055,
"grad_norm": 0.07490992546081543,
"learning_rate": 2.3302258049550974e-05,
"loss": 0.0103,
"step": 1410
},
{
"epoch": 2.3907368421052633,
"grad_norm": 0.07824037969112396,
"learning_rate": 2.2117204669892198e-05,
"loss": 0.0104,
"step": 1420
},
{
"epoch": 2.407578947368421,
"grad_norm": 0.07291096448898315,
"learning_rate": 2.0959320478766965e-05,
"loss": 0.0101,
"step": 1430
},
{
"epoch": 2.424421052631579,
"grad_norm": 0.09640032798051834,
"learning_rate": 1.9829009400735977e-05,
"loss": 0.011,
"step": 1440
},
{
"epoch": 2.441263157894737,
"grad_norm": 0.07188956439495087,
"learning_rate": 1.8726665741558923e-05,
"loss": 0.0105,
"step": 1450
},
{
"epoch": 2.458105263157895,
"grad_norm": 0.0596754252910614,
"learning_rate": 1.765267405064206e-05,
"loss": 0.009,
"step": 1460
},
{
"epoch": 2.4749473684210526,
"grad_norm": 0.0846128836274147,
"learning_rate": 1.6607408986889384e-05,
"loss": 0.0096,
"step": 1470
},
{
"epoch": 2.4917894736842103,
"grad_norm": 0.10189133137464523,
"learning_rate": 1.559123518800376e-05,
"loss": 0.0105,
"step": 1480
},
{
"epoch": 2.5086315789473685,
"grad_norm": 0.06304220855236053,
"learning_rate": 1.460450714328423e-05,
"loss": 0.0104,
"step": 1490
},
{
"epoch": 2.5254736842105263,
"grad_norm": 0.06267323344945908,
"learning_rate": 1.364756906996345e-05,
"loss": 0.0095,
"step": 1500
},
{
"epoch": 2.5254736842105263,
"eval_loss": 0.01212456077337265,
"eval_runtime": 224.2724,
"eval_samples_per_second": 2.229,
"eval_steps_per_second": 2.229,
"step": 1500
},
{
"epoch": 2.542315789473684,
"grad_norm": 0.09089558571577072,
"learning_rate": 1.2720754793128464e-05,
"loss": 0.0096,
"step": 1510
},
{
"epoch": 2.559157894736842,
"grad_norm": 0.0663435310125351,
"learning_rate": 1.1824387629266742e-05,
"loss": 0.0094,
"step": 1520
},
{
"epoch": 2.576,
"grad_norm": 0.06648425757884979,
"learning_rate": 1.0958780273478253e-05,
"loss": 0.0095,
"step": 1530
},
{
"epoch": 2.592842105263158,
"grad_norm": 0.09527198225259781,
"learning_rate": 1.0124234690392586e-05,
"loss": 0.0098,
"step": 1540
},
{
"epoch": 2.609684210526316,
"grad_norm": 0.08171885460615158,
"learning_rate": 9.321042008829562e-06,
"loss": 0.0095,
"step": 1550
},
{
"epoch": 2.626526315789474,
"grad_norm": 0.06150711327791214,
"learning_rate": 8.549482420239707e-06,
"loss": 0.0098,
"step": 1560
},
{
"epoch": 2.6433684210526316,
"grad_norm": 0.058401718735694885,
"learning_rate": 7.809825080960276e-06,
"loss": 0.0096,
"step": 1570
},
{
"epoch": 2.6602105263157894,
"grad_norm": 0.0481596402823925,
"learning_rate": 7.102328018320858e-06,
"loss": 0.0096,
"step": 1580
},
{
"epoch": 2.677052631578947,
"grad_norm": 0.07530257105827332,
"learning_rate": 6.4272380406313334e-06,
"loss": 0.0091,
"step": 1590
},
{
"epoch": 2.6938947368421053,
"grad_norm": 0.062257252633571625,
"learning_rate": 5.784790651083405e-06,
"loss": 0.0097,
"step": 1600
},
{
"epoch": 2.6938947368421053,
"eval_loss": 0.011450879275798798,
"eval_runtime": 224.0976,
"eval_samples_per_second": 2.231,
"eval_steps_per_second": 2.231,
"step": 1600
},
{
"epoch": 2.710736842105263,
"grad_norm": 0.0579669252038002,
"learning_rate": 5.175209965596084e-06,
"loss": 0.0094,
"step": 1610
},
{
"epoch": 2.7275789473684213,
"grad_norm": 0.06278079748153687,
"learning_rate": 4.598708634633442e-06,
"loss": 0.009,
"step": 1620
},
{
"epoch": 2.744421052631579,
"grad_norm": 0.05131611227989197,
"learning_rate": 4.055487769022137e-06,
"loss": 0.0095,
"step": 1630
},
{
"epoch": 2.761263157894737,
"grad_norm": 0.05207466334104538,
"learning_rate": 3.545736869794458e-06,
"loss": 0.0092,
"step": 1640
},
{
"epoch": 2.7781052631578946,
"grad_norm": 0.06517709046602249,
"learning_rate": 3.0696337620814364e-06,
"loss": 0.0091,
"step": 1650
},
{
"epoch": 2.7949473684210524,
"grad_norm": 0.06492120027542114,
"learning_rate": 2.6273445330791234e-06,
"loss": 0.0093,
"step": 1660
},
{
"epoch": 2.8117894736842106,
"grad_norm": 0.06431007385253906,
"learning_rate": 2.219023474109483e-06,
"loss": 0.0091,
"step": 1670
},
{
"epoch": 2.8286315789473684,
"grad_norm": 0.05567321926355362,
"learning_rate": 1.8448130267962992e-06,
"loss": 0.0087,
"step": 1680
},
{
"epoch": 2.845473684210526,
"grad_norm": 0.08136210590600967,
"learning_rate": 1.5048437333748833e-06,
"loss": 0.0094,
"step": 1690
},
{
"epoch": 2.8623157894736844,
"grad_norm": 0.08303703367710114,
"learning_rate": 1.1992341911527649e-06,
"loss": 0.0085,
"step": 1700
},
{
"epoch": 2.8623157894736844,
"eval_loss": 0.011158278211951256,
"eval_runtime": 224.1949,
"eval_samples_per_second": 2.23,
"eval_steps_per_second": 2.23,
"step": 1700
},
{
"epoch": 2.879157894736842,
"grad_norm": 0.05584060773253441,
"learning_rate": 9.280910111373553e-07,
"loss": 0.0083,
"step": 1710
},
{
"epoch": 2.896,
"grad_norm": 0.060032352805137634,
"learning_rate": 6.91508780844996e-07,
"loss": 0.0093,
"step": 1720
},
{
"epoch": 2.9128421052631577,
"grad_norm": 0.054556239396333694,
"learning_rate": 4.895700313044315e-07,
"loss": 0.0091,
"step": 1730
},
{
"epoch": 2.929684210526316,
"grad_norm": 0.05352412536740303,
"learning_rate": 3.223452082660394e-07,
"loss": 0.0089,
"step": 1740
},
{
"epoch": 2.9465263157894737,
"grad_norm": 0.0548105351626873,
"learning_rate": 1.898926476270546e-07,
"loss": 0.0083,
"step": 1750
},
{
"epoch": 2.9633684210526314,
"grad_norm": 0.0906362310051918,
"learning_rate": 9.225855508123715e-08,
"loss": 0.009,
"step": 1760
},
{
"epoch": 2.9802105263157896,
"grad_norm": 0.053922925144433975,
"learning_rate": 2.947699000021009e-08,
"loss": 0.0091,
"step": 1770
},
{
"epoch": 2.9970526315789474,
"grad_norm": 0.08347595483064651,
"learning_rate": 1.5698535518748892e-09,
"loss": 0.0086,
"step": 1780
}
],
"logging_steps": 10,
"max_steps": 1782,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.504463303573504e+18,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}