{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.196174595389897,
  "eval_steps": 5000,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00392349190779794,
      "grad_norm": 1.34375,
      "learning_rate": 1.153846153846154e-06,
      "logits/chosen": -2.9852147102355957,
      "logits/rejected": -2.9852118492126465,
      "logps/chosen": -0.2645491361618042,
      "logps/rejected": -3.2006759643554688,
      "loss": 0.2782,
      "odds_ratio_loss": 1.0742381811141968,
      "rewards/accuracies": 0.953125,
      "rewards/chosen": -0.02645491622388363,
      "rewards/margins": 0.29361265897750854,
      "rewards/rejected": -0.3200675845146179,
      "sft_loss": 0.17080938816070557,
      "step": 1
    },
    {
      "epoch": 0.00784698381559588,
      "grad_norm": 1.390625,
      "learning_rate": 2.307692307692308e-06,
      "logits/chosen": -2.9859261512756348,
      "logits/rejected": -2.9859182834625244,
      "logps/chosen": -0.2948899567127228,
      "logps/rejected": -3.0603623390197754,
      "loss": 0.3098,
      "odds_ratio_loss": 1.081099033355713,
      "rewards/accuracies": 0.947265625,
      "rewards/chosen": -0.029488995671272278,
      "rewards/margins": 0.27654725313186646,
      "rewards/rejected": -0.30603623390197754,
      "sft_loss": 0.20172670483589172,
      "step": 2
    },
    {
      "epoch": 0.011770475723393821,
      "grad_norm": 1.6484375,
      "learning_rate": 3.4615384615384617e-06,
      "logits/chosen": -2.9848945140838623,
      "logits/rejected": -2.984874963760376,
      "logps/chosen": -0.30154696106910706,
      "logps/rejected": -3.0100128650665283,
      "loss": 0.3208,
      "odds_ratio_loss": 1.2148220539093018,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.030154697597026825,
      "rewards/margins": 0.27084657549858093,
      "rewards/rejected": -0.30100125074386597,
      "sft_loss": 0.1993543654680252,
      "step": 3
    },
    {
      "epoch": 0.01569396763119176,
      "grad_norm": 1.453125,
      "learning_rate": 4.615384615384616e-06,
      "logits/chosen": -2.983187675476074,
      "logits/rejected": -2.983184576034546,
      "logps/chosen": -0.24826335906982422,
      "logps/rejected": -3.0838959217071533,
      "loss": 0.2632,
      "odds_ratio_loss": 1.0045406818389893,
      "rewards/accuracies": 0.9443359375,
      "rewards/chosen": -0.02482633665204048,
      "rewards/margins": 0.2835632264614105,
      "rewards/rejected": -0.3083895742893219,
      "sft_loss": 0.16278076171875,
      "step": 4
    },
    {
      "epoch": 0.0196174595389897,
      "grad_norm": 1.2890625,
      "learning_rate": 5.76923076923077e-06,
      "logits/chosen": -2.9843859672546387,
      "logits/rejected": -2.9843790531158447,
      "logps/chosen": -0.26491791009902954,
      "logps/rejected": -3.030116081237793,
      "loss": 0.2815,
      "odds_ratio_loss": 1.1153428554534912,
      "rewards/accuracies": 0.939453125,
      "rewards/chosen": -0.026491792872548103,
      "rewards/margins": 0.276519775390625,
      "rewards/rejected": -0.30301156640052795,
      "sft_loss": 0.16996388137340546,
      "step": 5
    },
    {
      "epoch": 0.023540951446787643,
      "grad_norm": 0.90234375,
      "learning_rate": 6.923076923076923e-06,
      "logits/chosen": -2.9830667972564697,
      "logits/rejected": -2.9830613136291504,
      "logps/chosen": -0.25957372784614563,
      "logps/rejected": -3.0386571884155273,
      "loss": 0.2731,
      "odds_ratio_loss": 1.0385775566101074,
      "rewards/accuracies": 0.9404296875,
      "rewards/chosen": -0.025957372039556503,
      "rewards/margins": 0.2779083251953125,
      "rewards/rejected": -0.3038657009601593,
      "sft_loss": 0.16922664642333984,
      "step": 6
    },
    {
      "epoch": 0.02746444335458558,
      "grad_norm": 1.2421875,
      "learning_rate": 8.076923076923077e-06,
      "logits/chosen": -2.9781432151794434,
      "logits/rejected": -2.9781363010406494,
      "logps/chosen": -0.24075695872306824,
      "logps/rejected": -3.1340367794036865,
      "loss": 0.2557,
      "odds_ratio_loss": 1.0184863805770874,
      "rewards/accuracies": 0.9462890625,
      "rewards/chosen": -0.024075698107481003,
      "rewards/margins": 0.289328008890152,
      "rewards/rejected": -0.3134036958217621,
      "sft_loss": 0.15385302901268005,
      "step": 7
    },
    {
      "epoch": 0.03138793526238352,
      "grad_norm": 0.84765625,
      "learning_rate": 9.230769230769232e-06,
      "logits/chosen": -2.9820947647094727,
      "logits/rejected": -2.982091188430786,
      "logps/chosen": -0.2752811908721924,
      "logps/rejected": -3.0845463275909424,
      "loss": 0.2907,
      "odds_ratio_loss": 1.0758346319198608,
      "rewards/accuracies": 0.9384765625,
      "rewards/chosen": -0.02752811834216118,
      "rewards/margins": 0.28092649579048157,
      "rewards/rejected": -0.30845463275909424,
      "sft_loss": 0.18306681513786316,
      "step": 8
    },
    {
      "epoch": 0.03531142717018146,
      "grad_norm": 1.109375,
      "learning_rate": 1.0384615384615384e-05,
      "logits/chosen": -2.980917453765869,
      "logits/rejected": -2.9809200763702393,
      "logps/chosen": -0.25007593631744385,
      "logps/rejected": -3.1210172176361084,
      "loss": 0.2663,
      "odds_ratio_loss": 0.9918112754821777,
      "rewards/accuracies": 0.9404296875,
      "rewards/chosen": -0.025007594376802444,
      "rewards/margins": 0.2870941162109375,
      "rewards/rejected": -0.31210172176361084,
      "sft_loss": 0.16709110140800476,
      "step": 9
    },
    {
      "epoch": 0.0392349190779794,
      "grad_norm": 0.98828125,
      "learning_rate": 1.153846153846154e-05,
      "logits/chosen": -2.976473569869995,
      "logits/rejected": -2.976468086242676,
      "logps/chosen": -0.23895932734012604,
      "logps/rejected": -3.23227596282959,
      "loss": 0.2498,
      "odds_ratio_loss": 0.9731241464614868,
      "rewards/accuracies": 0.951171875,
      "rewards/chosen": -0.023895932361483574,
      "rewards/margins": 0.2993316650390625,
      "rewards/rejected": -0.3232276141643524,
      "sft_loss": 0.1525370478630066,
      "step": 10
    },
    {
      "epoch": 0.04315841098577734,
      "grad_norm": 1.7890625,
      "learning_rate": 1.2692307692307693e-05,
      "logits/chosen": -2.973320960998535,
      "logits/rejected": -2.973318576812744,
      "logps/chosen": -0.2526458501815796,
      "logps/rejected": -3.23311448097229,
      "loss": 0.2657,
      "odds_ratio_loss": 1.0119987726211548,
      "rewards/accuracies": 0.9423828125,
      "rewards/chosen": -0.02526458539068699,
      "rewards/margins": 0.29804685711860657,
      "rewards/rejected": -0.323311448097229,
      "sft_loss": 0.16451705992221832,
      "step": 11
    },
    {
      "epoch": 0.047081902893575285,
      "grad_norm": 0.95703125,
      "learning_rate": 1.3846153846153847e-05,
      "logits/chosen": -2.9654839038848877,
      "logits/rejected": -2.965482711791992,
      "logps/chosen": -0.23671554028987885,
      "logps/rejected": -3.1521968841552734,
      "loss": 0.2482,
      "odds_ratio_loss": 0.9054768085479736,
      "rewards/accuracies": 0.9580078125,
      "rewards/chosen": -0.023671552538871765,
      "rewards/margins": 0.2915481626987457,
      "rewards/rejected": -0.3152197301387787,
      "sft_loss": 0.15767298638820648,
      "step": 12
    },
    {
      "epoch": 0.05100539480137322,
      "grad_norm": 1.078125,
      "learning_rate": 1.5e-05,
      "logits/chosen": -2.965122699737549,
      "logits/rejected": -2.965108633041382,
      "logps/chosen": -0.27447113394737244,
      "logps/rejected": -3.1599385738372803,
      "loss": 0.2891,
      "odds_ratio_loss": 0.9649019241333008,
      "rewards/accuracies": 0.9453125,
      "rewards/chosen": -0.027447111904621124,
      "rewards/margins": 0.28854674100875854,
      "rewards/rejected": -0.31599387526512146,
      "sft_loss": 0.19265753030776978,
      "step": 13
    },
    {
      "epoch": 0.05492888670917116,
      "grad_norm": 0.93359375,
      "learning_rate": 1.6153846153846154e-05,
      "logits/chosen": -2.9600794315338135,
      "logits/rejected": -2.9600796699523926,
      "logps/chosen": -0.248041033744812,
      "logps/rejected": -3.1299993991851807,
      "loss": 0.2603,
      "odds_ratio_loss": 0.9177720546722412,
      "rewards/accuracies": 0.9580078125,
      "rewards/chosen": -0.02480410598218441,
      "rewards/margins": 0.28819578886032104,
      "rewards/rejected": -0.3129999339580536,
      "sft_loss": 0.16850169003009796,
      "step": 14
    },
    {
      "epoch": 0.058852378616969105,
      "grad_norm": 0.76171875,
      "learning_rate": 1.7307692307692306e-05,
      "logits/chosen": -2.9557976722717285,
      "logits/rejected": -2.9557933807373047,
      "logps/chosen": -0.20853769779205322,
      "logps/rejected": -3.0895190238952637,
      "loss": 0.2192,
      "odds_ratio_loss": 0.8289995789527893,
      "rewards/accuracies": 0.9609375,
      "rewards/chosen": -0.020853767171502113,
      "rewards/margins": 0.28809815645217896,
      "rewards/rejected": -0.30895188450813293,
      "sft_loss": 0.13633377850055695,
      "step": 15
    },
    {
      "epoch": 0.06277587052476705,
      "grad_norm": 0.890625,
      "learning_rate": 1.8461538461538465e-05,
      "logits/chosen": -2.95212984085083,
      "logits/rejected": -2.952127456665039,
      "logps/chosen": -0.24015557765960693,
      "logps/rejected": -3.1777472496032715,
      "loss": 0.253,
      "odds_ratio_loss": 0.8741129040718079,
      "rewards/accuracies": 0.9541015625,
      "rewards/chosen": -0.024015557020902634,
      "rewards/margins": 0.29375916719436646,
      "rewards/rejected": -0.3177747428417206,
      "sft_loss": 0.16556787490844727,
      "step": 16
    },
    {
      "epoch": 0.06669936243256498,
      "grad_norm": 0.66015625,
      "learning_rate": 1.9615384615384617e-05,
      "logits/chosen": -2.9487290382385254,
      "logits/rejected": -2.948725461959839,
      "logps/chosen": -0.22024491429328918,
      "logps/rejected": -3.160766124725342,
      "loss": 0.2323,
      "odds_ratio_loss": 0.8908261656761169,
      "rewards/accuracies": 0.958984375,
      "rewards/chosen": -0.02202449180185795,
      "rewards/margins": 0.2940521240234375,
      "rewards/rejected": -0.3160766065120697,
      "sft_loss": 0.14318522810935974,
      "step": 17
    },
    {
      "epoch": 0.07062285434036292,
      "grad_norm": 0.59375,
      "learning_rate": 2.076923076923077e-05,
      "logits/chosen": -2.9467873573303223,
      "logits/rejected": -2.9467759132385254,
      "logps/chosen": -0.1970514953136444,
      "logps/rejected": -3.207763195037842,
      "loss": 0.2066,
      "odds_ratio_loss": 0.8174700140953064,
      "rewards/accuracies": 0.96484375,
      "rewards/chosen": -0.0197051502764225,
      "rewards/margins": 0.3010711669921875,
      "rewards/rejected": -0.3207763433456421,
      "sft_loss": 0.12490144371986389,
      "step": 18
    },
    {
      "epoch": 0.07454634624816087,
      "grad_norm": 1.0,
      "learning_rate": 2.1923076923076924e-05,
      "logits/chosen": -2.9435489177703857,
      "logits/rejected": -2.9435434341430664,
      "logps/chosen": -0.2126975655555725,
      "logps/rejected": -3.2167563438415527,
      "loss": 0.2224,
      "odds_ratio_loss": 0.8775886297225952,
      "rewards/accuracies": 0.966796875,
      "rewards/chosen": -0.02126975730061531,
      "rewards/margins": 0.30040591955184937,
      "rewards/rejected": -0.3216756284236908,
      "sft_loss": 0.13463406264781952,
      "step": 19
    },
    {
      "epoch": 0.0784698381559588,
      "grad_norm": 0.8203125,
      "learning_rate": 2.307692307692308e-05,
      "logits/chosen": -2.939420223236084,
      "logits/rejected": -2.939415216445923,
      "logps/chosen": -0.21315571665763855,
      "logps/rejected": -3.131246328353882,
      "loss": 0.2229,
      "odds_ratio_loss": 0.8950154781341553,
      "rewards/accuracies": 0.9619140625,
      "rewards/chosen": -0.021315572783350945,
      "rewards/margins": 0.29180908203125,
      "rewards/rejected": -0.3131246566772461,
      "sft_loss": 0.13337606191635132,
      "step": 20
    },
    {
      "epoch": 0.08239333006375674,
      "grad_norm": 0.69140625,
      "learning_rate": 2.423076923076923e-05,
      "logits/chosen": -2.9376261234283447,
      "logits/rejected": -2.9376211166381836,
      "logps/chosen": -0.2413405179977417,
      "logps/rejected": -3.1577987670898438,
      "loss": 0.2519,
      "odds_ratio_loss": 0.9613508582115173,
      "rewards/accuracies": 0.94921875,
      "rewards/chosen": -0.02413405105471611,
      "rewards/margins": 0.2916458249092102,
      "rewards/rejected": -0.3157798647880554,
      "sft_loss": 0.15580427646636963,
      "step": 21
    },
    {
      "epoch": 0.08631682197155469,
      "grad_norm": 1.125,
      "learning_rate": 2.5384615384615386e-05,
      "logits/chosen": -2.9448554515838623,
      "logits/rejected": -2.944854974746704,
      "logps/chosen": -0.2407849133014679,
      "logps/rejected": -3.2201244831085205,
      "loss": 0.252,
      "odds_ratio_loss": 0.9313341379165649,
      "rewards/accuracies": 0.962890625,
      "rewards/chosen": -0.02407849207520485,
      "rewards/margins": 0.297933965921402,
      "rewards/rejected": -0.32201245427131653,
      "sft_loss": 0.15885500609874725,
      "step": 22
    },
    {
      "epoch": 0.09024031387935262,
      "grad_norm": 1.4375,
      "learning_rate": 2.6538461538461538e-05,
      "logits/chosen": -2.946904182434082,
      "logits/rejected": -2.946908473968506,
      "logps/chosen": -0.2284041941165924,
      "logps/rejected": -3.2244067192077637,
      "loss": 0.242,
      "odds_ratio_loss": 1.0077147483825684,
      "rewards/accuracies": 0.947265625,
      "rewards/chosen": -0.02284042164683342,
      "rewards/margins": 0.2996002435684204,
      "rewards/rejected": -0.32244062423706055,
      "sft_loss": 0.14126919209957123,
      "step": 23
    },
    {
      "epoch": 0.09416380578715057,
      "grad_norm": 0.8046875,
      "learning_rate": 2.7692307692307694e-05,
      "logits/chosen": -2.9473867416381836,
      "logits/rejected": -2.947389602661133,
      "logps/chosen": -0.22177951037883759,
      "logps/rejected": -3.1969687938690186,
      "loss": 0.2362,
      "odds_ratio_loss": 0.8804333209991455,
      "rewards/accuracies": 0.94140625,
      "rewards/chosen": -0.02217794954776764,
      "rewards/margins": 0.29751890897750854,
      "rewards/rejected": -0.31969690322875977,
      "sft_loss": 0.14815130829811096,
      "step": 24
    },
    {
      "epoch": 0.0980872976949485,
      "grad_norm": 0.9921875,
      "learning_rate": 2.884615384615385e-05,
      "logits/chosen": -2.955061197280884,
      "logits/rejected": -2.9550623893737793,
      "logps/chosen": -0.22592617571353912,
      "logps/rejected": -3.314274549484253,
      "loss": 0.2371,
      "odds_ratio_loss": 0.9324527978897095,
      "rewards/accuracies": 0.962890625,
      "rewards/chosen": -0.022592617198824883,
      "rewards/margins": 0.30883485078811646,
      "rewards/rejected": -0.3314274549484253,
      "sft_loss": 0.14389845728874207,
      "step": 25
    },
    {
      "epoch": 0.10201078960274644,
      "grad_norm": 0.75,
      "learning_rate": 3e-05,
      "logits/chosen": -2.958292245864868,
      "logits/rejected": -2.958291530609131,
      "logps/chosen": -0.2314324676990509,
      "logps/rejected": -3.2401301860809326,
      "loss": 0.2422,
      "odds_ratio_loss": 0.9014456272125244,
      "rewards/accuracies": 0.955078125,
      "rewards/chosen": -0.02314324676990509,
      "rewards/margins": 0.30086976289749146,
      "rewards/rejected": -0.32401302456855774,
      "sft_loss": 0.15201067924499512,
      "step": 26
    },
    {
      "epoch": 0.10593428151054439,
      "grad_norm": 0.97265625,
      "learning_rate": 2.9998576083758987e-05,
      "logits/chosen": -2.970607280731201,
      "logits/rejected": -2.970606803894043,
      "logps/chosen": -0.23627859354019165,
      "logps/rejected": -3.213726043701172,
      "loss": 0.2479,
      "odds_ratio_loss": 0.8589389324188232,
      "rewards/accuracies": 0.955078125,
      "rewards/chosen": -0.023627860471606255,
      "rewards/margins": 0.2977447509765625,
      "rewards/rejected": -0.3213726282119751,
      "sft_loss": 0.16202089190483093,
      "step": 27
    },
    {
      "epoch": 0.10985777341834232,
      "grad_norm": 1.046875,
      "learning_rate": 2.999430460537427e-05,
      "logits/chosen": -2.9736104011535645,
      "logits/rejected": -2.9736101627349854,
      "logps/chosen": -0.25192198157310486,
      "logps/rejected": -3.2232658863067627,
      "loss": 0.2654,
      "odds_ratio_loss": 0.9881945848464966,
      "rewards/accuracies": 0.9521484375,
      "rewards/chosen": -0.025192195549607277,
      "rewards/margins": 0.2971344292163849,
      "rewards/rejected": -0.3223266005516052,
      "sft_loss": 0.16653719544410706,
      "step": 28
    },
    {
      "epoch": 0.11378126532614026,
      "grad_norm": 0.85546875,
      "learning_rate": 2.9987186375809513e-05,
      "logits/chosen": -2.979647159576416,
      "logits/rejected": -2.9796371459960938,
      "logps/chosen": -0.20289547741413116,
      "logps/rejected": -3.250380754470825,
      "loss": 0.2129,
      "odds_ratio_loss": 0.7935500144958496,
      "rewards/accuracies": 0.96484375,
      "rewards/chosen": -0.020289547741413116,
      "rewards/margins": 0.3047485649585724,
      "rewards/rejected": -0.32503804564476013,
      "sft_loss": 0.13351328670978546,
      "step": 29
    },
    {
      "epoch": 0.11770475723393821,
      "grad_norm": 0.7265625,
      "learning_rate": 2.997722274649974e-05,
      "logits/chosen": -2.9907119274139404,
      "logits/rejected": -2.990703582763672,
      "logps/chosen": -0.22006310522556305,
      "logps/rejected": -3.3016364574432373,
      "loss": 0.2316,
      "odds_ratio_loss": 0.9098002910614014,
      "rewards/accuracies": 0.9619140625,
      "rewards/chosen": -0.022006310522556305,
      "rewards/margins": 0.308157354593277,
      "rewards/rejected": -0.3301636874675751,
      "sft_loss": 0.14064472913742065,
      "step": 30
    },
    {
      "epoch": 0.12162824914173614,
      "grad_norm": 0.703125,
      "learning_rate": 2.9964415609094767e-05,
      "logits/chosen": -2.999443769454956,
      "logits/rejected": -2.9994404315948486,
      "logps/chosen": -0.21595218777656555,
      "logps/rejected": -3.3530983924865723,
      "loss": 0.2264,
      "odds_ratio_loss": 0.8591948747634888,
      "rewards/accuracies": 0.9619140625,
      "rewards/chosen": -0.021595221012830734,
      "rewards/margins": 0.3137146234512329,
      "rewards/rejected": -0.33530986309051514,
      "sft_loss": 0.14047320187091827,
      "step": 31
    },
    {
      "epoch": 0.1255517410495341,
      "grad_norm": 0.8359375,
      "learning_rate": 2.994876739510005e-05,
      "logits/chosen": -3.005934953689575,
      "logits/rejected": -3.005929470062256,
      "logps/chosen": -0.23634353280067444,
      "logps/rejected": -3.2525179386138916,
      "loss": 0.2487,
      "odds_ratio_loss": 0.8775915503501892,
      "rewards/accuracies": 0.955078125,
      "rewards/chosen": -0.023634355515241623,
      "rewards/margins": 0.30161744356155396,
      "rewards/rejected": -0.3252517879009247,
      "sft_loss": 0.16090597212314606,
      "step": 32
    },
    {
      "epoch": 0.12947523295733201,
      "grad_norm": 0.734375,
      "learning_rate": 2.993028107541506e-05,
      "logits/chosen": -3.0072615146636963,
      "logits/rejected": -3.0072617530822754,
      "logps/chosen": -0.20827826857566833,
      "logps/rejected": -3.354121685028076,
      "loss": 0.2175,
      "odds_ratio_loss": 0.7500962018966675,
      "rewards/accuracies": 0.9658203125,
      "rewards/chosen": -0.020827826112508774,
      "rewards/margins": 0.314584344625473,
      "rewards/rejected": -0.3354122042655945,
      "sft_loss": 0.1424492597579956,
      "step": 33
    },
    {
      "epoch": 0.13339872486512996,
      "grad_norm": 0.6875,
      "learning_rate": 2.9908960159769243e-05,
      "logits/chosen": -3.0059030055999756,
      "logits/rejected": -3.0059022903442383,
      "logps/chosen": -0.21530942618846893,
      "logps/rejected": -3.425773859024048,
      "loss": 0.2248,
      "odds_ratio_loss": 0.8348552584648132,
      "rewards/accuracies": 0.9599609375,
      "rewards/chosen": -0.021530942991375923,
      "rewards/margins": 0.3210464417934418,
      "rewards/rejected": -0.34257739782333374,
      "sft_loss": 0.14131930470466614,
      "step": 34
    },
    {
      "epoch": 0.1373222167729279,
      "grad_norm": 0.5859375,
      "learning_rate": 2.9884808696055675e-05,
      "logits/chosen": -3.0060172080993652,
      "logits/rejected": -3.0060067176818848,
      "logps/chosen": -0.20461970567703247,
      "logps/rejected": -3.3735952377319336,
      "loss": 0.2132,
      "odds_ratio_loss": 0.7768919467926025,
      "rewards/accuracies": 0.96484375,
      "rewards/chosen": -0.020461970940232277,
      "rewards/margins": 0.31689757108688354,
      "rewards/rejected": -0.33735954761505127,
      "sft_loss": 0.13550838828086853,
      "step": 35
    },
    {
      "epoch": 0.14124570868072583,
      "grad_norm": 1.0234375,
      "learning_rate": 2.985783126956255e-05,
      "logits/chosen": -3.008485794067383,
      "logits/rejected": -3.008484125137329,
      "logps/chosen": -0.22423098981380463,
      "logps/rejected": -3.372241258621216,
      "loss": 0.2359,
      "odds_ratio_loss": 0.8862612247467041,
      "rewards/accuracies": 0.9541015625,
      "rewards/chosen": -0.022423099726438522,
      "rewards/margins": 0.31480103731155396,
      "rewards/rejected": -0.3372241258621216,
      "sft_loss": 0.1473112255334854,
      "step": 36
    },
    {
      "epoch": 0.14516920058852378,
      "grad_norm": 0.70703125,
      "learning_rate": 2.9828033002102624e-05,
      "logits/chosen": -3.004643201828003,
      "logits/rejected": -3.004650831222534,
      "logps/chosen": -0.23787802457809448,
      "logps/rejected": -3.5523312091827393,
      "loss": 0.249,
      "odds_ratio_loss": 0.8607804775238037,
      "rewards/accuracies": 0.96484375,
      "rewards/chosen": -0.02378780208528042,
      "rewards/margins": 0.3314453065395355,
      "rewards/rejected": -0.3552331328392029,
      "sft_loss": 0.16296614706516266,
      "step": 37
    },
    {
      "epoch": 0.14909269249632173,
      "grad_norm": 2.671875,
      "learning_rate": 2.9795419551040836e-05,
      "logits/chosen": -3.001986026763916,
      "logits/rejected": -3.001978635787964,
      "logps/chosen": -0.2463943511247635,
      "logps/rejected": -3.445491075515747,
      "loss": 0.2589,
      "odds_ratio_loss": 0.8702651262283325,
      "rewards/accuracies": 0.947265625,
      "rewards/chosen": -0.024639436975121498,
      "rewards/margins": 0.3199096620082855,
      "rewards/rejected": -0.34454911947250366,
      "sft_loss": 0.17186668515205383,
      "step": 38
    },
    {
      "epoch": 0.15301618440411965,
      "grad_norm": 0.71484375,
      "learning_rate": 2.9759997108220197e-05,
      "logits/chosen": -3.000178813934326,
      "logits/rejected": -3.0001754760742188,
      "logps/chosen": -0.2103407084941864,
      "logps/rejected": -3.4846935272216797,
      "loss": 0.2238,
      "odds_ratio_loss": 0.8533927202224731,
      "rewards/accuracies": 0.943359375,
      "rewards/chosen": -0.02103407122194767,
      "rewards/margins": 0.32743528485298157,
      "rewards/rejected": -0.3484693765640259,
      "sft_loss": 0.13847582042217255,
      "step": 39
    },
    {
      "epoch": 0.1569396763119176,
      "grad_norm": 0.8515625,
      "learning_rate": 2.972177239878627e-05,
      "logits/chosen": -2.9975767135620117,
      "logits/rejected": -2.997580051422119,
      "logps/chosen": -0.18677090108394623,
      "logps/rejected": -3.4546234607696533,
      "loss": 0.1964,
      "odds_ratio_loss": 0.719777524471283,
      "rewards/accuracies": 0.9580078125,
      "rewards/chosen": -0.018677091225981712,
      "rewards/margins": 0.32678529620170593,
      "rewards/rejected": -0.3454623818397522,
      "sft_loss": 0.12438130378723145,
      "step": 40
    },
    {
      "epoch": 0.16086316821971555,
      "grad_norm": 0.734375,
      "learning_rate": 2.968075267991032e-05,
      "logits/chosen": -2.9942257404327393,
      "logits/rejected": -2.9942216873168945,
      "logps/chosen": -0.19570019841194153,
      "logps/rejected": -3.47206711769104,
      "loss": 0.2066,
      "odds_ratio_loss": 0.8000987768173218,
      "rewards/accuracies": 0.95703125,
      "rewards/chosen": -0.019570019096136093,
      "rewards/margins": 0.3276367783546448,
      "rewards/rejected": -0.3472067415714264,
      "sft_loss": 0.12656894326210022,
      "step": 41
    },
    {
      "epoch": 0.16478666012751347,
      "grad_norm": 0.578125,
      "learning_rate": 2.9636945739411533e-05,
      "logits/chosen": -2.9919114112854004,
      "logits/rejected": -2.9919145107269287,
      "logps/chosen": -0.19992399215698242,
      "logps/rejected": -3.5652499198913574,
      "loss": 0.2114,
      "odds_ratio_loss": 0.8476812243461609,
      "rewards/accuracies": 0.9580078125,
      "rewards/chosen": -0.019992398098111153,
      "rewards/margins": 0.3365326225757599,
      "rewards/rejected": -0.3565250039100647,
      "sft_loss": 0.12663182616233826,
      "step": 42
    },
    {
      "epoch": 0.16871015203531142,
      "grad_norm": 0.84375,
      "learning_rate": 2.9590359894278458e-05,
      "logits/chosen": -2.989424705505371,
      "logits/rejected": -2.989422559738159,
      "logps/chosen": -0.18934307992458344,
      "logps/rejected": -3.50559663772583,
      "loss": 0.1983,
      "odds_ratio_loss": 0.784630298614502,
      "rewards/accuracies": 0.95703125,
      "rewards/chosen": -0.018934309482574463,
      "rewards/margins": 0.331625372171402,
      "rewards/rejected": -0.35055968165397644,
      "sft_loss": 0.11984920501708984,
      "step": 43
    },
    {
      "epoch": 0.17263364394310937,
      "grad_norm": 0.96875,
      "learning_rate": 2.9541003989089956e-05,
      "logits/chosen": -2.990034818649292,
      "logits/rejected": -2.990030288696289,
      "logps/chosen": -0.1884869933128357,
      "logps/rejected": -3.5475568771362305,
      "loss": 0.1982,
      "odds_ratio_loss": 0.7193607687950134,
      "rewards/accuracies": 0.9609375,
      "rewards/chosen": -0.01884870044887066,
      "rewards/margins": 0.335906982421875,
      "rewards/rejected": -0.3547556400299072,
      "sft_loss": 0.1262793391942978,
      "step": 44
    },
    {
      "epoch": 0.17655713585090732,
      "grad_norm": 0.57421875,
      "learning_rate": 2.9488887394336025e-05,
      "logits/chosen": -2.995880126953125,
      "logits/rejected": -2.9958770275115967,
      "logps/chosen": -0.19902820885181427,
      "logps/rejected": -3.553673028945923,
      "loss": 0.2075,
      "odds_ratio_loss": 0.8336374163627625,
      "rewards/accuracies": 0.9580078125,
      "rewards/chosen": -0.019902819767594337,
      "rewards/margins": 0.3354644775390625,
      "rewards/rejected": -0.3553672730922699,
      "sft_loss": 0.12417693436145782,
      "step": 45
    },
    {
      "epoch": 0.18048062775870524,
      "grad_norm": 0.74609375,
      "learning_rate": 2.9434020004638757e-05,
      "logits/chosen": -2.991523265838623,
      "logits/rejected": -2.991513252258301,
      "logps/chosen": -0.19020922482013702,
      "logps/rejected": -3.644005537033081,
      "loss": 0.2015,
      "odds_ratio_loss": 0.7555862069129944,
      "rewards/accuracies": 0.9638671875,
      "rewards/chosen": -0.019020922482013702,
      "rewards/margins": 0.34537965059280396,
      "rewards/rejected": -0.36440056562423706,
      "sft_loss": 0.12592323124408722,
      "step": 46
    },
    {
      "epoch": 0.1844041196665032,
      "grad_norm": 0.96484375,
      "learning_rate": 2.9376412236873792e-05,
      "logits/chosen": -2.986288547515869,
      "logits/rejected": -2.9862923622131348,
      "logps/chosen": -0.18654456734657288,
      "logps/rejected": -3.63527512550354,
      "loss": 0.1949,
      "odds_ratio_loss": 0.8158512711524963,
      "rewards/accuracies": 0.9658203125,
      "rewards/chosen": -0.018654460087418556,
      "rewards/margins": 0.3448730409145355,
      "rewards/rejected": -0.3635275065898895,
      "sft_loss": 0.1132911741733551,
      "step": 47
    },
    {
      "epoch": 0.18832761157430114,
      "grad_norm": 0.796875,
      "learning_rate": 2.931607502819261e-05,
      "logits/chosen": -2.997182846069336,
      "logits/rejected": -2.9971840381622314,
      "logps/chosen": -0.20562656223773956,
      "logps/rejected": -3.5754082202911377,
      "loss": 0.2192,
      "odds_ratio_loss": 0.8831424713134766,
      "rewards/accuracies": 0.9541015625,
      "rewards/chosen": -0.020562658086419106,
      "rewards/margins": 0.33697813749313354,
      "rewards/rejected": -0.3575408458709717,
      "sft_loss": 0.1309242844581604,
      "step": 48
    },
    {
      "epoch": 0.19225110348209906,
      "grad_norm": 0.8046875,
      "learning_rate": 2.925301983394607e-05,
      "logits/chosen": -2.9951722621917725,
      "logits/rejected": -2.9951741695404053,
      "logps/chosen": -0.19311097264289856,
      "logps/rejected": -3.7165184020996094,
      "loss": 0.2027,
      "odds_ratio_loss": 0.7433779239654541,
      "rewards/accuracies": 0.9638671875,
      "rewards/chosen": -0.019311096519231796,
      "rewards/margins": 0.3523406982421875,
      "rewards/rejected": -0.371651828289032,
      "sft_loss": 0.12833088636398315,
      "step": 49
    },
    {
      "epoch": 0.196174595389897,
      "grad_norm": 0.859375,
      "learning_rate": 2.9187258625509518e-05,
      "logits/chosen": -2.9987900257110596,
      "logits/rejected": -2.998793601989746,
      "logps/chosen": -0.2426375150680542,
      "logps/rejected": -3.5826642513275146,
      "loss": 0.2549,
      "odds_ratio_loss": 0.9904361963272095,
      "rewards/accuracies": 0.9501953125,
      "rewards/chosen": -0.02426375262439251,
      "rewards/margins": 0.33400267362594604,
      "rewards/rejected": -0.3582664132118225,
      "sft_loss": 0.1558777540922165,
      "step": 50
    }
  ],
  "logging_steps": 1,
  "max_steps": 254,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.909585740421202e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}