{
  "best_metric": 0.0194552019238472,
  "best_model_checkpoint": "emotional-impact-longformer/checkpoint-2660",
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 2660,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03759398496240601,
      "grad_norm": 3.7252769470214844,
      "learning_rate": 4.229323308270677e-06,
      "loss": 0.2706,
      "step": 25
    },
    {
      "epoch": 0.07518796992481203,
      "grad_norm": 1.5272068977355957,
      "learning_rate": 8.458646616541353e-06,
      "loss": 0.0476,
      "step": 50
    },
    {
      "epoch": 0.11278195488721804,
      "grad_norm": 0.4288886785507202,
      "learning_rate": 1.2687969924812032e-05,
      "loss": 0.0315,
      "step": 75
    },
    {
      "epoch": 0.15037593984962405,
      "grad_norm": 0.7325922250747681,
      "learning_rate": 1.6917293233082707e-05,
      "loss": 0.0287,
      "step": 100
    },
    {
      "epoch": 0.18796992481203006,
      "grad_norm": 1.2668706178665161,
      "learning_rate": 2.1146616541353385e-05,
      "loss": 0.0297,
      "step": 125
    },
    {
      "epoch": 0.22556390977443608,
      "grad_norm": 0.8078019618988037,
      "learning_rate": 2.5375939849624064e-05,
      "loss": 0.0293,
      "step": 150
    },
    {
      "epoch": 0.2631578947368421,
      "grad_norm": 2.1185989379882812,
      "learning_rate": 2.9605263157894742e-05,
      "loss": 0.0296,
      "step": 175
    },
    {
      "epoch": 0.3007518796992481,
      "grad_norm": 0.4720282554626465,
      "learning_rate": 3.3665413533834594e-05,
      "loss": 0.03,
      "step": 200
    },
    {
      "epoch": 0.3383458646616541,
      "grad_norm": 0.7764217853546143,
      "learning_rate": 3.789473684210526e-05,
      "loss": 0.0278,
      "step": 225
    },
    {
      "epoch": 0.37593984962406013,
      "grad_norm": 0.7980750799179077,
      "learning_rate": 4.2124060150375944e-05,
      "loss": 0.0291,
      "step": 250
    },
    {
      "epoch": 0.41353383458646614,
      "grad_norm": 0.26071611046791077,
      "learning_rate": 4.484962406015038e-05,
      "loss": 0.0262,
      "step": 275
    },
    {
      "epoch": 0.45112781954887216,
      "grad_norm": 0.589393675327301,
      "learning_rate": 4.43796992481203e-05,
      "loss": 0.0255,
      "step": 300
    },
    {
      "epoch": 0.48872180451127817,
      "grad_norm": 0.5992129445075989,
      "learning_rate": 4.3909774436090224e-05,
      "loss": 0.0253,
      "step": 325
    },
    {
      "epoch": 0.5263157894736842,
      "grad_norm": 0.5669745206832886,
      "learning_rate": 4.3439849624060155e-05,
      "loss": 0.0249,
      "step": 350
    },
    {
      "epoch": 0.5639097744360902,
      "grad_norm": 1.6321195363998413,
      "learning_rate": 4.296992481203008e-05,
      "loss": 0.0253,
      "step": 375
    },
    {
      "epoch": 0.6015037593984962,
      "grad_norm": 0.1369141936302185,
      "learning_rate": 4.25e-05,
      "loss": 0.0256,
      "step": 400
    },
    {
      "epoch": 0.6390977443609023,
      "grad_norm": 0.2838519811630249,
      "learning_rate": 4.203007518796993e-05,
      "loss": 0.0246,
      "step": 425
    },
    {
      "epoch": 0.6766917293233082,
      "grad_norm": 0.626377284526825,
      "learning_rate": 4.156015037593985e-05,
      "loss": 0.0241,
      "step": 450
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.5266003012657166,
      "learning_rate": 4.1090225563909776e-05,
      "loss": 0.0259,
      "step": 475
    },
    {
      "epoch": 0.7518796992481203,
      "grad_norm": 0.3502209484577179,
      "learning_rate": 4.06203007518797e-05,
      "loss": 0.025,
      "step": 500
    },
    {
      "epoch": 0.7894736842105263,
      "grad_norm": 0.9167695641517639,
      "learning_rate": 4.0150375939849624e-05,
      "loss": 0.0236,
      "step": 525
    },
    {
      "epoch": 0.8270676691729323,
      "grad_norm": 0.648853600025177,
      "learning_rate": 3.9680451127819555e-05,
      "loss": 0.0236,
      "step": 550
    },
    {
      "epoch": 0.8646616541353384,
      "grad_norm": 0.2568742334842682,
      "learning_rate": 3.921052631578948e-05,
      "loss": 0.0236,
      "step": 575
    },
    {
      "epoch": 0.9022556390977443,
      "grad_norm": 1.0824393033981323,
      "learning_rate": 3.87406015037594e-05,
      "loss": 0.0237,
      "step": 600
    },
    {
      "epoch": 0.9398496240601504,
      "grad_norm": 0.20589441061019897,
      "learning_rate": 3.827067669172932e-05,
      "loss": 0.0239,
      "step": 625
    },
    {
      "epoch": 0.9774436090225563,
      "grad_norm": 0.19859573245048523,
      "learning_rate": 3.780075187969925e-05,
      "loss": 0.0234,
      "step": 650
    },
    {
      "epoch": 1.0,
      "eval_explained_variance": 0.14621615409851074,
      "eval_loss": 0.020824678242206573,
      "eval_mae": 0.10323476046323776,
      "eval_mse": 0.02082563191652298,
      "eval_r2": 0.09602558612823486,
      "eval_rmse": 0.14431088634099293,
      "eval_runtime": 333.5476,
      "eval_samples_per_second": 63.784,
      "eval_steps_per_second": 0.998,
      "step": 665
    },
    {
      "epoch": 1.0150375939849625,
      "grad_norm": 0.23966658115386963,
      "learning_rate": 3.7330827067669176e-05,
      "loss": 0.0217,
      "step": 675
    },
    {
      "epoch": 1.0526315789473684,
      "grad_norm": 0.3200964033603668,
      "learning_rate": 3.68609022556391e-05,
      "loss": 0.0228,
      "step": 700
    },
    {
      "epoch": 1.0902255639097744,
      "grad_norm": 1.072204351425171,
      "learning_rate": 3.6390977443609025e-05,
      "loss": 0.0228,
      "step": 725
    },
    {
      "epoch": 1.1278195488721805,
      "grad_norm": 0.8375339508056641,
      "learning_rate": 3.592105263157895e-05,
      "loss": 0.0228,
      "step": 750
    },
    {
      "epoch": 1.1654135338345863,
      "grad_norm": 0.3468254804611206,
      "learning_rate": 3.5451127819548873e-05,
      "loss": 0.0247,
      "step": 775
    },
    {
      "epoch": 1.2030075187969924,
      "grad_norm": 1.3668121099472046,
      "learning_rate": 3.49812030075188e-05,
      "loss": 0.0243,
      "step": 800
    },
    {
      "epoch": 1.2406015037593985,
      "grad_norm": 0.5203770995140076,
      "learning_rate": 3.451127819548872e-05,
      "loss": 0.023,
      "step": 825
    },
    {
      "epoch": 1.2781954887218046,
      "grad_norm": 0.22468727827072144,
      "learning_rate": 3.404135338345865e-05,
      "loss": 0.0226,
      "step": 850
    },
    {
      "epoch": 1.3157894736842106,
      "grad_norm": 0.3588065207004547,
      "learning_rate": 3.357142857142858e-05,
      "loss": 0.0231,
      "step": 875
    },
    {
      "epoch": 1.3533834586466165,
      "grad_norm": 0.49149370193481445,
      "learning_rate": 3.3101503759398495e-05,
      "loss": 0.0223,
      "step": 900
    },
    {
      "epoch": 1.3909774436090225,
      "grad_norm": 0.2891516089439392,
      "learning_rate": 3.263157894736842e-05,
      "loss": 0.0227,
      "step": 925
    },
    {
      "epoch": 1.4285714285714286,
      "grad_norm": 0.15670672059059143,
      "learning_rate": 3.216165413533835e-05,
      "loss": 0.0213,
      "step": 950
    },
    {
      "epoch": 1.4661654135338344,
      "grad_norm": 0.12459678947925568,
      "learning_rate": 3.1691729323308274e-05,
      "loss": 0.0231,
      "step": 975
    },
    {
      "epoch": 1.5037593984962405,
      "grad_norm": 0.8225300908088684,
      "learning_rate": 3.12218045112782e-05,
      "loss": 0.0229,
      "step": 1000
    },
    {
      "epoch": 1.5413533834586466,
      "grad_norm": 0.3865518569946289,
      "learning_rate": 3.075187969924812e-05,
      "loss": 0.022,
      "step": 1025
    },
    {
      "epoch": 1.5789473684210527,
      "grad_norm": 0.8772854804992676,
      "learning_rate": 3.028195488721805e-05,
      "loss": 0.0229,
      "step": 1050
    },
    {
      "epoch": 1.6165413533834587,
      "grad_norm": 0.3904133141040802,
      "learning_rate": 2.981203007518797e-05,
      "loss": 0.0224,
      "step": 1075
    },
    {
      "epoch": 1.6541353383458648,
      "grad_norm": 0.7952344417572021,
      "learning_rate": 2.9342105263157895e-05,
      "loss": 0.0216,
      "step": 1100
    },
    {
      "epoch": 1.6917293233082706,
      "grad_norm": 0.2614936828613281,
      "learning_rate": 2.887218045112782e-05,
      "loss": 0.0219,
      "step": 1125
    },
    {
      "epoch": 1.7293233082706767,
      "grad_norm": 0.3101685643196106,
      "learning_rate": 2.8402255639097747e-05,
      "loss": 0.0213,
      "step": 1150
    },
    {
      "epoch": 1.7669172932330826,
      "grad_norm": 0.2712872326374054,
      "learning_rate": 2.793233082706767e-05,
      "loss": 0.0221,
      "step": 1175
    },
    {
      "epoch": 1.8045112781954886,
      "grad_norm": 1.1345945596694946,
      "learning_rate": 2.7462406015037596e-05,
      "loss": 0.0231,
      "step": 1200
    },
    {
      "epoch": 1.8421052631578947,
      "grad_norm": 0.2056145817041397,
      "learning_rate": 2.699248120300752e-05,
      "loss": 0.0229,
      "step": 1225
    },
    {
      "epoch": 1.8796992481203008,
      "grad_norm": 0.24201074242591858,
      "learning_rate": 2.6522556390977448e-05,
      "loss": 0.021,
      "step": 1250
    },
    {
      "epoch": 1.9172932330827068,
      "grad_norm": 0.6122403740882874,
      "learning_rate": 2.6052631578947372e-05,
      "loss": 0.0217,
      "step": 1275
    },
    {
      "epoch": 1.954887218045113,
      "grad_norm": 0.7876213192939758,
      "learning_rate": 2.5582706766917296e-05,
      "loss": 0.0227,
      "step": 1300
    },
    {
      "epoch": 1.9924812030075187,
      "grad_norm": 0.605512797832489,
      "learning_rate": 2.5112781954887217e-05,
      "loss": 0.0223,
      "step": 1325
    },
    {
      "epoch": 2.0,
      "eval_explained_variance": 0.14240312576293945,
      "eval_loss": 0.019838958978652954,
      "eval_mae": 0.10281991213560104,
      "eval_mse": 0.019839677959680557,
      "eval_r2": 0.1388227343559265,
      "eval_rmse": 0.14085339172231728,
      "eval_runtime": 332.693,
      "eval_samples_per_second": 63.948,
      "eval_steps_per_second": 1.001,
      "step": 1330
    },
    {
      "epoch": 2.030075187969925,
      "grad_norm": 0.347207635641098,
      "learning_rate": 2.4642857142857148e-05,
      "loss": 0.0203,
      "step": 1350
    },
    {
      "epoch": 2.0676691729323307,
      "grad_norm": 0.178135484457016,
      "learning_rate": 2.417293233082707e-05,
      "loss": 0.0211,
      "step": 1375
    },
    {
      "epoch": 2.1052631578947367,
      "grad_norm": 0.5986231565475464,
      "learning_rate": 2.3703007518796993e-05,
      "loss": 0.0214,
      "step": 1400
    },
    {
      "epoch": 2.142857142857143,
      "grad_norm": 0.6835535764694214,
      "learning_rate": 2.3233082706766917e-05,
      "loss": 0.0216,
      "step": 1425
    },
    {
      "epoch": 2.180451127819549,
      "grad_norm": 0.6227976083755493,
      "learning_rate": 2.2763157894736845e-05,
      "loss": 0.021,
      "step": 1450
    },
    {
      "epoch": 2.218045112781955,
      "grad_norm": 0.17263726890087128,
      "learning_rate": 2.2293233082706766e-05,
      "loss": 0.0209,
      "step": 1475
    },
    {
      "epoch": 2.255639097744361,
      "grad_norm": 0.3358646035194397,
      "learning_rate": 2.1823308270676693e-05,
      "loss": 0.021,
      "step": 1500
    },
    {
      "epoch": 2.293233082706767,
      "grad_norm": 0.5184768438339233,
      "learning_rate": 2.1353383458646618e-05,
      "loss": 0.0211,
      "step": 1525
    },
    {
      "epoch": 2.3308270676691727,
      "grad_norm": 0.18221434950828552,
      "learning_rate": 2.0883458646616542e-05,
      "loss": 0.0203,
      "step": 1550
    },
    {
      "epoch": 2.3684210526315788,
      "grad_norm": 0.8772348761558533,
      "learning_rate": 2.0413533834586466e-05,
      "loss": 0.0208,
      "step": 1575
    },
    {
      "epoch": 2.406015037593985,
      "grad_norm": 0.47695019841194153,
      "learning_rate": 1.9943609022556394e-05,
      "loss": 0.0202,
      "step": 1600
    },
    {
      "epoch": 2.443609022556391,
      "grad_norm": 0.13762958347797394,
      "learning_rate": 1.9473684210526315e-05,
      "loss": 0.021,
      "step": 1625
    },
    {
      "epoch": 2.481203007518797,
      "grad_norm": 0.8466396927833557,
      "learning_rate": 1.9003759398496242e-05,
      "loss": 0.0209,
      "step": 1650
    },
    {
      "epoch": 2.518796992481203,
      "grad_norm": 1.1149111986160278,
      "learning_rate": 1.8533834586466166e-05,
      "loss": 0.0207,
      "step": 1675
    },
    {
      "epoch": 2.556390977443609,
      "grad_norm": 0.7794457674026489,
      "learning_rate": 1.806390977443609e-05,
      "loss": 0.0209,
      "step": 1700
    },
    {
      "epoch": 2.593984962406015,
      "grad_norm": 0.6543653607368469,
      "learning_rate": 1.7593984962406015e-05,
      "loss": 0.022,
      "step": 1725
    },
    {
      "epoch": 2.6315789473684212,
      "grad_norm": 0.3373096287250519,
      "learning_rate": 1.7124060150375943e-05,
      "loss": 0.0202,
      "step": 1750
    },
    {
      "epoch": 2.6691729323308273,
      "grad_norm": 0.8596007227897644,
      "learning_rate": 1.6654135338345863e-05,
      "loss": 0.0199,
      "step": 1775
    },
    {
      "epoch": 2.706766917293233,
      "grad_norm": 0.14692935347557068,
      "learning_rate": 1.618421052631579e-05,
      "loss": 0.0205,
      "step": 1800
    },
    {
      "epoch": 2.744360902255639,
      "grad_norm": 0.4790925979614258,
      "learning_rate": 1.5714285714285715e-05,
      "loss": 0.0211,
      "step": 1825
    },
    {
      "epoch": 2.781954887218045,
      "grad_norm": 0.17605140805244446,
      "learning_rate": 1.5244360902255641e-05,
      "loss": 0.0204,
      "step": 1850
    },
    {
      "epoch": 2.819548872180451,
      "grad_norm": 0.5374957919120789,
      "learning_rate": 1.4774436090225564e-05,
      "loss": 0.0213,
      "step": 1875
    },
    {
      "epoch": 2.857142857142857,
      "grad_norm": 0.16991238296031952,
      "learning_rate": 1.430451127819549e-05,
      "loss": 0.0213,
      "step": 1900
    },
    {
      "epoch": 2.8947368421052633,
      "grad_norm": 0.4746359884738922,
      "learning_rate": 1.3834586466165414e-05,
      "loss": 0.0214,
      "step": 1925
    },
    {
      "epoch": 2.932330827067669,
      "grad_norm": 0.8064231872558594,
      "learning_rate": 1.336466165413534e-05,
      "loss": 0.02,
      "step": 1950
    },
    {
      "epoch": 2.969924812030075,
      "grad_norm": 0.5129567384719849,
      "learning_rate": 1.2894736842105262e-05,
      "loss": 0.0201,
      "step": 1975
    },
    {
      "epoch": 3.0,
      "eval_explained_variance": 0.1511455774307251,
      "eval_loss": 0.019669942557811737,
      "eval_mae": 0.10461708903312683,
      "eval_mse": 0.019670462235808372,
      "eval_r2": 0.1461678147315979,
      "eval_rmse": 0.14025142507585572,
      "eval_runtime": 332.723,
      "eval_samples_per_second": 63.942,
      "eval_steps_per_second": 1.001,
      "step": 1995
    },
    {
      "epoch": 3.007518796992481,
      "grad_norm": 0.18976610898971558,
      "learning_rate": 1.242481203007519e-05,
      "loss": 0.0198,
      "step": 2000
    },
    {
      "epoch": 3.045112781954887,
      "grad_norm": 0.3412296175956726,
      "learning_rate": 1.1954887218045113e-05,
      "loss": 0.0194,
      "step": 2025
    },
    {
      "epoch": 3.082706766917293,
      "grad_norm": 0.19771774113178253,
      "learning_rate": 1.1484962406015039e-05,
      "loss": 0.02,
      "step": 2050
    },
    {
      "epoch": 3.1203007518796992,
      "grad_norm": 0.17749685049057007,
      "learning_rate": 1.1015037593984963e-05,
      "loss": 0.021,
      "step": 2075
    },
    {
      "epoch": 3.1578947368421053,
      "grad_norm": 0.9430246949195862,
      "learning_rate": 1.0545112781954887e-05,
      "loss": 0.0206,
      "step": 2100
    },
    {
      "epoch": 3.1954887218045114,
      "grad_norm": 0.2554938495159149,
      "learning_rate": 1.0075187969924811e-05,
      "loss": 0.0189,
      "step": 2125
    },
    {
      "epoch": 3.2330827067669174,
      "grad_norm": 0.1475767195224762,
      "learning_rate": 9.605263157894737e-06,
      "loss": 0.0193,
      "step": 2150
    },
    {
      "epoch": 3.2706766917293235,
      "grad_norm": 0.2444714456796646,
      "learning_rate": 9.135338345864661e-06,
      "loss": 0.02,
      "step": 2175
    },
    {
      "epoch": 3.308270676691729,
      "grad_norm": 0.4087791442871094,
      "learning_rate": 8.665413533834586e-06,
      "loss": 0.0185,
      "step": 2200
    },
    {
      "epoch": 3.345864661654135,
      "grad_norm": 0.33857977390289307,
      "learning_rate": 8.195488721804512e-06,
      "loss": 0.0194,
      "step": 2225
    },
    {
      "epoch": 3.3834586466165413,
      "grad_norm": 0.1465982049703598,
      "learning_rate": 7.725563909774436e-06,
      "loss": 0.0198,
      "step": 2250
    },
    {
      "epoch": 3.4210526315789473,
      "grad_norm": 0.4856254756450653,
      "learning_rate": 7.255639097744361e-06,
      "loss": 0.0196,
      "step": 2275
    },
    {
      "epoch": 3.4586466165413534,
      "grad_norm": 0.38655972480773926,
      "learning_rate": 6.785714285714286e-06,
      "loss": 0.0194,
      "step": 2300
    },
    {
      "epoch": 3.4962406015037595,
      "grad_norm": 0.4781357944011688,
      "learning_rate": 6.31578947368421e-06,
      "loss": 0.0197,
      "step": 2325
    },
    {
      "epoch": 3.5338345864661656,
      "grad_norm": 0.22151640057563782,
      "learning_rate": 5.845864661654135e-06,
      "loss": 0.0199,
      "step": 2350
    },
    {
      "epoch": 3.571428571428571,
      "grad_norm": 0.443062424659729,
      "learning_rate": 5.3759398496240605e-06,
      "loss": 0.0201,
      "step": 2375
    },
    {
      "epoch": 3.6090225563909772,
      "grad_norm": 0.27976560592651367,
      "learning_rate": 4.906015037593985e-06,
      "loss": 0.0192,
      "step": 2400
    },
    {
      "epoch": 3.6466165413533833,
      "grad_norm": 0.19204473495483398,
      "learning_rate": 4.43609022556391e-06,
      "loss": 0.0193,
      "step": 2425
    },
    {
      "epoch": 3.6842105263157894,
      "grad_norm": 0.38657328486442566,
      "learning_rate": 3.966165413533835e-06,
      "loss": 0.02,
      "step": 2450
    },
    {
      "epoch": 3.7218045112781954,
      "grad_norm": 0.4538291096687317,
      "learning_rate": 3.4962406015037596e-06,
      "loss": 0.0201,
      "step": 2475
    },
    {
      "epoch": 3.7593984962406015,
      "grad_norm": 0.5408753156661987,
      "learning_rate": 3.0263157894736843e-06,
      "loss": 0.0197,
      "step": 2500
    },
    {
      "epoch": 3.7969924812030076,
      "grad_norm": 0.18860125541687012,
      "learning_rate": 2.556390977443609e-06,
      "loss": 0.02,
      "step": 2525
    },
    {
      "epoch": 3.8345864661654137,
      "grad_norm": 0.2737857699394226,
      "learning_rate": 2.086466165413534e-06,
      "loss": 0.0188,
      "step": 2550
    },
    {
      "epoch": 3.8721804511278197,
      "grad_norm": 0.1512097716331482,
      "learning_rate": 1.6165413533834587e-06,
      "loss": 0.0194,
      "step": 2575
    },
    {
      "epoch": 3.909774436090226,
      "grad_norm": 0.1916394680738449,
      "learning_rate": 1.1466165413533836e-06,
      "loss": 0.0199,
      "step": 2600
    },
    {
      "epoch": 3.9473684210526314,
      "grad_norm": 0.29134517908096313,
      "learning_rate": 6.766917293233083e-07,
      "loss": 0.0184,
      "step": 2625
    },
    {
      "epoch": 3.9849624060150375,
      "grad_norm": 0.5952789783477783,
      "learning_rate": 2.067669172932331e-07,
      "loss": 0.0196,
      "step": 2650
    },
    {
      "epoch": 4.0,
      "eval_explained_variance": 0.16475754976272583,
      "eval_loss": 0.0194552019238472,
      "eval_mae": 0.10345309227705002,
      "eval_mse": 0.01945589855313301,
      "eval_r2": 0.15548139810562134,
      "eval_rmse": 0.13948440254427377,
      "eval_runtime": 334.1338,
      "eval_samples_per_second": 63.672,
      "eval_steps_per_second": 0.997,
      "step": 2660
    }
  ],
  "logging_steps": 25,
  "max_steps": 2660,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 4,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 5,
        "early_stopping_threshold": 0.01
      },
      "attributes": {
        "early_stopping_patience_counter": 3
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 8.945729038555546e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}