{
  "best_global_step": 21000,
  "best_metric": 0.0023173103109002113,
  "best_model_checkpoint": "./t5gemma-finetuned_full_dataset_small\\checkpoint-21000",
  "epoch": 0.40385634733569,
  "eval_steps": 7000,
  "global_step": 21000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03846250927006571,
      "grad_norm": 5.125,
      "learning_rate": 1.992715874723852e-05,
      "loss": 0.049,
      "step": 2000
    },
    {
      "epoch": 0.07692501854013142,
      "grad_norm": 1.0078125,
      "learning_rate": 1.97095516230889e-05,
      "loss": 0.0128,
      "step": 4000
    },
    {
      "epoch": 0.11538752781019714,
      "grad_norm": 3.0,
      "learning_rate": 1.9350351933080317e-05,
      "loss": 0.0082,
      "step": 6000
    },
    {
      "epoch": 0.13461878244522998,
      "eval_loss": 0.00584625406190753,
      "eval_runtime": 9078.4693,
      "eval_samples_per_second": 22.911,
      "eval_steps_per_second": 22.911,
      "step": 7000
    },
    {
      "epoch": 0.15385003708026285,
      "grad_norm": 0.59375,
      "learning_rate": 1.885479781876022e-05,
      "loss": 0.0061,
      "step": 8000
    },
    {
      "epoch": 0.19231254635032854,
      "grad_norm": 1.125,
      "learning_rate": 1.823011585261026e-05,
      "loss": 0.0051,
      "step": 10000
    },
    {
      "epoch": 0.23077505562039427,
      "grad_norm": 0.369140625,
      "learning_rate": 1.748541565429842e-05,
      "loss": 0.0044,
      "step": 12000
    },
    {
      "epoch": 0.26923756489045997,
      "grad_norm": 1.1171875,
      "learning_rate": 1.6631557046808075e-05,
      "loss": 0.0039,
      "step": 14000
    },
    {
      "epoch": 0.26923756489045997,
      "eval_loss": 0.003004432423040271,
      "eval_runtime": 9508.738,
      "eval_samples_per_second": 21.874,
      "eval_steps_per_second": 21.874,
      "step": 14000
    },
    {
      "epoch": 0.3077000741605257,
      "grad_norm": 0.25390625,
      "learning_rate": 1.568099168968113e-05,
      "loss": 0.0037,
      "step": 16000
    },
    {
      "epoch": 0.3461625834305914,
      "grad_norm": 0.1611328125,
      "learning_rate": 1.464758149880794e-05,
      "loss": 0.0034,
      "step": 18000
    },
    {
      "epoch": 0.3846250927006571,
      "grad_norm": 1.125,
      "learning_rate": 1.3546396500713988e-05,
      "loss": 0.0032,
      "step": 20000
    },
    {
      "epoch": 0.40385634733569,
      "eval_loss": 0.0023173103109002113,
      "eval_runtime": 9393.4852,
      "eval_samples_per_second": 22.142,
      "eval_steps_per_second": 22.142,
      "step": 21000
    }
  ],
  "logging_steps": 2000,
  "max_steps": 51999,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 7000,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 2,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.393692126347264e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}