{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 41,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 1.0,
"eval_B-Claim": {
"f1-score": 0.0,
"precision": 0.0,
"recall": 0.0,
"support": 284.0
},
"eval_B-MajorClaim": {
"f1-score": 0.0,
"precision": 0.0,
"recall": 0.0,
"support": 141.0
},
"eval_B-Premise": {
"f1-score": 0.0028169014084507044,
"precision": 0.5,
"recall": 0.0014124293785310734,
"support": 708.0
},
"eval_I-Claim": {
"f1-score": 0.2423290994719566,
"precision": 0.28976109215017065,
"recall": 0.20824135393671817,
"support": 4077.0
},
"eval_I-MajorClaim": {
"f1-score": 0.30434782608695654,
"precision": 0.5460358056265985,
"recall": 0.21096837944664032,
"support": 2024.0
},
"eval_I-Premise": {
"f1-score": 0.8170132059567293,
"precision": 0.7161945812807882,
"recall": 0.9508665794637018,
"support": 12232.0
},
"eval_O": {
"f1-score": 0.8384247714048214,
"precision": 0.8602345415778252,
"recall": 0.8176935549250102,
"support": 9868.0
},
"eval_accuracy": 0.7151087475284653,
"eval_loss": 0.7737340927124023,
"eval_macro avg": {
"f1-score": 0.31499025776127354,
"precision": 0.41603228866219755,
"recall": 0.31274032816437164,
"support": 29334.0
},
"eval_runtime": 1.417,
"eval_samples_per_second": 56.458,
"eval_steps_per_second": 7.057,
"eval_weighted avg": {
"f1-score": 0.6774818055577092,
"precision": 0.6780466018204494,
"recall": 0.7151087475284653,
"support": 29334.0
},
"step": 41
}
],
"logging_steps": 500,
"max_steps": 205,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"total_flos": 143790812718000.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}