abarbosa committed (verified)
Commit 0465fbe · 1 Parent(s): ab54230

Pushing fine-tuned model to Hugging Face Hub

README.md ADDED
@@ -0,0 +1,43 @@
+
+ ---
+ language:
+ - pt
+ - en
+ tags:
+ - aes
+ datasets:
+ - kamel-usp/aes_enem_dataset
+ base_model: meta-llama/Llama-3.1-8B
+ metrics:
+ - accuracy
+ - qwk
+ library_name: peft
+ model-index:
+ - name: llama31_8b-balanced-C1
+   results:
+   - task:
+       type: text-classification
+       name: Automated Essay Score
+     dataset:
+       name: Automated Essay Score ENEM Dataset
+       type: kamel-usp/aes_enem_dataset
+       config: JBCS2025
+       split: test
+     metrics:
+     - name: Macro F1
+       type: F1
+       value: 0.4357443349423437
+     - name: QWK
+       type: qwk
+       value: 0.6043890865954924
+ ---
+ # Model ID: llama31_8b-balanced-C1
+ ## Results
+ |               |   test_data |
+ |:--------------|------------:|
+ | eval_accuracy |  0.65942    |
+ | eval_RMSE     | 25.9319     |
+ | eval_QWK      |  0.604389   |
+ | eval_Macro_F1 |  0.435744   |
+ | eval_HDIV     |  0.00724638 |
+
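The card above describes a PEFT/LoRA adapter that turns `meta-llama/Llama-3.1-8B` into a 6-way essay-score classifier. A minimal loading sketch is shown below; the adapter repo id is a placeholder and the pad-token handling is an assumption, since the training script itself is not part of this commit.

```python
# Minimal inference sketch (not the training script): load the LoRA adapter on
# top of the gated meta-llama/Llama-3.1-8B base model as a 6-way classifier.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

ADAPTER_ID = "<namespace>/llama31_8b-balanced-C1"  # placeholder repo id

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B")
tokenizer.pad_token = tokenizer.eos_token  # assumption: Llama ships without a pad token

base = AutoModelForSequenceClassification.from_pretrained(
    "meta-llama/Llama-3.1-8B",
    num_labels=6,
    torch_dtype=torch.bfloat16,
)
base.config.pad_token_id = tokenizer.pad_token_id
model = PeftModel.from_pretrained(base, ADAPTER_ID).eval()

# Class indices map to ENEM competence scores (id2label in run_experiment.log).
ID2GRADE = {0: 0, 1: 40, 2: 80, 3: 120, 4: 160, 5: 200}

inputs = tokenizer("Texto de exemplo da redação...", return_tensors="pt")
with torch.no_grad():
    pred = model(**inputs).logits.argmax(dim=-1).item()
print(ID2GRADE[pred])
```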
adapter_config.json ADDED
@@ -0,0 +1,40 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "meta-llama/Llama-3.1-8B",
+   "bias": "none",
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_bias": false,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": [
+     "classifier",
+     "score"
+   ],
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "o_proj",
+     "gate_proj",
+     "v_proj",
+     "down_proj",
+     "up_proj",
+     "q_proj",
+     "k_proj"
+   ],
+   "task_type": "SEQ_CLS",
+   "use_dora": false,
+   "use_rslora": false
+ }
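For reference, this adapter configuration corresponds roughly to the following `peft.LoraConfig`. It is a sketch only; the actual training code is not included in this commit.

```python
# Rough equivalent of adapter_config.json as a peft LoraConfig (sketch only).
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    task_type=TaskType.SEQ_CLS,      # sequence-classification head
    r=8,                             # LoRA rank
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],  # all linear projections
    modules_to_save=["classifier", "score"],  # keep the classification head trainable
)
```

Applied to the base model with `peft.get_peft_model(base_model, lora_config)`, this setup accounts for the 20,996,096 trainable parameters reported in run_experiment.log (r=8 LoRA factors on all seven projection matrices across 32 layers, plus the 6×4096 score head).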
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6397979039118a3634b35deb1598ac9506c0cc7ceca748624eca7a92dbeec49
+ size 83994544
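The safetensors file itself is stored via Git LFS; the pointer above only records its SHA-256 and size (~84 MB). One way to fetch and inspect the adapter weights, with the repo id again a placeholder:

```python
# Sketch: download the LFS-backed adapter weights and list the tensors inside.
from huggingface_hub import hf_hub_download
from safetensors import safe_open

path = hf_hub_download("<namespace>/llama31_8b-balanced-C1", "adapter_model.safetensors")
with safe_open(path, framework="pt") as f:
    for name in f.keys():
        print(name, f.get_slice(name).get_shape())
```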
run_experiment.log ADDED
@@ -0,0 +1,1147 @@
+ [2025-03-16 13:15:47,890][__main__][INFO] - cache_dir: /media/data/tmp
+ dataset:
+   name: kamel-usp/aes_enem_dataset
+   split: JBCS2025
+ training_params:
+   seed: 42
+   num_train_epochs: 20
+   logging_steps: 100
+   metric_for_best_model: QWK
+   bf16: true
+ post_training_results:
+   model_path: /workspace/jbcs2025/outputs/2025-03-16/08-36-36
+ experiments:
+   model:
+     name: meta-llama/Llama-3.1-8B
+     type: llama31_classification_lora
+     num_labels: 6
+     output_dir: ./results/llama31_8b-balanced/C1
+     logging_dir: ./logs/llama31_8b-balanced/C1
+     best_model_dir: ./results/llama31_8b-balanced/C1/best_model
+     lora_r: 8
+     lora_dropout: 0.05
+     lora_alpha: 16
+     lora_target_modules: all-linear
+   dataset:
+     grade_index: 0
+   training_id: llama31_8b-balanced-C1
+   training_params:
+     weight_decay: 0.01
+     warmup_ratio: 0.1
+     learning_rate: 5.0e-05
+     train_batch_size: 1
+     eval_batch_size: 2
+     gradient_accumulation_steps: 16
+     gradient_checkpointing: false
+
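The Hydra configuration echoed above maps roughly onto a `transformers.TrainingArguments` like the sketch below. This is illustrative only: the experiment's actual training code is not part of this commit, and the evaluation/save strategies and `save_total_limit` are assumptions inferred from the checkpoint cadence later in this log.

```python
# Sketch only: approximate TrainingArguments implied by the logged Hydra config.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./results/llama31_8b-balanced/C1",
    logging_dir="./logs/llama31_8b-balanced/C1",
    seed=42,
    num_train_epochs=20,
    learning_rate=5e-5,
    weight_decay=0.01,
    warmup_ratio=0.1,                # 62 warmup steps out of 620, as logged below
    per_device_train_batch_size=1,
    per_device_eval_batch_size=2,
    gradient_accumulation_steps=16,
    gradient_checkpointing=False,
    bf16=True,
    logging_steps=100,
    eval_strategy="epoch",           # assumption: evaluations every 32 steps ≈ 1 epoch
    save_strategy="epoch",
    save_total_limit=1,              # assumption, based on the checkpoint deletions below
    metric_for_best_model="QWK",
    load_best_model_at_end=True,
    greater_is_better=True,
)
```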
+ [2025-03-16 13:15:47,891][__main__][INFO] - Starting the Fine Tuning training process.
+ [2025-03-16 13:15:51,975][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /media/data/tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer.json
+ [2025-03-16 13:15:51,975][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at None
+ [2025-03-16 13:15:51,975][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
+ [2025-03-16 13:15:51,975][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /media/data/tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/special_tokens_map.json
+ [2025-03-16 13:15:51,975][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /media/data/tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/tokenizer_config.json
+ [2025-03-16 13:15:51,975][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
+ [2025-03-16 13:15:52,167][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+ [2025-03-16 13:15:52,172][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False
+ [2025-03-16 13:15:52,783][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /media/data/tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
+ [2025-03-16 13:15:52,786][transformers.configuration_utils][INFO] - Model config LlamaConfig {
+   "_name_or_path": "meta-llama/Llama-3.1-8B",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 128000,
+   "eos_token_id": 128001,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "id2label": {
+     "0": 0,
+     "1": 40,
+     "2": 80,
+     "3": 120,
+     "4": 160,
+     "5": 200
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "label2id": {
+     "0": 0,
+     "40": 1,
+     "80": 2,
+     "120": 3,
+     "160": 4,
+     "200": 5
+   },
+   "max_position_embeddings": 131072,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 8.0,
+     "high_freq_factor": 4.0,
+     "low_freq_factor": 1.0,
+     "original_max_position_embeddings": 8192,
+     "rope_type": "llama3"
+   },
+   "rope_theta": 500000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.49.0",
+   "use_cache": true,
+   "vocab_size": 128256
+ }
+
+ [2025-03-16 13:15:52,800][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /media/data/tmp/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/model.safetensors.index.json
+ [2025-03-16 13:15:52,801][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
+ [2025-03-16 13:15:52,801][transformers.modeling_utils][INFO] - Instantiating LlamaForSequenceClassification model under default dtype torch.bfloat16.
+ [2025-03-16 13:15:56,600][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at meta-llama/Llama-3.1-8B were not used when initializing LlamaForSequenceClassification: {'lm_head.weight'}
+ - This IS expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
+ - This IS NOT expected if you are initializing LlamaForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
+ [2025-03-16 13:15:56,601][transformers.modeling_utils][WARNING] - Some weights of LlamaForSequenceClassification were not initialized from the model checkpoint at meta-llama/Llama-3.1-8B and are newly initialized: ['score.weight']
+ You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+ [2025-03-16 13:15:56,832][__main__][INFO] - None
+ [2025-03-16 13:15:56,833][transformers.training_args][INFO] - PyTorch: setting up devices
+ [2025-03-16 13:15:56,871][__main__][INFO] - Total steps: 620. Number of warmup steps: 62
+ [2025-03-16 13:15:56,880][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+ [2025-03-16 13:15:56,893][transformers.trainer][INFO] - Using auto half precision backend
+ [2025-03-16 13:15:56,893][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
+ [2025-03-16 13:15:56,912][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-16 13:15:56,925][transformers.trainer][INFO] -
+ ***** Running Evaluation *****
+ [2025-03-16 13:15:56,925][transformers.trainer][INFO] - Num examples = 132
+ [2025-03-16 13:15:56,925][transformers.trainer][INFO] - Batch size = 2
+ [2025-03-16 13:16:58,143][transformers][INFO] - {'accuracy': 0.29545454545454547, 'RMSE': 46.056618647183825, 'QWK': 0.03250125649187485, 'HDIV': 0.015151515151515138, 'Macro F1': 0.07692307692307693}
+ [2025-03-16 13:16:58,145][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
+ [2025-03-16 13:16:58,289][transformers.trainer][INFO] - The following columns in the training set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
+ [2025-03-16 13:16:58,325][transformers.trainer][INFO] - ***** Running training *****
+ [2025-03-16 13:16:58,325][transformers.trainer][INFO] - Num examples = 500
+ [2025-03-16 13:16:58,325][transformers.trainer][INFO] - Num Epochs = 20
+ [2025-03-16 13:16:58,325][transformers.trainer][INFO] - Instantaneous batch size per device = 1
+ [2025-03-16 13:16:58,325][transformers.trainer][INFO] - Total train batch size (w. parallel, distributed & accumulation) = 16
+ [2025-03-16 13:16:58,325][transformers.trainer][INFO] - Gradient Accumulation steps = 16
+ [2025-03-16 13:16:58,325][transformers.trainer][INFO] - Total optimization steps = 620
+ [2025-03-16 13:16:58,328][transformers.trainer][INFO] - Number of trainable parameters = 20,996,096
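The per-evaluation metrics that follow (accuracy, RMSE, QWK, HDIV, Macro F1) come from the experiment's own metric function, which is not included in this commit. A plausible sketch for the standard ones, assuming scikit-learn and the 0–200 grade scale from the config above:

```python
# Sketch of a compute_metrics function producing metrics like those in this log
# (accuracy, QWK, Macro F1, RMSE); the experiment's real implementation may differ.
import numpy as np
from sklearn.metrics import accuracy_score, cohen_kappa_score, f1_score

GRADES = np.array([0, 40, 80, 120, 160, 200])  # label index -> competence score

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {
        "accuracy": accuracy_score(labels, preds),
        "QWK": cohen_kappa_score(labels, preds, weights="quadratic"),
        "Macro F1": f1_score(labels, preds, average="macro"),
        "RMSE": float(np.sqrt(np.mean((GRADES[preds] - GRADES[labels]) ** 2))),
    }
```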
130
+ [2025-03-16 13:30:53,496][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
131
+ [2025-03-16 13:30:53,497][transformers.trainer][INFO] -
132
+ ***** Running Evaluation *****
133
+ [2025-03-16 13:30:53,497][transformers.trainer][INFO] - Num examples = 132
134
+ [2025-03-16 13:30:53,498][transformers.trainer][INFO] - Batch size = 2
135
+ [2025-03-16 13:31:57,120][transformers][INFO] - {'accuracy': 0.3939393939393939, 'RMSE': 41.046905910780715, 'QWK': 0.07965489566613149, 'HDIV': 0.007575757575757569, 'Macro F1': 0.1734968771164121}
136
+ [2025-03-16 13:31:57,120][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
137
+ [2025-03-16 13:31:57,122][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-32
138
+ [2025-03-16 13:31:57,604][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
139
+ [2025-03-16 13:31:57,606][transformers.configuration_utils][INFO] - Model config LlamaConfig {
140
+ "architectures": [
141
+ "LlamaForCausalLM"
142
+ ],
143
+ "attention_bias": false,
144
+ "attention_dropout": 0.0,
145
+ "bos_token_id": 128000,
146
+ "eos_token_id": 128001,
147
+ "head_dim": 128,
148
+ "hidden_act": "silu",
149
+ "hidden_size": 4096,
150
+ "initializer_range": 0.02,
151
+ "intermediate_size": 14336,
152
+ "max_position_embeddings": 131072,
153
+ "mlp_bias": false,
154
+ "model_type": "llama",
155
+ "num_attention_heads": 32,
156
+ "num_hidden_layers": 32,
157
+ "num_key_value_heads": 8,
158
+ "pretraining_tp": 1,
159
+ "rms_norm_eps": 1e-05,
160
+ "rope_scaling": {
161
+ "factor": 8.0,
162
+ "high_freq_factor": 4.0,
163
+ "low_freq_factor": 1.0,
164
+ "original_max_position_embeddings": 8192,
165
+ "rope_type": "llama3"
166
+ },
167
+ "rope_theta": 500000.0,
168
+ "tie_word_embeddings": false,
169
+ "torch_dtype": "bfloat16",
170
+ "transformers_version": "4.49.0",
171
+ "use_cache": true,
172
+ "vocab_size": 128256
173
+ }
174
+
175
+ [2025-03-16 13:45:58,066][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
176
+ [2025-03-16 13:45:58,067][transformers.trainer][INFO] -
177
+ ***** Running Evaluation *****
178
+ [2025-03-16 13:45:58,067][transformers.trainer][INFO] - Num examples = 132
179
+ [2025-03-16 13:45:58,068][transformers.trainer][INFO] - Batch size = 2
180
+ [2025-03-16 13:47:01,924][transformers][INFO] - {'accuracy': 0.4696969696969697, 'RMSE': 33.574882386580704, 'QWK': 0.25437317784256563, 'HDIV': 0.007575757575757569, 'Macro F1': 0.19694989106753813}
181
+ [2025-03-16 13:47:01,924][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
182
+ [2025-03-16 13:47:01,926][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-64
183
+ [2025-03-16 13:47:02,387][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
184
+ [2025-03-16 13:47:02,390][transformers.configuration_utils][INFO] - Model config LlamaConfig {
185
+ "architectures": [
186
+ "LlamaForCausalLM"
187
+ ],
188
+ "attention_bias": false,
189
+ "attention_dropout": 0.0,
190
+ "bos_token_id": 128000,
191
+ "eos_token_id": 128001,
192
+ "head_dim": 128,
193
+ "hidden_act": "silu",
194
+ "hidden_size": 4096,
195
+ "initializer_range": 0.02,
196
+ "intermediate_size": 14336,
197
+ "max_position_embeddings": 131072,
198
+ "mlp_bias": false,
199
+ "model_type": "llama",
200
+ "num_attention_heads": 32,
201
+ "num_hidden_layers": 32,
202
+ "num_key_value_heads": 8,
203
+ "pretraining_tp": 1,
204
+ "rms_norm_eps": 1e-05,
205
+ "rope_scaling": {
206
+ "factor": 8.0,
207
+ "high_freq_factor": 4.0,
208
+ "low_freq_factor": 1.0,
209
+ "original_max_position_embeddings": 8192,
210
+ "rope_type": "llama3"
211
+ },
212
+ "rope_theta": 500000.0,
213
+ "tie_word_embeddings": false,
214
+ "torch_dtype": "bfloat16",
215
+ "transformers_version": "4.49.0",
216
+ "use_cache": true,
217
+ "vocab_size": 128256
218
+ }
219
+
220
+ [2025-03-16 13:47:02,559][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-32] due to args.save_total_limit
221
+ [2025-03-16 14:01:02,989][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
222
+ [2025-03-16 14:01:02,991][transformers.trainer][INFO] -
223
+ ***** Running Evaluation *****
224
+ [2025-03-16 14:01:02,991][transformers.trainer][INFO] - Num examples = 132
225
+ [2025-03-16 14:01:02,991][transformers.trainer][INFO] - Batch size = 2
226
+ [2025-03-16 14:02:06,865][transformers][INFO] - {'accuracy': 0.4772727272727273, 'RMSE': 32.84490643597388, 'QWK': 0.3022095509622237, 'HDIV': 0.007575757575757569, 'Macro F1': 0.2089608257095942}
227
+ [2025-03-16 14:02:06,866][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
228
+ [2025-03-16 14:02:06,867][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-96
229
+ [2025-03-16 14:02:07,358][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
230
+ [2025-03-16 14:02:07,360][transformers.configuration_utils][INFO] - Model config LlamaConfig {
231
+ "architectures": [
232
+ "LlamaForCausalLM"
233
+ ],
234
+ "attention_bias": false,
235
+ "attention_dropout": 0.0,
236
+ "bos_token_id": 128000,
237
+ "eos_token_id": 128001,
238
+ "head_dim": 128,
239
+ "hidden_act": "silu",
240
+ "hidden_size": 4096,
241
+ "initializer_range": 0.02,
242
+ "intermediate_size": 14336,
243
+ "max_position_embeddings": 131072,
244
+ "mlp_bias": false,
245
+ "model_type": "llama",
246
+ "num_attention_heads": 32,
247
+ "num_hidden_layers": 32,
248
+ "num_key_value_heads": 8,
249
+ "pretraining_tp": 1,
250
+ "rms_norm_eps": 1e-05,
251
+ "rope_scaling": {
252
+ "factor": 8.0,
253
+ "high_freq_factor": 4.0,
254
+ "low_freq_factor": 1.0,
255
+ "original_max_position_embeddings": 8192,
256
+ "rope_type": "llama3"
257
+ },
258
+ "rope_theta": 500000.0,
259
+ "tie_word_embeddings": false,
260
+ "torch_dtype": "bfloat16",
261
+ "transformers_version": "4.49.0",
262
+ "use_cache": true,
263
+ "vocab_size": 128256
264
+ }
265
+
266
+ [2025-03-16 14:02:07,530][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-64] due to args.save_total_limit
267
+ [2025-03-16 14:16:07,909][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
268
+ [2025-03-16 14:16:07,910][transformers.trainer][INFO] -
269
+ ***** Running Evaluation *****
270
+ [2025-03-16 14:16:07,910][transformers.trainer][INFO] - Num examples = 132
271
+ [2025-03-16 14:16:07,910][transformers.trainer][INFO] - Batch size = 2
272
+ [2025-03-16 14:17:11,741][transformers][INFO] - {'accuracy': 0.45454545454545453, 'RMSE': 38.92494720807615, 'QWK': 0.2276727204643325, 'HDIV': 0.007575757575757569, 'Macro F1': 0.30417862838915466}
273
+ [2025-03-16 14:17:11,741][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
274
+ [2025-03-16 14:17:11,742][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-128
275
+ [2025-03-16 14:17:12,265][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
276
+ [2025-03-16 14:17:12,267][transformers.configuration_utils][INFO] - Model config LlamaConfig {
277
+ "architectures": [
278
+ "LlamaForCausalLM"
279
+ ],
280
+ "attention_bias": false,
281
+ "attention_dropout": 0.0,
282
+ "bos_token_id": 128000,
283
+ "eos_token_id": 128001,
284
+ "head_dim": 128,
285
+ "hidden_act": "silu",
286
+ "hidden_size": 4096,
287
+ "initializer_range": 0.02,
288
+ "intermediate_size": 14336,
289
+ "max_position_embeddings": 131072,
290
+ "mlp_bias": false,
291
+ "model_type": "llama",
292
+ "num_attention_heads": 32,
293
+ "num_hidden_layers": 32,
294
+ "num_key_value_heads": 8,
295
+ "pretraining_tp": 1,
296
+ "rms_norm_eps": 1e-05,
297
+ "rope_scaling": {
298
+ "factor": 8.0,
299
+ "high_freq_factor": 4.0,
300
+ "low_freq_factor": 1.0,
301
+ "original_max_position_embeddings": 8192,
302
+ "rope_type": "llama3"
303
+ },
304
+ "rope_theta": 500000.0,
305
+ "tie_word_embeddings": false,
306
+ "torch_dtype": "bfloat16",
307
+ "transformers_version": "4.49.0",
308
+ "use_cache": true,
309
+ "vocab_size": 128256
310
+ }
311
+
312
+ [2025-03-16 14:31:12,254][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
313
+ [2025-03-16 14:31:12,256][transformers.trainer][INFO] -
314
+ ***** Running Evaluation *****
315
+ [2025-03-16 14:31:12,256][transformers.trainer][INFO] - Num examples = 132
316
+ [2025-03-16 14:31:12,256][transformers.trainer][INFO] - Batch size = 2
317
+ [2025-03-16 14:32:16,023][transformers][INFO] - {'accuracy': 0.5, 'RMSE': 33.574882386580704, 'QWK': 0.16648560564910375, 'HDIV': 0.007575757575757569, 'Macro F1': 0.183216929010737}
318
+ [2025-03-16 14:32:16,023][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
319
+ [2025-03-16 14:32:16,025][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-160
320
+ [2025-03-16 14:32:16,566][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
321
+ [2025-03-16 14:32:16,568][transformers.configuration_utils][INFO] - Model config LlamaConfig {
322
+ "architectures": [
323
+ "LlamaForCausalLM"
324
+ ],
325
+ "attention_bias": false,
326
+ "attention_dropout": 0.0,
327
+ "bos_token_id": 128000,
328
+ "eos_token_id": 128001,
329
+ "head_dim": 128,
330
+ "hidden_act": "silu",
331
+ "hidden_size": 4096,
332
+ "initializer_range": 0.02,
333
+ "intermediate_size": 14336,
334
+ "max_position_embeddings": 131072,
335
+ "mlp_bias": false,
336
+ "model_type": "llama",
337
+ "num_attention_heads": 32,
338
+ "num_hidden_layers": 32,
339
+ "num_key_value_heads": 8,
340
+ "pretraining_tp": 1,
341
+ "rms_norm_eps": 1e-05,
342
+ "rope_scaling": {
343
+ "factor": 8.0,
344
+ "high_freq_factor": 4.0,
345
+ "low_freq_factor": 1.0,
346
+ "original_max_position_embeddings": 8192,
347
+ "rope_type": "llama3"
348
+ },
349
+ "rope_theta": 500000.0,
350
+ "tie_word_embeddings": false,
351
+ "torch_dtype": "bfloat16",
352
+ "transformers_version": "4.49.0",
353
+ "use_cache": true,
354
+ "vocab_size": 128256
355
+ }
356
+
357
+ [2025-03-16 14:32:16,736][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-128] due to args.save_total_limit
358
+ [2025-03-16 14:46:17,000][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
359
+ [2025-03-16 14:46:17,001][transformers.trainer][INFO] -
360
+ ***** Running Evaluation *****
361
+ [2025-03-16 14:46:17,001][transformers.trainer][INFO] - Num examples = 132
362
+ [2025-03-16 14:46:17,001][transformers.trainer][INFO] - Batch size = 2
363
+ [2025-03-16 14:47:20,843][transformers][INFO] - {'accuracy': 0.5984848484848485, 'RMSE': 28.91995221924885, 'QWK': 0.5161495962600935, 'HDIV': 0.015151515151515138, 'Macro F1': 0.2924253285543608}
364
+ [2025-03-16 14:47:20,843][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
365
+ [2025-03-16 14:47:20,844][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-192
366
+ [2025-03-16 14:47:21,310][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
367
+ [2025-03-16 14:47:21,313][transformers.configuration_utils][INFO] - Model config LlamaConfig {
368
+ "architectures": [
369
+ "LlamaForCausalLM"
370
+ ],
371
+ "attention_bias": false,
372
+ "attention_dropout": 0.0,
373
+ "bos_token_id": 128000,
374
+ "eos_token_id": 128001,
375
+ "head_dim": 128,
376
+ "hidden_act": "silu",
377
+ "hidden_size": 4096,
378
+ "initializer_range": 0.02,
379
+ "intermediate_size": 14336,
380
+ "max_position_embeddings": 131072,
381
+ "mlp_bias": false,
382
+ "model_type": "llama",
383
+ "num_attention_heads": 32,
384
+ "num_hidden_layers": 32,
385
+ "num_key_value_heads": 8,
386
+ "pretraining_tp": 1,
387
+ "rms_norm_eps": 1e-05,
388
+ "rope_scaling": {
389
+ "factor": 8.0,
390
+ "high_freq_factor": 4.0,
391
+ "low_freq_factor": 1.0,
392
+ "original_max_position_embeddings": 8192,
393
+ "rope_type": "llama3"
394
+ },
395
+ "rope_theta": 500000.0,
396
+ "tie_word_embeddings": false,
397
+ "torch_dtype": "bfloat16",
398
+ "transformers_version": "4.49.0",
399
+ "use_cache": true,
400
+ "vocab_size": 128256
401
+ }
402
+
403
+ [2025-03-16 14:47:21,478][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-96] due to args.save_total_limit
404
+ [2025-03-16 14:47:21,490][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-160] due to args.save_total_limit
405
+ [2025-03-16 15:01:21,920][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
406
+ [2025-03-16 15:01:21,921][transformers.trainer][INFO] -
407
+ ***** Running Evaluation *****
408
+ [2025-03-16 15:01:21,921][transformers.trainer][INFO] - Num examples = 132
409
+ [2025-03-16 15:01:21,921][transformers.trainer][INFO] - Batch size = 2
410
+ [2025-03-16 15:02:25,838][transformers][INFO] - {'accuracy': 0.5757575757575758, 'RMSE': 32.84490643597388, 'QWK': 0.276957163958641, 'HDIV': 0.007575757575757569, 'Macro F1': 0.2738722188201674}
411
+ [2025-03-16 15:02:25,838][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
412
+ [2025-03-16 15:02:25,839][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-224
413
+ [2025-03-16 15:02:26,368][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
414
+ [2025-03-16 15:02:26,370][transformers.configuration_utils][INFO] - Model config LlamaConfig {
415
+ "architectures": [
416
+ "LlamaForCausalLM"
417
+ ],
418
+ "attention_bias": false,
419
+ "attention_dropout": 0.0,
420
+ "bos_token_id": 128000,
421
+ "eos_token_id": 128001,
422
+ "head_dim": 128,
423
+ "hidden_act": "silu",
424
+ "hidden_size": 4096,
425
+ "initializer_range": 0.02,
426
+ "intermediate_size": 14336,
427
+ "max_position_embeddings": 131072,
428
+ "mlp_bias": false,
429
+ "model_type": "llama",
430
+ "num_attention_heads": 32,
431
+ "num_hidden_layers": 32,
432
+ "num_key_value_heads": 8,
433
+ "pretraining_tp": 1,
434
+ "rms_norm_eps": 1e-05,
435
+ "rope_scaling": {
436
+ "factor": 8.0,
437
+ "high_freq_factor": 4.0,
438
+ "low_freq_factor": 1.0,
439
+ "original_max_position_embeddings": 8192,
440
+ "rope_type": "llama3"
441
+ },
442
+ "rope_theta": 500000.0,
443
+ "tie_word_embeddings": false,
444
+ "torch_dtype": "bfloat16",
445
+ "transformers_version": "4.49.0",
446
+ "use_cache": true,
447
+ "vocab_size": 128256
448
+ }
449
+
450
+ [2025-03-16 15:16:27,446][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
451
+ [2025-03-16 15:16:27,447][transformers.trainer][INFO] -
452
+ ***** Running Evaluation *****
453
+ [2025-03-16 15:16:27,447][transformers.trainer][INFO] - Num examples = 132
454
+ [2025-03-16 15:16:27,447][transformers.trainer][INFO] - Batch size = 2
455
+ [2025-03-16 15:17:31,369][transformers][INFO] - {'accuracy': 0.5909090909090909, 'RMSE': 29.336088024923512, 'QWK': 0.5172058520502782, 'HDIV': 0.007575757575757569, 'Macro F1': 0.3985035687797945}
456
+ [2025-03-16 15:17:31,369][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
457
+ [2025-03-16 15:17:31,371][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-256
458
+ [2025-03-16 15:17:31,878][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
459
+ [2025-03-16 15:17:31,881][transformers.configuration_utils][INFO] - Model config LlamaConfig {
460
+ "architectures": [
461
+ "LlamaForCausalLM"
462
+ ],
463
+ "attention_bias": false,
464
+ "attention_dropout": 0.0,
465
+ "bos_token_id": 128000,
466
+ "eos_token_id": 128001,
467
+ "head_dim": 128,
468
+ "hidden_act": "silu",
469
+ "hidden_size": 4096,
470
+ "initializer_range": 0.02,
471
+ "intermediate_size": 14336,
472
+ "max_position_embeddings": 131072,
473
+ "mlp_bias": false,
474
+ "model_type": "llama",
475
+ "num_attention_heads": 32,
476
+ "num_hidden_layers": 32,
477
+ "num_key_value_heads": 8,
478
+ "pretraining_tp": 1,
479
+ "rms_norm_eps": 1e-05,
480
+ "rope_scaling": {
481
+ "factor": 8.0,
482
+ "high_freq_factor": 4.0,
483
+ "low_freq_factor": 1.0,
484
+ "original_max_position_embeddings": 8192,
485
+ "rope_type": "llama3"
486
+ },
487
+ "rope_theta": 500000.0,
488
+ "tie_word_embeddings": false,
489
+ "torch_dtype": "bfloat16",
490
+ "transformers_version": "4.49.0",
491
+ "use_cache": true,
492
+ "vocab_size": 128256
493
+ }
494
+
495
+ [2025-03-16 15:17:32,051][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-192] due to args.save_total_limit
496
+ [2025-03-16 15:17:32,063][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-224] due to args.save_total_limit
497
+ [2025-03-16 15:31:34,746][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
498
+ [2025-03-16 15:31:34,747][transformers.trainer][INFO] -
499
+ ***** Running Evaluation *****
500
+ [2025-03-16 15:31:34,747][transformers.trainer][INFO] - Num examples = 132
501
+ [2025-03-16 15:31:34,747][transformers.trainer][INFO] - Batch size = 2
502
+ [2025-03-16 15:32:39,461][transformers][INFO] - {'accuracy': 0.5833333333333334, 'RMSE': 28.91995221924885, 'QWK': 0.4639830508474576, 'HDIV': 0.007575757575757569, 'Macro F1': 0.28377402226355086}
503
+ [2025-03-16 15:32:39,462][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
504
+ [2025-03-16 15:32:39,463][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-288
505
+ [2025-03-16 15:32:39,927][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
506
+ [2025-03-16 15:32:39,929][transformers.configuration_utils][INFO] - Model config LlamaConfig {
507
+ "architectures": [
508
+ "LlamaForCausalLM"
509
+ ],
510
+ "attention_bias": false,
511
+ "attention_dropout": 0.0,
512
+ "bos_token_id": 128000,
513
+ "eos_token_id": 128001,
514
+ "head_dim": 128,
515
+ "hidden_act": "silu",
516
+ "hidden_size": 4096,
517
+ "initializer_range": 0.02,
518
+ "intermediate_size": 14336,
519
+ "max_position_embeddings": 131072,
520
+ "mlp_bias": false,
521
+ "model_type": "llama",
522
+ "num_attention_heads": 32,
523
+ "num_hidden_layers": 32,
524
+ "num_key_value_heads": 8,
525
+ "pretraining_tp": 1,
526
+ "rms_norm_eps": 1e-05,
527
+ "rope_scaling": {
528
+ "factor": 8.0,
529
+ "high_freq_factor": 4.0,
530
+ "low_freq_factor": 1.0,
531
+ "original_max_position_embeddings": 8192,
532
+ "rope_type": "llama3"
533
+ },
534
+ "rope_theta": 500000.0,
535
+ "tie_word_embeddings": false,
536
+ "torch_dtype": "bfloat16",
537
+ "transformers_version": "4.49.0",
538
+ "use_cache": true,
539
+ "vocab_size": 128256
540
+ }
541
+
542
+ [2025-03-16 15:46:43,172][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
543
+ [2025-03-16 15:46:43,174][transformers.trainer][INFO] -
544
+ ***** Running Evaluation *****
545
+ [2025-03-16 15:46:43,174][transformers.trainer][INFO] - Num examples = 132
546
+ [2025-03-16 15:46:43,174][transformers.trainer][INFO] - Batch size = 2
547
+ [2025-03-16 15:47:47,303][transformers][INFO] - {'accuracy': 0.5833333333333334, 'RMSE': 27.633971188310298, 'QWK': 0.510593220338983, 'HDIV': 0.007575757575757569, 'Macro F1': 0.3399487967229903}
548
+ [2025-03-16 15:47:47,303][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
549
+ [2025-03-16 15:47:47,304][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-320
550
+ [2025-03-16 15:47:47,769][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
551
+ [2025-03-16 15:47:47,771][transformers.configuration_utils][INFO] - Model config LlamaConfig {
552
+ "architectures": [
553
+ "LlamaForCausalLM"
554
+ ],
555
+ "attention_bias": false,
556
+ "attention_dropout": 0.0,
557
+ "bos_token_id": 128000,
558
+ "eos_token_id": 128001,
559
+ "head_dim": 128,
560
+ "hidden_act": "silu",
561
+ "hidden_size": 4096,
562
+ "initializer_range": 0.02,
563
+ "intermediate_size": 14336,
564
+ "max_position_embeddings": 131072,
565
+ "mlp_bias": false,
566
+ "model_type": "llama",
567
+ "num_attention_heads": 32,
568
+ "num_hidden_layers": 32,
569
+ "num_key_value_heads": 8,
570
+ "pretraining_tp": 1,
571
+ "rms_norm_eps": 1e-05,
572
+ "rope_scaling": {
573
+ "factor": 8.0,
574
+ "high_freq_factor": 4.0,
575
+ "low_freq_factor": 1.0,
576
+ "original_max_position_embeddings": 8192,
577
+ "rope_type": "llama3"
578
+ },
579
+ "rope_theta": 500000.0,
580
+ "tie_word_embeddings": false,
581
+ "torch_dtype": "bfloat16",
582
+ "transformers_version": "4.49.0",
583
+ "use_cache": true,
584
+ "vocab_size": 128256
585
+ }
586
+
587
+ [2025-03-16 15:47:47,938][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-288] due to args.save_total_limit
588
+ [2025-03-16 16:01:51,129][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
589
+ [2025-03-16 16:01:51,130][transformers.trainer][INFO] -
590
+ ***** Running Evaluation *****
591
+ [2025-03-16 16:01:51,130][transformers.trainer][INFO] - Num examples = 132
592
+ [2025-03-16 16:01:51,130][transformers.trainer][INFO] - Batch size = 2
593
+ [2025-03-16 16:02:55,323][transformers][INFO] - {'accuracy': 0.5984848484848485, 'RMSE': 27.19179912021158, 'QWK': 0.5295629820051413, 'HDIV': 0.007575757575757569, 'Macro F1': 0.34896996773392897}
594
+ [2025-03-16 16:02:55,323][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
595
+ [2025-03-16 16:02:55,324][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-352
596
+ [2025-03-16 16:02:55,847][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
597
+ [2025-03-16 16:02:55,849][transformers.configuration_utils][INFO] - Model config LlamaConfig {
598
+ "architectures": [
599
+ "LlamaForCausalLM"
600
+ ],
601
+ "attention_bias": false,
602
+ "attention_dropout": 0.0,
603
+ "bos_token_id": 128000,
604
+ "eos_token_id": 128001,
605
+ "head_dim": 128,
606
+ "hidden_act": "silu",
607
+ "hidden_size": 4096,
608
+ "initializer_range": 0.02,
609
+ "intermediate_size": 14336,
610
+ "max_position_embeddings": 131072,
611
+ "mlp_bias": false,
612
+ "model_type": "llama",
613
+ "num_attention_heads": 32,
614
+ "num_hidden_layers": 32,
615
+ "num_key_value_heads": 8,
616
+ "pretraining_tp": 1,
617
+ "rms_norm_eps": 1e-05,
618
+ "rope_scaling": {
619
+ "factor": 8.0,
620
+ "high_freq_factor": 4.0,
621
+ "low_freq_factor": 1.0,
622
+ "original_max_position_embeddings": 8192,
623
+ "rope_type": "llama3"
624
+ },
625
+ "rope_theta": 500000.0,
626
+ "tie_word_embeddings": false,
627
+ "torch_dtype": "bfloat16",
628
+ "transformers_version": "4.49.0",
629
+ "use_cache": true,
630
+ "vocab_size": 128256
631
+ }
632
+
633
+ [2025-03-16 16:02:56,019][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-256] due to args.save_total_limit
634
+ [2025-03-16 16:02:56,031][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-320] due to args.save_total_limit
635
+ [2025-03-16 16:17:00,650][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
636
+ [2025-03-16 16:17:00,651][transformers.trainer][INFO] -
637
+ ***** Running Evaluation *****
638
+ [2025-03-16 16:17:00,651][transformers.trainer][INFO] - Num examples = 132
639
+ [2025-03-16 16:17:00,651][transformers.trainer][INFO] - Batch size = 2
640
+ [2025-03-16 16:18:05,272][transformers][INFO] - {'accuracy': 0.553030303030303, 'RMSE': 30.944720996896347, 'QWK': 0.4280386134269418, 'HDIV': 0.007575757575757569, 'Macro F1': 0.27696997064800183}
641
+ [2025-03-16 16:18:05,273][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
642
+ [2025-03-16 16:18:05,274][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-384
643
+ [2025-03-16 16:18:05,734][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
644
+ [2025-03-16 16:18:05,736][transformers.configuration_utils][INFO] - Model config LlamaConfig {
645
+ "architectures": [
646
+ "LlamaForCausalLM"
647
+ ],
648
+ "attention_bias": false,
649
+ "attention_dropout": 0.0,
650
+ "bos_token_id": 128000,
651
+ "eos_token_id": 128001,
652
+ "head_dim": 128,
653
+ "hidden_act": "silu",
654
+ "hidden_size": 4096,
655
+ "initializer_range": 0.02,
656
+ "intermediate_size": 14336,
657
+ "max_position_embeddings": 131072,
658
+ "mlp_bias": false,
659
+ "model_type": "llama",
660
+ "num_attention_heads": 32,
661
+ "num_hidden_layers": 32,
662
+ "num_key_value_heads": 8,
663
+ "pretraining_tp": 1,
664
+ "rms_norm_eps": 1e-05,
665
+ "rope_scaling": {
666
+ "factor": 8.0,
667
+ "high_freq_factor": 4.0,
668
+ "low_freq_factor": 1.0,
669
+ "original_max_position_embeddings": 8192,
670
+ "rope_type": "llama3"
671
+ },
672
+ "rope_theta": 500000.0,
673
+ "tie_word_embeddings": false,
674
+ "torch_dtype": "bfloat16",
675
+ "transformers_version": "4.49.0",
676
+ "use_cache": true,
677
+ "vocab_size": 128256
678
+ }
679
+
680
+ [2025-03-16 16:32:09,622][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
681
+ [2025-03-16 16:32:09,623][transformers.trainer][INFO] -
682
+ ***** Running Evaluation *****
683
+ [2025-03-16 16:32:09,623][transformers.trainer][INFO] - Num examples = 132
684
+ [2025-03-16 16:32:09,623][transformers.trainer][INFO] - Batch size = 2
685
+ [2025-03-16 16:33:13,955][transformers][INFO] - {'accuracy': 0.5833333333333334, 'RMSE': 28.91995221924885, 'QWK': 0.49003359462486007, 'HDIV': 0.007575757575757569, 'Macro F1': 0.3452991452991453}
686
+ [2025-03-16 16:33:13,955][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
687
+ [2025-03-16 16:33:13,956][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-416
688
+ [2025-03-16 16:33:14,443][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
689
+ [2025-03-16 16:33:14,445][transformers.configuration_utils][INFO] - Model config LlamaConfig {
690
+ "architectures": [
691
+ "LlamaForCausalLM"
692
+ ],
693
+ "attention_bias": false,
694
+ "attention_dropout": 0.0,
695
+ "bos_token_id": 128000,
696
+ "eos_token_id": 128001,
697
+ "head_dim": 128,
698
+ "hidden_act": "silu",
699
+ "hidden_size": 4096,
700
+ "initializer_range": 0.02,
701
+ "intermediate_size": 14336,
702
+ "max_position_embeddings": 131072,
703
+ "mlp_bias": false,
704
+ "model_type": "llama",
705
+ "num_attention_heads": 32,
706
+ "num_hidden_layers": 32,
707
+ "num_key_value_heads": 8,
708
+ "pretraining_tp": 1,
709
+ "rms_norm_eps": 1e-05,
710
+ "rope_scaling": {
711
+ "factor": 8.0,
712
+ "high_freq_factor": 4.0,
713
+ "low_freq_factor": 1.0,
714
+ "original_max_position_embeddings": 8192,
715
+ "rope_type": "llama3"
716
+ },
717
+ "rope_theta": 500000.0,
718
+ "tie_word_embeddings": false,
719
+ "torch_dtype": "bfloat16",
720
+ "transformers_version": "4.49.0",
721
+ "use_cache": true,
722
+ "vocab_size": 128256
723
+ }
724
+
725
+ [2025-03-16 16:33:14,611][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-384] due to args.save_total_limit
726
+ [2025-03-16 16:47:19,993][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
727
+ [2025-03-16 16:47:19,994][transformers.trainer][INFO] -
728
+ ***** Running Evaluation *****
729
+ [2025-03-16 16:47:19,994][transformers.trainer][INFO] - Num examples = 132
730
+ [2025-03-16 16:47:19,994][transformers.trainer][INFO] - Batch size = 2
731
+ [2025-03-16 16:48:24,194][transformers][INFO] - {'accuracy': 0.5227272727272727, 'RMSE': 32.84490643597388, 'QWK': 0.4285992217898833, 'HDIV': 0.007575757575757569, 'Macro F1': 0.36892246283550634}
732
+ [2025-03-16 16:48:24,194][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
733
+ [2025-03-16 16:48:24,196][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-448
734
+ [2025-03-16 16:48:24,730][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
735
+ [2025-03-16 16:48:24,732][transformers.configuration_utils][INFO] - Model config LlamaConfig {
736
+ "architectures": [
737
+ "LlamaForCausalLM"
738
+ ],
739
+ "attention_bias": false,
740
+ "attention_dropout": 0.0,
741
+ "bos_token_id": 128000,
742
+ "eos_token_id": 128001,
743
+ "head_dim": 128,
744
+ "hidden_act": "silu",
745
+ "hidden_size": 4096,
746
+ "initializer_range": 0.02,
747
+ "intermediate_size": 14336,
748
+ "max_position_embeddings": 131072,
749
+ "mlp_bias": false,
750
+ "model_type": "llama",
751
+ "num_attention_heads": 32,
752
+ "num_hidden_layers": 32,
753
+ "num_key_value_heads": 8,
754
+ "pretraining_tp": 1,
755
+ "rms_norm_eps": 1e-05,
756
+ "rope_scaling": {
757
+ "factor": 8.0,
758
+ "high_freq_factor": 4.0,
759
+ "low_freq_factor": 1.0,
760
+ "original_max_position_embeddings": 8192,
761
+ "rope_type": "llama3"
762
+ },
763
+ "rope_theta": 500000.0,
764
+ "tie_word_embeddings": false,
765
+ "torch_dtype": "bfloat16",
766
+ "transformers_version": "4.49.0",
767
+ "use_cache": true,
768
+ "vocab_size": 128256
769
+ }
770
+
771
+ [2025-03-16 16:48:24,895][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-416] due to args.save_total_limit
772
+ [2025-03-16 17:02:29,963][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
773
+ [2025-03-16 17:02:29,964][transformers.trainer][INFO] -
774
+ ***** Running Evaluation *****
775
+ [2025-03-16 17:02:29,964][transformers.trainer][INFO] - Num examples = 132
776
+ [2025-03-16 17:02:29,964][transformers.trainer][INFO] - Batch size = 2
777
+ [2025-03-16 17:03:34,106][transformers][INFO] - {'accuracy': 0.6136363636363636, 'RMSE': 26.74231693686086, 'QWK': 0.5616839261593878, 'HDIV': 0.007575757575757569, 'Macro F1': 0.3411934470758}
778
+ [2025-03-16 17:03:34,106][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
779
+ [2025-03-16 17:03:34,107][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-480
780
+ [2025-03-16 17:03:34,596][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
781
+ [2025-03-16 17:03:34,598][transformers.configuration_utils][INFO] - Model config LlamaConfig {
782
+ "architectures": [
783
+ "LlamaForCausalLM"
784
+ ],
785
+ "attention_bias": false,
786
+ "attention_dropout": 0.0,
787
+ "bos_token_id": 128000,
788
+ "eos_token_id": 128001,
789
+ "head_dim": 128,
790
+ "hidden_act": "silu",
791
+ "hidden_size": 4096,
792
+ "initializer_range": 0.02,
793
+ "intermediate_size": 14336,
794
+ "max_position_embeddings": 131072,
795
+ "mlp_bias": false,
796
+ "model_type": "llama",
797
+ "num_attention_heads": 32,
798
+ "num_hidden_layers": 32,
799
+ "num_key_value_heads": 8,
800
+ "pretraining_tp": 1,
801
+ "rms_norm_eps": 1e-05,
802
+ "rope_scaling": {
803
+ "factor": 8.0,
804
+ "high_freq_factor": 4.0,
805
+ "low_freq_factor": 1.0,
806
+ "original_max_position_embeddings": 8192,
807
+ "rope_type": "llama3"
808
+ },
809
+ "rope_theta": 500000.0,
810
+ "tie_word_embeddings": false,
811
+ "torch_dtype": "bfloat16",
812
+ "transformers_version": "4.49.0",
813
+ "use_cache": true,
814
+ "vocab_size": 128256
815
+ }
816
+
817
+ [2025-03-16 17:03:34,762][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-352] due to args.save_total_limit
818
+ [2025-03-16 17:03:34,774][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-448] due to args.save_total_limit
819
+ [2025-03-16 17:17:38,703][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
820
+ [2025-03-16 17:17:38,704][transformers.trainer][INFO] -
821
+ ***** Running Evaluation *****
822
+ [2025-03-16 17:17:38,704][transformers.trainer][INFO] - Num examples = 132
823
+ [2025-03-16 17:17:38,704][transformers.trainer][INFO] - Batch size = 2
824
+ [2025-03-16 17:18:43,041][transformers][INFO] - {'accuracy': 0.5606060606060606, 'RMSE': 30.15113445777636, 'QWK': 0.49084550504011537, 'HDIV': 0.007575757575757569, 'Macro F1': 0.3855266713094572}
825
+ [2025-03-16 17:18:43,042][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
826
+ [2025-03-16 17:18:43,043][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-512
827
+ [2025-03-16 17:18:43,576][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
828
+ [2025-03-16 17:18:43,578][transformers.configuration_utils][INFO] - Model config LlamaConfig {
829
+ "architectures": [
830
+ "LlamaForCausalLM"
831
+ ],
832
+ "attention_bias": false,
833
+ "attention_dropout": 0.0,
834
+ "bos_token_id": 128000,
835
+ "eos_token_id": 128001,
836
+ "head_dim": 128,
837
+ "hidden_act": "silu",
838
+ "hidden_size": 4096,
839
+ "initializer_range": 0.02,
840
+ "intermediate_size": 14336,
841
+ "max_position_embeddings": 131072,
842
+ "mlp_bias": false,
843
+ "model_type": "llama",
844
+ "num_attention_heads": 32,
845
+ "num_hidden_layers": 32,
846
+ "num_key_value_heads": 8,
847
+ "pretraining_tp": 1,
848
+ "rms_norm_eps": 1e-05,
849
+ "rope_scaling": {
850
+ "factor": 8.0,
851
+ "high_freq_factor": 4.0,
852
+ "low_freq_factor": 1.0,
853
+ "original_max_position_embeddings": 8192,
854
+ "rope_type": "llama3"
855
+ },
856
+ "rope_theta": 500000.0,
857
+ "tie_word_embeddings": false,
858
+ "torch_dtype": "bfloat16",
859
+ "transformers_version": "4.49.0",
860
+ "use_cache": true,
861
+ "vocab_size": 128256
862
+ }
863
+
864
+ [2025-03-16 17:32:49,532][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
865
+ [2025-03-16 17:32:49,533][transformers.trainer][INFO] -
866
+ ***** Running Evaluation *****
867
+ [2025-03-16 17:32:49,533][transformers.trainer][INFO] - Num examples = 132
868
+ [2025-03-16 17:32:49,533][transformers.trainer][INFO] - Batch size = 2
869
+ [2025-03-16 17:33:53,775][transformers][INFO] - {'accuracy': 0.5909090909090909, 'RMSE': 29.336088024923512, 'QWK': 0.5077731092436977, 'HDIV': 0.007575757575757569, 'Macro F1': 0.3968880268150341}
870
+ [2025-03-16 17:33:53,776][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
871
+ [2025-03-16 17:33:53,777][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-544
872
+ [2025-03-16 17:33:54,264][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
873
+ [2025-03-16 17:33:54,266][transformers.configuration_utils][INFO] - Model config LlamaConfig {
874
+ "architectures": [
875
+ "LlamaForCausalLM"
876
+ ],
877
+ "attention_bias": false,
878
+ "attention_dropout": 0.0,
879
+ "bos_token_id": 128000,
880
+ "eos_token_id": 128001,
881
+ "head_dim": 128,
882
+ "hidden_act": "silu",
883
+ "hidden_size": 4096,
884
+ "initializer_range": 0.02,
885
+ "intermediate_size": 14336,
886
+ "max_position_embeddings": 131072,
887
+ "mlp_bias": false,
888
+ "model_type": "llama",
889
+ "num_attention_heads": 32,
890
+ "num_hidden_layers": 32,
891
+ "num_key_value_heads": 8,
892
+ "pretraining_tp": 1,
893
+ "rms_norm_eps": 1e-05,
894
+ "rope_scaling": {
895
+ "factor": 8.0,
896
+ "high_freq_factor": 4.0,
897
+ "low_freq_factor": 1.0,
898
+ "original_max_position_embeddings": 8192,
899
+ "rope_type": "llama3"
900
+ },
901
+ "rope_theta": 500000.0,
902
+ "tie_word_embeddings": false,
903
+ "torch_dtype": "bfloat16",
904
+ "transformers_version": "4.49.0",
905
+ "use_cache": true,
906
+ "vocab_size": 128256
907
+ }
908
+
909
+ [2025-03-16 17:33:54,431][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-512] due to args.save_total_limit
910
+ [2025-03-16 17:47:58,410][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
911
+ [2025-03-16 17:47:58,412][transformers.trainer][INFO] -
912
+ ***** Running Evaluation *****
913
+ [2025-03-16 17:47:58,412][transformers.trainer][INFO] - Num examples = 132
914
+ [2025-03-16 17:47:58,412][transformers.trainer][INFO] - Batch size = 2
915
+ [2025-03-16 17:49:02,705][transformers][INFO] - {'accuracy': 0.6287878787878788, 'RMSE': 26.285149626910837, 'QWK': 0.579476861167002, 'HDIV': 0.007575757575757569, 'Macro F1': 0.394676583276989}
916
+ [2025-03-16 17:49:02,706][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
917
+ [2025-03-16 17:49:02,707][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-576
918
+ [2025-03-16 17:49:03,279][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
919
+ [2025-03-16 17:49:03,281][transformers.configuration_utils][INFO] - Model config LlamaConfig {
920
+ "architectures": [
921
+ "LlamaForCausalLM"
922
+ ],
923
+ "attention_bias": false,
924
+ "attention_dropout": 0.0,
925
+ "bos_token_id": 128000,
926
+ "eos_token_id": 128001,
927
+ "head_dim": 128,
928
+ "hidden_act": "silu",
929
+ "hidden_size": 4096,
930
+ "initializer_range": 0.02,
931
+ "intermediate_size": 14336,
932
+ "max_position_embeddings": 131072,
933
+ "mlp_bias": false,
934
+ "model_type": "llama",
935
+ "num_attention_heads": 32,
936
+ "num_hidden_layers": 32,
937
+ "num_key_value_heads": 8,
938
+ "pretraining_tp": 1,
939
+ "rms_norm_eps": 1e-05,
940
+ "rope_scaling": {
941
+ "factor": 8.0,
942
+ "high_freq_factor": 4.0,
943
+ "low_freq_factor": 1.0,
944
+ "original_max_position_embeddings": 8192,
945
+ "rope_type": "llama3"
946
+ },
947
+ "rope_theta": 500000.0,
948
+ "tie_word_embeddings": false,
949
+ "torch_dtype": "bfloat16",
950
+ "transformers_version": "4.49.0",
951
+ "use_cache": true,
952
+ "vocab_size": 128256
953
+ }
954
+
955
+ [2025-03-16 17:49:03,447][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-480] due to args.save_total_limit
956
+ [2025-03-16 17:49:03,459][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-544] due to args.save_total_limit
957
+ [2025-03-16 18:03:09,551][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
958
+ [2025-03-16 18:03:09,553][transformers.trainer][INFO] -
959
+ ***** Running Evaluation *****
960
+ [2025-03-16 18:03:09,553][transformers.trainer][INFO] - Num examples = 132
961
+ [2025-03-16 18:03:09,553][transformers.trainer][INFO] - Batch size = 2
962
+ [2025-03-16 18:04:13,897][transformers][INFO] - {'accuracy': 0.5909090909090909, 'RMSE': 28.06917861068948, 'QWK': 0.5302233902759528, 'HDIV': 0.007575757575757569, 'Macro F1': 0.3776732460185698}
963
+ [2025-03-16 18:04:13,897][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
964
+ [2025-03-16 18:04:13,899][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-608
965
+ [2025-03-16 18:04:14,388][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
966
+ [2025-03-16 18:04:14,390][transformers.configuration_utils][INFO] - Model config LlamaConfig {
967
+ "architectures": [
968
+ "LlamaForCausalLM"
969
+ ],
970
+ "attention_bias": false,
971
+ "attention_dropout": 0.0,
972
+ "bos_token_id": 128000,
973
+ "eos_token_id": 128001,
974
+ "head_dim": 128,
975
+ "hidden_act": "silu",
976
+ "hidden_size": 4096,
977
+ "initializer_range": 0.02,
978
+ "intermediate_size": 14336,
979
+ "max_position_embeddings": 131072,
980
+ "mlp_bias": false,
981
+ "model_type": "llama",
982
+ "num_attention_heads": 32,
983
+ "num_hidden_layers": 32,
984
+ "num_key_value_heads": 8,
985
+ "pretraining_tp": 1,
986
+ "rms_norm_eps": 1e-05,
987
+ "rope_scaling": {
988
+ "factor": 8.0,
989
+ "high_freq_factor": 4.0,
990
+ "low_freq_factor": 1.0,
991
+ "original_max_position_embeddings": 8192,
992
+ "rope_type": "llama3"
993
+ },
994
+ "rope_theta": 500000.0,
995
+ "tie_word_embeddings": false,
996
+ "torch_dtype": "bfloat16",
997
+ "transformers_version": "4.49.0",
998
+ "use_cache": true,
999
+ "vocab_size": 128256
1000
+ }
1001
+
1002
+ [2025-03-16 18:09:39,235][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-620
1003
+ [2025-03-16 18:09:39,701][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
1004
+ [2025-03-16 18:09:39,703][transformers.configuration_utils][INFO] - Model config LlamaConfig {
1005
+ "architectures": [
1006
+ "LlamaForCausalLM"
1007
+ ],
1008
+ "attention_bias": false,
1009
+ "attention_dropout": 0.0,
1010
+ "bos_token_id": 128000,
1011
+ "eos_token_id": 128001,
1012
+ "head_dim": 128,
1013
+ "hidden_act": "silu",
1014
+ "hidden_size": 4096,
1015
+ "initializer_range": 0.02,
1016
+ "intermediate_size": 14336,
1017
+ "max_position_embeddings": 131072,
1018
+ "mlp_bias": false,
1019
+ "model_type": "llama",
1020
+ "num_attention_heads": 32,
1021
+ "num_hidden_layers": 32,
1022
+ "num_key_value_heads": 8,
1023
+ "pretraining_tp": 1,
1024
+ "rms_norm_eps": 1e-05,
1025
+ "rope_scaling": {
1026
+ "factor": 8.0,
1027
+ "high_freq_factor": 4.0,
1028
+ "low_freq_factor": 1.0,
1029
+ "original_max_position_embeddings": 8192,
1030
+ "rope_type": "llama3"
1031
+ },
1032
+ "rope_theta": 500000.0,
1033
+ "tie_word_embeddings": false,
1034
+ "torch_dtype": "bfloat16",
1035
+ "transformers_version": "4.49.0",
1036
+ "use_cache": true,
1037
+ "vocab_size": 128256
1038
+ }
1039
+
1040
+ [2025-03-16 18:09:39,859][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-608] due to args.save_total_limit
1041
+ [2025-03-16 18:09:39,871][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
1042
+ [2025-03-16 18:09:39,873][transformers.trainer][INFO] -
1043
+ ***** Running Evaluation *****
1044
+ [2025-03-16 18:09:39,873][transformers.trainer][INFO] - Num examples = 132
1045
+ [2025-03-16 18:09:39,873][transformers.trainer][INFO] - Batch size = 2
1046
+ [2025-03-16 18:10:44,058][transformers][INFO] - {'accuracy': 0.5909090909090909, 'RMSE': 28.06917861068948, 'QWK': 0.5302233902759528, 'HDIV': 0.007575757575757569, 'Macro F1': 0.3776732460185698}
1047
+ [2025-03-16 18:10:44,059][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
1048
+ [2025-03-16 18:10:44,060][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-620
1049
+ [2025-03-16 18:10:44,398][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
1050
+ [2025-03-16 18:10:44,400][transformers.configuration_utils][INFO] - Model config LlamaConfig {
1051
+ "architectures": [
1052
+ "LlamaForCausalLM"
1053
+ ],
1054
+ "attention_bias": false,
1055
+ "attention_dropout": 0.0,
1056
+ "bos_token_id": 128000,
1057
+ "eos_token_id": 128001,
1058
+ "head_dim": 128,
1059
+ "hidden_act": "silu",
1060
+ "hidden_size": 4096,
1061
+ "initializer_range": 0.02,
1062
+ "intermediate_size": 14336,
1063
+ "max_position_embeddings": 131072,
1064
+ "mlp_bias": false,
1065
+ "model_type": "llama",
1066
+ "num_attention_heads": 32,
1067
+ "num_hidden_layers": 32,
1068
+ "num_key_value_heads": 8,
1069
+ "pretraining_tp": 1,
1070
+ "rms_norm_eps": 1e-05,
1071
+ "rope_scaling": {
1072
+ "factor": 8.0,
1073
+ "high_freq_factor": 4.0,
1074
+ "low_freq_factor": 1.0,
1075
+ "original_max_position_embeddings": 8192,
1076
+ "rope_type": "llama3"
1077
+ },
1078
+ "rope_theta": 500000.0,
1079
+ "tie_word_embeddings": false,
1080
+ "torch_dtype": "bfloat16",
1081
+ "transformers_version": "4.49.0",
1082
+ "use_cache": true,
1083
+ "vocab_size": 128256
1084
+ }
1085
+
1086
+ [2025-03-16 18:10:44,605][transformers.trainer][INFO] -
1087
+
1088
+ Training completed. Do not forget to share your model on huggingface.co/models =)
1089
+
1090
+
1091
+ [2025-03-16 18:10:44,605][transformers.trainer][INFO] - Loading best model from /workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-576 (score: 0.579476861167002).
1092
+ [2025-03-16 18:10:44,655][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-03-16/13-15-47/results/llama31_8b-balanced/C1/checkpoint-620] due to args.save_total_limit
1093
+ [2025-03-16 18:10:44,668][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
1094
+ [2025-03-16 18:10:44,669][transformers.trainer][INFO] -
1095
+ ***** Running Evaluation *****
1096
+ [2025-03-16 18:10:44,669][transformers.trainer][INFO] - Num examples = 132
1097
+ [2025-03-16 18:10:44,669][transformers.trainer][INFO] - Batch size = 2
1098
+ [2025-03-16 18:11:49,145][transformers][INFO] - {'accuracy': 0.6287878787878788, 'RMSE': 26.285149626910837, 'QWK': 0.579476861167002, 'HDIV': 0.007575757575757569, 'Macro F1': 0.394676583276989}
1099
+ [2025-03-16 18:11:49,147][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
1100
+ [2025-03-16 18:11:49,148][__main__][INFO] - Training completed successfully.
1101
+ [2025-03-16 18:11:49,148][__main__][INFO] - Running on Test
1102
+ [2025-03-16 18:11:49,148][transformers.trainer][INFO] - The following columns in the evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text. If essay_year, grades, prompt, id_prompt, reference, essay_text, id, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
1103
+ [2025-03-16 18:11:49,149][transformers.trainer][INFO] -
1104
+ ***** Running Evaluation *****
1105
+ [2025-03-16 18:11:49,149][transformers.trainer][INFO] - Num examples = 138
1106
+ [2025-03-16 18:11:49,149][transformers.trainer][INFO] - Batch size = 2
1107
+ [2025-03-16 18:12:58,833][transformers][INFO] - {'accuracy': 0.6594202898550725, 'RMSE': 25.931906372573962, 'QWK': 0.6043890865954924, 'HDIV': 0.007246376811594235, 'Macro F1': 0.43574433494234377}
1108
+ [2025-03-16 18:12:58,833][tensorboardX.summary][INFO] - Summary name eval/Macro F1 is illegal; using eval/Macro_F1 instead.
1109
+ [2025-03-16 18:12:58,834][transformers.trainer][INFO] - Saving model checkpoint to ./results/llama31_8b-balanced/C1/best_model
1110
+ [2025-03-16 18:12:59,177][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Llama-3.1-8B/snapshots/d04e592bb4f6aa9cfee91e2e20afa771667e1d4b/config.json
1111
+ [2025-03-16 18:12:59,179][transformers.configuration_utils][INFO] - Model config LlamaConfig {
1112
+ "architectures": [
1113
+ "LlamaForCausalLM"
1114
+ ],
1115
+ "attention_bias": false,
1116
+ "attention_dropout": 0.0,
1117
+ "bos_token_id": 128000,
1118
+ "eos_token_id": 128001,
1119
+ "head_dim": 128,
1120
+ "hidden_act": "silu",
1121
+ "hidden_size": 4096,
1122
+ "initializer_range": 0.02,
1123
+ "intermediate_size": 14336,
1124
+ "max_position_embeddings": 131072,
1125
+ "mlp_bias": false,
1126
+ "model_type": "llama",
1127
+ "num_attention_heads": 32,
1128
+ "num_hidden_layers": 32,
1129
+ "num_key_value_heads": 8,
1130
+ "pretraining_tp": 1,
1131
+ "rms_norm_eps": 1e-05,
1132
+ "rope_scaling": {
1133
+ "factor": 8.0,
1134
+ "high_freq_factor": 4.0,
1135
+ "low_freq_factor": 1.0,
1136
+ "original_max_position_embeddings": 8192,
1137
+ "rope_type": "llama3"
1138
+ },
1139
+ "rope_theta": 500000.0,
1140
+ "tie_word_embeddings": false,
1141
+ "torch_dtype": "bfloat16",
1142
+ "transformers_version": "4.49.0",
1143
+ "use_cache": true,
1144
+ "vocab_size": 128256
1145
+ }
1146
+
1147
+ [2025-03-16 18:12:59,231][__main__][INFO] - Fine Tuning Finished.
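The final test-set line above reports accuracy, RMSE, QWK, Macro F1 and HDIV for the best checkpoint, which the trainer selected by QWK (see the "Loading best model ... (score: 0.5794...)" line). As a point of reference, the sketch below shows how figures of this kind can be recomputed from reference and predicted grades with scikit-learn and NumPy; it is a minimal illustration under those assumptions, not the project's own evaluation code, and the example grades are hypothetical. HDIV is omitted because its exact definition is not given in the log.

```python
# Minimal sketch (not the project's evaluation code): recompute the headline
# metrics that appear in the evaluation log from reference/predicted grades.
import numpy as np
from sklearn.metrics import accuracy_score, cohen_kappa_score, f1_score, mean_squared_error


def compute_metrics(y_true, y_pred):
    return {
        "accuracy": accuracy_score(y_true, y_pred),
        # RMSE on the raw grade scale (assumed here to be 0-200 in steps of 40).
        "RMSE": float(np.sqrt(mean_squared_error(y_true, y_pred))),
        # Quadratically weighted kappa, the score used to pick the best checkpoint.
        "QWK": cohen_kappa_score(y_true, y_pred, weights="quadratic"),
        "Macro F1": f1_score(y_true, y_pred, average="macro"),
    }


# Hypothetical grades, only to show the call signature:
print(compute_metrics([0, 40, 80, 120, 160, 200], [0, 80, 40, 160, 120, 200]))
```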
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:09e90d4b9670a78bba6dc5a3aa13e2057dac9e19edd2862943e50e1fd309c025
+ size 5432
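Once the adapter weights, adapter_config.json, and training_args.bin above are on the Hub, the fine-tuned classifier can be restored by loading the LoRA adapter on top of meta-llama/Llama-3.1-8B. The snippet below is a minimal sketch assuming the peft AutoPeftModelForSequenceClassification helper, access to the gated base model, and a placeholder repo id (the final Hub id is not stated in this commit); num_labels=6 is an assumption about the grade scale, not something recorded here.

```python
# Minimal loading sketch (assumptions flagged inline; not taken from this repo's code).
import torch
from peft import AutoPeftModelForSequenceClassification
from transformers import AutoTokenizer

ADAPTER_REPO = "<hub-repo-id>"  # placeholder: the commit does not state the final repo id

# Rebuilds a sequence-classification model from the base model named in
# adapter_config.json and applies the LoRA adapter plus the saved score head.
model = AutoPeftModelForSequenceClassification.from_pretrained(
    ADAPTER_REPO,
    num_labels=6,              # assumption: one class per grade level (0-200, step 40)
    torch_dtype=torch.bfloat16,
)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B")

essay = "Texto de exemplo da redação..."  # hypothetical essay text
inputs = tokenizer(essay, return_tensors="pt", truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits
print(int(logits.argmax(dim=-1)))
```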