abarbosa committed
Commit c8b174c · verified · Parent: 235d622

Pushing fine-tuned model to Hugging Face Hub

README.md ADDED
@@ -0,0 +1,48 @@
+
+---
+language:
+- pt
+- en
+tags:
+- aes
+datasets:
+- kamel-usp/aes_enem_dataset
+base_model: ricardoz/BERTugues-base-portuguese-cased
+metrics:
+- accuracy
+- qwk
+library_name: transformers
+model-index:
+- name: BERTugues-base-portuguese-cased-encoder_classification-C1-essay_only
+  results:
+  - task:
+      type: text-classification
+      name: Automated Essay Score
+    dataset:
+      name: Automated Essay Score ENEM Dataset
+      type: kamel-usp/aes_enem_dataset
+      config: JBCS2025
+      split: test
+    metrics:
+    - name: Macro F1
+      type: f1
+      value: 0.3873376623376623
+    - name: QWK
+      type: qwk
+      value: 0.6139860139860139
+    - name: Weighted Macro F1
+      type: f1
+      value: 0.5620203065855239
+---
+# Model ID: BERTugues-base-portuguese-cased-encoder_classification-C1-essay_only
+## Results
+|                  |   test_data |
+|:-----------------|------------:|
+| eval_accuracy    |  0.543478   |
+| eval_RMSE        | 30.4555     |
+| eval_QWK         |  0.613986   |
+| eval_Macro_F1    |  0.387338   |
+| eval_Weighted_F1 |  0.56202    |
+| eval_Micro_F1    |  0.543478   |
+| eval_HDIV        |  0.00724638 |
+
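
The card above only reports test-set numbers, so here is a minimal inference sketch for this checkpoint. `REPO_ID` is a placeholder (the commit does not show the final Hub namespace) and the essay text is invented:

```python
# Minimal inference sketch. REPO_ID is a placeholder -- the commit does not
# show which namespace this model was pushed to.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

REPO_ID = "<namespace>/BERTugues-base-portuguese-cased-encoder_classification-C1-essay_only"

tokenizer = AutoTokenizer.from_pretrained(REPO_ID)
model = AutoModelForSequenceClassification.from_pretrained(REPO_ID)
model.eval()

essay = "Texto completo da redação..."  # placeholder essay text
inputs = tokenizer(essay, truncation=True, max_length=512, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# config.json maps the six classes to ENEM grade points (0, 40, ..., 200).
grade = model.config.id2label[logits.argmax(dim=-1).item()]
print(f"Predicted competency 1 grade: {grade}")
```
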
config.json ADDED
@@ -0,0 +1,40 @@
+{
+  "architectures": [
+    "BertForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": 0,
+    "1": 40,
+    "2": 80,
+    "3": 120,
+    "4": 160,
+    "5": 200
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "0": 0,
+    "40": 1,
+    "80": 2,
+    "120": 3,
+    "160": 4,
+    "200": 5
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.53.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
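
The `id2label`/`label2id` pair encodes the ordinal ENEM competency scale (0 to 200 in steps of 40) as six classifier classes. A one-line sketch of that mapping:

```python
# The grade <-> class-index mapping encoded in config.json: six classes,
# 40 ENEM grade points apart.
id2label = {i: 40 * i for i in range(6)}            # {0: 0, 1: 40, ..., 5: 200}
label2id = {grade: idx for idx, grade in id2label.items()}

assert label2id[id2label[3]] == 3                   # class 3 <-> grade 120
```
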
emissions.csv ADDED
@@ -0,0 +1,2 @@
+timestamp,project_name,run_id,experiment_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
+2025-07-09T17:02:55,jbcs2025,95d1c6d6-6591-4a90-a0f8-7b597d9e768e,BERTugues-base-portuguese-cased-encoder_classification-C1-essay_only,76.78956367401406,0.0015437702652992163,2.010390724256238e-05,49.199999999999996,210.83072035434986,58.0,0.0009759866625364993,0.004228010604628629,0.0012127913891373382,0.006416788656302467,Romania,ROU,gorj county,,,Linux-5.15.0-143-generic-x86_64-with-glibc2.35,3.12.11,3.0.2,36,Intel(R) Xeon(R) Gold 6248R CPU @ 3.00GHz,1,1 x NVIDIA RTX A6000,23.2904,45.0489,393.6063117980957,machine,N,1.0
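
These columns follow codecarbon's standard CSV schema (the row itself records codecarbon 3.0.2). A hedged sketch of how such a file is typically produced; the exact tracker settings used by the experiment are not shown in this commit, and `train()` is a hypothetical stand-in:

```python
# Hedged sketch: how codecarbon typically produces an emissions.csv like this.
from codecarbon import EmissionsTracker

def train() -> None:
    """Hypothetical stand-in for the experiment's fine-tuning loop."""
    pass

tracker = EmissionsTracker(project_name="jbcs2025", output_file="emissions.csv")
tracker.start()
try:
    train()
finally:
    emissions_kg = tracker.stop()  # appends one row with energy and CO2eq figures
    print(f"Total emissions: {emissions_kg:.4f} kg CO2eq")
```
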
evaluation_results.csv ADDED
@@ -0,0 +1,4 @@
+eval_loss,eval_model_preparation_time,eval_accuracy,eval_RMSE,eval_QWK,eval_HDIV,eval_Macro_F1,eval_Micro_F1,eval_Weighted_F1,eval_TP_0,eval_TN_0,eval_FP_0,eval_FN_0,eval_TP_1,eval_TN_1,eval_FP_1,eval_FN_1,eval_TP_2,eval_TN_2,eval_FP_2,eval_FN_2,eval_TP_3,eval_TN_3,eval_FP_3,eval_FN_3,eval_TP_4,eval_TN_4,eval_FP_4,eval_FN_4,eval_TP_5,eval_TN_5,eval_FP_5,eval_FN_5,eval_runtime,eval_samples_per_second,eval_steps_per_second,epoch,reference,timestamp,id
+1.7795836925506592,0.0025,0.4166666666666667,54.9379815626841,-0.17570467878094131,0.06818181818181823,0.12154696132596685,0.4166666666666667,0.2900552486187845,0,125,6,1,0,132,0,0,0,117,8,7,0,93,0,39,55,6,63,8,0,110,0,22,0.7633,172.929,11.791,-1,validation_before_training,2025-07-09 17:01:48,BERTugues-base-portuguese-cased-encoder_classification-C1-essay_only
+1.4637502431869507,0.0025,0.44696969696969696,37.335497772755005,0.5213772228528187,0.0,0.355808371275661,0.44696969696969696,0.4545482079360584,0,131,0,1,0,132,0,0,6,111,14,1,16,83,10,23,25,50,19,38,12,80,30,10,0.4211,313.471,21.373,9.0,validation_after_training,2025-07-09 17:01:48,BERTugues-base-portuguese-cased-encoder_classification-C1-essay_only
+1.247100830078125,0.0025,0.5434782608695652,30.45547950507524,0.6139860139860139,0.007246376811594235,0.38733766233766237,0.5434782608695652,0.5620203065855239,0,137,0,1,0,138,0,0,7,109,19,3,35,61,11,31,28,67,20,23,5,115,13,5,0.42,328.555,21.427,9.0,test_results,2025-07-09 17:01:48,BERTugues-base-portuguese-cased-encoder_classification-C1-essay_only
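
The `eval_QWK` column is Quadratic Weighted Kappa, the metric used for model selection per the training log below. A sketch with scikit-learn on hypothetical grades; the project's own metric implementation may differ in detail:

```python
# Sketch: Quadratic Weighted Kappa (QWK) via scikit-learn's cohen_kappa_score.
from sklearn.metrics import cohen_kappa_score

y_true = [120, 160, 80, 200, 120, 40]  # hypothetical gold grades
y_pred = [120, 120, 80, 160, 160, 40]  # hypothetical predictions

qwk = cohen_kappa_score(y_true, y_pred, weights="quadratic")
print(round(qwk, 4))
```
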
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:64259312cfc1470926a6fc2d19809fe7f3812d38cf2fa96690278d9bb26ff5ae
+size 437970952
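
The Git LFS pointer records only the blob's SHA-256 and byte size. A sketch for checking a locally downloaded `model.safetensors` against it:

```python
# Sketch: verify a downloaded model.safetensors against the LFS pointer above.
import hashlib
from pathlib import Path

path = Path("model.safetensors")
h = hashlib.sha256()
with path.open("rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert path.stat().st_size == 437970952
assert h.hexdigest() == "64259312cfc1470926a6fc2d19809fe7f3812d38cf2fa96690278d9bb26ff5ae"
```
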
run_experiment.log ADDED
@@ -0,0 +1,354 @@
+[2025-07-09 17:01:35,568][__main__][INFO] - cache_dir: /tmp/
+dataset:
+  name: kamel-usp/aes_enem_dataset
+  split: JBCS2025
+training_params:
+  seed: 42
+  num_train_epochs: 20
+  logging_steps: 100
+  metric_for_best_model: QWK
+  bf16: true
+bootstrap:
+  enabled: true
+  n_bootstrap: 10000
+  bootstrap_seed: 42
+  metrics:
+  - QWK
+  - Macro_F1
+  - Weighted_F1
+post_training_results:
+  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
+experiments:
+  model:
+    name: ricardoz/BERTugues-base-portuguese-cased
+    type: encoder_classification
+    num_labels: 6
+    output_dir: ./results/
+    logging_dir: ./logs/
+    best_model_dir: ./results/best_model
+  tokenizer:
+    name: ricardoz/BERTugues-base-portuguese-cased
+  dataset:
+    grade_index: 0
+    use_full_context: false
+  training_params:
+    weight_decay: 0.01
+    warmup_ratio: 0.1
+    learning_rate: 5.0e-05
+    train_batch_size: 16
+    eval_batch_size: 16
+    gradient_accumulation_steps: 1
+    gradient_checkpointing: false
+
+[2025-07-09 17:01:39,460][__main__][INFO] - GPU 0: NVIDIA RTX A6000 | TDP ≈ 300 W
+[2025-07-09 17:01:39,460][__main__][INFO] - Starting the Fine Tuning training process.
+[2025-07-09 17:01:44,526][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/config.json
+[2025-07-09 17:01:44,528][transformers.configuration_utils][INFO] - Model config BertConfig {
+  "architectures": [
+    "BertForPreTraining"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.53.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
+
+[2025-07-09 17:01:44,746][transformers.models.auto.tokenization_auto][INFO] - Could not locate the tokenizer configuration file, will try to use the model config instead.
+[2025-07-09 17:01:44,949][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/config.json
+[2025-07-09 17:01:44,950][transformers.configuration_utils][INFO] - Model config BertConfig {
+  "architectures": [
+    "BertForPreTraining"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.53.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
+
+[2025-07-09 17:01:45,587][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/vocab.txt
+[2025-07-09 17:01:45,587][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None
+[2025-07-09 17:01:45,587][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None
+[2025-07-09 17:01:45,587][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None
+[2025-07-09 17:01:45,587][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at None
+[2025-07-09 17:01:45,587][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
+[2025-07-09 17:01:45,588][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/config.json
+[2025-07-09 17:01:45,588][transformers.configuration_utils][INFO] - Model config BertConfig {
+  "architectures": [
+    "BertForPreTraining"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.53.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
+
+[2025-07-09 17:01:45,622][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/config.json
+[2025-07-09 17:01:45,622][transformers.configuration_utils][INFO] - Model config BertConfig {
+  "architectures": [
+    "BertForPreTraining"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.53.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
+
+[2025-07-09 17:01:45,641][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: True; Use Full Context: False
+[2025-07-09 17:01:46,310][__main__][INFO] -
+Token statistics for 'train' split:
+[2025-07-09 17:01:46,310][__main__][INFO] - Total examples: 500
+[2025-07-09 17:01:46,310][__main__][INFO] - Min tokens: 512
+[2025-07-09 17:01:46,310][__main__][INFO] - Max tokens: 512
+[2025-07-09 17:01:46,310][__main__][INFO] - Avg tokens: 512.00
+[2025-07-09 17:01:46,310][__main__][INFO] - Std tokens: 0.00
+[2025-07-09 17:01:46,406][__main__][INFO] -
+Token statistics for 'validation' split:
+[2025-07-09 17:01:46,407][__main__][INFO] - Total examples: 132
+[2025-07-09 17:01:46,407][__main__][INFO] - Min tokens: 512
+[2025-07-09 17:01:46,407][__main__][INFO] - Max tokens: 512
+[2025-07-09 17:01:46,407][__main__][INFO] - Avg tokens: 512.00
+[2025-07-09 17:01:46,407][__main__][INFO] - Std tokens: 0.00
+[2025-07-09 17:01:46,506][__main__][INFO] -
+Token statistics for 'test' split:
+[2025-07-09 17:01:46,506][__main__][INFO] - Total examples: 138
+[2025-07-09 17:01:46,506][__main__][INFO] - Min tokens: 512
+[2025-07-09 17:01:46,506][__main__][INFO] - Max tokens: 512
+[2025-07-09 17:01:46,506][__main__][INFO] - Avg tokens: 512.00
+[2025-07-09 17:01:46,506][__main__][INFO] - Std tokens: 0.00
+[2025-07-09 17:01:46,506][__main__][INFO] - If token statistics are the same (max, avg, min) keep in mind that this is due to batched tokenization and padding.
+[2025-07-09 17:01:46,506][__main__][INFO] - Model max length: 512. If it is the same as stats, then there is a high chance that sequences are being truncated.
+[2025-07-09 17:01:46,719][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/config.json
+[2025-07-09 17:01:46,720][transformers.configuration_utils][INFO] - Model config BertConfig {
+  "architectures": [
+    "BertForPreTraining"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "classifier_dropout": null,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": 0,
+    "1": 40,
+    "2": 80,
+    "3": 120,
+    "4": 160,
+    "5": 200
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "0": 0,
+    "40": 1,
+    "80": 2,
+    "120": 3,
+    "160": 4,
+    "200": 5
+  },
+  "layer_norm_eps": 1e-12,
+  "max_position_embeddings": 512,
+  "model_type": "bert",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 0,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.53.1",
+  "type_vocab_size": 2,
+  "use_cache": true,
+  "vocab_size": 30522
+}
+
+[2025-07-09 17:01:46,911][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--ricardoz--BERTugues-base-portuguese-cased/snapshots/76022866e209716d673e144cc9186f7b20830967/model.safetensors
+[2025-07-09 17:01:46,911][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object
+[2025-07-09 17:01:46,911][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32.
+[2025-07-09 17:01:48,083][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at ricardoz/BERTugues-base-portuguese-cased were not used when initializing BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.dense.weight', 'cls.seq_relationship.bias', 'cls.seq_relationship.weight']
+- This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
+- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
+[2025-07-09 17:01:48,084][transformers.modeling_utils][WARNING] - Some weights of BertForSequenceClassification were not initialized from the model checkpoint at ricardoz/BERTugues-base-portuguese-cased and are newly initialized: ['classifier.bias', 'classifier.weight']
+You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
+[2025-07-09 17:01:48,090][transformers.training_args][INFO] - PyTorch: setting up devices
+[2025-07-09 17:01:48,113][__main__][INFO] - Total steps: 620. Number of warmup steps: 62
+[2025-07-09 17:01:48,121][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+[2025-07-09 17:01:48,144][transformers.trainer][INFO] - Using auto half precision backend
+[2025-07-09 17:01:48,147][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:01:48,152][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:01:48,152][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:01:48,152][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:01:49,260][transformers.trainer][INFO] - The following columns in the Training set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:01:49,270][transformers.trainer][INFO] - ***** Running training *****
+[2025-07-09 17:01:49,270][transformers.trainer][INFO] - Num examples = 500
+[2025-07-09 17:01:49,270][transformers.trainer][INFO] - Num Epochs = 20
+[2025-07-09 17:01:49,270][transformers.trainer][INFO] - Instantaneous batch size per device = 16
+[2025-07-09 17:01:49,270][transformers.trainer][INFO] - Total train batch size (w. parallel, distributed & accumulation) = 16
+[2025-07-09 17:01:49,270][transformers.trainer][INFO] - Gradient Accumulation steps = 1
+[2025-07-09 17:01:49,270][transformers.trainer][INFO] - Total optimization steps = 640
+[2025-07-09 17:01:49,271][transformers.trainer][INFO] - Number of trainable parameters = 109,486,854
+[2025-07-09 17:01:54,149][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:01:54,152][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:01:54,152][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:01:54,152][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:01:54,570][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-32
+[2025-07-09 17:01:54,572][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-32/config.json
+[2025-07-09 17:01:55,547][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-32/model.safetensors
+[2025-07-09 17:02:01,078][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:02:01,081][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:02:01,081][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:02:01,081][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:02:01,499][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-64
+[2025-07-09 17:02:01,501][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-64/config.json
+[2025-07-09 17:02:02,401][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-64/model.safetensors
+[2025-07-09 17:02:03,876][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-32] due to args.save_total_limit
+[2025-07-09 17:02:08,594][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:02:08,597][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:02:08,597][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:02:08,598][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:02:09,021][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-96
+[2025-07-09 17:02:09,022][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-96/config.json
+[2025-07-09 17:02:10,284][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-96/model.safetensors
+[2025-07-09 17:02:15,880][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:02:15,883][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:02:15,883][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:02:15,883][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:02:16,290][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-128
+[2025-07-09 17:02:16,291][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-128/config.json
+[2025-07-09 17:02:17,310][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-128/model.safetensors
+[2025-07-09 17:02:18,246][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-64] due to args.save_total_limit
+[2025-07-09 17:02:18,313][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-96] due to args.save_total_limit
+[2025-07-09 17:02:22,925][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:02:22,927][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:02:22,927][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:02:22,927][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:02:23,315][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-160
+[2025-07-09 17:02:23,316][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-160/config.json
+[2025-07-09 17:02:24,123][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-160/model.safetensors
+[2025-07-09 17:02:29,422][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:02:29,424][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:02:29,425][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:02:29,425][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:02:29,814][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-192
+[2025-07-09 17:02:29,815][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-192/config.json
+[2025-07-09 17:02:30,704][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-192/model.safetensors
+[2025-07-09 17:02:32,183][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-160] due to args.save_total_limit
+[2025-07-09 17:02:36,833][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:02:36,836][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:02:36,836][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:02:36,836][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:02:37,229][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-224
+[2025-07-09 17:02:37,230][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-224/config.json
+[2025-07-09 17:02:38,109][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-224/model.safetensors
+[2025-07-09 17:02:38,912][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-192] due to args.save_total_limit
+[2025-07-09 17:02:43,498][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:02:43,501][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:02:43,501][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:02:43,501][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:02:43,887][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-256
+[2025-07-09 17:02:43,889][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-256/config.json
+[2025-07-09 17:02:44,876][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-256/model.safetensors
+[2025-07-09 17:02:45,756][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-224] due to args.save_total_limit
+[2025-07-09 17:02:50,418][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:02:50,420][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:02:50,420][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:02:50,421][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:02:50,818][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-288
+[2025-07-09 17:02:50,819][transformers.configuration_utils][INFO] - Configuration saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-288/config.json
+[2025-07-09 17:02:51,840][transformers.modeling_utils][INFO] - Model weights saved in /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-288/model.safetensors
+[2025-07-09 17:02:52,719][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-256] due to args.save_total_limit
+[2025-07-09 17:02:52,800][transformers.trainer][INFO] -
+
+Training completed. Do not forget to share your model on huggingface.co/models =)
+
+
+[2025-07-09 17:02:52,801][transformers.trainer][INFO] - Loading best model from /workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-128 (score: 0.5213772228528187).
+[2025-07-09 17:02:53,045][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-09/17-01-35/results/checkpoint-288] due to args.save_total_limit
+[2025-07-09 17:02:53,175][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:02:53,178][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:02:53,178][transformers.trainer][INFO] - Num examples = 132
+[2025-07-09 17:02:53,178][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:02:53,607][__main__][INFO] - Training completed successfully.
+[2025-07-09 17:02:53,607][__main__][INFO] - Running on Test
+[2025-07-09 17:02:53,607][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt. If essay_text, supporting_text, grades, prompt, essay_year, reference, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-07-09 17:02:53,610][transformers.trainer][INFO] -
+***** Running Evaluation *****
+[2025-07-09 17:02:53,610][transformers.trainer][INFO] - Num examples = 138
+[2025-07-09 17:02:53,610][transformers.trainer][INFO] - Batch size = 16
+[2025-07-09 17:02:54,035][__main__][INFO] - Test metrics: {'eval_loss': 1.247100830078125, 'eval_model_preparation_time': 0.0025, 'eval_accuracy': 0.5434782608695652, 'eval_RMSE': 30.45547950507524, 'eval_QWK': 0.6139860139860139, 'eval_HDIV': 0.007246376811594235, 'eval_Macro_F1': 0.38733766233766237, 'eval_Micro_F1': 0.5434782608695652, 'eval_Weighted_F1': 0.5620203065855239, 'eval_TP_0': 0, 'eval_TN_0': 137, 'eval_FP_0': 0, 'eval_FN_0': 1, 'eval_TP_1': 0, 'eval_TN_1': 138, 'eval_FP_1': 0, 'eval_FN_1': 0, 'eval_TP_2': 7, 'eval_TN_2': 109, 'eval_FP_2': 19, 'eval_FN_2': 3, 'eval_TP_3': 35, 'eval_TN_3': 61, 'eval_FP_3': 11, 'eval_FN_3': 31, 'eval_TP_4': 28, 'eval_TN_4': 67, 'eval_FP_4': 20, 'eval_FN_4': 23, 'eval_TP_5': 5, 'eval_TN_5': 115, 'eval_FP_5': 13, 'eval_FN_5': 5, 'eval_runtime': 0.42, 'eval_samples_per_second': 328.555, 'eval_steps_per_second': 21.427, 'epoch': 9.0}
+[2025-07-09 17:02:54,036][transformers.trainer][INFO] - Saving model checkpoint to ./results/best_model
+[2025-07-09 17:02:54,038][transformers.configuration_utils][INFO] - Configuration saved in ./results/best_model/config.json
+[2025-07-09 17:02:55,219][transformers.modeling_utils][INFO] - Model weights saved in ./results/best_model/model.safetensors
+[2025-07-09 17:02:55,221][transformers.tokenization_utils_base][INFO] - tokenizer config file saved in ./results/best_model/tokenizer_config.json
+[2025-07-09 17:02:55,221][transformers.tokenization_utils_base][INFO] - Special tokens file saved in ./results/best_model/special_tokens_map.json
+[2025-07-09 17:02:55,234][__main__][INFO] - Model and tokenizer saved to ./results/best_model
+[2025-07-09 17:02:55,238][__main__][INFO] - Fine Tuning Finished.
+[2025-07-09 17:02:55,748][__main__][INFO] - Total emissions: 0.0015 kg CO2eq
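
For reference, here is a hedged reconstruction of the `TrainingArguments` implied by the hyperparameters in this log. The eval/save strategy and `save_total_limit` are inferred from the observed checkpoint pattern (evaluate and save every 32 steps, i.e. once per epoch, with older checkpoints deleted); they are not shown verbatim in the log:

```python
# Hedged reconstruction of the TrainingArguments implied by the log above;
# fields marked "inferred" are assumptions, not values printed in the log.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./results/",
    logging_dir="./logs/",
    seed=42,
    num_train_epochs=20,
    learning_rate=5e-5,
    weight_decay=0.01,
    warmup_ratio=0.1,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=1,
    logging_steps=100,
    bf16=True,
    eval_strategy="epoch",          # inferred: evaluation every 32 steps = 1 epoch
    save_strategy="epoch",          # inferred: checkpoint saved after each eval
    save_total_limit=1,             # inferred: older checkpoints deleted
    load_best_model_at_end=True,    # log: "Loading best model from ... checkpoint-128"
    metric_for_best_model="QWK",
    greater_is_better=True,
)
```
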
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+{
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+{
+  "added_tokens_decoder": {
+    "0": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "[UNK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "[CLS]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "3": {
+      "content": "[SEP]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "4": {
+      "content": "[MASK]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_basic_tokenize": true,
+  "do_lower_case": true,
+  "extra_special_tokens": {},
+  "mask_token": "[MASK]",
+  "model_max_length": 512,
+  "never_split": null,
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
+}
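
The token statistics in the training log (every split reporting exactly 512 tokens) follow from this tokenizer's `model_max_length` of 512 combined with `padding="longest"` and truncation. A sketch of that call with placeholder essays:

```python
# Sketch of the tokenizer call implied by the log ("Padding:longest;
# Truncation: True"): once any essay in a batch reaches the 512-token
# model_max_length, every padded sequence in that batch is exactly 512 long.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ricardoz/BERTugues-base-portuguese-cased")

batch = ["Primeira redação ...", "Segunda redação ..."]  # placeholder essays
enc = tokenizer(batch, padding="longest", truncation=True, max_length=512)
print([len(ids) for ids in enc["input_ids"]])
```
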
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86df43a6e3e587411c325a3b7c2d1597d795466c11ad95b9347154103e2fb21e
+size 5777
vocab.txt ADDED
The diff for this file is too large to render. See raw diff