abarbosa committed on
Commit d120ccb · verified · 1 Parent(s): 56186b4

Pushing fine-tuned model to Hugging Face Hub

README.md ADDED
@@ -0,0 +1,48 @@
---
language:
- pt
- en
tags:
- aes
datasets:
- kamel-usp/aes_enem_dataset
base_model: microsoft/Phi-3.5-mini-instruct
metrics:
- accuracy
- qwk
library_name: peft
model-index:
- name: Phi-3.5-mini-instruct-phi35_classification_lora-C2-full_context-r16
  results:
  - task:
      type: text-classification
      name: Automated Essay Score
    dataset:
      name: Automated Essay Score ENEM Dataset
      type: kamel-usp/aes_enem_dataset
      config: JBCS2025
      split: test
    metrics:
    - name: Macro F1
      type: f1
      value: 0.1469089842699924
    - name: QWK
      type: qwk
      value: 0.0458698830409355
    - name: Weighted Macro F1
      type: f1
      value: 0.2236594123209007
---
# Model ID: Phi-3.5-mini-instruct-phi35_classification_lora-C2-full_context-r16
## Results

|                  |   test_data |
|:-----------------|------------:|
| eval_accuracy    |   0.224638  |
| eval_RMSE        |  72.5518    |
| eval_QWK         |   0.0458699 |
| eval_Macro_F1    |   0.146909  |
| eval_Weighted_F1 |   0.223659  |
| eval_Micro_F1    |   0.224638  |
| eval_HDIV        |   0.15942   |
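The card above reports test-set metrics for the C2 adapter. A minimal usage sketch (not part of the commit), assuming the adapter is loaded on top of the base model for 6-way sequence classification; the Hub repo id below is inferred from the model name and may differ from the actual namespace:

```python
# Sketch: load the LoRA adapter for essay scoring (6 label classes,
# per the experiment config). Repo id is an assumption.
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from peft import PeftModel

base_id = "microsoft/Phi-3.5-mini-instruct"
adapter_id = "kamel-usp/Phi-3.5-mini-instruct-phi35_classification_lora-C2-full_context-r16"  # assumed

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForSequenceClassification.from_pretrained(
    base_id, num_labels=6, torch_dtype=torch.bfloat16
)
model = PeftModel.from_pretrained(base, adapter_id)
model.eval()

inputs = tokenizer("texto do ensaio ...", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
predicted_class = logits.argmax(dim=-1).item()  # class index 0..5
```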
adapter_config.json ADDED
@@ -0,0 +1,41 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "microsoft/Phi-3.5-mini-instruct",
  "bias": "none",
  "corda_config": null,
  "eva_config": null,
  "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 32,
  "lora_bias": false,
  "lora_dropout": 0.1,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": [
    "classifier",
    "score"
  ],
  "peft_type": "LORA",
  "qalora_group_size": 16,
  "r": 16,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "qkv_proj",
    "o_proj",
    "down_proj",
    "gate_up_proj"
  ],
  "task_type": "SEQ_CLS",
  "trainable_token_indices": null,
  "use_dora": false,
  "use_qalora": false,
  "use_rslora": false
}
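The adapter targets all attention and MLP projections of Phi-3.5 and keeps the new classification head (`score`) fully trainable via `modules_to_save`. A sketch of the equivalent `peft.LoraConfig` (for illustration, not a byte-exact reconstruction of the shipped file):

```python
# Sketch: peft LoraConfig mirroring the adapter_config.json above.
from peft import LoraConfig

lora_config = LoraConfig(
    task_type="SEQ_CLS",
    r=16,
    lora_alpha=32,
    lora_dropout=0.1,
    bias="none",
    target_modules=["qkv_proj", "o_proj", "down_proj", "gate_up_proj"],
    modules_to_save=["classifier", "score"],  # classification head stays trainable
)
```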
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ca631859ac16d196bd764ae47d305e9c2abf1744ac4c619e8b1b727cc57edc4
size 100734696
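The weights file is stored via Git LFS; only the pointer above lives in the repo. A sketch of verifying a downloaded copy against the recorded size and digest (the local path is an assumption):

```python
# Sketch: check a downloaded adapter_model.safetensors against the LFS pointer.
import hashlib
from pathlib import Path

data = Path("adapter_model.safetensors").read_bytes()  # assumed local path
assert len(data) == 100734696
assert hashlib.sha256(data).hexdigest() == (
    "4ca631859ac16d196bd764ae47d305e9c2abf1744ac4c619e8b1b727cc57edc4"
)
```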
chat_template.jinja ADDED
@@ -0,0 +1,8 @@
{% for message in messages %}{% if message['role'] == 'system' and message['content'] %}{{'<|system|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'user' %}{{'<|user|>
' + message['content'] + '<|end|>
'}}{% elif message['role'] == 'assistant' %}{{'<|assistant|>
' + message['content'] + '<|end|>
'}}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>
' }}{% else %}{{ eos_token }}{% endif %}
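This is Phi-3.5's `<|system|>`/`<|user|>`/`<|assistant|>`/`<|end|>` format. A minimal sketch of rendering it through the tokenizer (the messages are placeholders):

```python
# Sketch: render the Jinja chat template via apply_chat_template.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-mini-instruct")
messages = [
    {"role": "system", "content": "You grade ENEM essays."},
    {"role": "user", "content": "Avalie a competência 2 deste ensaio: ..."},
]
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
# With add_generation_prompt=True the string ends in '<|assistant|>\n'.
```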
emissions.csv ADDED
@@ -0,0 +1,2 @@
timestamp,project_name,run_id,experiment_id,duration,emissions,emissions_rate,cpu_power,gpu_power,ram_power,cpu_energy,gpu_energy,ram_energy,energy_consumed,country_name,country_iso_code,region,cloud_provider,cloud_region,os,python_version,codecarbon_version,cpu_count,cpu_model,gpu_count,gpu_model,longitude,latitude,ram_total_size,tracking_mode,on_cloud,pue
2025-07-05T09:26:49,jbcs2025,c962c041-d39b-446a-9d05-d084325770f7,Phi-3.5-mini-instruct-phi35_classification_lora-C2-full_context,833.4110501129762,0.010344961509264488,1.2412796192061693e-05,70.0,649.5003189534933,70.0,0.015956810952765188,0.15272139495479564,0.015924686886109384,0.18460289279367023,France,FRA,île-de-france,,,Linux-5.15.0-136-generic-x86_64-with-glibc2.35,3.12.11,3.0.2,192,INTEL(R) XEON(R) PLATINUM 8568Y+,1,1 x NVIDIA H200,2.3494,48.8558,2015.363483428955,machine,N,1.0
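The row matches the output format of codecarbon (version 3.0.2 per the CSV): roughly 0.0103 kg CO2eq over a ~14-minute run on one H200. A sketch of how such a row is typically produced (`train()` is a placeholder for the fine-tuning run):

```python
# Sketch: codecarbon tracking around a training run, assuming this setup.
from codecarbon import EmissionsTracker

tracker = EmissionsTracker(project_name="jbcs2025", output_file="emissions.csv")
tracker.start()
try:
    train()  # hypothetical: the fine-tuning entry point
finally:
    emissions_kg = tracker.stop()  # appends one row like the one above
```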
evaluation_results.csv ADDED
@@ -0,0 +1,4 @@
eval_loss,eval_model_preparation_time,eval_accuracy,eval_RMSE,eval_QWK,eval_HDIV,eval_Macro_F1,eval_Micro_F1,eval_Weighted_F1,eval_TP_0,eval_TN_0,eval_FP_0,eval_FN_0,eval_TP_1,eval_TN_1,eval_FP_1,eval_FN_1,eval_TP_2,eval_TN_2,eval_FP_2,eval_FN_2,eval_TP_3,eval_TN_3,eval_FP_3,eval_FN_3,eval_TP_4,eval_TN_4,eval_FP_4,eval_FN_4,eval_TP_5,eval_TN_5,eval_FP_5,eval_FN_5,eval_runtime,eval_samples_per_second,eval_steps_per_second,epoch,reference,timestamp,id
2.103470802307129,0.0071,0.26515151515151514,60.0,0.04146699266503673,0.1742424242424242,0.10690758516845474,0.26515151515151514,0.15552190453376225,0,131,0,1,0,107,0,25,0,132,0,0,5,68,3,56,30,8,94,0,0,117,0,15,21.7266,6.076,1.519,-1,validation_before_training,2025-07-05 09:13:09,Phi-3.5-mini-instruct-phi35_classification_lora-C2-full_context
1.5709376335144043,0.0071,0.4015151515151515,57.94459213400123,0.32508860011813345,0.06060606060606055,0.2545544081224744,0.4015151515151515,0.42667986002574587,0,131,0,1,9,102,5,16,0,128,4,0,28,47,24,33,12,84,18,18,4,89,28,11,21.5527,6.125,1.531,7.0,validation_after_training,2025-07-05 09:13:09,Phi-3.5-mini-instruct-phi35_classification_lora-C2-full_context
1.7205733060836792,0.0071,0.2246376811594203,72.55183054852598,0.04586988304093553,0.1594202898550725,0.1469089842699924,0.2246376811594203,0.22365941232090072,0,137,0,1,8,75,28,27,0,125,8,5,13,56,31,38,8,76,36,18,2,114,4,18,22.675,6.086,1.544,7.0,test_results,2025-07-05 09:13:09,Phi-3.5-mini-instruct-phi35_classification_lora-C2-full_context
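The headline metrics in the card can be recomputed from per-essay class labels; QWK here is Cohen's kappa with quadratic weights. A minimal sketch, assuming `y_true` and `y_pred` are the 0–5 class indices (mapping classes to the 0–200 ENEM grade scale is a separate step):

```python
# Sketch: recompute accuracy, QWK, and the F1 variants reported above.
from sklearn.metrics import accuracy_score, cohen_kappa_score, f1_score

def summarize(y_true, y_pred):
    return {
        "accuracy": accuracy_score(y_true, y_pred),
        "QWK": cohen_kappa_score(y_true, y_pred, weights="quadratic"),
        "Macro_F1": f1_score(y_true, y_pred, average="macro"),
        "Weighted_F1": f1_score(y_true, y_pred, average="weighted"),
        "Micro_F1": f1_score(y_true, y_pred, average="micro"),
    }
```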
run_experiment.log ADDED
@@ -0,0 +1,1436 @@
[2025-07-05 09:12:52,585][__main__][INFO] - cache_dir: /tmp/
dataset:
  name: kamel-usp/aes_enem_dataset
  split: JBCS2025
training_params:
  seed: 42
  num_train_epochs: 20
  logging_steps: 100
  metric_for_best_model: QWK
  bf16: true
bootstrap:
  enabled: true
  n_bootstrap: 10000
  bootstrap_seed: 42
  metrics:
  - QWK
  - Macro_F1
  - Weighted_F1
post_training_results:
  model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59
experiments:
  model:
    name: microsoft/Phi-3.5-mini-instruct
    type: phi35_classification_lora
    num_labels: 6
    output_dir: ./results/phi4-balanced/C2
    logging_dir: ./logs/phi4-balanced/C2
    best_model_dir: ./results/phi4-balanced/C2/best_model
    lora_r: 16
    lora_dropout: 0.1
    lora_alpha: 32
    lora_target_modules: all-linear
  tokenizer:
    name: microsoft/Phi-3.5-mini-instruct
  dataset:
    grade_index: 1
    use_full_context: true
  training_params:
    weight_decay: 0.01
    warmup_ratio: 0.1
    learning_rate: 5.0e-05
    train_batch_size: 8
    eval_batch_size: 4
    gradient_accumulation_steps: 2
    gradient_checkpointing: true

[2025-07-05 09:12:56,492][__main__][INFO] - GPU 0: NVIDIA H200 | TDP ≈ 700 W
[2025-07-05 09:12:56,492][__main__][INFO] - Starting the Fine Tuning training process.
[2025-07-05 09:13:00,617][transformers.tokenization_utils_base][INFO] - loading file tokenizer.model from cache at /tmp/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/tokenizer.model
[2025-07-05 09:13:00,617][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/tokenizer.json
[2025-07-05 09:13:00,617][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/added_tokens.json
[2025-07-05 09:13:00,617][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/special_tokens_map.json
[2025-07-05 09:13:00,617][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/tokenizer_config.json
[2025-07-05 09:13:00,617][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None
[2025-07-05 09:13:00,671][transformers.tokenization_utils_base][INFO] - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
[2025-07-05 09:13:00,676][__main__][INFO] - Tokenizer function parameters- Padding:longest; Truncation: False; Use Full Context: True
[2025-07-05 09:13:01,274][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/config.json
[2025-07-05 09:13:01,274][transformers.configuration_utils][INFO] - Model config Phi3Config {
  "architectures": [ "Phi3ForCausalLM" ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_phi3.Phi3Config",
    "AutoModelForCausalLM": "modeling_phi3.Phi3ForCausalLM"
  },
  "bos_token_id": 1,
  "embd_pdrop": 0.0,
  "eos_token_id": 32000,
  "hidden_act": "silu",
  "hidden_size": 3072,
  "id2label": { "0": "LABEL_0", "1": "LABEL_1", "2": "LABEL_2", "3": "LABEL_3", "4": "LABEL_4", "5": "LABEL_5" },
  "initializer_range": 0.02,
  "intermediate_size": 8192,
  "label2id": { "LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2, "LABEL_3": 3, "LABEL_4": 4, "LABEL_5": 5 },
  "max_position_embeddings": 131072,
  "model_type": "phi3",
  "num_attention_heads": 32,
  "num_hidden_layers": 32,
  "num_key_value_heads": 32,
  "original_max_position_embeddings": 4096,
  "pad_token_id": 32000,
  "partial_rotary_factor": 1.0,
  "resid_pdrop": 0.0,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "long_factor": [
      1.0800000429153442, 1.1100000143051147, 1.1399999856948853, 1.340000033378601, 1.5899999141693115, 1.600000023841858,
      1.6200000047683716, 2.620000123977661, 3.2300000190734863, 3.2300000190734863, 4.789999961853027, 7.400000095367432,
      7.700000286102295, 9.09000015258789, 12.199999809265137, 17.670000076293945, 24.46000099182129, 28.57000160217285,
      30.420001983642578, 30.840002059936523, 32.590003967285156, 32.93000411987305, 42.320003509521484, 44.96000289916992,
      50.340003967285156, 50.45000457763672, 57.55000305175781, 57.93000411987305, 58.21000289916992, 60.1400032043457,
      62.61000442504883, 62.62000274658203, 62.71000289916992, 63.1400032043457, 63.1400032043457, 63.77000427246094,
      63.93000411987305, 63.96000289916992, 63.970001220703125, 64.02999877929688, 64.06999969482422, 64.08000183105469,
      64.12000274658203, 64.41000366210938, 64.4800033569336, 64.51000213623047, 64.52999877929688, 64.83999633789062
    ],
    "short_factor": [
      1.0, 1.0199999809265137, 1.0299999713897705, 1.0299999713897705, 1.0499999523162842, 1.0499999523162842,
      1.0499999523162842, 1.0499999523162842, 1.0499999523162842, 1.0699999332427979, 1.0999999046325684, 1.1099998950958252,
      1.1599998474121094, 1.1599998474121094, 1.1699998378753662, 1.2899998426437378, 1.339999794960022, 1.679999828338623,
      1.7899998426437378, 1.8199998140335083, 1.8499997854232788, 1.8799997568130493, 1.9099997282028198, 1.9399996995925903,
      1.9899996519088745, 2.0199997425079346, 2.0199997425079346, 2.0199997425079346, 2.0199997425079346, 2.0199997425079346,
      2.0199997425079346, 2.0299997329711914, 2.0299997329711914, 2.0299997329711914, 2.0299997329711914, 2.0299997329711914,
      2.0299997329711914, 2.0299997329711914, 2.0299997329711914, 2.0299997329711914, 2.0799996852874756, 2.0899996757507324,
      2.189999580383301, 2.2199995517730713, 2.5899994373321533, 2.729999542236328, 2.749999523162842, 2.8399994373321533
    ],
    "type": "longrope"
  },
  "rope_theta": 10000.0,
  "sliding_window": 262144,
  "tie_word_embeddings": false,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.53.0",
  "use_cache": true,
  "vocab_size": 32064
}

[2025-07-05 09:13:01,274][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/model.safetensors.index.json
[2025-07-05 09:13:01,275][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.bfloat16 as defined in model's config object
[2025-07-05 09:13:01,275][transformers.modeling_utils][INFO] - Instantiating Phi3ForSequenceClassification model under default dtype torch.bfloat16.
[2025-07-05 09:13:08,388][transformers.modeling_utils][INFO] - Some weights of the model checkpoint at microsoft/Phi-3.5-mini-instruct were not used when initializing Phi3ForSequenceClassification: ['lm_head.weight']
- This IS expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing Phi3ForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
[2025-07-05 09:13:08,388][transformers.modeling_utils][WARNING] - Some weights of Phi3ForSequenceClassification were not initialized from the model checkpoint at microsoft/Phi-3.5-mini-instruct and are newly initialized: ['score.weight']
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
[2025-07-05 09:13:09,355][__main__][INFO] - Initialized new PEFT model for ce loss
[2025-07-05 09:13:09,357][__main__][INFO] - None
[2025-07-05 09:13:09,358][transformers.training_args][INFO] - PyTorch: setting up devices
[2025-07-05 09:13:09,395][__main__][INFO] - Total steps: 620. Number of warmup steps: 62
[2025-07-05 09:13:09,400][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
[2025-07-05 09:13:09,422][transformers.trainer][INFO] - Using auto half precision backend
[2025-07-05 09:13:09,423][transformers.trainer][WARNING] - No label_names provided for model class `PeftModelForSequenceClassification`. Since `PeftModel` hides base models input arguments, if label_names is not given, label_names can't be set automatically within `Trainer`. Note that empty label_names list will be used instead.
[2025-07-05 09:13:09,424][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text. If essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-05 09:13:09,434][transformers.trainer][INFO] -
***** Running Evaluation *****
[2025-07-05 09:13:09,434][transformers.trainer][INFO] - Num examples = 132
[2025-07-05 09:13:09,434][transformers.trainer][INFO] - Batch size = 4
[2025-07-05 09:13:31,324][transformers.trainer][INFO] - The following columns in the Training set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text. If essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-05 09:13:31,356][transformers.trainer][INFO] - ***** Running training *****
[2025-07-05 09:13:31,356][transformers.trainer][INFO] - Num examples = 500
[2025-07-05 09:13:31,356][transformers.trainer][INFO] - Num Epochs = 20
[2025-07-05 09:13:31,356][transformers.trainer][INFO] - Instantaneous batch size per device = 8
[2025-07-05 09:13:31,356][transformers.trainer][INFO] - Total train batch size (w. parallel, distributed & accumulation) = 16
[2025-07-05 09:13:31,356][transformers.trainer][INFO] - Gradient Accumulation steps = 2
[2025-07-05 09:13:31,356][transformers.trainer][INFO] - Total optimization steps = 640
[2025-07-05 09:13:31,358][transformers.trainer][INFO] - Number of trainable parameters = 25,184,256
[2025-07-05 09:13:31,470][transformers.models.phi3.modeling_phi3][WARNING] - `use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`.
[2025-07-05 09:14:56,611][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text. If essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-05 09:14:56,615][transformers.trainer][INFO] -
***** Running Evaluation *****
[2025-07-05 09:14:56,615][transformers.trainer][INFO] - Num examples = 132
[2025-07-05 09:14:56,615][transformers.trainer][INFO] - Batch size = 4
[2025-07-05 09:15:18,206][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-32
[2025-07-05 09:15:18,512][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /workspace/.hf_home/hub/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/config.json
[2025-07-05 09:16:44,166][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text. If essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-05 09:16:44,170][transformers.trainer][INFO] -
***** Running Evaluation *****
[2025-07-05 09:16:44,170][transformers.trainer][INFO] - Num examples = 132
[2025-07-05 09:16:44,170][transformers.trainer][INFO] - Batch size = 4
[2025-07-05 09:17:05,745][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-64
[2025-07-05 09:17:06,055][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /workspace/.hf_home/hub/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/config.json
[2025-07-05 09:17:06,349][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-32] due to args.save_total_limit
[2025-07-05 09:18:31,648][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text. If essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-05 09:18:31,652][transformers.trainer][INFO] -
***** Running Evaluation *****
[2025-07-05 09:18:31,652][transformers.trainer][INFO] - Num examples = 132
[2025-07-05 09:18:31,652][transformers.trainer][INFO] - Batch size = 4
[2025-07-05 09:18:53,226][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-96
[2025-07-05 09:18:53,697][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /workspace/.hf_home/hub/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/config.json
[2025-07-05 09:20:19,223][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text. If essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-05 09:20:19,226][transformers.trainer][INFO] -
***** Running Evaluation *****
[2025-07-05 09:20:19,226][transformers.trainer][INFO] - Num examples = 132
[2025-07-05 09:20:19,226][transformers.trainer][INFO] - Batch size = 4
[2025-07-05 09:20:40,807][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-128
[2025-07-05 09:20:41,285][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /workspace/.hf_home/hub/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/config.json
[2025-07-05 09:20:41,479][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-96] due to args.save_total_limit
[2025-07-05 09:22:06,786][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text. If essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-05 09:22:06,789][transformers.trainer][INFO] -
***** Running Evaluation *****
[2025-07-05 09:22:06,789][transformers.trainer][INFO] - Num examples = 132
[2025-07-05 09:22:06,789][transformers.trainer][INFO] - Batch size = 4
[2025-07-05 09:22:28,369][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-160
[2025-07-05 09:22:28,661][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /workspace/.hf_home/hub/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/config.json
[2025-07-05 09:22:28,879][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-128] due to args.save_total_limit
[2025-07-05 09:23:54,216][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text. If essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-05 09:23:54,219][transformers.trainer][INFO] -
***** Running Evaluation *****
[2025-07-05 09:23:54,219][transformers.trainer][INFO] - Num examples = 132
[2025-07-05 09:23:54,219][transformers.trainer][INFO] - Batch size = 4
[2025-07-05 09:24:15,789][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-192
[2025-07-05 09:24:16,139][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /workspace/.hf_home/hub/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/config.json
[2025-07-05 09:24:16,383][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-160] due to args.save_total_limit
[2025-07-05 09:25:41,672][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text. If essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-05 09:25:41,675][transformers.trainer][INFO] -
***** Running Evaluation *****
[2025-07-05 09:25:41,675][transformers.trainer][INFO] - Num examples = 132
[2025-07-05 09:25:41,675][transformers.trainer][INFO] - Batch size = 4
[2025-07-05 09:26:03,251][transformers.trainer][INFO] - Saving model checkpoint to /workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-224
[2025-07-05 09:26:03,545][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /workspace/.hf_home/hub/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/config.json
[2025-07-05 09:26:03,780][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-192] due to args.save_total_limit
[2025-07-05 09:26:03,786][transformers.trainer][INFO] -

Training completed. Do not forget to share your model on huggingface.co/models =)


[2025-07-05 09:26:03,786][transformers.trainer][INFO] - Loading best model from /workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-64 (score: 0.32508860011813345).
[2025-07-05 09:26:03,961][transformers.trainer][INFO] - Deleting older checkpoint [/workspace/jbcs2025/outputs/2025-07-05/09-12-52/results/phi4-balanced/C2/checkpoint-224] due to args.save_total_limit
[2025-07-05 09:26:03,969][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text. If essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-05 09:26:03,972][transformers.trainer][INFO] -
***** Running Evaluation *****
[2025-07-05 09:26:03,972][transformers.trainer][INFO] - Num examples = 132
[2025-07-05 09:26:03,972][transformers.trainer][INFO] - Batch size = 4
[2025-07-05 09:26:25,533][__main__][INFO] - Training completed successfully.
[2025-07-05 09:26:25,533][__main__][INFO] - Running on Test
[2025-07-05 09:26:25,534][transformers.trainer][INFO] - The following columns in the Evaluation set don't have a corresponding argument in `PeftModelForSequenceClassification.forward` and have been ignored: essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text. If essay_year, prompt, id, essay_text, reference, grades, id_prompt, supporting_text are not expected by `PeftModelForSequenceClassification.forward`, you can safely ignore this message.
[2025-07-05 09:26:25,536][transformers.trainer][INFO] -
***** Running Evaluation *****
[2025-07-05 09:26:25,536][transformers.trainer][INFO] - Num examples = 138
[2025-07-05 09:26:25,537][transformers.trainer][INFO] - Batch size = 4
[2025-07-05 09:26:48,219][__main__][INFO] - Test metrics: {'eval_loss': 1.7205733060836792, 'eval_model_preparation_time': 0.0071, 'eval_accuracy': 0.2246376811594203, 'eval_RMSE': 72.55183054852598, 'eval_QWK': 0.04586988304093553, 'eval_HDIV': 0.1594202898550725, 'eval_Macro_F1': 0.1469089842699924, 'eval_Micro_F1': 0.2246376811594203, 'eval_Weighted_F1': 0.22365941232090072, 'eval_TP_0': 0, 'eval_TN_0': 137, 'eval_FP_0': 0, 'eval_FN_0': 1, 'eval_TP_1': 8, 'eval_TN_1': 75, 'eval_FP_1': 28, 'eval_FN_1': 27, 'eval_TP_2': 0, 'eval_TN_2': 125, 'eval_FP_2': 8, 'eval_FN_2': 5, 'eval_TP_3': 13, 'eval_TN_3': 56, 'eval_FP_3': 31, 'eval_FN_3': 38, 'eval_TP_4': 8, 'eval_TN_4': 76, 'eval_FP_4': 36, 'eval_FN_4': 18, 'eval_TP_5': 2, 'eval_TN_5': 114, 'eval_FP_5': 4, 'eval_FN_5': 18, 'eval_runtime': 22.675, 'eval_samples_per_second': 6.086, 'eval_steps_per_second': 1.544, 'epoch': 7.0}
[2025-07-05 09:26:48,220][transformers.trainer][INFO] - Saving model checkpoint to ./results/phi4-balanced/C2/best_model
1291
+ [2025-07-05 09:26:48,637][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /workspace/.hf_home/hub/models--microsoft--Phi-3.5-mini-instruct/snapshots/3145e03a9fd4cdd7cd953c34d9bbf7ad606122ca/config.json
+ [2025-07-05 09:26:48,638][transformers.configuration_utils][INFO] - Model config Phi3Config { … } (verbatim repeat of the Phi3Config dump above)
+
+ [2025-07-05 09:26:48,865][transformers.tokenization_utils_base][INFO] - chat template saved in ./results/phi4-balanced/C2/best_model/chat_template.jinja
+ [2025-07-05 09:26:48,866][transformers.tokenization_utils_base][INFO] - tokenizer config file saved in ./results/phi4-balanced/C2/best_model/tokenizer_config.json
+ [2025-07-05 09:26:48,866][transformers.tokenization_utils_base][INFO] - Special tokens file saved in ./results/phi4-balanced/C2/best_model/special_tokens_map.json
+ [2025-07-05 09:26:48,887][__main__][INFO] - Model and tokenizer saved to ./results/phi4-balanced/C2/best_model
+ [2025-07-05 09:26:48,892][__main__][INFO] - Fine Tuning Finished.
+ [2025-07-05 09:26:49,400][__main__][INFO] - Total emissions: 0.0103 kg CO2eq
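
The `eval_QWK` and F1 figures in the test-metrics line above come from comparing predicted and gold score classes. As a minimal sketch (not the project's actual evaluation code), the same quantities can be recomputed with scikit-learn; `y_true`/`y_pred` below are illustrative stand-ins for the 138 test labels:

```python
# Minimal sketch: recompute QWK and F1 from gold/predicted classes (0-5).
# y_true / y_pred are hypothetical; the real ones come from the test split.
from sklearn.metrics import cohen_kappa_score, f1_score

y_true = [3, 1, 4, 2, 5, 3]
y_pred = [3, 1, 3, 2, 4, 1]

qwk = cohen_kappa_score(y_true, y_pred, weights="quadratic")  # eval_QWK
macro_f1 = f1_score(y_true, y_pred, average="macro")          # eval_Macro_F1
micro_f1 = f1_score(y_true, y_pred, average="micro")          # eval_Micro_F1
print(qwk, macro_f1, micro_f1)
```

Note that for single-label classification micro F1 equals plain accuracy, which is why `eval_Micro_F1` and `eval_accuracy` match above.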
special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<|dummy_id_0|>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
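
Because the pad token is mapped to `<|dummy_id_0|>` rather than reusing the EOS token, it is worth sanity-checking the mapping after loading. A minimal sketch, assuming the files above sit in the saved `best_model` directory:

```python
# Sketch: confirm the special-token mapping defined in special_tokens_map.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./results/phi4-balanced/C2/best_model")
assert tok.pad_token == "<|dummy_id_0|>" and tok.pad_token_id == 32011
assert tok.eos_token == "<|endoftext|>" and tok.eos_token_id == 32000
```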
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,139 @@
+ {
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "add_prefix_space": null,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": false
+ },
+ "32000": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "32001": {
+ "content": "<|assistant|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32002": {
+ "content": "<|placeholder1|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32003": {
+ "content": "<|placeholder2|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32004": {
+ "content": "<|placeholder3|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32005": {
+ "content": "<|placeholder4|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32006": {
+ "content": "<|system|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32007": {
+ "content": "<|end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32008": {
+ "content": "<|placeholder5|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32009": {
+ "content": "<|placeholder6|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32010": {
+ "content": "<|user|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": true,
+ "single_word": false,
+ "special": true
+ },
+ "32011": {
+ "content": "<|dummy_id_0|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "bos_token": "<s>",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|endoftext|>",
+ "extra_special_tokens": {},
+ "legacy": false,
+ "model_max_length": 131072,
+ "pad_token": "<|dummy_id_0|>",
+ "padding_side": "left",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
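
Note `"padding_side": "left"`: for a decoder-only model with a sequence-classification head, the pooled logit is read near the end of the sequence, and left padding keeps that final position on a real token across a batch. A hedged sketch of loading the adapter for inference; the repo id below is a placeholder (not the actual Hub id), and the classification head is restored from the weights saved via `modules_to_save`:

```python
# Sketch: load the LoRA adapter plus classification head for essay scoring.
# "<user>/<this-adapter-repo>" is a placeholder, not the actual Hub id.
import torch
from peft import AutoPeftModelForSequenceClassification
from transformers import AutoTokenizer

repo = "<user>/<this-adapter-repo>"
tok = AutoTokenizer.from_pretrained(repo)
model = AutoPeftModelForSequenceClassification.from_pretrained(
    repo, torch_dtype=torch.bfloat16
)
model.eval()

inputs = tok("Texto da redação...", return_tensors="pt")
with torch.no_grad():
    pred = model(**inputs).logits.argmax(-1).item()  # predicted score class
print(pred)
```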
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8af69a66110540d9dfe5067efdda14ee753a28677528b21888a1eb62b4a1a09
+ size 5841
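
`training_args.bin` is a pickled `TrainingArguments` object tracked via Git LFS (hence the pointer file above). A minimal sketch for inspecting the recorded hyperparameters; since pickle can execute code on load, only do this for checkpoints you trust:

```python
# Sketch: inspect the pickled TrainingArguments (trusted files only).
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.per_device_eval_batch_size)
```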