diff --git a/README.md b/README.md
index d7870729c8c41fde8dd90f06fb12742af8a2bd5f..f2a169dafd4cc07270757640d9c5b4935062bb77 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,13 @@
 ---
 pretty_name: "JBCS2025: AES Experimental Logs and Predictions"
 license: "cc-by-nc-4.0"
+configs:
+- config_name: default
+  data_files:
+  - split: evaluation_results
+    path: evaluation_results-*.parquet
+  - split: boostrap_confidence_intervals
+    path: boostrap_confidence_intervals-*.parquet
 tags:
 - automatic-essay-scoring
 - portuguese
diff --git a/boostrap_confidence_intervals-00000-of-00001.parquet b/boostrap_confidence_intervals-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..80574c3285a3c8a4e3760c5a3d32a2e45fc63894
--- /dev/null
+++ b/boostrap_confidence_intervals-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:47fea678a70c7bb2c62475f002e11b2b8784fc038ff771736c558e77ce73845b
+size 11142
diff --git a/create_parquet_files.py b/create_parquet_files.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca9b5fd6fe55f38909d5df219520735b8b73fef4
--- /dev/null
+++ b/create_parquet_files.py
@@ -0,0 +1,40 @@
+import pandas as pd
+from pathlib import Path
+import pyarrow  # ensures pyarrow is installed for Parquet support
+
+
+def find_and_group_csvs():
+    base = Path(".")
+    groups = {
+        "evaluation_results": sorted(base.rglob("evaluation_results.csv")),
+        "bootstrap_confidence_intervals": sorted(base.rglob("bootstrap_confidence_intervals.csv")),
+    }
+    for name, paths in groups.items():
+        print(f"[INFO] Found {len(paths)} files for '{name}'")
+        if not paths:
+            print(f"[WARNING] No files found for '{name}'")
+    return groups
+
+
+def combine(paths, out_path):
+    if not paths:
+        print(f"[SKIP] No files to combine for {out_path}")
+        return
+
+    print(f"[INFO] Combining {len(paths)} files into {out_path}")
+    dfs = [pd.read_csv(p) for p in paths]
+
+    # Basic schema validation
+    cols = {tuple(df.columns) for df in dfs}
+    if len(cols) > 1:
+        raise ValueError(f"[ERROR] {out_path}: header mismatch across shards")
+
+    combined = pd.concat(dfs, ignore_index=True)
+    combined.to_parquet(out_path, engine="pyarrow", index=False)
+    print(f"[SUCCESS] Written {out_path} with {len(combined)} rows")
+
+
+if __name__ == "__main__":
+    groups = find_and_group_csvs()
+    combine(groups["evaluation_results"], "evaluation_results-00000-of-00001.parquet")
+    combine(groups["bootstrap_confidence_intervals"], "boostrap_confidence_intervals-00000-of-00001.parquet")
diff --git a/evaluation_results-00000-of-00001.parquet b/evaluation_results-00000-of-00001.parquet
new file mode 100644
index 0000000000000000000000000000000000000000..ddfbe7719e95563bec3640e0824a16c4f49e26f8
--- /dev/null
+++ b/evaluation_results-00000-of-00001.parquet
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7abeb787de299fb07bc61a9545f50518b37a9a48bdddcfec16dc3596d302c958
+size 23909
diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/config.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/config.yaml
index 7d7448bae50f14f51a6c00b0bfc9f242f26edb12..9d56608e42ee9b58908448ef9231bf84e7a2ed49 100644
--- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/config.yaml
+++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/config.yaml
@@ -8,6 +8,14 @@ training_params:
   logging_steps: 100
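For illustration only (not part of the diff): the two Parquet shards written by create_parquet_files.py are exposed as the splits evaluation_results and boostrap_confidence_intervals via the `configs` block added to README.md above. A minimal usage sketch, assuming the Hugging Face `datasets` library is installed; the dataset's Hub repo id is not shown in this diff, so the shards are loaded from local paths.

    from datasets import load_dataset

    # Load the local Parquet shards; split names mirror the README `configs` block.
    ds = load_dataset(
        "parquet",
        data_files={
            "evaluation_results": "evaluation_results-00000-of-00001.parquet",
            "boostrap_confidence_intervals": "boostrap_confidence_intervals-00000-of-00001.parquet",
        },
    )
    print(ds["evaluation_results"][0])             # one row of per-experiment metrics
    print(ds["boostrap_confidence_intervals"][0])  # one row of 95% CI estimates
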
metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -15,9 +23,9 @@ experiments: name: kamel-usp/jbcs2025_bertimbau_base-C1 type: encoder_classification num_labels: 6 - output_dir: ./results/mbert_base/C1 - logging_dir: ./logs/mbert_base/C1 - best_model_dir: ./results/mbert_base/C1/best_model + output_dir: ./results/ + logging_dir: ./logs/ + best_model_dir: ./results/best_model tokenizer: name: neuralmind/bert-base-portuguese-cased dataset: diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/hydra.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/hydra.yaml index 69c9e0e24ac4004c1732250b36ac492b38b68446..469390df13a62acf0c853cff0384437dd176a783 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/hydra.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=base_models/C1 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=base_models/C1 id: ??? num: ??? config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/22-06-57 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-36-00 choices: experiments: base_models/C1 hydra/env: default diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/overrides.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..78e47947ea11210c8750c6189f7b3c700482a505 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/overrides.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=base_models/C1 diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/bootstrap_confidence_intervals.csv b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/bootstrap_confidence_intervals.csv index cbb1543bf7b7e1b1a1101afbc63225e3b51e2718..e00ae059b2b04226464ff3851e0bd513643bcf15 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/bootstrap_confidence_intervals.csv +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/bootstrap_confidence_intervals.csv @@ -1,2 +1,2 @@ experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width -jbcs2025_bertimbau_base-C1-encoder_classification-C1,2025-06-28 
16:31:25,0.6726698793738349,0.5786694701512399,0.7587417074110893,0.18007223725984933,0.4756728951042896,0.36004609141863914,0.6232464233862081,0.2632003319675689,0.6413009122974154,0.556374600523932,0.7241688998827073,0.16779429935877532 +jbcs2025_bertimbau_base-C1-encoder_classification-C1,2025-06-28 17:36:00,0.6726698793738349,0.5786694701512399,0.7587417074110893,0.18007223725984933,0.4756728951042896,0.36004609141863914,0.6232464233862081,0.2632003319675689,0.6413009122974154,0.556374600523932,0.7241688998827073,0.16779429935877532 diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/evaluation_results.csv b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/evaluation_results.csv index 0005cd68a394b7f161824c8b90bad789d6837c4e..350dea7f2f283f03480e41780984fa7985216595 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/evaluation_results.csv +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.644927536231884,26.37521893583148,0.6742722265932337,0.007246376811594235,0.44138845418188133,0.644927536231884,0.6413771139990777,0,137,0,1,0,138,0,0,5,123,5,5,56,52,20,10,22,79,8,29,6,112,16,4,2025-05-25 22:06:57,jbcs2025_bertimbau_base-C1-encoder_classification-C1 +0.644927536231884,26.37521893583148,0.6742722265932337,0.007246376811594235,0.44138845418188133,0.644927536231884,0.6413771139990777,0,137,0,1,0,138,0,0,5,123,5,5,56,52,20,10,22,79,8,29,6,112,16,4,2025-06-28 17:36:00,jbcs2025_bertimbau_base-C1-encoder_classification-C1 diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/run_inference_experiment.log b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/run_inference_experiment.log index c864f0c61c380697ed21244ab1b67625e8b6a52d..e156d9fa146eb6035ec380c956388c42da75073d 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/run_inference_experiment.log +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C1-encoder_classification-C1/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 22:06:57,689][__main__][INFO] - Starting inference experiment -[2025-05-25 22:06:57,690][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 17:36:00,040][__main__][INFO] - Starting inference experiment +[2025-06-28 17:36:00,041][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,6 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -16,9 +24,9 @@ experiments: name: kamel-usp/jbcs2025_bertimbau_base-C1 type: encoder_classification num_labels: 6 - output_dir: ./results/mbert_base/C1 - logging_dir: ./logs/mbert_base/C1 - best_model_dir: ./results/mbert_base/C1/best_model + output_dir: ./results/ + logging_dir: ./logs/ + best_model_dir: ./results/best_model tokenizer: name: neuralmind/bert-base-portuguese-cased dataset: @@ -32,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 
22:06:57,692][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 22:07:02,772][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:07:02,773][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:36:00,055][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 17:36:04,586][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:36:04,587][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -59,20 +67,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:07:05,017][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt -[2025-05-25 22:07:05,018][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None -[2025-05-25 22:07:05,018][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json -[2025-05-25 22:07:05,018][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json -[2025-05-25 22:07:05,018][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json -[2025-05-25 22:07:05,018][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 22:07:05,018][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:07:05,019][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt +[2025-06-28 17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None +[2025-06-28 17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json +[2025-06-28 17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json +[2025-06-28 
17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json +[2025-06-28 17:36:04,589][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 17:36:04,590][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:36:04,591][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -97,14 +105,14 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:07:05,045][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:07:05,045][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:36:04,639][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:36:04,639][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -129,16 +137,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:07:05,058][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 22:07:05,260][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C1 -[2025-05-25 22:07:05,945][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C1/snapshots/1ad2e0f61009276ce3c1d23b24b6f55e0eb102d8/config.json -[2025-05-25 22:07:05,947][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:36:04,653][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 17:36:04,665][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C1 +[2025-06-28 17:36:04,870][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C1/snapshots/1ad2e0f61009276ce3c1d23b24b6f55e0eb102d8/config.json +[2025-06-28 17:36:04,874][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -181,27 +189,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:07:23,482][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at 
/tmp/models--kamel-usp--jbcs2025_bertimbau_base-C1/snapshots/1ad2e0f61009276ce3c1d23b24b6f55e0eb102d8/model.safetensors -[2025-05-25 22:07:23,526][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 17:36:04,937][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C1/snapshots/1ad2e0f61009276ce3c1d23b24b6f55e0eb102d8/model.safetensors +[2025-06-28 17:36:04,937][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 17:36:04,937][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 17:36:05,142][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 22:07:23,526][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C1. +[2025-06-28 17:36:05,142][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C1. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 22:07:23,570][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 22:07:23,606][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). -[2025-05-25 22:07:23,831][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 22:07:23,832][__main__][INFO] - Running inference on test dataset -[2025-05-25 22:07:23,832][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_year, essay_text, reference, grades, prompt, id_prompt, id, supporting_text. If essay_year, essay_text, reference, grades, prompt, id_prompt, id, supporting_text are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 22:07:23,836][transformers.trainer][INFO] - +[2025-06-28 17:36:05,148][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 17:36:05,160][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 17:36:05,164][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. 
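Context for the bootstrap lines recorded further down in this log: the run reports 95% bootstrap confidence intervals for QWK, Macro_F1 and Weighted_F1 using the `bootstrap` block added to the config above (n_bootstrap: 10000, bootstrap_seed: 42). The following is a minimal percentile-bootstrap sketch of that computation, assuming numpy and scikit-learn; it is an illustration, not the repository's actual implementation.

    import numpy as np
    from sklearn.metrics import cohen_kappa_score, f1_score

    def bootstrap_ci(y_true, y_pred, n_bootstrap=10_000, seed=42):
        """Percentile-bootstrap mean and 95% CI for QWK, Macro_F1 and Weighted_F1."""
        rng = np.random.default_rng(seed)
        y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
        metrics = {
            "QWK": lambda t, p: cohen_kappa_score(t, p, weights="quadratic"),
            "Macro_F1": lambda t, p: f1_score(t, p, average="macro", zero_division=0),
            "Weighted_F1": lambda t, p: f1_score(t, p, average="weighted", zero_division=0),
        }
        samples = {name: [] for name in metrics}
        for _ in range(n_bootstrap):
            # Resample the test set with replacement and re-score each metric.
            idx = rng.integers(0, len(y_true), size=len(y_true))
            for name, fn in metrics.items():
                samples[name].append(fn(y_true[idx], y_pred[idx]))
        results = {}
        for name, vals in samples.items():
            lower, upper = np.percentile(vals, [2.5, 97.5])
            results[name] = {
                "mean": float(np.mean(vals)),
                "lower_95ci": float(lower),
                "upper_95ci": float(upper),
                "ci_width": float(upper - lower),
            }
        return results

Under this sketch, the four values per metric correspond to the *_mean, *_lower_95ci, *_upper_95ci and *_ci_width columns of bootstrap_confidence_intervals.csv.
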
+[2025-06-28 17:36:05,180][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 17:36:08,901][__main__][INFO] - Running inference on test dataset +[2025-06-28 17:36:08,902][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: prompt, reference, id, supporting_text, essay_text, essay_year, id_prompt, grades. If prompt, reference, id, supporting_text, essay_text, essay_year, id_prompt, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. +[2025-06-28 17:36:08,906][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 22:07:23,836][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 22:07:23,836][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 22:07:24,356][transformers][INFO] - {'accuracy': 0.644927536231884, 'RMSE': 26.37521893583148, 'QWK': 0.6742722265932337, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.44138845418188133, 'Micro_F1': 0.644927536231884, 'Weighted_F1': 0.6413771139990777, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(5), 'TN_2': np.int64(123), 'FP_2': np.int64(5), 'FN_2': np.int64(5), 'TP_3': np.int64(56), 'TN_3': np.int64(52), 'FP_3': np.int64(20), 'FN_3': np.int64(10), 'TP_4': np.int64(22), 'TN_4': np.int64(79), 'FP_4': np.int64(8), 'FN_4': np.int64(29), 'TP_5': np.int64(6), 'TN_5': np.int64(112), 'FP_5': np.int64(16), 'FN_5': np.int64(4)} -[2025-05-25 22:07:24,367][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C1-encoder_classification-C1_inference_results.jsonl -[2025-05-25 22:07:24,373][__main__][INFO] - Inference results: {'accuracy': 0.644927536231884, 'RMSE': 26.37521893583148, 'QWK': 0.6742722265932337, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.44138845418188133, 'Micro_F1': 0.644927536231884, 'Weighted_F1': 0.6413771139990777, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(5), 'TN_2': np.int64(123), 'FP_2': np.int64(5), 'FN_2': np.int64(5), 'TP_3': np.int64(56), 'TN_3': np.int64(52), 'FP_3': np.int64(20), 'FN_3': np.int64(10), 'TP_4': np.int64(22), 'TN_4': np.int64(79), 'FP_4': np.int64(8), 'FN_4': np.int64(29), 'TP_5': np.int64(6), 'TN_5': np.int64(112), 'FP_5': np.int64(16), 'FN_5': np.int64(4)} -[2025-05-25 22:07:24,373][__main__][INFO] - Inference experiment completed +[2025-06-28 17:36:08,906][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 17:36:08,906][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 17:36:12,220][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C1-encoder_classification-C1_inference_results.jsonl +[2025-06-28 17:36:12,224][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 17:37:02,235][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 17:37:02,235][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 17:37:02,235][__main__][INFO] - QWK: 0.6727 [0.5787, 0.7587] +[2025-06-28 17:37:02,235][__main__][INFO] - Macro_F1: 0.4757 [0.3600, 0.6232] +[2025-06-28 17:37:02,235][__main__][INFO] - Weighted_F1: 0.6413 [0.5564, 0.7242] +[2025-06-28 
17:37:02,235][__main__][INFO] - Inference results: {'accuracy': 0.644927536231884, 'RMSE': 26.37521893583148, 'QWK': 0.6742722265932337, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.44138845418188133, 'Micro_F1': 0.644927536231884, 'Weighted_F1': 0.6413771139990777, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(5), 'TN_2': np.int64(123), 'FP_2': np.int64(5), 'FN_2': np.int64(5), 'TP_3': np.int64(56), 'TN_3': np.int64(52), 'FP_3': np.int64(20), 'FN_3': np.int64(10), 'TP_4': np.int64(22), 'TN_4': np.int64(79), 'FP_4': np.int64(8), 'FN_4': np.int64(29), 'TP_5': np.int64(6), 'TN_5': np.int64(112), 'FP_5': np.int64(16), 'FN_5': np.int64(4)} +[2025-06-28 17:37:02,235][__main__][INFO] - Inference experiment completed diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/config.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/config.yaml index b4a68132c637cc1d0d81c2d8452e35528e428261..b18e0905fbe478a68086c2616f6f63e75142e116 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/config.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/config.yaml @@ -8,6 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/hydra.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/hydra.yaml index 951596781401847dc4831364940ec257572fe769..30f49384d432c87c93f32552ca9af8a58af30e52 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/hydra.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=base_models/C2 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=base_models/C2 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/22-33-08 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-37-11 choices: experiments: base_models/C2 hydra/env: default diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/overrides.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..b2ff6385847671687feaef4b9fecb4d81cc52fc3 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/overrides.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=base_models/C2 diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/bootstrap_confidence_intervals.csv b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..fd9bb06d124083d85ae8fad0b5709c7c1c94b178 --- /dev/null +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_bertimbau_base-C2-encoder_classification-C2,2025-06-28 17:37:11,0.41819188204779456,0.27759865754644286,0.5466018786751335,0.2690032211286907,0.29623085261327686,0.21542890620802888,0.3976815226515651,0.18225261644353621,0.3817868369579885,0.2993269590182539,0.46412896590642116,0.16480200688816726 diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/evaluation_results.csv b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/evaluation_results.csv index 44823b0c10905c0340ab3b77fe724216c14c377e..bee9e424a5dedf0b6ac4ce7996c8c41ce6c567c2 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/evaluation_results.csv +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.3695652173913043,55.74268002896518,0.41025641025641035,0.06521739130434778,0.27254317053298555,0.3695652173913043,0.37216098145030935,0,137,0,1,13,90,13,22,3,112,21,2,25,54,33,26,4,100,12,22,6,110,8,14,2025-05-25 22:33:08,jbcs2025_bertimbau_base-C2-encoder_classification-C2 +0.37681159420289856,55.32512598464997,0.4220445459737294,0.06521739130434778,0.2801049472150572,0.37681159420289856,0.38226236003582026,0,137,0,1,13,90,13,22,3,112,21,2,25,56,31,26,5,99,13,21,6,110,8,14,2025-06-28 17:37:11,jbcs2025_bertimbau_base-C2-encoder_classification-C2 diff --git 
a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/jbcs2025_bertimbau_base-C2-encoder_classification-C2_inference_results.jsonl b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/jbcs2025_bertimbau_base-C2-encoder_classification-C2_inference_results.jsonl index fe79b85e69d0fe682cc8b1ed481a46729e207d22..0f79afa15c3d039aea394ff7dea834e7f133ab3d 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/jbcs2025_bertimbau_base-C2-encoder_classification-C2_inference_results.jsonl +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/jbcs2025_bertimbau_base-C2-encoder_classification-C2_inference_results.jsonl @@ -39,7 +39,7 @@ {"id": "5.html", "id_prompt": "agrotoxicos-ou-defensivos-agricolas-dois-nomes-e-uma-polemica", "essay_text": "A forma como determinados métodos ou produtos são denominados decorre, muitas vezes, dos efeitos que eles causam. \nPode-se citar, por exemplo, o emprego de pesticidas em plantações, cujos reconhecidos efeitos nocivos ao meio ambiente e à saúde humana, reforçam os motivos para que os mesmos não sejam vistos como defensores agrícolas, mas sim como agrotóxicos.\nCom efeito, o fomento ao plantio orgânico e a políticas ambientais mais responsáveis são as medidas que se fazem mais pertinentes.\nNesse contexto, os agrotóxicos se tornaram parte integrante da produção agrícola, sendo justificado, tanto por seus fabricantes quanto por grandes produtores rurais, de que seu uso é imprescindível para a agricultura em larga escala, assim como seu uso não representa riscos significativos. No entanto, a realização de pesquisas científicas, como a realizada por Rachel Carson em 1962, constatam que seu uso agride o meio ambiente e que está relacionado a diversos fatores que desencadeiam o desequilíbrio ambiental, como a contaminação dos solos e lençóis freáticos, a sedimentação de rios bem como a contaminação dos próprios alimentos, o que atinge de forma direta a saúde dos seres humanos.\nAlém disso, observa-se uma carência de políticas públicas que dêem a devida importância ao assunto, no sentido de fomentar uma maior fiscalização, bem como investir em medidas que incentivem a diminuição do uso de agrotóxicos nas produções agrícolas. Ao invés disso, nota-se um expressivo aumento de seu uso nas plantações a cada ano, situação que pode ser agravada em virtude do alto número de agrotóxicos liberados, recentemente, pelo governo federal brasileiro.\nUrge, portanto, a valorização de alternativas mais saudáveis na agricultura por meio de políticas ambientais mais conscientes. Dessa forma, verifica-se a necessidade de potencializar a agricultura orgânica familiar, com o aumento de subsídios para o seu desenvolvimento, assim como o fortalecimento de instrumentos legislativos que imponham limites mais rigorosos quanto a utilização de componentes químicos em lavouras. Assim, tais ações visam não apenas mitigar o uso de pesticidas na agricultura, como também amadurecer uma conscientização na sociedade, de que, a exemplo da sua mais adequada denominação, seu uso traz resultados mais tóxicos do que benéficos.", "label": 3, "prediction": 4, "grade_index": 1, "reference": ""} {"id": "19.html", "id_prompt": "a-terapia-de-reversao-da-orientacao-sexual", "essay_text": "Representantes do CFP (Conselho Federal de Psicologia) estão discutindo recentemente sobre a terapia da orientação sexual, algo sem fundamento. 
\nFatos comprovam que o Brasil é um dos países mais violentos contra gays, lésbicas, e transexuais do mundo, mas isso não é motivo para que tenha como obrigatoriedade curar as pessoas que sejam assim. \nOuve-se muito comentários maldosos discriminando essas pessoas. Mas já pararam para pensar que todos tem têm seu direito de ser o que quiserem ser? Pessoas com um pensamento muito fechado, que não se abrem para entender o que os outros tem têm de diferente e que também são normais apoiam com certeza a \"cura gay\".\nSeria mais fácil se todos pudessem escolher o que ser sem se preocupar com o que os outros julgam. Ao invés de criar propostas de terapia de orientação sexual o CFP podia realizar métodos de igualdade que cumprissem tudo conforme os gostos e modos de cada. ", "label": 3, "prediction": 3, "grade_index": 1, "reference": ""} {"id": "0.html", "id_prompt": "o-brasil-e-os-imigrantes-no-mundo-contemporaneo", "essay_text": "A imigração, que sempre fez parte da história e da formação dos povos, tem se tornado um problema de difícil solução. Tanto os países desenvolvidos, como os países em desenvolvimento, como o Brasil, têm encontrado diversos obstáculos na tentativa de normalizar os fluxos migratórios, cada vez mais frequentes e preocupantes.\nMuitos são os motivos que levam milhões de pessoas ao redor do mundo a deixar seu país de origem, dentre os quais se destacam as crises econômicas, sociais e políticas, a exemplo da Venezuela, cujo governo nada tem feito para mudar a situação precária em que se encontra o país, tornando o crescente êxodo de seus habitantes insustentável para os países vizinhos.\nEm que pese à necessidade de acolher de forma digna e humanitária os imigrantes que buscam melhores condições de vida, o Brasil tem revelado nos últimos meses insuficiência de infraestrutura e de recursos financeiros para garantir segurança e qualidade de vida tanto para os imigrantes, como para os brasileiros que passaram a conviver com essa realidade.\nO Brasil deve assegurar a entrada pacífica e digna tantos dos imigrantes venezuelanos, como de quaisquer outros imigrantes que necessitem garantir a sua própria subsistência e de sua família, como forma de respeito e proteção aos direitos humanos. A fim de efetivar esses direitos, no entanto, o país tem pela frente o desafio de distribuir corretamente esses estrangeiros no território nacional e dar a eles a assistência social necessária, a fim de evitar o aumento da desigualdade social e a xenofobia.", "label": 4, "prediction": 3, "grade_index": 1, "reference": ""} -{"id": "8.html", "id_prompt": "qualificacao-e-o-futuro-do-emprego", "essay_text": "Emprego é o que estrutura os cidadãos de uma sociedade, fornecendo um meio de sobrevivência. Porém com o aumento da qualificação profissional e a chegada da tecnologia no mercado de trabalho, a curto prazo terá um efeito negativo para os indivíduos da conjuntura social. Uma vez que, não há igualdade de oportunidades para todas as pessoas, ocasionado o desemprego.\nNo filme \"Tempos Modernos\", de Charlie Chaplin, evidência uma perspectiva onde os operários tinham apenas uma função para desempenhar, contudo, se surgisse uma tecnologia que substituísse essa mão de obra, como consequência os trabalhadores ficariam desempregados. 
E com os avanços tecnológicos, o mercado de trabalho requisita uma maior capacitação profissional, logo, aqueles considerados menos qualificados ficam de fora dessa seleção, não existindo para eles uma estrutura social adequada que assegure que serão preparados para ter mais capacidade na profissão.\nKarl Marx e Fredrich Engels abordaram a temática dos meios de profissão, como exemplo, o capitalismo. Foi perceptível para eles que existe uma classe que domina e outra que é dominada, desse modo, não tem igualdade de oportunidades para as pessoas. Visto que, são classificados de acordo com a riqueza que possuem, dessa maneira, existe um abismo social, pois há aqueles que tem mais suporte para alcançar mais qualificação, enquanto, os pobres desprovidos disso encontram dificuldades para entrar nessa área competitiva que é o mercado de trabalho.\nFatores como esses revelam a necessidade de melhorias sociais que se realizadas corretamente promoverá a longo prazo uma diminuição do desemprego, com qualidade e segurança. Portanto, deve haver mobilização de instituições privadas para financiar o investimento na construção de centros de qualificação profissional aos indivíduos que precisam dessa melhoria, assim, o Estado em conjunto com os componentes dele devem direcionar a geração de emprego para a sociedade, realizando concursos públicos que garantam estabilidade para o cidadão", "label": 4, "prediction": 3, "grade_index": 1, "reference": ""} +{"id": "8.html", "id_prompt": "qualificacao-e-o-futuro-do-emprego", "essay_text": "Emprego é o que estrutura os cidadãos de uma sociedade, fornecendo um meio de sobrevivência. Porém com o aumento da qualificação profissional e a chegada da tecnologia no mercado de trabalho, a curto prazo terá um efeito negativo para os indivíduos da conjuntura social. Uma vez que, não há igualdade de oportunidades para todas as pessoas, ocasionado o desemprego.\nNo filme \"Tempos Modernos\", de Charlie Chaplin, evidência uma perspectiva onde os operários tinham apenas uma função para desempenhar, contudo, se surgisse uma tecnologia que substituísse essa mão de obra, como consequência os trabalhadores ficariam desempregados. E com os avanços tecnológicos, o mercado de trabalho requisita uma maior capacitação profissional, logo, aqueles considerados menos qualificados ficam de fora dessa seleção, não existindo para eles uma estrutura social adequada que assegure que serão preparados para ter mais capacidade na profissão.\nKarl Marx e Fredrich Engels abordaram a temática dos meios de profissão, como exemplo, o capitalismo. Foi perceptível para eles que existe uma classe que domina e outra que é dominada, desse modo, não tem igualdade de oportunidades para as pessoas. Visto que, são classificados de acordo com a riqueza que possuem, dessa maneira, existe um abismo social, pois há aqueles que tem mais suporte para alcançar mais qualificação, enquanto, os pobres desprovidos disso encontram dificuldades para entrar nessa área competitiva que é o mercado de trabalho.\nFatores como esses revelam a necessidade de melhorias sociais que se realizadas corretamente promoverá a longo prazo uma diminuição do desemprego, com qualidade e segurança. 
Portanto, deve haver mobilização de instituições privadas para financiar o investimento na construção de centros de qualificação profissional aos indivíduos que precisam dessa melhoria, assim, o Estado em conjunto com os componentes dele devem direcionar a geração de emprego para a sociedade, realizando concursos públicos que garantam estabilidade para o cidadão", "label": 4, "prediction": 4, "grade_index": 1, "reference": ""} {"id": "17.html", "id_prompt": "agrotoxicos-ou-defensivos-agricolas-dois-nomes-e-uma-polemica", "essay_text": "Em meio a esta discussão para definir um nome aos produtos utilizados na agricultura, é importante destacar que há dois pontos de vista: o dos ambientalistas e o dos fabricantes/agricultores. Sendo assim, ambos as partes trazem argumentos que defendem suas conclusões de acordo com suas áreas de experiência.\nOs ambientalistas, aqueles que possuem como principal objetivo preservar o meio ambiente, afirmam que os produtos devem ser chamados de \"agrotóxicos\" por causarem um impacto negativo (tóxico) aos seres vivos. Já os agricultores ou fabricantes desses produtos defendem que o nome certo para os peticidas é \"defensivos agrícolas\", por defenderem as plantações das pragas.\nPara finalmente chegar a um desfecho desta discussão, deve-se analisar a real função dos peticidas, que é a seguinte: evitar danos causados por pragas em plantações. Ao mesmo tempo que esses produtos protegem as plantações, eles causam danos ao meio ambiente e aos seres vivos, ou seja, possuem a função de defender mas são tóxicos. Os ambientalistas estão ao lado das consequências da toxidade, e os que produzem e trabalham com esses produtos estão preocupados apenas com a produtividade (o ponto de vista econômico), sem se preocupar com os danos causados pelos produtos que utilizam para faturar.\nEm virtude dos fatos analisados acima, conclui-se que ambos os nomes propostos se encaixam aos peticidas, porém o mesmo não irá deixar de ser tóxico por atuar na defesa das plantações, e do ponto de vista ambientalista, esses produtos trazem mais desvantagens do que vantagens à todo o planeta, logo, o termo mais correto a ser utilizado seria \"agrotóxico\".", "label": 3, "prediction": 3, "grade_index": 1, "reference": ""} {"id": "11.html", "id_prompt": "qualificacao-e-o-futuro-do-emprego", "essay_text": "No decorrer dos séculos XVIII e XX, o mundo passou por três revoluções industriais, e atualmente, no século XXI, a quarta revolução industrial já acontece. As inovações tecnológicas proporcionadas por esse fenômeno impactam diversos setores como, saúde e agricultura, e a tendência é o mercado de trabalho aderir às novas tecnologias que estão sendo desenvolvidas. Entretanto, se os trabalhadores não acompanharem as modernizações, as oportunidades de emprego poderão se tornar escassas.\nSegundo dados do Instituto Brasileiro de Geografia e Estatística (IBGE), é importante destacar que o Brasil possuía uma taxa de desemprego de 11,2% no final de 2019, aproximadamente 12 milhões de pessoas. Contudo, existem setores com muita disponibilidade de vagas, como os de tecnologia, os quais exigem uma qualificação do candidato, sendo esse requisito, um fator desclassificatório para muitas pessoas que não possuem uma formação superior.\nCom o desenvolvimento da Inteligência Artificial (IA), muitas tarefas padronizadas que eram realizadas por serem humanos foram e estão sendo substituídas pelas máquinas inteligentes. 
Em oposição a isso, vários empregos surgiram, como e-commerce, marketing online e e-business, porém esses serviços não conseguem abranger a demanda de indivíduos descapacitados, pois as premissas principais desses ramos são a criatividade e capacitação para o uso da tecnologia. Sendo assim, as pessoas excluídas acabam recorrendo aos subempregos ou não conseguem trabalhar.\nPortanto, é primordial que o Estado faça adesão de medidas para amenizar a realidade atual. A fim de que os jovens possam ter conhecimento das transformações tecnológicas, é necessário que as escolas realizem trabalhos, palestras e projetos de vida relacionados às profissões atuais. Em adição, a presença do Ministério da Educação e Cultura (MEC) é importante, para a implantação de disciplinas obrigatórias no componente curricular das escolas, como informática ou robótica, para que a juventude possa se integrar mais facilmente no mercado tecnológico. Já para a população adulta, o Governo deve garantir a existência de cursos gratuitos para a sociedade. Com essas ações, os trabalhadores poderão acompanhar as inovações da era e conquistar uma vaga de emprego.", "label": 5, "prediction": 5, "grade_index": 1, "reference": ""} {"id": "13.html", "id_prompt": "o-brasil-e-os-imigrantes-no-mundo-contemporaneo", "essay_text": "Sabe-se que os imigrantes venezuelanos estão migrando do seu país de origem devido a crise econômica.\nComo citou Maquiavel “Não há nada mais difícil ou perigoso do que tomar a frente na introdução de uma mudança”, é indubitável que o governo tem que pensar em diversas maneiras de acolher ou de rejeitar os imigrantes por quê pode gerar desemprego, violência, falta de saneamento básico, dentre outros fatores, devido a grande quantidade de pessoas que imigram.\nPorém, o Governo deve tomar medidas cabíveis, permitindo imigração de 25% dos venezuelanos. Assim, o Governo, junto com as empresas privadas, pode gerar novos empregos e com isso diminuindo a violência. ", "label": 4, "prediction": 2, "grade_index": 1, "reference": ""} @@ -65,7 +65,7 @@ {"id": "1.html", "id_prompt": "agrotoxicos-ou-defensivos-agricolas-dois-nomes-e-uma-polemica", "essay_text": "Os agrotóxicos, ou pesticidas como alguns preferem, são produtos que fortalecem o desenvolvimento das plantas, evitando que elas sejam atingidas por pestes e que deixam elas mais formosas. O problema começa quando esses produtos começam a causar problemas aos seres vivos, assim como é retratado no livro \"Primavera Silenciosa\", de Rachel Carson.\nA partir do momento em que o agrotóxico entra em contato com o solo, com lagoas ou córregos, ele pode prejudicar os animais selvagens, domésticos e os seres humanos que consomem o alimento ou a água contaminada por ele, levando quem o consumiu a ser contaminado também.\nApesar do agrotóxico ter as suas vantagens, o fato de que ele é prejudicial à saúde é inegável e indiscutível. 
Só que a maioria das industrias químicas e dos agricultores estão mais preocupados nos lucros que eles vão ter em cima dos produtos que eles vendem do que nas consequências do uso dos pesticidas.\nOs agricultores deveriam usar outros recursos para proteger suas plantações, como produtos que, ao mesmo tempo que protegessem suas plantações, não fizessem mal para os seres que ali viver nem que contaminasse as águas de rios, oceanos e córregos que ali se encontram.", "label": 1, "prediction": 3, "grade_index": 1, "reference": ""} {"id": "0.html", "id_prompt": "o-brasil-e-os-imigrantes-no-mundo-contemporaneo", "essay_text": "A imigração, que sempre fez parte da história e da formação dos povos, tem se tornado um problema de difícil solução. Tanto os países desenvolvidos, como os países em desenvolvimento, como o Brasil, têm encontrado diversos obstáculos na tentativa de normalizar os fluxos migratórios, cada vez mais frequentes e preocupantes.\nMuitos são os motivos que levam milhões de pessoas ao redor do mundo a deixar seu país de origem, dentre os quais se destacam as crises econômicas, sociais e políticas, a exemplo da Venezuela, cujo governo nada tem feito para mudar a situação precária em que se encontra o país, tornando o crescente êxodo de seus habitantes insustentável para os países vizinhos.\nEm que pese à necessidade de acolher de forma digna e humanitária os imigrantes que buscam melhores condições de vida, o Brasil tem revelado nos últimos meses insuficiência de infraestrutura e de recursos financeiros para garantir segurança e qualidade de vida tanto para os imigrantes, como para os brasileiros que passaram a conviver com essa realidade.\nO Brasil deve assegurar a entrada pacífica e digna tantos dos imigrantes venezuelanos, como de quaisquer outros imigrantes que necessitem garantir a sua própria subsistência e de sua família, como forma de respeito e proteção aos direitos humanos. A fim de efetivar esses direitos, no entanto, o país tem pela frente o desafio de distribuir corretamente esses estrangeiros no território nacional e dar a eles a assistência social necessária, a fim de evitar o aumento da desigualdade social e a xenofobia.", "label": 3, "prediction": 3, "grade_index": 1, "reference": ""} {"id": "10.html", "id_prompt": "o-brasil-e-os-imigrantes-no-mundo-contemporaneo", "essay_text": "No mundo atual, milhares de indivíduos estão saindo do seu país natal e deslocando-se para o Brasil. Tomam essa decisão, de deixar famílias, amigos e parentes, no intuito de trabalhar e conquistar uma vida melhor para a sua sobrevivência.\nEsses seres humanos estão por toda parte no nosso país. Em ruas, praias e empresas, vendendo de tudo para se manter. Infelizmente, essas ocorrências são desagradáveis, pois precisam fugir de guerras, pobreza e crises para continuar a sua motivação de vida. Para algumas pessoas, isso pode ser algo aprazível, mas para outras não.\nA migração está evoluindo cada vez mais, todos os dias em função dos aspectos humanistas. Cada pessoa é única e a sua vida deve ser construída da maneira, que resultam o sucesso e a tranquilidade Nada é fácil atualmente, mas não podemos desanimar. A luta de hoje, será o sucesso de amanhã.", "label": 1, "prediction": 2, "grade_index": 1, "reference": ""} -{"id": "8.html", "id_prompt": "qualificacao-e-o-futuro-do-emprego", "essay_text": "Emprego é o que estrutura os cidadãos de uma sociedade, fornecendo um meio de sobrevivência. 
Porém com o aumento da qualificação profissional e a chegada da tecnologia no mercado de trabalho, a curto prazo terá um efeito negativo para os indivíduos da conjuntura social. Uma vez que, não há igualdade de oportunidades para todas as pessoas, ocasionado o desemprego.\nNo filme \"Tempos Modernos\", de Charlie Chaplin, evidência uma perspectiva onde os operários tinham apenas uma função para desempenhar, contudo, se surgisse uma tecnologia que substituísse essa mão de obra, como consequência os trabalhadores ficariam desempregados. E com os avanços tecnológicos, o mercado de trabalho requisita uma maior capacitação profissional, logo, aqueles considerados menos qualificados ficam de fora dessa seleção, não existindo para eles uma estrutura social adequada que assegure que serão preparados para ter mais capacidade na profissão.\nKarl Marx e Fredrich Engels abordaram a temática dos meios de profissão, como exemplo, o capitalismo. Foi perceptível para eles que existe uma classe que domina e outra que é dominada, desse modo, não tem igualdade de oportunidades para as pessoas. Visto que, são classificados de acordo com a riqueza que possuem, dessa maneira, existe um abismo social, pois há aqueles que tem mais suporte para alcançar mais qualificação, enquanto, os pobres desprovidos disso encontram dificuldades para entrar nessa área competitiva que é o mercado de trabalho.\nFatores como esses revelam a necessidade de melhorias sociais que se realizadas corretamente promoverá a longo prazo uma diminuição do desemprego, com qualidade e segurança. Portanto, deve haver mobilização de instituições privadas para financiar o investimento na construção de centros de qualificação profissional aos indivíduos que precisam dessa melhoria, assim, o Estado em conjunto com os componentes dele devem direcionar a geração de emprego para a sociedade, realizando concursos públicos que garantam estabilidade para o cidadão", "label": 5, "prediction": 3, "grade_index": 1, "reference": ""} +{"id": "8.html", "id_prompt": "qualificacao-e-o-futuro-do-emprego", "essay_text": "Emprego é o que estrutura os cidadãos de uma sociedade, fornecendo um meio de sobrevivência. Porém com o aumento da qualificação profissional e a chegada da tecnologia no mercado de trabalho, a curto prazo terá um efeito negativo para os indivíduos da conjuntura social. Uma vez que, não há igualdade de oportunidades para todas as pessoas, ocasionado o desemprego.\nNo filme \"Tempos Modernos\", de Charlie Chaplin, evidência uma perspectiva onde os operários tinham apenas uma função para desempenhar, contudo, se surgisse uma tecnologia que substituísse essa mão de obra, como consequência os trabalhadores ficariam desempregados. E com os avanços tecnológicos, o mercado de trabalho requisita uma maior capacitação profissional, logo, aqueles considerados menos qualificados ficam de fora dessa seleção, não existindo para eles uma estrutura social adequada que assegure que serão preparados para ter mais capacidade na profissão.\nKarl Marx e Fredrich Engels abordaram a temática dos meios de profissão, como exemplo, o capitalismo. Foi perceptível para eles que existe uma classe que domina e outra que é dominada, desse modo, não tem igualdade de oportunidades para as pessoas. 
Visto que, são classificados de acordo com a riqueza que possuem, dessa maneira, existe um abismo social, pois há aqueles que tem mais suporte para alcançar mais qualificação, enquanto, os pobres desprovidos disso encontram dificuldades para entrar nessa área competitiva que é o mercado de trabalho.\nFatores como esses revelam a necessidade de melhorias sociais que se realizadas corretamente promoverá a longo prazo uma diminuição do desemprego, com qualidade e segurança. Portanto, deve haver mobilização de instituições privadas para financiar o investimento na construção de centros de qualificação profissional aos indivíduos que precisam dessa melhoria, assim, o Estado em conjunto com os componentes dele devem direcionar a geração de emprego para a sociedade, realizando concursos públicos que garantam estabilidade para o cidadão", "label": 5, "prediction": 4, "grade_index": 1, "reference": ""} {"id": "5.html", "id_prompt": "o-brasil-e-os-imigrantes-no-mundo-contemporaneo", "essay_text": "Um dos temas mais debatidos atualmente, a migração é um fenômeno que vem crescendo à medida que povos tentam fugir de crises humanitárias como as que assolam a Síria e a Venezuela. Se, por um lado, há aqueles que defendem o acolhimento das pessoas que tentam obter residência em outros países; existem grupos cada vez maiores de indivíduos contrários a políticas mais brandas de imigração, devido à atual fragilidade da econômica mundial.\nA movimentação de povos em todo o mundo é um processo inerente à raça humana e é responsável por mudanças que contribuíram para a existência do homem no planeta Terra. Porém, o que antes era usado como meio de busca por locais com melhores condições naturais, como regiões com água potável, passou, após o início dos conflitos entre povos, a servir como fuga das mazelas geradas por conflitos. Um exemplo disso é a vinda, para o Brasil, de europeus, que fugiam das consequências da segunda guerra mundial, responsável pela destruição de grande parte do velho continente. \nConquanto a crise migratória na Europa recebeu, nos últimos anos, um destaque nas discussões entre as autoridades mundiais, o que mais tem preocupado os brasileiros é o fluxo de venezuelanos que vem imigrando para o Brasil. Isso porque o maior país da America do Sul, historicamente um lugar que sempre recebeu bem imigrantes, uma das características que contribuíram para que o país se tornasse uma das maiores potências mundiais, não está preparado para receber muitas pessoas. Um exemplo disso é a precariedade dos serviços públicos, como na área da saúde, que já prejudica milhares de brasileiros e poderia piorar com um aumento populacional repentino, como aconteceu em Roraima, onde foi declarado estado de calamidade devido à grande concentração de estrangeiros.\nDessa forma, fica claro que o governo brasileiro deve assumir uma posição de cautela em relação à entrada de imigrantes no país, sejam vindos de países vizinhos ou de outras regiões como o norte do continente Africano. 
Uma vez que o Brasil enfrenta uma crise sem precedentes, que reduziu a arrecadação dos governos e, consequentemente, prejudicou os serviços públicos, permitir a entrada de imigrantes pode significar colocar essas pessoas sob o risco de enfrentarem condições desumanas de vida aqui também.", "label": 4, "prediction": 5, "grade_index": 1, "reference": ""} {"id": "3.html", "id_prompt": "qualificacao-e-o-futuro-do-emprego", "essay_text": "O mercado de trabalho está sofrendo mudanças, e apesar de investimento em educação, não estão sendo elaboradas estratégias pra para adaptar profissionais nesse novo cenário. Os jovens quase não tem noções sobre o novo mercado de trabalho voltado para setores tecnológicos deixando muitos a margem de uma boa qualidade de vida.\nTal fato causa também decadência no índice de desenvolvimento dos países, como o Brasil que é tecnologicamente atrasado. Devido à extinção de profissões na troca da mão de obra humana pela máquina, é preciso buscar capacitação nos empregos que estão surgindo.\nNo Brasil são ensinadas nas escolas apenas as disciplinas essenciais de forma arcaica, sem esclarecimento sobre o perfil profissional atravésde professores pouco competentes. Isso acontece principalmente nas regiões mais carentes, o que leva os indivíduos entre outros fatores, não obterem boa formação escolar, recorrerem a sub empregos e não buscar qualificação.\nÉ necessário que os países, principalmente os menos desenvolvidos elaborem projetos de reestruturação em seus sistemas de educação, desenvolvendo ideias mutuamente entre seus aliados e associações de nações como o BRICS, investindo em pesquisa e intercâmbio de conhecimento para capacitar os jovens nas vagas mais demandadas, diminuindo o número de desempregados efetivamente.", "label": 3, "prediction": 3, "grade_index": 1, "reference": ""} {"id": "4.html", "id_prompt": "o-brasil-e-os-imigrantes-no-mundo-contemporaneo", "essay_text": "A população do nosso país foi formada, em grande parte, por imigrantes portugueses, africanos (foram trazidos à força em navios negreiros), italianos, japoneses entre outros. Cada um fez parte de um período histórico dessa nação, com contribuições para a estrutura econômica, política e agrícola. Na contemporaneidade o Brasil continua a receber essas pessoas que vem de guerras, crises econômicas, perseguições religiosas e políticas e tantos outros motivos.\nNos últimos anos, migraram os haitianos, bolivianos, coreanos, angolanos, sírios e recentemente em 2018, os venezuelanos, que estão fugindo da crise em sua pátria.\nA Declaração dos Direitos Humanos garante o direito à vida, à liberdade, ao trabalho e a educação, sem discriminação, a todo ser humano. A posição do Brasil deve ser a mesma do século XIX e XX, quando recebeu diversos imigrantes e agora na contemporaneidade, a de continuar aceitando a migração dessas pessoas, pois estará cumprindo os direitos humanos, que são inerente a todos independente da nacionalidade, idioma, etnia ou qualquer outra condição.\nEles migram carregados de necessidades básicas e elas devem ser atendidas com prioridade, como a documentação, moradia, saúde, educação, higiene, alimentação e renda. Mesmo o nosso país sendo subdesenvolvido, não pode deixar de garantir esse direito à vida aos imigrantes. \nEm suma, há muitas ONGS e instituições religiosas que ajudam essas pessoas. 
O governo pode subsidiar esses lugares com o recrutamento de voluntários da área da saúde, educação (com foco no aprendizado da língua portuguesa) e orientar sobre a procura por trabalho e abrigos para os imigrantes.\nÉ preciso, também, ter um olhar para os filhos deles, que estão em período escolar e orientar as escolas a desenvolverem projetos com os alunos sobre a xenofobia, o bullying e a discriminação; procurar trazer a cultura dessas crianças e adolescentes ao conhecimento dos discentes brasileiros, para que se desenvolva empatia e respeito à diversidade cultural.", "label": 5, "prediction": 3, "grade_index": 1, "reference": ""} diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/run_inference_experiment.log b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/run_inference_experiment.log index 6a6ec09e03c7feaeeb813ba009e06efaaa8c0307..659a2bc50d40f068a5eee933097b30427a306013 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/run_inference_experiment.log +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C2-encoder_classification-C2/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 22:33:08,903][__main__][INFO] - Starting inference experiment -[2025-05-25 22:33:08,904][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 17:37:11,852][__main__][INFO] - Starting inference experiment +[2025-06-28 17:37:11,853][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,6 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -32,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 22:33:08,906][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 22:33:12,645][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:33:12,646][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:37:11,866][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 17:37:16,964][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:37:16,965][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -59,20 +67,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:33:12,820][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt -[2025-05-25 22:33:12,820][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None -[2025-05-25 22:33:12,821][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from 
cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json -[2025-05-25 22:33:12,821][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json -[2025-05-25 22:33:12,821][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json -[2025-05-25 22:33:12,821][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 22:33:12,821][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:33:12,822][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt +[2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None +[2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json +[2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json +[2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json +[2025-06-28 17:37:16,966][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 17:37:16,966][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:37:16,967][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -97,14 +105,14 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:33:12,847][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:33:12,847][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:37:16,994][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 
17:37:16,994][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -129,16 +137,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:33:12,859][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 22:33:12,923][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C2 -[2025-05-25 22:33:13,456][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C2/snapshots/3afae7b80c36bf0042b19778620a0ad1135b7135/config.json -[2025-05-25 22:33:13,457][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:37:17,008][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 17:37:17,060][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C2 +[2025-06-28 17:37:17,290][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C2/snapshots/3afae7b80c36bf0042b19778620a0ad1135b7135/config.json +[2025-06-28 17:37:17,293][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -181,27 +189,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:33:21,029][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C2/snapshots/3afae7b80c36bf0042b19778620a0ad1135b7135/model.safetensors -[2025-05-25 22:33:21,071][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 17:37:17,352][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C2/snapshots/3afae7b80c36bf0042b19778620a0ad1135b7135/model.safetensors +[2025-06-28 17:37:17,353][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 17:37:17,353][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 17:37:17,545][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 22:33:21,071][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C2. +[2025-06-28 17:37:17,545][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C2. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. 
-[2025-05-25 22:33:21,114][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 22:33:21,150][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). -[2025-05-25 22:33:21,370][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 22:33:21,371][__main__][INFO] - Running inference on test dataset -[2025-05-25 22:33:21,372][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id, essay_year, essay_text, supporting_text, prompt, id_prompt, reference, grades. If id, essay_year, essay_text, supporting_text, prompt, id_prompt, reference, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 22:33:21,376][transformers.trainer][INFO] - +[2025-06-28 17:37:17,551][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 17:37:17,563][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 17:37:17,567][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. +[2025-06-28 17:37:17,583][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 17:37:21,074][__main__][INFO] - Running inference on test dataset +[2025-06-28 17:37:21,075][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, essay_text, id, prompt, id_prompt, supporting_text, essay_year, reference. If grades, essay_text, id, prompt, id_prompt, supporting_text, essay_year, reference are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. 
+[2025-06-28 17:37:21,079][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 22:33:21,376][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 22:33:21,376][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 22:33:21,893][transformers][INFO] - {'accuracy': 0.3695652173913043, 'RMSE': 55.74268002896518, 'QWK': 0.41025641025641035, 'HDIV': 0.06521739130434778, 'Macro_F1': 0.27254317053298555, 'Micro_F1': 0.3695652173913043, 'Weighted_F1': 0.37216098145030935, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(13), 'TN_1': np.int64(90), 'FP_1': np.int64(13), 'FN_1': np.int64(22), 'TP_2': np.int64(3), 'TN_2': np.int64(112), 'FP_2': np.int64(21), 'FN_2': np.int64(2), 'TP_3': np.int64(25), 'TN_3': np.int64(54), 'FP_3': np.int64(33), 'FN_3': np.int64(26), 'TP_4': np.int64(4), 'TN_4': np.int64(100), 'FP_4': np.int64(12), 'FN_4': np.int64(22), 'TP_5': np.int64(6), 'TN_5': np.int64(110), 'FP_5': np.int64(8), 'FN_5': np.int64(14)} -[2025-05-25 22:33:21,904][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C2-encoder_classification-C2_inference_results.jsonl -[2025-05-25 22:33:21,909][__main__][INFO] - Inference results: {'accuracy': 0.3695652173913043, 'RMSE': 55.74268002896518, 'QWK': 0.41025641025641035, 'HDIV': 0.06521739130434778, 'Macro_F1': 0.27254317053298555, 'Micro_F1': 0.3695652173913043, 'Weighted_F1': 0.37216098145030935, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(13), 'TN_1': np.int64(90), 'FP_1': np.int64(13), 'FN_1': np.int64(22), 'TP_2': np.int64(3), 'TN_2': np.int64(112), 'FP_2': np.int64(21), 'FN_2': np.int64(2), 'TP_3': np.int64(25), 'TN_3': np.int64(54), 'FP_3': np.int64(33), 'FN_3': np.int64(26), 'TP_4': np.int64(4), 'TN_4': np.int64(100), 'FP_4': np.int64(12), 'FN_4': np.int64(22), 'TP_5': np.int64(6), 'TN_5': np.int64(110), 'FP_5': np.int64(8), 'FN_5': np.int64(14)} -[2025-05-25 22:33:21,909][__main__][INFO] - Inference experiment completed +[2025-06-28 17:37:21,079][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 17:37:21,079][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 17:37:24,400][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C2-encoder_classification-C2_inference_results.jsonl +[2025-06-28 17:37:24,403][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 17:38:13,623][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 17:38:13,623][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 17:38:13,623][__main__][INFO] - QWK: 0.4182 [0.2776, 0.5466] +[2025-06-28 17:38:13,623][__main__][INFO] - Macro_F1: 0.2962 [0.2154, 0.3977] +[2025-06-28 17:38:13,624][__main__][INFO] - Weighted_F1: 0.3818 [0.2993, 0.4641] +[2025-06-28 17:38:13,624][__main__][INFO] - Inference results: {'accuracy': 0.37681159420289856, 'RMSE': 55.32512598464997, 'QWK': 0.4220445459737294, 'HDIV': 0.06521739130434778, 'Macro_F1': 0.2801049472150572, 'Micro_F1': 0.37681159420289856, 'Weighted_F1': 0.38226236003582026, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(13), 'TN_1': np.int64(90), 'FP_1': np.int64(13), 'FN_1': np.int64(22), 'TP_2': np.int64(3), 'TN_2': np.int64(112), 'FP_2': np.int64(21), 'FN_2': np.int64(2), 'TP_3': np.int64(25), 'TN_3': np.int64(56), 'FP_3': np.int64(31), 'FN_3': np.int64(26), 
'TP_4': np.int64(5), 'TN_4': np.int64(99), 'FP_4': np.int64(13), 'FN_4': np.int64(21), 'TP_5': np.int64(6), 'TN_5': np.int64(110), 'FP_5': np.int64(8), 'FN_5': np.int64(14)} +[2025-06-28 17:38:13,624][__main__][INFO] - Inference experiment completed diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/config.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/config.yaml index 97bfbc141bf304cc44c978a3a2464b0408261337..101757e6a115292332d04ed1e0d29fcf5b3e401a 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/config.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/config.yaml @@ -8,6 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/hydra.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/hydra.yaml index e1a1423250384109d4f53882d57fd2fa7bf2e9c9..3ac50fb457ddae85ac9d9af733f3a9f661c3a76b 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/hydra.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=base_models/C3 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=base_models/C3 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/22-35-19 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-38-23 choices: experiments: base_models/C3 hydra/env: default diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/overrides.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..4100f0c2b53b8a1825f5882dd49554426e481e58 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/overrides.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=base_models/C3 diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/bootstrap_confidence_intervals.csv b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..69845660a2fac5a083b57aa9471f44ee2543cb35 --- /dev/null +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_bertimbau_base-C3-encoder_classification-C3,2025-06-28 17:38:23,0.34425463449799465,0.20848447033589465,0.47933895194622367,0.270854481610329,0.27540748660610137,0.20263838658028993,0.36522069296926984,0.1625823063889799,0.3356541043911277,0.25734749784644845,0.4165551974170723,0.15920769957062386 diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/evaluation_results.csv b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/evaluation_results.csv index 459a6ba91a29e9875664a4b52740ebb0fb63edec..ba3a9b74f4e0fd42d5f564d343d69fe4dc592b7b 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/evaluation_results.csv +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.37681159420289856,52.64042641120627,0.3452054794520547,0.09420289855072461,0.25943499029705924,0.37681159420289856,0.33380294701134283,0,137,0,1,0,109,0,29,13,101,19,5,20,71,22,25,17,67,33,21,2,119,12,5,2025-05-25 22:35:19,jbcs2025_bertimbau_base-C3-encoder_classification-C3 +0.37681159420289856,52.64042641120627,0.3452054794520547,0.09420289855072461,0.25943499029705924,0.37681159420289856,0.33380294701134283,0,137,0,1,0,109,0,29,13,101,19,5,20,71,22,25,17,67,33,21,2,119,12,5,2025-06-28 17:38:23,jbcs2025_bertimbau_base-C3-encoder_classification-C3 diff --git 
a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/run_inference_experiment.log b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/run_inference_experiment.log index f7c27b3e2feb01234283af1fa43328ca76d89a95..6dbb0b08c3dabe83c5bb2bd6c41bf364d44343fa 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/run_inference_experiment.log +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C3-encoder_classification-C3/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 22:35:19,693][__main__][INFO] - Starting inference experiment -[2025-05-25 22:35:19,694][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 17:38:23,230][__main__][INFO] - Starting inference experiment +[2025-06-28 17:38:23,232][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,6 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -32,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 22:35:19,696][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 22:35:23,654][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:35:23,656][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:38:23,244][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 17:38:28,780][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:38:28,784][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -59,20 +67,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:35:23,834][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt -[2025-05-25 22:35:23,834][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None -[2025-05-25 22:35:23,834][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json -[2025-05-25 22:35:23,834][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json -[2025-05-25 22:35:23,834][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json -[2025-05-25 
22:35:23,834][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 22:35:23,834][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:35:23,835][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:38:28,787][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt +[2025-06-28 17:38:28,787][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None +[2025-06-28 17:38:28,787][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json +[2025-06-28 17:38:28,787][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json +[2025-06-28 17:38:28,787][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json +[2025-06-28 17:38:28,788][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 17:38:28,789][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:38:28,791][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -97,14 +105,14 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:35:23,860][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:35:23,860][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:38:28,843][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:38:28,844][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -129,16 +137,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:35:23,873][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 22:35:24,069][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C3 -[2025-05-25 
22:35:24,602][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C3/snapshots/bad03f1db697f1fb612e4d74bb55d6f0e8cd7a16/config.json -[2025-05-25 22:35:24,603][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:38:28,858][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 17:38:28,913][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C3 +[2025-06-28 17:38:29,119][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C3/snapshots/bad03f1db697f1fb612e4d74bb55d6f0e8cd7a16/config.json +[2025-06-28 17:38:29,122][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -181,27 +189,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:36:01,843][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C3/snapshots/bad03f1db697f1fb612e4d74bb55d6f0e8cd7a16/model.safetensors -[2025-05-25 22:36:01,885][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 17:38:29,182][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C3/snapshots/bad03f1db697f1fb612e4d74bb55d6f0e8cd7a16/model.safetensors +[2025-06-28 17:38:29,183][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 17:38:29,183][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 17:38:29,376][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 22:36:01,885][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C3. +[2025-06-28 17:38:29,376][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C3. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 22:36:01,927][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 22:36:01,962][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). 
-[2025-05-25 22:36:02,188][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 22:36:02,189][__main__][INFO] - Running inference on test dataset -[2025-05-25 22:36:02,190][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: supporting_text, reference, essay_year, id_prompt, id, grades, essay_text, prompt. If supporting_text, reference, essay_year, id_prompt, id, grades, essay_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 22:36:02,194][transformers.trainer][INFO] - +[2025-06-28 17:38:29,382][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 17:38:29,395][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 17:38:29,398][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. +[2025-06-28 17:38:29,414][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 17:38:32,887][__main__][INFO] - Running inference on test dataset +[2025-06-28 17:38:32,890][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, prompt, id, id_prompt, supporting_text, essay_text, essay_year, reference. If grades, prompt, id, id_prompt, supporting_text, essay_text, essay_year, reference are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. 
+[2025-06-28 17:38:32,909][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 22:36:02,194][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 22:36:02,194][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 22:36:02,718][transformers][INFO] - {'accuracy': 0.37681159420289856, 'RMSE': 52.64042641120627, 'QWK': 0.3452054794520547, 'HDIV': 0.09420289855072461, 'Macro_F1': 0.25943499029705924, 'Micro_F1': 0.37681159420289856, 'Weighted_F1': 0.33380294701134283, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(109), 'FP_1': np.int64(0), 'FN_1': np.int64(29), 'TP_2': np.int64(13), 'TN_2': np.int64(101), 'FP_2': np.int64(19), 'FN_2': np.int64(5), 'TP_3': np.int64(20), 'TN_3': np.int64(71), 'FP_3': np.int64(22), 'FN_3': np.int64(25), 'TP_4': np.int64(17), 'TN_4': np.int64(67), 'FP_4': np.int64(33), 'FN_4': np.int64(21), 'TP_5': np.int64(2), 'TN_5': np.int64(119), 'FP_5': np.int64(12), 'FN_5': np.int64(5)} -[2025-05-25 22:36:02,729][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C3-encoder_classification-C3_inference_results.jsonl -[2025-05-25 22:36:02,734][__main__][INFO] - Inference results: {'accuracy': 0.37681159420289856, 'RMSE': 52.64042641120627, 'QWK': 0.3452054794520547, 'HDIV': 0.09420289855072461, 'Macro_F1': 0.25943499029705924, 'Micro_F1': 0.37681159420289856, 'Weighted_F1': 0.33380294701134283, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(109), 'FP_1': np.int64(0), 'FN_1': np.int64(29), 'TP_2': np.int64(13), 'TN_2': np.int64(101), 'FP_2': np.int64(19), 'FN_2': np.int64(5), 'TP_3': np.int64(20), 'TN_3': np.int64(71), 'FP_3': np.int64(22), 'FN_3': np.int64(25), 'TP_4': np.int64(17), 'TN_4': np.int64(67), 'FP_4': np.int64(33), 'FN_4': np.int64(21), 'TP_5': np.int64(2), 'TN_5': np.int64(119), 'FP_5': np.int64(12), 'FN_5': np.int64(5)} -[2025-05-25 22:36:02,734][__main__][INFO] - Inference experiment completed +[2025-06-28 17:38:32,909][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 17:38:32,910][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 17:38:36,240][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C3-encoder_classification-C3_inference_results.jsonl +[2025-06-28 17:38:36,244][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 17:39:26,821][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 17:39:26,821][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 17:39:26,821][__main__][INFO] - QWK: 0.3443 [0.2085, 0.4793] +[2025-06-28 17:39:26,821][__main__][INFO] - Macro_F1: 0.2754 [0.2026, 0.3652] +[2025-06-28 17:39:26,821][__main__][INFO] - Weighted_F1: 0.3357 [0.2573, 0.4166] +[2025-06-28 17:39:26,821][__main__][INFO] - Inference results: {'accuracy': 0.37681159420289856, 'RMSE': 52.64042641120627, 'QWK': 0.3452054794520547, 'HDIV': 0.09420289855072461, 'Macro_F1': 0.25943499029705924, 'Micro_F1': 0.37681159420289856, 'Weighted_F1': 0.33380294701134283, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(109), 'FP_1': np.int64(0), 'FN_1': np.int64(29), 'TP_2': np.int64(13), 'TN_2': np.int64(101), 'FP_2': np.int64(19), 'FN_2': np.int64(5), 'TP_3': np.int64(20), 'TN_3': np.int64(71), 'FP_3': np.int64(22), 'FN_3': np.int64(25), 
'TP_4': np.int64(17), 'TN_4': np.int64(67), 'FP_4': np.int64(33), 'FN_4': np.int64(21), 'TP_5': np.int64(2), 'TN_5': np.int64(119), 'FP_5': np.int64(12), 'FN_5': np.int64(5)} +[2025-06-28 17:39:26,821][__main__][INFO] - Inference experiment completed diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/config.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/config.yaml index 63e98af910400baf6d77815a71035b515bbbb0e6..37eda3d253f5e94d753e86d8046c0e7b01e66c2c 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/config.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/config.yaml @@ -8,6 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/hydra.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/hydra.yaml index 79e8269f72143cc1d8903d5ecb8356cf1a12b2c9..203b05091682bab085d517899cca40d01d7dc894 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/hydra.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=base_models/C4 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=base_models/C4 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/22-37-09 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-39-36 choices: experiments: base_models/C4 hydra/env: default diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/overrides.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..06dbbbc94e710165d4219bb4744368e3a67e991d 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/overrides.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=base_models/C4 diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/bootstrap_confidence_intervals.csv b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..84f5bdd57139dcde4696a2d22f8b254eb2c07aeb --- /dev/null +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_bertimbau_base-C4-encoder_classification-C4,2025-06-28 17:39:36,0.623338029229533,0.5110704244499952,0.7250524714839471,0.21398204703395196,0.41365346789602125,0.2906398052196123,0.5906355015808844,0.2999956963612721,0.6556936287214997,0.5748725140399749,0.7321161735801723,0.15724365954019748 diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/evaluation_results.csv b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/evaluation_results.csv index d161866c1e26be679449f7ed9322b4119df62d83..00ec425cc69937f8456212fc4da584d3f2d5bbb2 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/evaluation_results.csv +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.644927536231884,26.37521893583148,0.6258134490238612,0.007246376811594235,0.36114488348530904,0.644927536231884,0.6545879036165807,0,137,0,1,0,137,0,1,5,118,11,4,51,49,13,25,30,74,18,16,3,126,7,2,2025-05-25 22:37:09,jbcs2025_bertimbau_base-C4-encoder_classification-C4 +0.644927536231884,26.37521893583148,0.6258134490238612,0.007246376811594235,0.36114488348530904,0.644927536231884,0.6545879036165807,0,137,0,1,0,137,0,1,5,118,11,4,51,49,13,25,30,74,18,16,3,126,7,2,2025-06-28 17:39:36,jbcs2025_bertimbau_base-C4-encoder_classification-C4 diff --git 
a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/run_inference_experiment.log b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/run_inference_experiment.log index ed7acc92d2e095d3635ba62e04594ed33e0578ec..c60857fa65219346e89fec8300f836e59133e1ce 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/run_inference_experiment.log +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C4-encoder_classification-C4/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 22:37:09,856][__main__][INFO] - Starting inference experiment -[2025-05-25 22:37:09,858][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 17:39:36,464][__main__][INFO] - Starting inference experiment +[2025-06-28 17:39:36,466][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,6 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -32,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 22:37:09,859][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 22:37:13,670][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:37:13,671][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:39:36,479][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 17:39:41,572][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:39:41,573][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -59,20 +67,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:37:13,836][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt -[2025-05-25 22:37:13,836][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None -[2025-05-25 22:37:13,836][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json -[2025-05-25 22:37:13,836][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json -[2025-05-25 22:37:13,836][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json -[2025-05-25 
22:37:13,836][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 22:37:13,836][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:37:13,837][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt +[2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None +[2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json +[2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json +[2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json +[2025-06-28 17:39:41,573][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 17:39:41,574][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:39:41,574][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -97,14 +105,14 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:37:13,862][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:37:13,863][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:39:41,599][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:39:41,599][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -129,16 +137,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:37:13,875][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 22:37:13,941][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C4 -[2025-05-25 
22:37:14,097][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C4/snapshots/be129129fc134c0e782ae9f62b33da331367ab7b/config.json -[2025-05-25 22:37:14,098][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:39:41,613][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 17:39:41,666][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C4 +[2025-06-28 17:39:41,892][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C4/snapshots/be129129fc134c0e782ae9f62b33da331367ab7b/config.json +[2025-06-28 17:39:41,893][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -181,27 +189,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:37:17,022][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C4/snapshots/be129129fc134c0e782ae9f62b33da331367ab7b/model.safetensors -[2025-05-25 22:37:17,066][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 17:39:41,925][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C4/snapshots/be129129fc134c0e782ae9f62b33da331367ab7b/model.safetensors +[2025-06-28 17:39:41,926][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 17:39:41,926][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 17:39:42,130][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 22:37:17,066][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C4. +[2025-06-28 17:39:42,131][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C4. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 22:37:17,106][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 22:37:17,143][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). 
-[2025-05-25 22:37:17,367][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 22:37:17,368][__main__][INFO] - Running inference on test dataset -[2025-05-25 22:37:17,369][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, prompt, supporting_text, essay_year, id, reference, id_prompt, grades. If essay_text, prompt, supporting_text, essay_year, id, reference, id_prompt, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 22:37:17,372][transformers.trainer][INFO] - +[2025-06-28 17:39:42,136][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 17:39:42,149][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 17:39:42,152][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. +[2025-06-28 17:39:42,169][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 17:39:45,638][__main__][INFO] - Running inference on test dataset +[2025-06-28 17:39:45,640][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, supporting_text, prompt, essay_text, essay_year, id, id_prompt, reference. If grades, supporting_text, prompt, essay_text, essay_year, id, id_prompt, reference are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. 
+[2025-06-28 17:39:45,644][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 22:37:17,372][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 22:37:17,372][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 22:37:17,906][transformers][INFO] - {'accuracy': 0.644927536231884, 'RMSE': 26.37521893583148, 'QWK': 0.6258134490238612, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.36114488348530904, 'Micro_F1': 0.644927536231884, 'Weighted_F1': 0.6545879036165807, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(137), 'FP_1': np.int64(0), 'FN_1': np.int64(1), 'TP_2': np.int64(5), 'TN_2': np.int64(118), 'FP_2': np.int64(11), 'FN_2': np.int64(4), 'TP_3': np.int64(51), 'TN_3': np.int64(49), 'FP_3': np.int64(13), 'FN_3': np.int64(25), 'TP_4': np.int64(30), 'TN_4': np.int64(74), 'FP_4': np.int64(18), 'FN_4': np.int64(16), 'TP_5': np.int64(3), 'TN_5': np.int64(126), 'FP_5': np.int64(7), 'FN_5': np.int64(2)} -[2025-05-25 22:37:17,916][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C4-encoder_classification-C4_inference_results.jsonl -[2025-05-25 22:37:17,920][__main__][INFO] - Inference results: {'accuracy': 0.644927536231884, 'RMSE': 26.37521893583148, 'QWK': 0.6258134490238612, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.36114488348530904, 'Micro_F1': 0.644927536231884, 'Weighted_F1': 0.6545879036165807, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(137), 'FP_1': np.int64(0), 'FN_1': np.int64(1), 'TP_2': np.int64(5), 'TN_2': np.int64(118), 'FP_2': np.int64(11), 'FN_2': np.int64(4), 'TP_3': np.int64(51), 'TN_3': np.int64(49), 'FP_3': np.int64(13), 'FN_3': np.int64(25), 'TP_4': np.int64(30), 'TN_4': np.int64(74), 'FP_4': np.int64(18), 'FN_4': np.int64(16), 'TP_5': np.int64(3), 'TN_5': np.int64(126), 'FP_5': np.int64(7), 'FN_5': np.int64(2)} -[2025-05-25 22:37:17,921][__main__][INFO] - Inference experiment completed +[2025-06-28 17:39:45,644][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 17:39:45,644][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 17:39:48,945][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C4-encoder_classification-C4_inference_results.jsonl +[2025-06-28 17:39:48,948][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 17:40:38,116][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 17:40:38,116][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 17:40:38,116][__main__][INFO] - QWK: 0.6233 [0.5111, 0.7251] +[2025-06-28 17:40:38,116][__main__][INFO] - Macro_F1: 0.4137 [0.2906, 0.5906] +[2025-06-28 17:40:38,116][__main__][INFO] - Weighted_F1: 0.6557 [0.5749, 0.7321] +[2025-06-28 17:40:38,116][__main__][INFO] - Inference results: {'accuracy': 0.644927536231884, 'RMSE': 26.37521893583148, 'QWK': 0.6258134490238612, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.36114488348530904, 'Micro_F1': 0.644927536231884, 'Weighted_F1': 0.6545879036165807, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(137), 'FP_1': np.int64(0), 'FN_1': np.int64(1), 'TP_2': np.int64(5), 'TN_2': np.int64(118), 'FP_2': np.int64(11), 'FN_2': np.int64(4), 'TP_3': np.int64(51), 'TN_3': np.int64(49), 'FP_3': np.int64(13), 'FN_3': np.int64(25), 'TP_4': np.int64(30), 
'TN_4': np.int64(74), 'FP_4': np.int64(18), 'FN_4': np.int64(16), 'TP_5': np.int64(3), 'TN_5': np.int64(126), 'FP_5': np.int64(7), 'FN_5': np.int64(2)} +[2025-06-28 17:40:38,116][__main__][INFO] - Inference experiment completed diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/config.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/config.yaml index c3a3ea434b9fe9a7b6a67e3fa8783c7d2ca64953..03bc5d3964afae32d0dfd3014d5c903d7236053c 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/config.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/config.yaml @@ -8,6 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/hydra.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/hydra.yaml index 2a683cfee5e429e7fadad3380bcdc59f412b5cdb..8228640a28f35c0850b6ff27335c3e31b62529d8 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/hydra.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=base_models/C5 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=base_models/C5 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/22-37-54 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-40-47 choices: experiments: base_models/C5 hydra/env: default diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/overrides.yaml b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..40b7755772dc7157c21a647ba6422f48f0f380d1 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/overrides.yaml +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=base_models/C5 diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/bootstrap_confidence_intervals.csv b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..04833e17ff7008da491a9e558f1e41ca8e4a4143 --- /dev/null +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_bertimbau_base-C5-encoder_classification-C5,2025-06-28 17:40:47,0.47349799901126716,0.3401973117894254,0.5947975929869902,0.2546002811975648,0.20469588256838514,0.14697576658446224,0.27274642041824704,0.1257706538337848,0.25750931482031114,0.18034272476682853,0.33952288243091566,0.15918015766408714 diff --git a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/evaluation_results.csv b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/evaluation_results.csv index ddbb6e7e659b68f0eac33243b1be3f2889fa841e..3514a1ec60693c2b31323a479e92c0e657280904 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/evaluation_results.csv +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.3188405797101449,61.2904702146299,0.476219483623073,0.13043478260869568,0.2055897809038726,0.3188405797101449,0.25808413038205613,3,113,3,19,9,71,35,23,3,103,11,21,1,108,5,24,28,66,40,4,0,135,0,3,2025-05-25 22:37:54,jbcs2025_bertimbau_base-C5-encoder_classification-C5 +0.3188405797101449,61.2904702146299,0.476219483623073,0.13043478260869568,0.2055897809038726,0.3188405797101449,0.25808413038205613,3,113,3,19,9,71,35,23,3,103,11,21,1,108,5,24,28,66,40,4,0,135,0,3,2025-06-28 17:40:47,jbcs2025_bertimbau_base-C5-encoder_classification-C5 diff --git 
a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/run_inference_experiment.log b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/run_inference_experiment.log index acb2b0e508eb8bbe8ad18cd1b0881e62d99eced4..d38372f0c9b0f71f93ffb58ad201f479a5b3c3ce 100644 --- a/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/run_inference_experiment.log +++ b/runs/base_models/bertimbau/jbcs2025_bertimbau_base-C5-encoder_classification-C5/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 22:37:54,188][__main__][INFO] - Starting inference experiment -[2025-05-25 22:37:54,189][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 17:40:47,727][__main__][INFO] - Starting inference experiment +[2025-06-28 17:40:47,729][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,6 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -32,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 22:37:54,191][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 22:37:58,343][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:37:58,345][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:40:47,742][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 17:40:53,158][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:40:53,162][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -59,20 +67,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:37:58,562][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt -[2025-05-25 22:37:58,563][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None -[2025-05-25 22:37:58,563][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json -[2025-05-25 22:37:58,563][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json -[2025-05-25 22:37:58,563][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json -[2025-05-25 
22:37:58,563][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 22:37:58,563][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:37:58,564][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:40:53,164][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/vocab.txt +[2025-06-28 17:40:53,164][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None +[2025-06-28 17:40:53,165][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/added_tokens.json +[2025-06-28 17:40:53,165][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/special_tokens_map.json +[2025-06-28 17:40:53,165][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/tokenizer_config.json +[2025-06-28 17:40:53,165][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 17:40:53,166][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:40:53,169][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -97,14 +105,14 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:37:58,590][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json -[2025-05-25 22:37:58,590][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:40:53,222][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-base-portuguese-cased/snapshots/94d69c95f98f7d5b2a8700c420230ae10def0baa/config.json +[2025-06-28 17:40:53,222][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -129,16 +137,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:37:58,603][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 22:37:58,793][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C5 -[2025-05-25 
22:37:59,487][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C5/snapshots/fb36ac8b730b27c491174f81a69d6da1c0962026/config.json -[2025-05-25 22:37:59,488][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:40:53,236][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 17:40:53,288][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau_base-C5 +[2025-06-28 17:40:53,627][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C5/snapshots/fb36ac8b730b27c491174f81a69d6da1c0962026/config.json +[2025-06-28 17:40:53,630][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -181,27 +189,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:38:06,663][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C5/snapshots/fb36ac8b730b27c491174f81a69d6da1c0962026/model.safetensors -[2025-05-25 22:38:06,704][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 17:40:53,693][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau_base-C5/snapshots/fb36ac8b730b27c491174f81a69d6da1c0962026/model.safetensors +[2025-06-28 17:40:53,693][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 17:40:53,693][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 17:40:53,888][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 22:38:06,704][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C5. +[2025-06-28 17:40:53,888][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau_base-C5. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 22:38:06,746][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 22:38:06,782][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). 
-[2025-05-25 22:38:07,003][transformers.trainer][INFO] - Using auto half precision backend
-[2025-05-25 22:38:07,004][__main__][INFO] - Running inference on test dataset
-[2025-05-25 22:38:07,005][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, reference, supporting_text, grades, prompt, essay_year, id, id_prompt. If essay_text, reference, supporting_text, grades, prompt, essay_year, id, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
-[2025-05-25 22:38:07,009][transformers.trainer][INFO] - 
+[2025-06-28 17:40:53,894][transformers.training_args][INFO] - PyTorch: setting up devices
+[2025-06-28 17:40:53,907][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-).
+[2025-06-28 17:40:53,910][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching.
+[2025-06-28 17:40:53,927][transformers.trainer][INFO] - Using auto half precision backend
+[2025-06-28 17:40:57,628][__main__][INFO] - Running inference on test dataset
+[2025-06-28 17:40:57,632][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: supporting_text, id, essay_text, prompt, reference, id_prompt, grades, essay_year. If supporting_text, id, essay_text, prompt, reference, id_prompt, grades, essay_year are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message.
+[2025-06-28 17:40:57,651][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 22:38:07,009][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 22:38:07,009][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 22:38:07,534][transformers][INFO] - {'accuracy': 0.3188405797101449, 'RMSE': 61.2904702146299, 'QWK': 0.476219483623073, 'HDIV': 0.13043478260869568, 'Macro_F1': 0.2055897809038726, 'Micro_F1': 0.3188405797101449, 'Weighted_F1': 0.25808413038205613, 'TP_0': np.int64(3), 'TN_0': np.int64(113), 'FP_0': np.int64(3), 'FN_0': np.int64(19), 'TP_1': np.int64(9), 'TN_1': np.int64(71), 'FP_1': np.int64(35), 'FN_1': np.int64(23), 'TP_2': np.int64(3), 'TN_2': np.int64(103), 'FP_2': np.int64(11), 'FN_2': np.int64(21), 'TP_3': np.int64(1), 'TN_3': np.int64(108), 'FP_3': np.int64(5), 'FN_3': np.int64(24), 'TP_4': np.int64(28), 'TN_4': np.int64(66), 'FP_4': np.int64(40), 'FN_4': np.int64(4), 'TP_5': np.int64(0), 'TN_5': np.int64(135), 'FP_5': np.int64(0), 'FN_5': np.int64(3)} -[2025-05-25 22:38:07,544][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C5-encoder_classification-C5_inference_results.jsonl -[2025-05-25 22:38:07,549][__main__][INFO] - Inference results: {'accuracy': 0.3188405797101449, 'RMSE': 61.2904702146299, 'QWK': 0.476219483623073, 'HDIV': 0.13043478260869568, 'Macro_F1': 0.2055897809038726, 'Micro_F1': 0.3188405797101449, 'Weighted_F1': 0.25808413038205613, 'TP_0': np.int64(3), 'TN_0': np.int64(113), 'FP_0': np.int64(3), 'FN_0': np.int64(19), 'TP_1': np.int64(9), 'TN_1': np.int64(71), 'FP_1': np.int64(35), 'FN_1': np.int64(23), 'TP_2': np.int64(3), 'TN_2': np.int64(103), 'FP_2': np.int64(11), 'FN_2': np.int64(21), 'TP_3': np.int64(1), 'TN_3': np.int64(108), 'FP_3': np.int64(5), 'FN_3': np.int64(24), 'TP_4': np.int64(28), 'TN_4': np.int64(66), 'FP_4': np.int64(40), 'FN_4': np.int64(4), 'TP_5': np.int64(0), 'TN_5': np.int64(135), 'FP_5': np.int64(0), 'FN_5': np.int64(3)} -[2025-05-25 22:38:07,549][__main__][INFO] - Inference experiment completed +[2025-06-28 17:40:57,651][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 17:40:57,651][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 17:41:00,994][__main__][INFO] - Inference results saved to jbcs2025_bertimbau_base-C5-encoder_classification-C5_inference_results.jsonl +[2025-06-28 17:41:00,998][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 17:41:51,297][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 17:41:51,297][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 17:41:51,297][__main__][INFO] - QWK: 0.4735 [0.3402, 0.5948] +[2025-06-28 17:41:51,297][__main__][INFO] - Macro_F1: 0.2047 [0.1470, 0.2727] +[2025-06-28 17:41:51,297][__main__][INFO] - Weighted_F1: 0.2575 [0.1803, 0.3395] +[2025-06-28 17:41:51,297][__main__][INFO] - Inference results: {'accuracy': 0.3188405797101449, 'RMSE': 61.2904702146299, 'QWK': 0.476219483623073, 'HDIV': 0.13043478260869568, 'Macro_F1': 0.2055897809038726, 'Micro_F1': 0.3188405797101449, 'Weighted_F1': 0.25808413038205613, 'TP_0': np.int64(3), 'TN_0': np.int64(113), 'FP_0': np.int64(3), 'FN_0': np.int64(19), 'TP_1': np.int64(9), 'TN_1': np.int64(71), 'FP_1': np.int64(35), 'FN_1': np.int64(23), 'TP_2': np.int64(3), 'TN_2': np.int64(103), 'FP_2': np.int64(11), 'FN_2': np.int64(21), 'TP_3': np.int64(1), 'TN_3': np.int64(108), 'FP_3': np.int64(5), 'FN_3': np.int64(24), 'TP_4': 
np.int64(28), 'TN_4': np.int64(66), 'FP_4': np.int64(40), 'FN_4': np.int64(4), 'TP_5': np.int64(0), 'TN_5': np.int64(135), 'FP_5': np.int64(0), 'FN_5': np.int64(3)} +[2025-06-28 17:41:51,297][__main__][INFO] - Inference experiment completed diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/config.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/config.yaml index 43e4ada1a8bfb1a05bd756ac8aa93dbf2d919a56..4b743865afc268819558988be7fefe6c34b49fef 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/config.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/config.yaml @@ -8,9 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -18,9 +23,9 @@ experiments: name: kamel-usp/jbcs2025_mbert_base-C1 type: encoder_classification num_labels: 6 - output_dir: ./results/mbert_base/C1 - logging_dir: ./logs/mbert_base/C1 - best_model_dir: ./results/mbert_base/C1/best_model + output_dir: ./results/ + logging_dir: ./logs/ + best_model_dir: ./results/best_model tokenizer: name: google-bert/bert-base-multilingual-cased dataset: diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/hydra.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/hydra.yaml index f3fbe2e23f8936961f2a848ee954581933e740d6..305dea0633683abfc682521a368c2c1bd64e4e04 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/hydra.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=base_models/C1 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=base_models/C1 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/21-44-30 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-45-08 choices: experiments: base_models/C1 hydra/env: default diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/overrides.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..78e47947ea11210c8750c6189f7b3c700482a505 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/overrides.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=base_models/C1 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/bootstrap_confidence_intervals.csv b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..ad396e8ad8264160362397d8dab147855245f4b7 --- /dev/null +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_mbert_base-C1-encoder_classification-C1,2025-06-28 17:45:08,0.4469509804733242,0.32734526290173793,0.5646932058882819,0.23734794298654394,0.3491979017242674,0.2550171780040366,0.4642559466461772,0.20923876864214058,0.5173838543055693,0.43296245937188504,0.6034695806943422,0.17050712132245716 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/evaluation_results.csv b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/evaluation_results.csv index 6c8b5fc4d6e8fccc20bc48955de6ade61ca05b33..805036f3fd799e3b314b93a8b5bc6ab1b888e62c 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/evaluation_results.csv +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.5362318840579711,30.072376462244492,0.4505920783993467,0.007246376811594235,0.3244639912039582,0.5362318840579711,0.518137852459147,0,137,0,1,0,138,0,0,5,123,5,5,42,44,28,24,27,58,29,24,0,126,2,10,2025-05-25 21:44:30,jbcs2025_mbert_base-C1-encoder_classification-C1 +0.5362318840579711,30.072376462244492,0.4505920783993467,0.007246376811594235,0.3244639912039582,0.5362318840579711,0.518137852459147,0,137,0,1,0,138,0,0,5,123,5,5,42,44,28,24,27,58,29,24,0,126,2,10,2025-06-28 17:45:08,jbcs2025_mbert_base-C1-encoder_classification-C1 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/run_inference_experiment.log 
b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/run_inference_experiment.log index c34f7c910ecf689f055cc63828c1067e5ec57459..e4dc8a3d37a32c8cb4a947aba0fda161f7c8a554 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/run_inference_experiment.log +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C1-encoder_classification-C1/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 21:44:30,752][__main__][INFO] - Starting inference experiment -[2025-05-25 21:44:30,753][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 17:45:08,568][__main__][INFO] - Starting inference experiment +[2025-06-28 17:45:08,570][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,9 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -19,9 +24,9 @@ experiments: name: kamel-usp/jbcs2025_mbert_base-C1 type: encoder_classification num_labels: 6 - output_dir: ./results/mbert_base/C1 - logging_dir: ./logs/mbert_base/C1 - best_model_dir: ./results/mbert_base/C1/best_model + output_dir: ./results/ + logging_dir: ./logs/ + best_model_dir: ./results/best_model tokenizer: name: google-bert/bert-base-multilingual-cased dataset: @@ -35,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 21:44:30,754][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 21:44:35,108][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json -[2025-05-25 21:44:35,109][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:45:08,584][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 17:45:15,114][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json +[2025-06-28 17:45:15,119][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -61,20 +66,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:44:35,297][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/vocab.txt -[2025-05-25 21:44:35,297][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer.json -[2025-05-25 21:44:35,297][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None -[2025-05-25 21:44:35,297][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None -[2025-05-25 21:44:35,297][transformers.tokenization_utils_base][INFO] - loading file 
tokenizer_config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer_config.json -[2025-05-25 21:44:35,297][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 21:44:35,297][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json -[2025-05-25 21:44:35,298][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:45:17,060][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/vocab.txt +[2025-06-28 17:45:17,061][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer.json +[2025-06-28 17:45:17,061][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None +[2025-06-28 17:45:17,062][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None +[2025-06-28 17:45:17,062][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer_config.json +[2025-06-28 17:45:17,062][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 17:45:17,063][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json +[2025-06-28 17:45:17,064][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -98,16 +103,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:44:35,466][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 21:44:35,502][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_mbert_base-C1 -[2025-05-25 21:44:35,699][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C1/snapshots/065d457ead3102211e83566d77783a845d0f3351/config.json -[2025-05-25 21:44:35,700][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:45:17,228][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 17:45:17,497][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_mbert_base-C1 +[2025-06-28 17:45:18,489][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C1/snapshots/065d457ead3102211e83566d77783a845d0f3351/config.json +[2025-06-28 17:45:18,490][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -149,27 +154,35 @@ experiments: 
"position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:44:35,839][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C1/snapshots/065d457ead3102211e83566d77783a845d0f3351/model.safetensors -[2025-05-25 21:44:35,881][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 17:46:21,534][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C1/snapshots/065d457ead3102211e83566d77783a845d0f3351/model.safetensors +[2025-06-28 17:46:21,536][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 17:46:21,536][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 17:46:21,795][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 21:44:35,881][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_mbert_base-C1. +[2025-06-28 17:46:21,795][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_mbert_base-C1. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 21:44:35,922][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 21:44:35,958][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). -[2025-05-25 21:44:36,200][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 21:44:36,200][__main__][INFO] - Running inference on test dataset -[2025-05-25 21:44:36,201][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_year, id_prompt, grades, essay_text, supporting_text, reference, id, prompt. If essay_year, id_prompt, grades, essay_text, supporting_text, reference, id, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 21:44:36,205][transformers.trainer][INFO] - +[2025-06-28 17:46:21,801][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 17:46:21,813][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 17:46:21,816][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. 
`is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. +[2025-06-28 17:46:21,833][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 17:46:25,527][__main__][INFO] - Running inference on test dataset +[2025-06-28 17:46:25,530][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id, reference, grades, essay_text, id_prompt, essay_year, supporting_text, prompt. If id, reference, grades, essay_text, id_prompt, essay_year, supporting_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. +[2025-06-28 17:46:25,550][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 21:44:36,205][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 21:44:36,205][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 21:44:36,725][transformers][INFO] - {'accuracy': 0.5362318840579711, 'RMSE': 30.072376462244492, 'QWK': 0.4505920783993467, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.3244639912039582, 'Micro_F1': 0.5362318840579711, 'Weighted_F1': 0.518137852459147, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(5), 'TN_2': np.int64(123), 'FP_2': np.int64(5), 'FN_2': np.int64(5), 'TP_3': np.int64(42), 'TN_3': np.int64(44), 'FP_3': np.int64(28), 'FN_3': np.int64(24), 'TP_4': np.int64(27), 'TN_4': np.int64(58), 'FP_4': np.int64(29), 'FN_4': np.int64(24), 'TP_5': np.int64(0), 'TN_5': np.int64(126), 'FP_5': np.int64(2), 'FN_5': np.int64(10)} -[2025-05-25 21:44:36,735][__main__][INFO] - Inference results saved to jbcs2025_mbert_base-C1-encoder_classification-C1_inference_results.jsonl -[2025-05-25 21:44:36,756][__main__][INFO] - Inference results: {'accuracy': 0.5362318840579711, 'RMSE': 30.072376462244492, 'QWK': 0.4505920783993467, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.3244639912039582, 'Micro_F1': 0.5362318840579711, 'Weighted_F1': 0.518137852459147, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(5), 'TN_2': np.int64(123), 'FP_2': np.int64(5), 'FN_2': np.int64(5), 'TP_3': np.int64(42), 'TN_3': np.int64(44), 'FP_3': np.int64(28), 'FN_3': np.int64(24), 'TP_4': np.int64(27), 'TN_4': np.int64(58), 'FP_4': np.int64(29), 'FN_4': np.int64(24), 'TP_5': np.int64(0), 'TN_5': np.int64(126), 'FP_5': np.int64(2), 'FN_5': np.int64(10)} -[2025-05-25 21:44:36,756][__main__][INFO] - Inference experiment completed +[2025-06-28 17:46:25,550][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 17:46:25,551][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 17:46:28,878][__main__][INFO] - Inference results saved to jbcs2025_mbert_base-C1-encoder_classification-C1_inference_results.jsonl +[2025-06-28 17:46:28,892][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 17:47:18,712][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 17:47:18,712][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 17:47:18,712][__main__][INFO] - QWK: 0.4470 [0.3273, 0.5647] +[2025-06-28 17:47:18,712][__main__][INFO] - 
Macro_F1: 0.3492 [0.2550, 0.4643] +[2025-06-28 17:47:18,712][__main__][INFO] - Weighted_F1: 0.5174 [0.4330, 0.6035] +[2025-06-28 17:47:18,712][__main__][INFO] - Inference results: {'accuracy': 0.5362318840579711, 'RMSE': 30.072376462244492, 'QWK': 0.4505920783993467, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.3244639912039582, 'Micro_F1': 0.5362318840579711, 'Weighted_F1': 0.518137852459147, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(5), 'TN_2': np.int64(123), 'FP_2': np.int64(5), 'FN_2': np.int64(5), 'TP_3': np.int64(42), 'TN_3': np.int64(44), 'FP_3': np.int64(28), 'FN_3': np.int64(24), 'TP_4': np.int64(27), 'TN_4': np.int64(58), 'FP_4': np.int64(29), 'FN_4': np.int64(24), 'TP_5': np.int64(0), 'TN_5': np.int64(126), 'FP_5': np.int64(2), 'FN_5': np.int64(10)} +[2025-06-28 17:47:18,712][__main__][INFO] - Inference experiment completed diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/config.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/config.yaml index 83de0b01e8df2dfa967cb9c1138022a6e4bcc497..5816a127be4ac9979f0f8f1447a49c80511a3f58 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/config.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/config.yaml @@ -8,9 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/hydra.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/hydra.yaml index 1ea4789b5fc53dbe3b9866327c324e2f6854c205..88520a257a78b28b29e0c437868ed4bc5021b3e6 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/hydra.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=base_models/C2 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=base_models/C2 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/20-19-30 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-47-23 choices: experiments: base_models/C2 hydra/env: default diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/overrides.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..b2ff6385847671687feaef4b9fecb4d81cc52fc3 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/overrides.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=base_models/C2 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/bootstrap_confidence_intervals.csv b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..22341553698fc4991669ca0405e922b77f80da47 --- /dev/null +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_mbert_base-C2-encoder_classification-C2,2025-06-28 17:47:23,0.14437890192671712,0.001934092752835843,0.285562229212569,0.28362813645973317,0.23239532042246563,0.155705774323124,0.33457928743828114,0.17887351311515715,0.31801246492466145,0.23938401343609686,0.40182271968007593,0.16243870624397908 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/evaluation_results.csv b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/evaluation_results.csv index 08e9226400bf1997e12ad0624869c930dd6afedb..6b766c239d62d657e624e4044b8e7a231f9e84ac 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/evaluation_results.csv +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.36231884057971014,62.78557943912954,0.14498141263940523,0.08695652173913049,0.22145597726993074,0.36231884057971014,0.3182603637608693,0,137,0,1,5,88,15,30,1,130,3,4,32,41,46,19,12,94,18,14,0,112,6,20,2025-05-25 20:19:30,jbcs2025_mbert_base-C2-encoder_classification-C2 +0.36231884057971014,62.78557943912954,0.14498141263940523,0.08695652173913049,0.22145597726993074,0.36231884057971014,0.3182603637608693,0,137,0,1,5,88,15,30,1,130,3,4,32,41,46,19,12,94,18,14,0,112,6,20,2025-06-28 17:47:23,jbcs2025_mbert_base-C2-encoder_classification-C2 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/run_inference_experiment.log 
b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/run_inference_experiment.log index 0968fb5a8b23fd328634428409cb337b8b0e5872..8c310a074f4a028a2f650c3d14df11de32de43bd 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/run_inference_experiment.log +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C2-encoder_classification-C2/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 20:19:30,664][__main__][INFO] - Starting inference experiment -[2025-05-25 20:19:30,665][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 17:47:23,345][__main__][INFO] - Starting inference experiment +[2025-06-28 17:47:23,346][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,9 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -35,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 20:19:30,666][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 20:19:34,789][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json -[2025-05-25 20:19:34,791][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:47:23,360][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 17:47:28,352][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json +[2025-06-28 17:47:28,356][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -61,20 +66,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 20:19:34,967][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/vocab.txt -[2025-05-25 20:19:34,967][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer.json -[2025-05-25 20:19:34,967][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None -[2025-05-25 20:19:34,967][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None -[2025-05-25 20:19:34,967][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer_config.json -[2025-05-25 20:19:34,967][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 20:19:34,967][transformers.configuration_utils][INFO] - loading configuration file 
config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json -[2025-05-25 20:19:34,968][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:47:28,358][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/vocab.txt +[2025-06-28 17:47:28,359][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer.json +[2025-06-28 17:47:28,359][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None +[2025-06-28 17:47:28,359][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None +[2025-06-28 17:47:28,359][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer_config.json +[2025-06-28 17:47:28,359][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 17:47:28,360][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json +[2025-06-28 17:47:28,363][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -98,16 +103,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 20:19:35,182][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 20:19:35,428][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_mbert_base-C2 -[2025-05-25 20:19:36,005][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C2/snapshots/ba829169b10334433e2387bea31578db06725a3c/config.json -[2025-05-25 20:19:36,005][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:47:28,567][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 17:47:28,836][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_mbert_base-C2 +[2025-06-28 17:47:29,700][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C2/snapshots/ba829169b10334433e2387bea31578db06725a3c/config.json +[2025-06-28 17:47:29,701][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -149,27 +154,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 20:20:52,274][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at 
/tmp/models--kamel-usp--jbcs2025_mbert_base-C2/snapshots/ba829169b10334433e2387bea31578db06725a3c/model.safetensors -[2025-05-25 20:20:52,320][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 17:48:32,428][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C2/snapshots/ba829169b10334433e2387bea31578db06725a3c/model.safetensors +[2025-06-28 17:48:32,430][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 17:48:32,431][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 17:48:32,690][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 20:20:52,320][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_mbert_base-C2. +[2025-06-28 17:48:32,690][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_mbert_base-C2. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 20:20:52,362][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 20:20:52,398][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). -[2025-05-25 20:20:52,634][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 20:20:52,635][__main__][INFO] - Running inference on test dataset -[2025-05-25 20:20:52,636][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, prompt, essay_text, essay_year, grades, supporting_text, reference, id. If id_prompt, prompt, essay_text, essay_year, grades, supporting_text, reference, id are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 20:20:52,640][transformers.trainer][INFO] - +[2025-06-28 17:48:32,696][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 17:48:32,709][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 17:48:32,712][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. 
+[2025-06-28 17:48:32,729][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 17:48:36,213][__main__][INFO] - Running inference on test dataset +[2025-06-28 17:48:36,215][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: reference, supporting_text, grades, essay_text, id, prompt, id_prompt, essay_year. If reference, supporting_text, grades, essay_text, id, prompt, id_prompt, essay_year are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. +[2025-06-28 17:48:36,227][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 20:20:52,640][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 20:20:52,640][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 20:20:53,194][transformers][INFO] - {'accuracy': 0.36231884057971014, 'RMSE': 62.78557943912954, 'QWK': 0.14498141263940523, 'HDIV': 0.08695652173913049, 'Macro_F1': 0.22145597726993074, 'Micro_F1': 0.36231884057971014, 'Weighted_F1': 0.3182603637608693, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(5), 'TN_1': np.int64(88), 'FP_1': np.int64(15), 'FN_1': np.int64(30), 'TP_2': np.int64(1), 'TN_2': np.int64(130), 'FP_2': np.int64(3), 'FN_2': np.int64(4), 'TP_3': np.int64(32), 'TN_3': np.int64(41), 'FP_3': np.int64(46), 'FN_3': np.int64(19), 'TP_4': np.int64(12), 'TN_4': np.int64(94), 'FP_4': np.int64(18), 'FN_4': np.int64(14), 'TP_5': np.int64(0), 'TN_5': np.int64(112), 'FP_5': np.int64(6), 'FN_5': np.int64(20)} -[2025-05-25 20:20:53,204][__main__][INFO] - Inference results saved to jbcs2025_mbert_base-C2-encoder_classification-C2_inference_results.jsonl -[2025-05-25 20:20:53,223][__main__][INFO] - Inference results: {'accuracy': 0.36231884057971014, 'RMSE': 62.78557943912954, 'QWK': 0.14498141263940523, 'HDIV': 0.08695652173913049, 'Macro_F1': 0.22145597726993074, 'Micro_F1': 0.36231884057971014, 'Weighted_F1': 0.3182603637608693, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(5), 'TN_1': np.int64(88), 'FP_1': np.int64(15), 'FN_1': np.int64(30), 'TP_2': np.int64(1), 'TN_2': np.int64(130), 'FP_2': np.int64(3), 'FN_2': np.int64(4), 'TP_3': np.int64(32), 'TN_3': np.int64(41), 'FP_3': np.int64(46), 'FN_3': np.int64(19), 'TP_4': np.int64(12), 'TN_4': np.int64(94), 'FP_4': np.int64(18), 'FN_4': np.int64(14), 'TP_5': np.int64(0), 'TN_5': np.int64(112), 'FP_5': np.int64(6), 'FN_5': np.int64(20)} -[2025-05-25 20:20:53,223][__main__][INFO] - Inference experiment completed +[2025-06-28 17:48:36,228][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 17:48:36,228][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 17:48:39,575][__main__][INFO] - Inference results saved to jbcs2025_mbert_base-C2-encoder_classification-C2_inference_results.jsonl +[2025-06-28 17:48:39,589][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 17:49:28,874][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 17:49:28,874][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 17:49:28,875][__main__][INFO] - QWK: 0.1444 [0.0019, 0.2856] +[2025-06-28 17:49:28,875][__main__][INFO] - Macro_F1: 0.2324 [0.1557, 0.3346] +[2025-06-28 17:49:28,875][__main__][INFO] - Weighted_F1: 0.3180 [0.2394, 0.4018] +[2025-06-28 
17:49:28,875][__main__][INFO] - Inference results: {'accuracy': 0.36231884057971014, 'RMSE': 62.78557943912954, 'QWK': 0.14498141263940523, 'HDIV': 0.08695652173913049, 'Macro_F1': 0.22145597726993074, 'Micro_F1': 0.36231884057971014, 'Weighted_F1': 0.3182603637608693, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(5), 'TN_1': np.int64(88), 'FP_1': np.int64(15), 'FN_1': np.int64(30), 'TP_2': np.int64(1), 'TN_2': np.int64(130), 'FP_2': np.int64(3), 'FN_2': np.int64(4), 'TP_3': np.int64(32), 'TN_3': np.int64(41), 'FP_3': np.int64(46), 'FN_3': np.int64(19), 'TP_4': np.int64(12), 'TN_4': np.int64(94), 'FP_4': np.int64(18), 'FN_4': np.int64(14), 'TP_5': np.int64(0), 'TN_5': np.int64(112), 'FP_5': np.int64(6), 'FN_5': np.int64(20)} +[2025-06-28 17:49:28,875][__main__][INFO] - Inference experiment completed diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/config.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/config.yaml index 37c6d4a72cd5296cad9604593fe46b0d2c6b26c4..40ade9f925ab870779a2d5f8650712ea69af22a7 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/config.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/config.yaml @@ -8,9 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/hydra.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/hydra.yaml index ed98dbd5394eb4187e57e06e4dbd09cbe902be03..70eae901b2c4bdc4146a04b79ed9b6be088401c8 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/hydra.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=base_models/C3 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=base_models/C3 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/21-25-03 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-49-33 choices: experiments: base_models/C3 hydra/env: default diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/overrides.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..4100f0c2b53b8a1825f5882dd49554426e481e58 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/overrides.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=base_models/C3 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/bootstrap_confidence_intervals.csv b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..7cfc060dd426fec8588b69042a8edac67c5b0a14 --- /dev/null +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_mbert_base-C3-encoder_classification-C3,2025-06-28 17:49:33,0.2618424087718754,0.10210150729441123,0.4199083537960727,0.31780684650166147,0.16614765792326347,0.11015299312713149,0.2324391515727227,0.12228615844559122,0.1633811825402068,0.1012985474086221,0.2311794347558356,0.1298808873472135 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/evaluation_results.csv b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/evaluation_results.csv index 37872fea4c3fe3bd560ebd52d65d8caa1e6aabbf..130295c820ff2ed2b035824296087c1a65a8fb5b 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/evaluation_results.csv +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.2318840579710145,60.24106163777641,0.2641316569559441,0.09420289855072461,0.15672242946179116,0.2318840579710145,0.1613437300185681,0,137,0,1,0,109,0,29,15,57,63,3,1,92,1,44,12,80,20,26,4,109,22,3,2025-05-25 21:25:03,jbcs2025_mbert_base-C3-encoder_classification-C3 +0.2318840579710145,60.24106163777641,0.2641316569559441,0.09420289855072461,0.15672242946179116,0.2318840579710145,0.1613437300185681,0,137,0,1,0,109,0,29,15,57,63,3,1,92,1,44,12,80,20,26,4,109,22,3,2025-06-28 17:49:33,jbcs2025_mbert_base-C3-encoder_classification-C3 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/run_inference_experiment.log 
b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/run_inference_experiment.log index adf9d396a716b27332ae892f0e07c4dc5abb1655..7827a48d3accd6fc58774d6a8412d09b2a69d8ba 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/run_inference_experiment.log +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C3-encoder_classification-C3/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 21:25:03,033][__main__][INFO] - Starting inference experiment -[2025-05-25 21:25:03,034][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 17:49:33,521][__main__][INFO] - Starting inference experiment +[2025-06-28 17:49:33,522][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,9 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -35,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 21:25:03,036][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 21:25:07,380][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json -[2025-05-25 21:25:07,382][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:49:33,536][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 17:49:39,049][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json +[2025-06-28 17:49:39,053][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -61,20 +66,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:25:07,568][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/vocab.txt -[2025-05-25 21:25:07,568][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer.json -[2025-05-25 21:25:07,568][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None -[2025-05-25 21:25:07,568][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None -[2025-05-25 21:25:07,568][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer_config.json -[2025-05-25 21:25:07,568][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 21:25:07,568][transformers.configuration_utils][INFO] - loading configuration file 
config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json -[2025-05-25 21:25:07,569][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:49:39,055][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/vocab.txt +[2025-06-28 17:49:39,056][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer.json +[2025-06-28 17:49:39,056][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None +[2025-06-28 17:49:39,057][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None +[2025-06-28 17:49:39,057][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer_config.json +[2025-06-28 17:49:39,057][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 17:49:39,059][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json +[2025-06-28 17:49:39,062][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -98,16 +103,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:25:07,788][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 21:25:08,054][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_mbert_base-C3 -[2025-05-25 21:25:08,667][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C3/snapshots/1b0c0041ae7f2bcb714debdb0ad20e11af1be8a3/config.json -[2025-05-25 21:25:08,668][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:49:39,253][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 17:49:39,520][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_mbert_base-C3 +[2025-06-28 17:49:40,414][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C3/snapshots/1b0c0041ae7f2bcb714debdb0ad20e11af1be8a3/config.json +[2025-06-28 17:49:40,415][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -149,27 +154,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:25:22,139][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at 
/tmp/models--kamel-usp--jbcs2025_mbert_base-C3/snapshots/1b0c0041ae7f2bcb714debdb0ad20e11af1be8a3/model.safetensors -[2025-05-25 21:25:22,182][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 17:50:45,544][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C3/snapshots/1b0c0041ae7f2bcb714debdb0ad20e11af1be8a3/model.safetensors +[2025-06-28 17:50:45,546][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 17:50:45,546][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 17:50:45,804][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 21:25:22,182][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_mbert_base-C3. +[2025-06-28 17:50:45,804][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_mbert_base-C3. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 21:25:22,235][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 21:25:22,270][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). -[2025-05-25 21:25:22,533][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 21:25:22,534][__main__][INFO] - Running inference on test dataset -[2025-05-25 21:25:22,535][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id, essay_text, essay_year, grades, prompt, supporting_text, id_prompt, reference. If id, essay_text, essay_year, grades, prompt, supporting_text, id_prompt, reference are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 21:25:22,539][transformers.trainer][INFO] - +[2025-06-28 17:50:45,810][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 17:50:45,823][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 17:50:45,826][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. 
+[2025-06-28 17:50:45,842][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 17:50:49,565][__main__][INFO] - Running inference on test dataset +[2025-06-28 17:50:49,566][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: reference, supporting_text, essay_text, essay_year, id_prompt, grades, prompt, id. If reference, supporting_text, essay_text, essay_year, id_prompt, grades, prompt, id are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. +[2025-06-28 17:50:49,573][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 21:25:22,539][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 21:25:22,539][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 21:25:23,063][transformers][INFO] - {'accuracy': 0.2318840579710145, 'RMSE': 60.24106163777641, 'QWK': 0.2641316569559441, 'HDIV': 0.09420289855072461, 'Macro_F1': 0.15672242946179116, 'Micro_F1': 0.2318840579710145, 'Weighted_F1': 0.1613437300185681, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(109), 'FP_1': np.int64(0), 'FN_1': np.int64(29), 'TP_2': np.int64(15), 'TN_2': np.int64(57), 'FP_2': np.int64(63), 'FN_2': np.int64(3), 'TP_3': np.int64(1), 'TN_3': np.int64(92), 'FP_3': np.int64(1), 'FN_3': np.int64(44), 'TP_4': np.int64(12), 'TN_4': np.int64(80), 'FP_4': np.int64(20), 'FN_4': np.int64(26), 'TP_5': np.int64(4), 'TN_5': np.int64(109), 'FP_5': np.int64(22), 'FN_5': np.int64(3)} -[2025-05-25 21:25:23,073][__main__][INFO] - Inference results saved to jbcs2025_mbert_base-C3-encoder_classification-C3_inference_results.jsonl -[2025-05-25 21:25:23,093][__main__][INFO] - Inference results: {'accuracy': 0.2318840579710145, 'RMSE': 60.24106163777641, 'QWK': 0.2641316569559441, 'HDIV': 0.09420289855072461, 'Macro_F1': 0.15672242946179116, 'Micro_F1': 0.2318840579710145, 'Weighted_F1': 0.1613437300185681, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(109), 'FP_1': np.int64(0), 'FN_1': np.int64(29), 'TP_2': np.int64(15), 'TN_2': np.int64(57), 'FP_2': np.int64(63), 'FN_2': np.int64(3), 'TP_3': np.int64(1), 'TN_3': np.int64(92), 'FP_3': np.int64(1), 'FN_3': np.int64(44), 'TP_4': np.int64(12), 'TN_4': np.int64(80), 'FP_4': np.int64(20), 'FN_4': np.int64(26), 'TP_5': np.int64(4), 'TN_5': np.int64(109), 'FP_5': np.int64(22), 'FN_5': np.int64(3)} -[2025-05-25 21:25:23,094][__main__][INFO] - Inference experiment completed +[2025-06-28 17:50:49,573][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 17:50:49,573][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 17:50:52,911][__main__][INFO] - Inference results saved to jbcs2025_mbert_base-C3-encoder_classification-C3_inference_results.jsonl +[2025-06-28 17:50:52,924][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 17:51:41,639][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 17:51:41,639][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 17:51:41,639][__main__][INFO] - QWK: 0.2618 [0.1021, 0.4199] +[2025-06-28 17:51:41,639][__main__][INFO] - Macro_F1: 0.1661 [0.1102, 0.2324] +[2025-06-28 17:51:41,639][__main__][INFO] - Weighted_F1: 0.1634 [0.1013, 0.2312] +[2025-06-28 
17:51:41,640][__main__][INFO] - Inference results: {'accuracy': 0.2318840579710145, 'RMSE': 60.24106163777641, 'QWK': 0.2641316569559441, 'HDIV': 0.09420289855072461, 'Macro_F1': 0.15672242946179116, 'Micro_F1': 0.2318840579710145, 'Weighted_F1': 0.1613437300185681, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(109), 'FP_1': np.int64(0), 'FN_1': np.int64(29), 'TP_2': np.int64(15), 'TN_2': np.int64(57), 'FP_2': np.int64(63), 'FN_2': np.int64(3), 'TP_3': np.int64(1), 'TN_3': np.int64(92), 'FP_3': np.int64(1), 'FN_3': np.int64(44), 'TP_4': np.int64(12), 'TN_4': np.int64(80), 'FP_4': np.int64(20), 'FN_4': np.int64(26), 'TP_5': np.int64(4), 'TN_5': np.int64(109), 'FP_5': np.int64(22), 'FN_5': np.int64(3)} +[2025-06-28 17:51:41,640][__main__][INFO] - Inference experiment completed diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/.hydra/config.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/.hydra/config.yaml index 2ba56757d5c7503987493bf6131c467056edc45a..254f6f57048a8151cf391b2f626463cb3640204f 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/.hydra/config.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/.hydra/config.yaml @@ -8,9 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -18,9 +23,9 @@ experiments: name: kamel-usp/jbcs2025_mbert_base-C4 type: encoder_classification num_labels: 6 - output_dir: ./results/mbert_base/C4 - logging_dir: ./logs/mbert_base/C4 - best_model_dir: ./results/mbert_base/C4/best_model + output_dir: ./results/bertimbau_base/C4 + logging_dir: ./logs/bertimbau_base/C4 + best_model_dir: ./results/bertimbau_base/C4/best_model tokenizer: name: google-bert/bert-base-multilingual-cased dataset: diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/.hydra/hydra.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/.hydra/hydra.yaml index 36d88bf39ab11b3fef5b01ed87c121751f391c4c..2dec7aa6cade6b356c33372e134dc3e5f8b28ec1 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/.hydra/hydra.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/.hydra/hydra.yaml @@ -129,18 +129,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/21-26-26 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-29/08-49-48 choices: experiments: base_models/C4 hydra/env: default diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/bootstrap_confidence_intervals.csv b/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..ed5622e3c13b33aba89f80743e1e819c49b47144 --- /dev/null +++ 
b/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_mbert_base-C4-encoder_classification-C4,2025-06-29 08:49:48,0.2763369862905251,0.11826401446654611,0.42365737293693234,0.30539335847038623,0.19864536574346703,0.13107287077288077,0.2909456053697642,0.15987273459688345,0.4102269175033183,0.3154131237971489,0.5049926067982864,0.18957948300113747 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/evaluation_results.csv b/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/evaluation_results.csv index 6a3f4545425f2e7457b15f8b8c944d136ea9da3d..ded3c2521ca73ede5fba1011a94618cf1b2b0364 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/evaluation_results.csv +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.5,33.70803886401538,0.28170809432759725,0.007246376811594235,0.17299898682877404,0.5,0.4091229461257213,0,137,0,1,0,137,0,1,0,129,0,9,64,14,48,12,2,84,8,44,3,120,13,2,2025-05-25 21:26:26,jbcs2025_mbert_base-C4-encoder_classification-C4 +0.5,33.70803886401538,0.28170809432759725,0.007246376811594235,0.17299898682877404,0.5,0.4091229461257213,0,137,0,1,0,137,0,1,0,129,0,9,64,14,48,12,2,84,8,44,3,120,13,2,2025-06-29 08:49:48,jbcs2025_mbert_base-C4-encoder_classification-C4 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/run_inference_experiment.log b/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/run_inference_experiment.log index 69d2647b5490fa2d646434df11dd853d3e8c5d91..d9e4147a26583c769358b92a2acff140a6ae60d1 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/run_inference_experiment.log +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C4-encoder_classification-C4/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 21:26:26,772][__main__][INFO] - Starting inference experiment -[2025-05-25 21:26:26,773][__main__][INFO] - cache_dir: /tmp/ +[2025-06-29 08:49:48,257][__main__][INFO] - Starting inference experiment +[2025-06-29 08:49:48,259][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,9 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -19,9 +24,9 @@ experiments: name: kamel-usp/jbcs2025_mbert_base-C4 type: encoder_classification num_labels: 6 - output_dir: ./results/mbert_base/C4 - logging_dir: ./logs/mbert_base/C4 - best_model_dir: ./results/mbert_base/C4/best_model + output_dir: ./results/bertimbau_base/C4 + logging_dir: ./logs/bertimbau_base/C4 + best_model_dir: ./results/bertimbau_base/C4/best_model tokenizer: name: google-bert/bert-base-multilingual-cased dataset: @@ -35,9 +40,9 @@ experiments: 
gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 21:26:26,775][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 21:26:31,062][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json -[2025-05-25 21:26:31,063][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-29 08:49:48,272][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-29 08:49:54,384][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json +[2025-06-29 08:49:54,388][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -61,20 +66,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:26:31,248][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/vocab.txt -[2025-05-25 21:26:31,248][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer.json -[2025-05-25 21:26:31,248][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None -[2025-05-25 21:26:31,248][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None -[2025-05-25 21:26:31,248][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer_config.json -[2025-05-25 21:26:31,248][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 21:26:31,249][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json -[2025-05-25 21:26:31,250][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-29 08:49:54,390][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/vocab.txt +[2025-06-29 08:49:54,390][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer.json +[2025-06-29 08:49:54,391][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None +[2025-06-29 08:49:54,391][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None +[2025-06-29 08:49:54,391][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at 
/tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer_config.json +[2025-06-29 08:49:54,391][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-29 08:49:54,392][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json +[2025-06-29 08:49:54,395][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -98,16 +103,18 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:26:31,469][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 21:26:31,757][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_mbert_base-C4 -[2025-05-25 21:26:32,461][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C4/snapshots/56076a7412b5c2be4e1957bd1480f63e3f60ec5c/config.json -[2025-05-25 21:26:32,462][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-29 08:49:54,574][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-29 08:49:54,669][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_mbert_base-C4 +[2025-06-29 08:49:54,669][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_mbert_base-C4 +[2025-06-29 08:49:56,328][__main__][INFO] - Model need ≈ 1.74 GiB to run inference and 3.73 for training +[2025-06-29 08:49:56,558][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C4/snapshots/56076a7412b5c2be4e1957bd1480f63e3f60ec5c/config.json +[2025-06-29 08:49:56,561][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -149,27 +156,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:26:46,368][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C4/snapshots/56076a7412b5c2be4e1957bd1480f63e3f60ec5c/model.safetensors -[2025-05-25 21:26:46,412][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-29 08:49:56,594][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C4/snapshots/56076a7412b5c2be4e1957bd1480f63e3f60ec5c/model.safetensors +[2025-06-29 08:49:56,594][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-29 08:49:56,595][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. 
+[2025-06-29 08:49:56,840][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 21:26:46,412][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_mbert_base-C4. +[2025-06-29 08:49:56,841][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_mbert_base-C4. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 21:26:46,454][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 21:26:46,490][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). -[2025-05-25 21:26:46,929][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 21:26:46,930][__main__][INFO] - Running inference on test dataset -[2025-05-25 21:26:46,930][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: grades, prompt, id, supporting_text, essay_text, essay_year, id_prompt, reference. If grades, prompt, id, supporting_text, essay_text, essay_year, id_prompt, reference are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 21:26:46,935][transformers.trainer][INFO] - +[2025-06-29 08:49:56,846][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-29 08:49:56,859][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-29 08:49:56,862][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. +[2025-06-29 08:49:56,878][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-29 08:50:01,188][__main__][INFO] - Running inference on test dataset +[2025-06-29 08:50:01,191][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, supporting_text, essay_year, grades, prompt, reference, essay_text, id. If id_prompt, supporting_text, essay_year, grades, prompt, reference, essay_text, id are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. 
+[2025-06-29 08:50:01,210][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 21:26:46,935][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 21:26:46,935][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 21:26:47,456][transformers][INFO] - {'accuracy': 0.5, 'RMSE': 33.70803886401538, 'QWK': 0.28170809432759725, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.17299898682877404, 'Micro_F1': 0.5, 'Weighted_F1': 0.4091229461257213, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(137), 'FP_1': np.int64(0), 'FN_1': np.int64(1), 'TP_2': np.int64(0), 'TN_2': np.int64(129), 'FP_2': np.int64(0), 'FN_2': np.int64(9), 'TP_3': np.int64(64), 'TN_3': np.int64(14), 'FP_3': np.int64(48), 'FN_3': np.int64(12), 'TP_4': np.int64(2), 'TN_4': np.int64(84), 'FP_4': np.int64(8), 'FN_4': np.int64(44), 'TP_5': np.int64(3), 'TN_5': np.int64(120), 'FP_5': np.int64(13), 'FN_5': np.int64(2)} -[2025-05-25 21:26:47,467][__main__][INFO] - Inference results saved to jbcs2025_mbert_base-C4-encoder_classification-C4_inference_results.jsonl -[2025-05-25 21:26:47,487][__main__][INFO] - Inference results: {'accuracy': 0.5, 'RMSE': 33.70803886401538, 'QWK': 0.28170809432759725, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.17299898682877404, 'Micro_F1': 0.5, 'Weighted_F1': 0.4091229461257213, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(137), 'FP_1': np.int64(0), 'FN_1': np.int64(1), 'TP_2': np.int64(0), 'TN_2': np.int64(129), 'FP_2': np.int64(0), 'FN_2': np.int64(9), 'TP_3': np.int64(64), 'TN_3': np.int64(14), 'FP_3': np.int64(48), 'FN_3': np.int64(12), 'TP_4': np.int64(2), 'TN_4': np.int64(84), 'FP_4': np.int64(8), 'FN_4': np.int64(44), 'TP_5': np.int64(3), 'TN_5': np.int64(120), 'FP_5': np.int64(13), 'FN_5': np.int64(2)} -[2025-05-25 21:26:47,487][__main__][INFO] - Inference experiment completed +[2025-06-29 08:50:01,210][transformers.trainer][INFO] - Num examples = 138 +[2025-06-29 08:50:01,211][transformers.trainer][INFO] - Batch size = 16 +[2025-06-29 08:50:04,523][__main__][INFO] - Inference results saved to jbcs2025_mbert_base-C4-encoder_classification-C4_inference_results.jsonl +[2025-06-29 08:50:04,538][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-29 08:50:54,235][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-29 08:50:54,235][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-29 08:50:54,235][__main__][INFO] - QWK: 0.2763 [0.1183, 0.4237] +[2025-06-29 08:50:54,235][__main__][INFO] - Macro_F1: 0.1986 [0.1311, 0.2909] +[2025-06-29 08:50:54,235][__main__][INFO] - Weighted_F1: 0.4102 [0.3154, 0.5050] +[2025-06-29 08:50:54,235][__main__][INFO] - Inference results: {'accuracy': 0.5, 'RMSE': 33.70803886401538, 'QWK': 0.28170809432759725, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.17299898682877404, 'Micro_F1': 0.5, 'Weighted_F1': 0.4091229461257213, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(137), 'FP_1': np.int64(0), 'FN_1': np.int64(1), 'TP_2': np.int64(0), 'TN_2': np.int64(129), 'FP_2': np.int64(0), 'FN_2': np.int64(9), 'TP_3': np.int64(64), 'TN_3': np.int64(14), 'FP_3': np.int64(48), 'FN_3': np.int64(12), 'TP_4': np.int64(2), 'TN_4': np.int64(84), 'FP_4': np.int64(8), 'FN_4': np.int64(44), 'TP_5': np.int64(3), 'TN_5': 
np.int64(120), 'FP_5': np.int64(13), 'FN_5': np.int64(2)} +[2025-06-29 08:50:54,235][__main__][INFO] - Inference experiment completed diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/config.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/config.yaml index af45a19badc2d092b84b655ab8ebf2a5d9bd02b5..e259d3982a7bb2a2ef64e4d346f206fdca72d486 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/config.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/config.yaml @@ -8,9 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -18,9 +23,9 @@ experiments: name: kamel-usp/jbcs2025_mbert_base-C5 type: encoder_classification num_labels: 6 - output_dir: ./results/mbert_base/C5 - logging_dir: ./logs/mbert_base/C5 - best_model_dir: ./results/mbert_base/C5/best_model + output_dir: ./results/bertimbau_base/C5 + logging_dir: ./logs/bertimbau_base/C5 + best_model_dir: ./results/bertimbau_base/C5/best_model tokenizer: name: google-bert/bert-base-multilingual-cased dataset: diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/hydra.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/hydra.yaml index c513e260de5ce8b1d20004f4cb5e246810044512..0f5eafae5d416cf37694fa5dfb711a539dbbfd56 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/hydra.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=base_models/C5 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=base_models/C5 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/21-29-45 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-53-55 choices: experiments: base_models/C5 hydra/env: default diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/overrides.yaml b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..40b7755772dc7157c21a647ba6422f48f0f380d1 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/overrides.yaml +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=base_models/C5 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/bootstrap_confidence_intervals.csv b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..5c3dac137b94b7d7f7b7f9d7df02dec337ed8381 --- /dev/null +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_mbert_base-C5-encoder_classification-C5,2025-06-28 17:53:55,0.5708670849334949,0.45973493333814097,0.6715233618747556,0.21178842853661461,0.3153168788677944,0.24690188099227697,0.39542839384236395,0.14852651285008697,0.3819394286354872,0.29653107806189183,0.4684612211854998,0.171930143123608 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/evaluation_results.csv b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/evaluation_results.csv index cad2701ef41762492eb084399bb10d0fbdaa2c54..4869346bacddc7f4c8003c5cfa285c29ac270516 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/evaluation_results.csv +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.4057971014492754,51.863812745147925,0.5735521338377112,0.06521739130434778,0.3161400322841001,0.4057971014492754,0.3828592483419307,5,115,1,17,8,90,16,24,6,94,20,18,10,89,24,15,27,85,21,5,0,135,0,3,2025-05-25 21:29:45,jbcs2025_mbert_base-C5-encoder_classification-C5 +0.4057971014492754,51.863812745147925,0.5735521338377112,0.06521739130434778,0.3161400322841001,0.4057971014492754,0.3828592483419307,5,115,1,17,8,90,16,24,6,94,20,18,10,89,24,15,27,85,21,5,0,135,0,3,2025-06-28 17:53:55,jbcs2025_mbert_base-C5-encoder_classification-C5 diff --git a/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/run_inference_experiment.log 
b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/run_inference_experiment.log index 5f098af2cebf9475645ac1582c2e5fe228867509..1b63ddb82667fc8d2f010658765c9226be983b1b 100644 --- a/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/run_inference_experiment.log +++ b/runs/base_models/mbert/jbcs2025_mbert_base-C5-encoder_classification-C5/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 21:29:45,241][__main__][INFO] - Starting inference experiment -[2025-05-25 21:29:45,242][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 17:53:55,402][__main__][INFO] - Starting inference experiment +[2025-06-28 17:53:55,403][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,9 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -19,9 +24,9 @@ experiments: name: kamel-usp/jbcs2025_mbert_base-C5 type: encoder_classification num_labels: 6 - output_dir: ./results/mbert_base/C5 - logging_dir: ./logs/mbert_base/C5 - best_model_dir: ./results/mbert_base/C5/best_model + output_dir: ./results/bertimbau_base/C5 + logging_dir: ./logs/bertimbau_base/C5 + best_model_dir: ./results/bertimbau_base/C5/best_model tokenizer: name: google-bert/bert-base-multilingual-cased dataset: @@ -35,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 21:29:45,244][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 21:29:49,344][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json -[2025-05-25 21:29:49,346][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:53:55,416][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 17:54:00,588][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json +[2025-06-28 17:54:00,589][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -61,20 +66,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:29:49,551][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/vocab.txt -[2025-05-25 21:29:49,551][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer.json -[2025-05-25 21:29:49,551][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None -[2025-05-25 21:29:49,551][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None -[2025-05-25 
21:29:49,551][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer_config.json -[2025-05-25 21:29:49,552][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 21:29:49,552][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json -[2025-05-25 21:29:49,553][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:54:00,590][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/vocab.txt +[2025-06-28 17:54:00,590][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer.json +[2025-06-28 17:54:00,590][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at None +[2025-06-28 17:54:00,590][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at None +[2025-06-28 17:54:00,590][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/tokenizer_config.json +[2025-06-28 17:54:00,590][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 17:54:00,591][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--google-bert--bert-base-multilingual-cased/snapshots/3f076fdb1ab68d5b2880cb87a0886f315b8146f8/config.json +[2025-06-28 17:54:00,591][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -98,16 +103,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:29:49,776][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 21:29:50,006][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_mbert_base-C5 -[2025-05-25 21:29:50,946][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C5/snapshots/cee9918675c53c6ce51c47b88d8c0866a3ef7382/config.json -[2025-05-25 21:29:50,948][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:54:00,754][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 17:54:01,024][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_mbert_base-C5 +[2025-06-28 17:54:01,926][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C5/snapshots/cee9918675c53c6ce51c47b88d8c0866a3ef7382/config.json +[2025-06-28 17:54:01,929][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ 
"BertForSequenceClassification" ], @@ -149,27 +154,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 119547 } -[2025-05-25 21:30:06,418][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C5/snapshots/cee9918675c53c6ce51c47b88d8c0866a3ef7382/model.safetensors -[2025-05-25 21:30:06,465][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 17:55:05,034][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_mbert_base-C5/snapshots/cee9918675c53c6ce51c47b88d8c0866a3ef7382/model.safetensors +[2025-06-28 17:55:05,036][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 17:55:05,037][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 17:55:05,300][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 21:30:06,466][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_mbert_base-C5. +[2025-06-28 17:55:05,300][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_mbert_base-C5. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 21:30:06,510][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 21:30:06,566][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). -[2025-05-25 21:30:06,844][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 21:30:06,845][__main__][INFO] - Running inference on test dataset -[2025-05-25 21:30:06,846][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, essay_year, grades, supporting_text, id, prompt, reference, id_prompt. If essay_text, essay_year, grades, supporting_text, id, prompt, reference, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 21:30:06,850][transformers.trainer][INFO] - +[2025-06-28 17:55:05,306][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 17:55:05,318][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). 
+[2025-06-28 17:55:05,322][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. +[2025-06-28 17:55:05,338][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 17:55:09,053][__main__][INFO] - Running inference on test dataset +[2025-06-28 17:55:09,056][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_year, essay_text, id_prompt, prompt, supporting_text, id, reference, grades. If essay_year, essay_text, id_prompt, prompt, supporting_text, id, reference, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. +[2025-06-28 17:55:09,076][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 21:30:06,850][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 21:30:06,850][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 21:30:07,439][transformers][INFO] - {'accuracy': 0.4057971014492754, 'RMSE': 51.863812745147925, 'QWK': 0.5735521338377112, 'HDIV': 0.06521739130434778, 'Macro_F1': 0.3161400322841001, 'Micro_F1': 0.4057971014492754, 'Weighted_F1': 0.3828592483419307, 'TP_0': np.int64(5), 'TN_0': np.int64(115), 'FP_0': np.int64(1), 'FN_0': np.int64(17), 'TP_1': np.int64(8), 'TN_1': np.int64(90), 'FP_1': np.int64(16), 'FN_1': np.int64(24), 'TP_2': np.int64(6), 'TN_2': np.int64(94), 'FP_2': np.int64(20), 'FN_2': np.int64(18), 'TP_3': np.int64(10), 'TN_3': np.int64(89), 'FP_3': np.int64(24), 'FN_3': np.int64(15), 'TP_4': np.int64(27), 'TN_4': np.int64(85), 'FP_4': np.int64(21), 'FN_4': np.int64(5), 'TP_5': np.int64(0), 'TN_5': np.int64(135), 'FP_5': np.int64(0), 'FN_5': np.int64(3)} -[2025-05-25 21:30:07,449][__main__][INFO] - Inference results saved to jbcs2025_mbert_base-C5-encoder_classification-C5_inference_results.jsonl -[2025-05-25 21:30:07,470][__main__][INFO] - Inference results: {'accuracy': 0.4057971014492754, 'RMSE': 51.863812745147925, 'QWK': 0.5735521338377112, 'HDIV': 0.06521739130434778, 'Macro_F1': 0.3161400322841001, 'Micro_F1': 0.4057971014492754, 'Weighted_F1': 0.3828592483419307, 'TP_0': np.int64(5), 'TN_0': np.int64(115), 'FP_0': np.int64(1), 'FN_0': np.int64(17), 'TP_1': np.int64(8), 'TN_1': np.int64(90), 'FP_1': np.int64(16), 'FN_1': np.int64(24), 'TP_2': np.int64(6), 'TN_2': np.int64(94), 'FP_2': np.int64(20), 'FN_2': np.int64(18), 'TP_3': np.int64(10), 'TN_3': np.int64(89), 'FP_3': np.int64(24), 'FN_3': np.int64(15), 'TP_4': np.int64(27), 'TN_4': np.int64(85), 'FP_4': np.int64(21), 'FN_4': np.int64(5), 'TP_5': np.int64(0), 'TN_5': np.int64(135), 'FP_5': np.int64(0), 'FN_5': np.int64(3)} -[2025-05-25 21:30:07,470][__main__][INFO] - Inference experiment completed +[2025-06-28 17:55:09,076][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 17:55:09,076][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 17:55:12,420][__main__][INFO] - Inference results saved to jbcs2025_mbert_base-C5-encoder_classification-C5_inference_results.jsonl +[2025-06-28 17:55:12,434][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 17:56:00,913][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 17:56:00,913][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 
17:56:00,913][__main__][INFO] - QWK: 0.5709 [0.4597, 0.6715] +[2025-06-28 17:56:00,913][__main__][INFO] - Macro_F1: 0.3153 [0.2469, 0.3954] +[2025-06-28 17:56:00,913][__main__][INFO] - Weighted_F1: 0.3819 [0.2965, 0.4685] +[2025-06-28 17:56:00,913][__main__][INFO] - Inference results: {'accuracy': 0.4057971014492754, 'RMSE': 51.863812745147925, 'QWK': 0.5735521338377112, 'HDIV': 0.06521739130434778, 'Macro_F1': 0.3161400322841001, 'Micro_F1': 0.4057971014492754, 'Weighted_F1': 0.3828592483419307, 'TP_0': np.int64(5), 'TN_0': np.int64(115), 'FP_0': np.int64(1), 'FN_0': np.int64(17), 'TP_1': np.int64(8), 'TN_1': np.int64(90), 'FP_1': np.int64(16), 'FN_1': np.int64(24), 'TP_2': np.int64(6), 'TN_2': np.int64(94), 'FP_2': np.int64(20), 'FN_2': np.int64(18), 'TP_3': np.int64(10), 'TN_3': np.int64(89), 'FP_3': np.int64(24), 'FN_3': np.int64(15), 'TP_4': np.int64(27), 'TN_4': np.int64(85), 'FP_4': np.int64(21), 'FN_4': np.int64(5), 'TP_5': np.int64(0), 'TN_5': np.int64(135), 'FP_5': np.int64(0), 'FN_5': np.int64(3)} +[2025-06-28 17:56:00,913][__main__][INFO] - Inference experiment completed diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/config.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/config.yaml index b336bca3195a86dbd52c4092c7fbc5fc29653803..1f437209386226aa1eb2164e326d1c56af61f235 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/config.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/config.yaml @@ -8,9 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/hydra.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/hydra.yaml index 8d252349b3df8b3aaa7cd6c62203a6f2dc566ddb..5d44aa3ff2ad235ba06b0be7cc10eef32c6109ee 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/hydra.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=large_models/C1 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=large_models/C1 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/21-52-07 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/17-56-34 choices: experiments: large_models/C1 hydra/env: default diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/overrides.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..48d51f56cfc09973a6bc2bb9ab1aa39cdc319b82 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/overrides.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=large_models/C1 diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/bootstrap_confidence_intervals.csv b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..ec89cec4364711a19aef5a545a4742b2276a91af --- /dev/null +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_bertimbau-large-C1-encoder_classification-C1,2025-06-28 17:56:34,0.7077306949563342,0.6143517863619989,0.7925777763902052,0.17822599002820627,0.5086103433798747,0.3864421058348054,0.6645304839656547,0.2780883781308493,0.7098050926433485,0.6316473666444833,0.7862820634470531,0.1546346968025698 diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/evaluation_results.csv b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/evaluation_results.csv index b606fa8321c389cdafef2e45d5a9d9b42882b09d..db37c6689b2c6b959bd876698381d9500bbd0760 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/evaluation_results.csv +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.7028985507246377,24.55399256179405,0.7080553295362083,0.007246376811594235,0.4714726209463051,0.7028985507246377,0.7092465463174845,0,137,0,1,0,138,0,0,4,124,4,6,54,60,12,12,33,76,11,18,6,114,14,4,2025-05-25 21:52:07,jbcs2025_bertimbau-large-C1-encoder_classification-C1 +0.7028985507246377,24.55399256179405,0.7080553295362083,0.007246376811594235,0.4714726209463051,0.7028985507246377,0.7092465463174845,0,137,0,1,0,138,0,0,4,124,4,6,54,60,12,12,33,76,11,18,6,114,14,4,2025-06-28 17:56:34,jbcs2025_bertimbau-large-C1-encoder_classification-C1 diff --git 
a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/run_inference_experiment.log b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/run_inference_experiment.log index 58e23e93d8d83e53ab198128db763a427f969546..46f1bf3ba73c303d2a06504889d9902cccbce743 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/run_inference_experiment.log +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C1-encoder_classification-C1/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 21:52:07,639][__main__][INFO] - Starting inference experiment -[2025-05-25 21:52:07,640][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 17:56:34,454][__main__][INFO] - Starting inference experiment +[2025-06-28 17:56:34,455][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,9 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -35,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 21:52:07,642][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 21:52:14,356][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:52:14,358][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:56:34,469][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 17:56:40,835][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 17:56:40,837][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -62,20 +67,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:52:16,248][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt -[2025-05-25 21:52:16,248][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None -[2025-05-25 21:52:16,248][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json -[2025-05-25 21:52:16,248][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json -[2025-05-25 21:52:16,248][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at 
/tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json -[2025-05-25 21:52:16,248][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 21:52:16,248][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:52:16,249][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:56:43,266][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt +[2025-06-28 17:56:43,267][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None +[2025-06-28 17:56:43,267][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json +[2025-06-28 17:56:43,268][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json +[2025-06-28 17:56:43,268][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json +[2025-06-28 17:56:43,268][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 17:56:43,269][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 17:56:43,270][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -100,14 +105,14 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:52:16,277][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:52:16,278][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:56:43,294][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 17:56:43,295][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -132,16 +137,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:52:16,290][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 
21:52:16,498][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau-large-C1 -[2025-05-25 21:52:17,326][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C1/snapshots/3e737c8fdb77192423f85a28a47c007d664a9aab/config.json -[2025-05-25 21:52:17,327][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 17:56:43,308][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 17:56:43,534][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau-large-C1 +[2025-06-28 17:56:44,418][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C1/snapshots/3e737c8fdb77192423f85a28a47c007d664a9aab/config.json +[2025-06-28 17:56:44,419][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -184,27 +189,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:53:17,198][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C1/snapshots/3e737c8fdb77192423f85a28a47c007d664a9aab/model.safetensors -[2025-05-25 21:53:17,301][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 18:00:00,033][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C1/snapshots/3e737c8fdb77192423f85a28a47c007d664a9aab/model.safetensors +[2025-06-28 18:00:00,034][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 18:00:00,034][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 18:00:00,450][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 21:53:17,301][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau-large-C1. +[2025-06-28 18:00:00,450][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau-large-C1. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 21:53:17,346][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 21:53:17,382][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). 
-[2025-05-25 21:53:17,724][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 21:53:17,725][__main__][INFO] - Running inference on test dataset -[2025-05-25 21:53:17,726][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, reference, prompt, grades, essay_year, supporting_text, id, essay_text. If id_prompt, reference, prompt, grades, essay_year, supporting_text, id, essay_text are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 21:53:17,732][transformers.trainer][INFO] - +[2025-06-28 18:00:00,459][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 18:00:00,471][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 18:00:00,475][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. +[2025-06-28 18:00:00,490][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 18:00:04,206][__main__][INFO] - Running inference on test dataset +[2025-06-28 18:00:04,209][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id_prompt, id, grades, essay_year, essay_text, supporting_text, reference, prompt. If id_prompt, id, grades, essay_year, essay_text, supporting_text, reference, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. 
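
> **Note (illustrative, not part of the diff):** the run logs above and below all follow the same pattern: load a fine-tuned checkpoint from the Hub, tokenize the JBCS2025 test data with `max_length` padding and truncation, and call `Trainer.predict` with batch size 16. The repository's actual `run_inference_experiment.py` is not included in this diff, so the sketch below is an assumption about that flow; dataset config/split handling and how per-competency labels are derived from `grades` are guesses.

```python
# Minimal sketch of the inference step reflected in these logs (illustrative only).
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

model_id = "kamel-usp/jbcs2025_bertimbau-large-C1"  # one of the fine-tuned checkpoints above
tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir="/tmp/")
model = AutoModelForSequenceClassification.from_pretrained(model_id, cache_dir="/tmp/")

# The Hydra configs name the dataset kamel-usp/aes_enem_dataset with split: JBCS2025;
# whether JBCS2025 is a config name or a split is not visible here, so this is an assumption.
test_ds = load_dataset("kamel-usp/aes_enem_dataset", "JBCS2025", split="test")
test_ds = test_ds.map(
    lambda batch: tokenizer(batch["essay_text"], padding="max_length", truncation=True),
    batched=True,
)
# How the label column for each competency is built from `grades` is not shown in this diff.

args = TrainingArguments(output_dir="./results/", per_device_eval_batch_size=16, bf16=True)
trainer = Trainer(model=model, args=args)
predictions = trainer.predict(test_ds)  # unused text columns are dropped, as the log notes
print(predictions.predictions.shape)
```
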
+[2025-06-28 18:00:04,236][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 21:53:17,732][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 21:53:17,732][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 21:53:18,750][transformers][INFO] - {'accuracy': 0.7028985507246377, 'RMSE': 24.55399256179405, 'QWK': 0.7080553295362083, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.4714726209463051, 'Micro_F1': 0.7028985507246377, 'Weighted_F1': 0.7092465463174845, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(4), 'TN_2': np.int64(124), 'FP_2': np.int64(4), 'FN_2': np.int64(6), 'TP_3': np.int64(54), 'TN_3': np.int64(60), 'FP_3': np.int64(12), 'FN_3': np.int64(12), 'TP_4': np.int64(33), 'TN_4': np.int64(76), 'FP_4': np.int64(11), 'FN_4': np.int64(18), 'TP_5': np.int64(6), 'TN_5': np.int64(114), 'FP_5': np.int64(14), 'FN_5': np.int64(4)} -[2025-05-25 21:53:18,760][__main__][INFO] - Inference results saved to jbcs2025_bertimbau-large-C1-encoder_classification-C1_inference_results.jsonl -[2025-05-25 21:53:18,767][__main__][INFO] - Inference results: {'accuracy': 0.7028985507246377, 'RMSE': 24.55399256179405, 'QWK': 0.7080553295362083, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.4714726209463051, 'Micro_F1': 0.7028985507246377, 'Weighted_F1': 0.7092465463174845, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(4), 'TN_2': np.int64(124), 'FP_2': np.int64(4), 'FN_2': np.int64(6), 'TP_3': np.int64(54), 'TN_3': np.int64(60), 'FP_3': np.int64(12), 'FN_3': np.int64(12), 'TP_4': np.int64(33), 'TN_4': np.int64(76), 'FP_4': np.int64(11), 'FN_4': np.int64(18), 'TP_5': np.int64(6), 'TN_5': np.int64(114), 'FP_5': np.int64(14), 'FN_5': np.int64(4)} -[2025-05-25 21:53:18,768][__main__][INFO] - Inference experiment completed +[2025-06-28 18:00:04,236][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 18:00:04,237][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 18:00:14,326][__main__][INFO] - Inference results saved to jbcs2025_bertimbau-large-C1-encoder_classification-C1_inference_results.jsonl +[2025-06-28 18:00:14,330][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 18:01:02,893][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 18:01:02,893][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 18:01:02,894][__main__][INFO] - QWK: 0.7077 [0.6144, 0.7926] +[2025-06-28 18:01:02,894][__main__][INFO] - Macro_F1: 0.5086 [0.3864, 0.6645] +[2025-06-28 18:01:02,894][__main__][INFO] - Weighted_F1: 0.7098 [0.6316, 0.7863] +[2025-06-28 18:01:02,894][__main__][INFO] - Inference results: {'accuracy': 0.7028985507246377, 'RMSE': 24.55399256179405, 'QWK': 0.7080553295362083, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.4714726209463051, 'Micro_F1': 0.7028985507246377, 'Weighted_F1': 0.7092465463174845, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(138), 'FP_1': np.int64(0), 'FN_1': np.int64(0), 'TP_2': np.int64(4), 'TN_2': np.int64(124), 'FP_2': np.int64(4), 'FN_2': np.int64(6), 'TP_3': np.int64(54), 'TN_3': np.int64(60), 'FP_3': np.int64(12), 'FN_3': np.int64(12), 'TP_4': 
np.int64(33), 'TN_4': np.int64(76), 'FP_4': np.int64(11), 'FN_4': np.int64(18), 'TP_5': np.int64(6), 'TN_5': np.int64(114), 'FP_5': np.int64(14), 'FN_5': np.int64(4)} +[2025-06-28 18:01:02,894][__main__][INFO] - Inference experiment completed diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/config.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/config.yaml index 02d536fa8f7ffd0e76cb3440609706d415b390af..e59926444a89c64d671881d19d4a840e3d3c8871 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/config.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/config.yaml @@ -8,9 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/hydra.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/hydra.yaml index 6276b93f66b3405560795020df15238cf18c3e52..c2c7363c9b492ddc928adb77b21335dbb27553c6 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/hydra.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=large_models/C2 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=large_models/C2 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/21-53-51 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/18-01-07 choices: experiments: large_models/C2 hydra/env: default diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/overrides.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..2dc47bdbc510899274c52d5ae9f64d74e764d1cc 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/overrides.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=large_models/C2 diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/bootstrap_confidence_intervals.csv b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..190cfef7487d723ba0af5716e5d6a90da94bff22 --- /dev/null +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_bertimbau-large-C2-encoder_classification-C2,2025-06-28 18:01:07,0.421524005294385,0.2688782617235809,0.5651890498796338,0.2963107881560529,0.2837433836740023,0.21261906130152639,0.3709122659559081,0.1582932046543817,0.381775038356343,0.2966054262581051,0.4675676487920818,0.1709622225339767 diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/evaluation_results.csv b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/evaluation_results.csv index 39451429808dc5a809022742feb64ce076f1f9b4..1f3fd1bfbec40e4c5f50a07dc43f425462f11afe 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/evaluation_results.csv +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.39855072463768115,62.22912315078803,0.4242242542347474,0.13043478260869568,0.2673521459388278,0.39855072463768115,0.38254856956135197,0,137,0,1,21,70,33,14,0,129,4,5,12,83,4,39,16,78,34,10,6,110,8,14,2025-05-25 21:53:51,jbcs2025_bertimbau-large-C2-encoder_classification-C2 +0.39855072463768115,62.22912315078803,0.4242242542347474,0.13043478260869568,0.2673521459388278,0.39855072463768115,0.38254856956135197,0,137,0,1,21,70,33,14,0,129,4,5,12,83,4,39,16,78,34,10,6,110,8,14,2025-06-28 18:01:07,jbcs2025_bertimbau-large-C2-encoder_classification-C2 diff --git 
a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/run_inference_experiment.log b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/run_inference_experiment.log index dbc72705758b611f4f53035a0d8935364eb833f2..4f89dcae5758accd034d17d524dc731d100eeba9 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/run_inference_experiment.log +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C2-encoder_classification-C2/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 21:53:51,910][__main__][INFO] - Starting inference experiment -[2025-05-25 21:53:51,912][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 18:01:07,497][__main__][INFO] - Starting inference experiment +[2025-06-28 18:01:07,499][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,9 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true -inference: - seed: 42 - batch_size: 16 +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -35,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 21:53:51,913][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 21:53:57,149][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:53:57,150][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:01:07,512][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 18:01:13,306][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:01:13,310][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -62,20 +67,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:53:57,310][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt -[2025-05-25 21:53:57,310][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None -[2025-05-25 21:53:57,310][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json -[2025-05-25 21:53:57,310][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json -[2025-05-25 21:53:57,310][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at 
/tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json -[2025-05-25 21:53:57,311][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 21:53:57,311][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:53:57,312][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:01:13,313][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt +[2025-06-28 18:01:13,313][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None +[2025-06-28 18:01:13,314][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json +[2025-06-28 18:01:13,314][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json +[2025-06-28 18:01:13,315][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json +[2025-06-28 18:01:13,315][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 18:01:13,317][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:01:13,319][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -100,14 +105,14 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:53:57,335][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:53:57,336][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:01:13,370][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:01:13,370][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -132,16 +137,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:53:57,348][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 
21:53:57,547][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau-large-C2 -[2025-05-25 21:53:58,140][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C2/snapshots/620fd996ab1895fef2aa7b5f2281eebcbe48864d/config.json -[2025-05-25 21:53:58,141][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:01:13,384][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 18:01:13,612][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau-large-C2 +[2025-06-28 18:01:14,410][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C2/snapshots/620fd996ab1895fef2aa7b5f2281eebcbe48864d/config.json +[2025-06-28 18:01:14,411][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -184,27 +189,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:54:20,532][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C2/snapshots/620fd996ab1895fef2aa7b5f2281eebcbe48864d/model.safetensors -[2025-05-25 21:54:20,630][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 18:03:11,972][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C2/snapshots/620fd996ab1895fef2aa7b5f2281eebcbe48864d/model.safetensors +[2025-06-28 18:03:11,975][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 18:03:11,975][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 18:03:12,417][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 21:54:20,631][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau-large-C2. +[2025-06-28 18:03:12,417][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau-large-C2. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 21:54:20,674][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 21:54:20,710][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). 
-[2025-05-25 21:54:21,041][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 21:54:21,042][__main__][INFO] - Running inference on test dataset -[2025-05-25 21:54:21,042][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: id, id_prompt, essay_text, essay_year, grades, prompt, reference, supporting_text. If id, id_prompt, essay_text, essay_year, grades, prompt, reference, supporting_text are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 21:54:21,048][transformers.trainer][INFO] - +[2025-06-28 18:03:12,426][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 18:03:12,438][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 18:03:12,441][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. +[2025-06-28 18:03:12,457][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 18:03:16,160][__main__][INFO] - Running inference on test dataset +[2025-06-28 18:03:16,163][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: supporting_text, reference, essay_year, grades, id, id_prompt, essay_text, prompt. If supporting_text, reference, essay_year, grades, id, id_prompt, essay_text, prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. 
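
> **Note (illustrative, not part of the diff):** each run now also logs 95% bootstrap confidence intervals for QWK, Macro_F1 and Weighted_F1 (10,000 resamples, seed 42) and writes them to `bootstrap_confidence_intervals.csv`. A minimal percentile-bootstrap sketch consistent with those settings is shown below; the repository's own bootstrap code is not part of this diff, so the function name and column handling are assumptions.

```python
# Illustrative percentile bootstrap over per-essay predictions (not the repo's actual code).
import numpy as np
from sklearn.metrics import cohen_kappa_score, f1_score

def bootstrap_ci(y_true, y_pred, n_bootstrap=10_000, seed=42, alpha=0.05):
    rng = np.random.default_rng(seed)
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    stats = {"QWK": [], "Macro_F1": [], "Weighted_F1": []}
    for _ in range(n_bootstrap):
        idx = rng.integers(0, len(y_true), len(y_true))  # resample essays with replacement
        t, p = y_true[idx], y_pred[idx]
        stats["QWK"].append(cohen_kappa_score(t, p, weights="quadratic"))
        stats["Macro_F1"].append(f1_score(t, p, average="macro", zero_division=0))
        stats["Weighted_F1"].append(f1_score(t, p, average="weighted", zero_division=0))
    out = {}
    for name, values in stats.items():
        lo, hi = np.percentile(values, [100 * alpha / 2, 100 * (1 - alpha / 2)])
        out[name] = (float(np.mean(values)), float(lo), float(hi))
    return out  # e.g. {"QWK": (mean, lower_95ci, upper_95ci), ...}
```

The mean and interval bounds returned this way correspond to the `*_mean`, `*_lower_95ci` and `*_upper_95ci` columns in the `bootstrap_confidence_intervals.csv` files added in this diff.
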
+[2025-06-28 18:03:16,189][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 21:54:21,048][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 21:54:21,048][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 21:54:22,056][transformers][INFO] - {'accuracy': 0.39855072463768115, 'RMSE': 62.22912315078803, 'QWK': 0.4242242542347474, 'HDIV': 0.13043478260869568, 'Macro_F1': 0.2673521459388278, 'Micro_F1': 0.39855072463768115, 'Weighted_F1': 0.38254856956135197, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(21), 'TN_1': np.int64(70), 'FP_1': np.int64(33), 'FN_1': np.int64(14), 'TP_2': np.int64(0), 'TN_2': np.int64(129), 'FP_2': np.int64(4), 'FN_2': np.int64(5), 'TP_3': np.int64(12), 'TN_3': np.int64(83), 'FP_3': np.int64(4), 'FN_3': np.int64(39), 'TP_4': np.int64(16), 'TN_4': np.int64(78), 'FP_4': np.int64(34), 'FN_4': np.int64(10), 'TP_5': np.int64(6), 'TN_5': np.int64(110), 'FP_5': np.int64(8), 'FN_5': np.int64(14)} -[2025-05-25 21:54:22,067][__main__][INFO] - Inference results saved to jbcs2025_bertimbau-large-C2-encoder_classification-C2_inference_results.jsonl -[2025-05-25 21:54:22,072][__main__][INFO] - Inference results: {'accuracy': 0.39855072463768115, 'RMSE': 62.22912315078803, 'QWK': 0.4242242542347474, 'HDIV': 0.13043478260869568, 'Macro_F1': 0.2673521459388278, 'Micro_F1': 0.39855072463768115, 'Weighted_F1': 0.38254856956135197, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(21), 'TN_1': np.int64(70), 'FP_1': np.int64(33), 'FN_1': np.int64(14), 'TP_2': np.int64(0), 'TN_2': np.int64(129), 'FP_2': np.int64(4), 'FN_2': np.int64(5), 'TP_3': np.int64(12), 'TN_3': np.int64(83), 'FP_3': np.int64(4), 'FN_3': np.int64(39), 'TP_4': np.int64(16), 'TN_4': np.int64(78), 'FP_4': np.int64(34), 'FN_4': np.int64(10), 'TP_5': np.int64(6), 'TN_5': np.int64(110), 'FP_5': np.int64(8), 'FN_5': np.int64(14)} -[2025-05-25 21:54:22,072][__main__][INFO] - Inference experiment completed +[2025-06-28 18:03:16,190][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 18:03:16,190][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 18:03:26,270][__main__][INFO] - Inference results saved to jbcs2025_bertimbau-large-C2-encoder_classification-C2_inference_results.jsonl +[2025-06-28 18:03:26,273][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 18:04:16,232][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 18:04:16,232][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 18:04:16,232][__main__][INFO] - QWK: 0.4215 [0.2689, 0.5652] +[2025-06-28 18:04:16,232][__main__][INFO] - Macro_F1: 0.2837 [0.2126, 0.3709] +[2025-06-28 18:04:16,233][__main__][INFO] - Weighted_F1: 0.3818 [0.2966, 0.4676] +[2025-06-28 18:04:16,233][__main__][INFO] - Inference results: {'accuracy': 0.39855072463768115, 'RMSE': 62.22912315078803, 'QWK': 0.4242242542347474, 'HDIV': 0.13043478260869568, 'Macro_F1': 0.2673521459388278, 'Micro_F1': 0.39855072463768115, 'Weighted_F1': 0.38254856956135197, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(21), 'TN_1': np.int64(70), 'FP_1': np.int64(33), 'FN_1': np.int64(14), 'TP_2': np.int64(0), 'TN_2': np.int64(129), 'FP_2': np.int64(4), 'FN_2': np.int64(5), 'TP_3': np.int64(12), 'TN_3': np.int64(83), 'FP_3': np.int64(4), 'FN_3': np.int64(39), 'TP_4': 
np.int64(16), 'TN_4': np.int64(78), 'FP_4': np.int64(34), 'FN_4': np.int64(10), 'TP_5': np.int64(6), 'TN_5': np.int64(110), 'FP_5': np.int64(8), 'FN_5': np.int64(14)} +[2025-06-28 18:04:16,233][__main__][INFO] - Inference experiment completed diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/config.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/config.yaml index db412cf817d2964b52a2486bfecca3916089a156..5bf7871b219f1ffc1ac2baeea37f420c03b2f2dc 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/config.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/config.yaml @@ -8,6 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/hydra.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/hydra.yaml index 46b169c8c06cef611d77934c6c1ae30597944ea9..f0f9e9be42c9032c2197314737bf3581b92e4dc2 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/hydra.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=large_models/C3 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=large_models/C3 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/21-59-09 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/18-04-21 choices: experiments: large_models/C3 hydra/env: default diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/overrides.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..a4d8bab68538cc9c8dedae10990ec837a438389a 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/overrides.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=large_models/C3 diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/bootstrap_confidence_intervals.csv b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..ea9df342fc004d9122e8b21ccec2a3bad3fcd906 --- /dev/null +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_bertimbau-large-C3-encoder_classification-C3,2025-06-28 18:04:21,0.26675529638544276,0.13086754944731044,0.3967413784980497,0.26587382905073925,0.20553535282282295,0.13922610890459958,0.29020915603986924,0.15098304713526967,0.2492329798786624,0.17501884398554457,0.32721551607766175,0.15219667209211718 diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/evaluation_results.csv b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/evaluation_results.csv index 1b6200fc424d1441de4bf73afa30f7d200c34885..b0db83e473458a0a83de94eccac0bc626ecfd4e7 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/evaluation_results.csv +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.2898550724637681,51.07539184552491,0.26937738246505727,0.021739130434782594,0.19411606228274925,0.2898550724637681,0.24825898925023224,0,137,0,1,1,108,1,28,13,87,33,5,19,44,49,26,6,94,6,32,1,122,9,6,2025-05-25 21:59:09,jbcs2025_bertimbau-large-C3-encoder_classification-C3 +0.2898550724637681,51.07539184552491,0.26937738246505727,0.021739130434782594,0.19411606228274925,0.2898550724637681,0.24825898925023224,0,137,0,1,1,108,1,28,13,87,33,5,19,44,49,26,6,94,6,32,1,122,9,6,2025-06-28 18:04:21,jbcs2025_bertimbau-large-C3-encoder_classification-C3 diff --git 
a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/run_inference_experiment.log b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/run_inference_experiment.log index 46e6449b3543ee594d25ed201aa04ca91c34f7c9..f960557f5a0bda0599d0d0e8db75b4e6de5212b1 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/run_inference_experiment.log +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C3-encoder_classification-C3/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 21:59:09,447][__main__][INFO] - Starting inference experiment -[2025-05-25 21:59:09,449][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 18:04:21,034][__main__][INFO] - Starting inference experiment +[2025-06-28 18:04:21,036][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,6 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -32,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 21:59:09,450][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 21:59:14,108][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:59:14,110][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:04:21,050][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 18:04:26,508][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:04:26,512][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -59,20 +67,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:59:14,294][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt -[2025-05-25 21:59:14,294][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None -[2025-05-25 21:59:14,294][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json -[2025-05-25 21:59:14,294][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json -[2025-05-25 21:59:14,294][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json 
-[2025-05-25 21:59:14,294][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 21:59:14,295][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:59:14,296][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:04:26,514][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt +[2025-06-28 18:04:26,514][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None +[2025-06-28 18:04:26,514][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json +[2025-06-28 18:04:26,515][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json +[2025-06-28 18:04:26,515][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json +[2025-06-28 18:04:26,515][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 18:04:26,516][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:04:26,519][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -97,14 +105,14 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:59:14,320][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:59:14,320][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:04:26,572][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:04:26,572][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -129,16 +137,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:59:14,332][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 21:59:14,391][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau-large-C3 -[2025-05-25 
21:59:14,558][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C3/snapshots/fc8d63cfeeb43af3963a4c9550e6c3c2e5276adf/config.json -[2025-05-25 21:59:14,559][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:04:26,586][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 18:04:26,818][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau-large-C3 +[2025-06-28 18:04:27,657][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C3/snapshots/fc8d63cfeeb43af3963a4c9550e6c3c2e5276adf/config.json +[2025-06-28 18:04:27,660][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -181,27 +189,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:59:14,705][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C3/snapshots/fc8d63cfeeb43af3963a4c9550e6c3c2e5276adf/model.safetensors -[2025-05-25 21:59:14,800][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 18:06:25,024][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C3/snapshots/fc8d63cfeeb43af3963a4c9550e6c3c2e5276adf/model.safetensors +[2025-06-28 18:06:25,027][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 18:06:25,027][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 18:06:25,488][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 21:59:14,800][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau-large-C3. +[2025-06-28 18:06:25,488][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau-large-C3. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 21:59:14,842][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 21:59:14,878][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). 
-[2025-05-25 21:59:15,206][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 21:59:15,207][__main__][INFO] - Running inference on test dataset -[2025-05-25 21:59:15,208][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: supporting_text, essay_year, prompt, essay_text, id, grades, reference, id_prompt. If supporting_text, essay_year, prompt, essay_text, id, grades, reference, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 21:59:15,213][transformers.trainer][INFO] - +[2025-06-28 18:06:25,496][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 18:06:25,509][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 18:06:25,512][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. +[2025-06-28 18:06:25,527][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 18:06:29,455][__main__][INFO] - Running inference on test dataset +[2025-06-28 18:06:29,459][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: reference, supporting_text, id, essay_text, id_prompt, prompt, essay_year, grades. If reference, supporting_text, id, essay_text, id_prompt, prompt, essay_year, grades are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. 
+[2025-06-28 18:06:29,485][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 21:59:15,213][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 21:59:15,213][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 21:59:16,223][transformers][INFO] - {'accuracy': 0.2898550724637681, 'RMSE': 51.07539184552491, 'QWK': 0.26937738246505727, 'HDIV': 0.021739130434782594, 'Macro_F1': 0.19411606228274925, 'Micro_F1': 0.2898550724637681, 'Weighted_F1': 0.24825898925023224, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(1), 'TN_1': np.int64(108), 'FP_1': np.int64(1), 'FN_1': np.int64(28), 'TP_2': np.int64(13), 'TN_2': np.int64(87), 'FP_2': np.int64(33), 'FN_2': np.int64(5), 'TP_3': np.int64(19), 'TN_3': np.int64(44), 'FP_3': np.int64(49), 'FN_3': np.int64(26), 'TP_4': np.int64(6), 'TN_4': np.int64(94), 'FP_4': np.int64(6), 'FN_4': np.int64(32), 'TP_5': np.int64(1), 'TN_5': np.int64(122), 'FP_5': np.int64(9), 'FN_5': np.int64(6)} -[2025-05-25 21:59:16,233][__main__][INFO] - Inference results saved to jbcs2025_bertimbau-large-C3-encoder_classification-C3_inference_results.jsonl -[2025-05-25 21:59:16,238][__main__][INFO] - Inference results: {'accuracy': 0.2898550724637681, 'RMSE': 51.07539184552491, 'QWK': 0.26937738246505727, 'HDIV': 0.021739130434782594, 'Macro_F1': 0.19411606228274925, 'Micro_F1': 0.2898550724637681, 'Weighted_F1': 0.24825898925023224, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(1), 'TN_1': np.int64(108), 'FP_1': np.int64(1), 'FN_1': np.int64(28), 'TP_2': np.int64(13), 'TN_2': np.int64(87), 'FP_2': np.int64(33), 'FN_2': np.int64(5), 'TP_3': np.int64(19), 'TN_3': np.int64(44), 'FP_3': np.int64(49), 'FN_3': np.int64(26), 'TP_4': np.int64(6), 'TN_4': np.int64(94), 'FP_4': np.int64(6), 'FN_4': np.int64(32), 'TP_5': np.int64(1), 'TN_5': np.int64(122), 'FP_5': np.int64(9), 'FN_5': np.int64(6)} -[2025-05-25 21:59:16,238][__main__][INFO] - Inference experiment completed +[2025-06-28 18:06:29,485][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 18:06:29,486][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 18:06:39,586][__main__][INFO] - Inference results saved to jbcs2025_bertimbau-large-C3-encoder_classification-C3_inference_results.jsonl +[2025-06-28 18:06:39,589][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 18:07:30,464][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 18:07:30,464][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 18:07:30,464][__main__][INFO] - QWK: 0.2668 [0.1309, 0.3967] +[2025-06-28 18:07:30,464][__main__][INFO] - Macro_F1: 0.2055 [0.1392, 0.2902] +[2025-06-28 18:07:30,464][__main__][INFO] - Weighted_F1: 0.2492 [0.1750, 0.3272] +[2025-06-28 18:07:30,465][__main__][INFO] - Inference results: {'accuracy': 0.2898550724637681, 'RMSE': 51.07539184552491, 'QWK': 0.26937738246505727, 'HDIV': 0.021739130434782594, 'Macro_F1': 0.19411606228274925, 'Micro_F1': 0.2898550724637681, 'Weighted_F1': 0.24825898925023224, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(1), 'TN_1': np.int64(108), 'FP_1': np.int64(1), 'FN_1': np.int64(28), 'TP_2': np.int64(13), 'TN_2': np.int64(87), 'FP_2': np.int64(33), 'FN_2': np.int64(5), 'TP_3': np.int64(19), 'TN_3': np.int64(44), 'FP_3': np.int64(49), 'FN_3': np.int64(26), 'TP_4': 
np.int64(6), 'TN_4': np.int64(94), 'FP_4': np.int64(6), 'FN_4': np.int64(32), 'TP_5': np.int64(1), 'TN_5': np.int64(122), 'FP_5': np.int64(9), 'FN_5': np.int64(6)} +[2025-06-28 18:07:30,465][__main__][INFO] - Inference experiment completed diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/config.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/config.yaml index b0acb61687d1dc884114050897f3d7485c16140f..8afb59a5efb679f49a347c8f40fdcabd56b2b349 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/config.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/config.yaml @@ -8,6 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/hydra.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/hydra.yaml index dc484dc00a5c607b1eef4a68731682f15f5ee8b4..65bdd82c527bd365725cd0862fe4b2ec61813f94 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/hydra.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=large_models/C4 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=large_models/C4 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/21-59-43 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/18-07-35 choices: experiments: large_models/C4 hydra/env: default diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/overrides.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..fcc3cc0c6c0c0a2ddd96c50c1e96c66bed9a2b6a 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/overrides.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=large_models/C4 diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/bootstrap_confidence_intervals.csv b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..c8f38195fa2510f8b3500be65e383876a8baf998 --- /dev/null +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_bertimbau-large-C4-encoder_classification-C4,2025-06-28 18:07:35,0.5689578701688437,0.46148695261109535,0.6680998143802181,0.2066128617691228,0.3225465526202049,0.23384768292840824,0.4348878912682667,0.20104020833985847,0.5689840307021066,0.4868628663380702,0.6499047041941646,0.16304183785609438 diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/evaluation_results.csv b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/evaluation_results.csv index 569defe65c0fac79a86580889147801c4912d30f..f1d0806ff6a1f2cdda6f4f8c78eca83592c0f157 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/evaluation_results.csv +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.5434782608695652,30.45547950507524,0.5718939041414612,0.007246376811594235,0.30149864910503205,0.5434782608695652,0.5677143444488495,0,137,0,1,0,135,2,1,6,109,20,3,40,50,12,36,27,71,21,19,2,125,8,3,2025-05-25 21:59:43,jbcs2025_bertimbau-large-C4-encoder_classification-C4 +0.5434782608695652,30.45547950507524,0.5718939041414612,0.007246376811594235,0.30149864910503205,0.5434782608695652,0.5677143444488495,0,137,0,1,0,135,2,1,6,109,20,3,40,50,12,36,27,71,21,19,2,125,8,3,2025-06-28 18:07:35,jbcs2025_bertimbau-large-C4-encoder_classification-C4 diff --git 
a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/run_inference_experiment.log b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/run_inference_experiment.log index d2230adfe49f8518204f047ec92dfa416802f0bb..ab103154af40efe66bbe16611057c06bac55745f 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/run_inference_experiment.log +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C4-encoder_classification-C4/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 21:59:43,967][__main__][INFO] - Starting inference experiment -[2025-05-25 21:59:43,969][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 18:07:35,253][__main__][INFO] - Starting inference experiment +[2025-06-28 18:07:35,254][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,6 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -32,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 21:59:43,970][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 21:59:48,433][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:59:48,435][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:07:35,268][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 18:07:40,511][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:07:40,515][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -59,20 +67,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:59:48,615][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt -[2025-05-25 21:59:48,615][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None -[2025-05-25 21:59:48,615][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json -[2025-05-25 21:59:48,615][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json -[2025-05-25 21:59:48,615][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json 
-[2025-05-25 21:59:48,615][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 21:59:48,616][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:59:48,617][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:07:40,517][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt +[2025-06-28 18:07:40,518][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None +[2025-06-28 18:07:40,518][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json +[2025-06-28 18:07:40,518][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json +[2025-06-28 18:07:40,518][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json +[2025-06-28 18:07:40,518][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 18:07:40,519][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:07:40,522][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -97,14 +105,14 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:59:48,640][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 21:59:48,641][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:07:40,576][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:07:40,577][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -129,16 +137,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 21:59:48,654][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 21:59:48,878][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau-large-C4 -[2025-05-25 
21:59:49,404][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C4/snapshots/e9e2cf7e79031197ee29e0150459a3788e5e8d38/config.json -[2025-05-25 21:59:49,405][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:07:40,590][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 18:07:40,821][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau-large-C4 +[2025-06-28 18:07:41,667][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C4/snapshots/e9e2cf7e79031197ee29e0150459a3788e5e8d38/config.json +[2025-06-28 18:07:41,668][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -181,27 +189,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:00:43,388][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C4/snapshots/e9e2cf7e79031197ee29e0150459a3788e5e8d38/model.safetensors -[2025-05-25 22:00:43,484][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 18:09:39,673][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C4/snapshots/e9e2cf7e79031197ee29e0150459a3788e5e8d38/model.safetensors +[2025-06-28 18:09:39,675][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 18:09:39,676][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 18:09:40,130][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 22:00:43,484][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau-large-C4. +[2025-06-28 18:09:40,130][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau-large-C4. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 22:00:43,526][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 22:00:43,560][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). 
-[2025-05-25 22:00:43,892][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 22:00:43,892][__main__][INFO] - Running inference on test dataset -[2025-05-25 22:00:43,893][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: reference, prompt, id_prompt, supporting_text, id, grades, essay_year, essay_text. If reference, prompt, id_prompt, supporting_text, id, grades, essay_year, essay_text are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 22:00:43,899][transformers.trainer][INFO] - +[2025-06-28 18:09:40,138][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 18:09:40,151][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 18:09:40,154][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. +[2025-06-28 18:09:40,171][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 18:09:43,861][__main__][INFO] - Running inference on test dataset +[2025-06-28 18:09:43,864][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: reference, id, id_prompt, prompt, essay_text, essay_year, grades, supporting_text. If reference, id, id_prompt, prompt, essay_text, essay_year, grades, supporting_text are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. 
+[2025-06-28 18:09:43,891][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 22:00:43,899][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 22:00:43,899][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 22:00:44,923][transformers][INFO] - {'accuracy': 0.5434782608695652, 'RMSE': 30.45547950507524, 'QWK': 0.5718939041414612, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.30149864910503205, 'Micro_F1': 0.5434782608695652, 'Weighted_F1': 0.5677143444488495, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(135), 'FP_1': np.int64(2), 'FN_1': np.int64(1), 'TP_2': np.int64(6), 'TN_2': np.int64(109), 'FP_2': np.int64(20), 'FN_2': np.int64(3), 'TP_3': np.int64(40), 'TN_3': np.int64(50), 'FP_3': np.int64(12), 'FN_3': np.int64(36), 'TP_4': np.int64(27), 'TN_4': np.int64(71), 'FP_4': np.int64(21), 'FN_4': np.int64(19), 'TP_5': np.int64(2), 'TN_5': np.int64(125), 'FP_5': np.int64(8), 'FN_5': np.int64(3)} -[2025-05-25 22:00:44,940][__main__][INFO] - Inference results saved to jbcs2025_bertimbau-large-C4-encoder_classification-C4_inference_results.jsonl -[2025-05-25 22:00:44,946][__main__][INFO] - Inference results: {'accuracy': 0.5434782608695652, 'RMSE': 30.45547950507524, 'QWK': 0.5718939041414612, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.30149864910503205, 'Micro_F1': 0.5434782608695652, 'Weighted_F1': 0.5677143444488495, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(135), 'FP_1': np.int64(2), 'FN_1': np.int64(1), 'TP_2': np.int64(6), 'TN_2': np.int64(109), 'FP_2': np.int64(20), 'FN_2': np.int64(3), 'TP_3': np.int64(40), 'TN_3': np.int64(50), 'FP_3': np.int64(12), 'FN_3': np.int64(36), 'TP_4': np.int64(27), 'TN_4': np.int64(71), 'FP_4': np.int64(21), 'FN_4': np.int64(19), 'TP_5': np.int64(2), 'TN_5': np.int64(125), 'FP_5': np.int64(8), 'FN_5': np.int64(3)} -[2025-05-25 22:00:44,946][__main__][INFO] - Inference experiment completed +[2025-06-28 18:09:43,891][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 18:09:43,891][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 18:09:53,983][__main__][INFO] - Inference results saved to jbcs2025_bertimbau-large-C4-encoder_classification-C4_inference_results.jsonl +[2025-06-28 18:09:53,986][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 18:10:44,910][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 18:10:44,910][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 18:10:44,910][__main__][INFO] - QWK: 0.5690 [0.4615, 0.6681] +[2025-06-28 18:10:44,910][__main__][INFO] - Macro_F1: 0.3225 [0.2338, 0.4349] +[2025-06-28 18:10:44,910][__main__][INFO] - Weighted_F1: 0.5690 [0.4869, 0.6499] +[2025-06-28 18:10:44,910][__main__][INFO] - Inference results: {'accuracy': 0.5434782608695652, 'RMSE': 30.45547950507524, 'QWK': 0.5718939041414612, 'HDIV': 0.007246376811594235, 'Macro_F1': 0.30149864910503205, 'Micro_F1': 0.5434782608695652, 'Weighted_F1': 0.5677143444488495, 'TP_0': np.int64(0), 'TN_0': np.int64(137), 'FP_0': np.int64(0), 'FN_0': np.int64(1), 'TP_1': np.int64(0), 'TN_1': np.int64(135), 'FP_1': np.int64(2), 'FN_1': np.int64(1), 'TP_2': np.int64(6), 'TN_2': np.int64(109), 'FP_2': np.int64(20), 'FN_2': np.int64(3), 'TP_3': np.int64(40), 'TN_3': np.int64(50), 'FP_3': np.int64(12), 'FN_3': np.int64(36), 'TP_4': 
np.int64(27), 'TN_4': np.int64(71), 'FP_4': np.int64(21), 'FN_4': np.int64(19), 'TP_5': np.int64(2), 'TN_5': np.int64(125), 'FP_5': np.int64(8), 'FN_5': np.int64(3)} +[2025-06-28 18:10:44,911][__main__][INFO] - Inference experiment completed diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/config.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/config.yaml index 2b6e2b776efc04fc88ed66dc7b5b2588fbd60a03..2bc3241383a9814f2c1c115c8dac8e364522722a 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/config.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/config.yaml @@ -8,6 +8,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/hydra.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/hydra.yaml index e7ef2d2522291a55a6e781a5ff29b75fdec68a4a..f9de5d8a0735828c4fb9664727f49977627aca0a 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/hydra.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/hydra.yaml @@ -111,11 +111,12 @@ hydra: overrides: hydra: - hydra.mode=RUN - task: [] + task: + - experiments=large_models/C5 job: name: run_inference_experiment chdir: null - override_dirname: '' + override_dirname: experiments=large_models/C5 id: ??? num: ??? 
config_name: config @@ -129,18 +130,18 @@ hydra: runtime: version: 1.3.2 version_base: '1.1' - cwd: /workspace/jbcs2025 + cwd: /home/andrebarbosa/jbcs2025 config_sources: - path: hydra.conf schema: pkg provider: hydra - - path: /workspace/jbcs2025/configs + - path: /home/andrebarbosa/jbcs2025/configs schema: file provider: main - path: '' schema: structured provider: schema - output_dir: /workspace/jbcs2025/outputs/2025-05-25/22-02-54 + output_dir: /home/andrebarbosa/jbcs2025/outputs/2025-06-28/18-10-49 choices: experiments: large_models/C5 hydra/env: default diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/overrides.yaml b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/overrides.yaml index fe51488c7066f6687ef680d6bfaa4f7768ef205c..c702f58f6c135ab8ad8f516e85a1cd87865677b3 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/overrides.yaml +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/.hydra/overrides.yaml @@ -1 +1 @@ -[] +- experiments=large_models/C5 diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/bootstrap_confidence_intervals.csv b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/bootstrap_confidence_intervals.csv new file mode 100644 index 0000000000000000000000000000000000000000..387a36e230dacecbb2373ba20d5f1359a18f6fb2 --- /dev/null +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/bootstrap_confidence_intervals.csv @@ -0,0 +1,2 @@ +experiment_id,timestamp,QWK_mean,QWK_lower_95ci,QWK_upper_95ci,QWK_ci_width,Macro_F1_mean,Macro_F1_lower_95ci,Macro_F1_upper_95ci,Macro_F1_ci_width,Weighted_F1_mean,Weighted_F1_lower_95ci,Weighted_F1_upper_95ci,Weighted_F1_ci_width +jbcs2025_bertimbau-large-C5-encoder_classification-C5,2025-06-28 18:10:49,0.4765829843065011,0.34979869197784125,0.5987775413884535,0.24897884941061227,0.3185511976163596,0.23313502944056644,0.4147475443192737,0.18161251487870725,0.3518695561256887,0.2709850141597966,0.4369107158200968,0.16592570166030018 diff --git a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/evaluation_results.csv b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/evaluation_results.csv index bbfe73deac3d2762de872a29c40d8fe394a2291e..f2ff58de8643fc3ac8e40e0f81c8368dd84af6ce 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/evaluation_results.csv +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/evaluation_results.csv @@ -1,2 +1,2 @@ accuracy,RMSE,QWK,HDIV,Macro_F1,Micro_F1,Weighted_F1,TP_0,TN_0,FP_0,FN_0,TP_1,TN_1,FP_1,FN_1,TP_2,TN_2,FP_2,FN_2,TP_3,TN_3,FP_3,FN_3,TP_4,TN_4,FP_4,FN_4,TP_5,TN_5,FP_5,FN_5,timestamp,id -0.36231884057971014,61.10100926607787,0.4785241515279538,0.14492753623188404,0.3255241258616379,0.36231884057971014,0.3520852841017614,6,112,4,16,10,88,18,22,4,108,6,20,8,85,28,17,21,79,27,11,1,130,5,2,2025-05-25 22:02:54,jbcs2025_bertimbau-large-C5-encoder_classification-C5 +0.36231884057971014,61.10100926607787,0.4785241515279538,0.14492753623188404,0.3255241258616379,0.36231884057971014,0.3520852841017614,6,112,4,16,10,88,18,22,4,108,6,20,8,85,28,17,21,79,27,11,1,130,5,2,2025-06-28 18:10:49,jbcs2025_bertimbau-large-C5-encoder_classification-C5 diff --git 
a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/run_inference_experiment.log b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/run_inference_experiment.log index b31a88c82e8f6981f8e86fd0b95dce3d448c6e22..d3a8a05a54d258daefbfc3de62bb41c508e21187 100644 --- a/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/run_inference_experiment.log +++ b/runs/large_models/bertimbau/jbcs2025_bertimbau-large-C5-encoder_classification-C5/run_inference_experiment.log @@ -1,5 +1,5 @@ -[2025-05-25 22:02:54,277][__main__][INFO] - Starting inference experiment -[2025-05-25 22:02:54,278][__main__][INFO] - cache_dir: /tmp/ +[2025-06-28 18:10:49,710][__main__][INFO] - Starting inference experiment +[2025-06-28 18:10:49,712][__main__][INFO] - cache_dir: /tmp/ dataset: name: kamel-usp/aes_enem_dataset split: JBCS2025 @@ -9,6 +9,14 @@ training_params: logging_steps: 100 metric_for_best_model: QWK bf16: true +bootstrap: + enabled: true + n_bootstrap: 10000 + bootstrap_seed: 42 + metrics: + - QWK + - Macro_F1 + - Weighted_F1 post_training_results: model_path: /workspace/jbcs2025/outputs/2025-03-24/20-42-59 experiments: @@ -32,9 +40,9 @@ experiments: gradient_accumulation_steps: 1 gradient_checkpointing: false -[2025-05-25 22:02:54,280][__main__][INFO] - Running inference with fine-tuned HF model -[2025-05-25 22:02:58,561][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 22:02:58,562][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:10:49,725][__main__][INFO] - Running inference with fine-tuned HF model +[2025-06-28 18:10:55,213][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:10:55,217][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -59,20 +67,20 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:02:58,760][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt -[2025-05-25 22:02:58,760][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None -[2025-05-25 22:02:58,761][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json -[2025-05-25 22:02:58,761][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json -[2025-05-25 22:02:58,761][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json 
-[2025-05-25 22:02:58,761][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None -[2025-05-25 22:02:58,761][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 22:02:58,761][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:10:55,220][transformers.tokenization_utils_base][INFO] - loading file vocab.txt from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/vocab.txt +[2025-06-28 18:10:55,220][transformers.tokenization_utils_base][INFO] - loading file tokenizer.json from cache at None +[2025-06-28 18:10:55,220][transformers.tokenization_utils_base][INFO] - loading file added_tokens.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/added_tokens.json +[2025-06-28 18:10:55,220][transformers.tokenization_utils_base][INFO] - loading file special_tokens_map.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/special_tokens_map.json +[2025-06-28 18:10:55,221][transformers.tokenization_utils_base][INFO] - loading file tokenizer_config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/tokenizer_config.json +[2025-06-28 18:10:55,221][transformers.tokenization_utils_base][INFO] - loading file chat_template.jinja from cache at None +[2025-06-28 18:10:55,222][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:10:55,224][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -97,14 +105,14 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:02:58,782][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json -[2025-05-25 22:02:58,782][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:10:55,275][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--neuralmind--bert-large-portuguese-cased/snapshots/aa302f6ea73b759f7df9cad58bd272127b67ec28/config.json +[2025-06-28 18:10:55,276][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForMaskedLM" ], @@ -129,16 +137,16 @@ experiments: "pooler_size_per_head": 128, "pooler_type": "first_token_transform", "position_embedding_type": "absolute", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:02:58,794][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True -[2025-05-25 22:02:59,003][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau-large-C5 -[2025-05-25 
22:03:00,035][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C5/snapshots/0c5e7dc4b09aa42297d3fcb051dbd7433b740d47/config.json -[2025-05-25 22:03:00,036][transformers.configuration_utils][INFO] - Model config BertConfig { +[2025-06-28 18:10:55,289][__main__][INFO] - Tokenizer function parameters- Padding:max_length; Truncation: True +[2025-06-28 18:10:55,524][__main__][INFO] - Loading model from: kamel-usp/jbcs2025_bertimbau-large-C5 +[2025-06-28 18:10:56,405][transformers.configuration_utils][INFO] - loading configuration file config.json from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C5/snapshots/0c5e7dc4b09aa42297d3fcb051dbd7433b740d47/config.json +[2025-06-28 18:10:56,406][transformers.configuration_utils][INFO] - Model config BertConfig { "architectures": [ "BertForSequenceClassification" ], @@ -181,27 +189,35 @@ experiments: "position_embedding_type": "absolute", "problem_type": "single_label_classification", "torch_dtype": "float32", - "transformers_version": "4.52.3", + "transformers_version": "4.50.3", "type_vocab_size": 2, "use_cache": true, "vocab_size": 29794 } -[2025-05-25 22:03:46,678][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C5/snapshots/0c5e7dc4b09aa42297d3fcb051dbd7433b740d47/model.safetensors -[2025-05-25 22:03:46,775][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. +[2025-06-28 18:12:53,734][transformers.modeling_utils][INFO] - loading weights file model.safetensors from cache at /tmp/models--kamel-usp--jbcs2025_bertimbau-large-C5/snapshots/0c5e7dc4b09aa42297d3fcb051dbd7433b740d47/model.safetensors +[2025-06-28 18:12:53,737][transformers.modeling_utils][INFO] - Will use torch_dtype=torch.float32 as defined in model's config object +[2025-06-28 18:12:53,737][transformers.modeling_utils][INFO] - Instantiating BertForSequenceClassification model under default dtype torch.float32. +[2025-06-28 18:12:54,200][transformers.modeling_utils][INFO] - All model checkpoint weights were used when initializing BertForSequenceClassification. -[2025-05-25 22:03:46,776][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau-large-C5. +[2025-06-28 18:12:54,200][transformers.modeling_utils][INFO] - All the weights of BertForSequenceClassification were initialized from the model checkpoint at kamel-usp/jbcs2025_bertimbau-large-C5. If your task is similar to the task the model of the checkpoint was trained on, you can already use BertForSequenceClassification for predictions without further training. -[2025-05-25 22:03:46,818][transformers.training_args][INFO] - PyTorch: setting up devices -[2025-05-25 22:03:46,854][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). 
-[2025-05-25 22:03:47,160][transformers.trainer][INFO] - Using auto half precision backend -[2025-05-25 22:03:47,161][__main__][INFO] - Running inference on test dataset -[2025-05-25 22:03:47,162][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, grades, prompt, reference, id_prompt, supporting_text, id, essay_year. If essay_text, grades, prompt, reference, id_prompt, supporting_text, id, essay_year are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. -[2025-05-25 22:03:47,167][transformers.trainer][INFO] - +[2025-06-28 18:12:54,208][transformers.training_args][INFO] - PyTorch: setting up devices +[2025-06-28 18:12:54,221][transformers.training_args][INFO] - The default value for the training argument `--report_to` will change in v5 (from all installed integrations to none). In v5, you will need to use `--report_to all` to get the same behavior as now. You should start updating your code and make this info disappear :-). +[2025-06-28 18:12:54,224][transformers.trainer][INFO] - You have loaded a model on multiple GPUs. `is_model_parallel` attribute will be force-set to `True` to avoid any unexpected behavior such as device placement mismatching. +[2025-06-28 18:12:54,239][transformers.trainer][INFO] - Using auto half precision backend +[2025-06-28 18:12:57,942][__main__][INFO] - Running inference on test dataset +[2025-06-28 18:12:57,945][transformers.trainer][INFO] - The following columns in the test set don't have a corresponding argument in `BertForSequenceClassification.forward` and have been ignored: essay_text, grades, id, reference, prompt, supporting_text, essay_year, id_prompt. If essay_text, grades, id, reference, prompt, supporting_text, essay_year, id_prompt are not expected by `BertForSequenceClassification.forward`, you can safely ignore this message. 
+[2025-06-28 18:12:57,972][transformers.trainer][INFO] - ***** Running Prediction ***** -[2025-05-25 22:03:47,167][transformers.trainer][INFO] - Num examples = 138 -[2025-05-25 22:03:47,167][transformers.trainer][INFO] - Batch size = 16 -[2025-05-25 22:03:48,175][transformers][INFO] - {'accuracy': 0.36231884057971014, 'RMSE': 61.10100926607787, 'QWK': 0.4785241515279538, 'HDIV': 0.14492753623188404, 'Macro_F1': 0.3255241258616379, 'Micro_F1': 0.36231884057971014, 'Weighted_F1': 0.3520852841017614, 'TP_0': np.int64(6), 'TN_0': np.int64(112), 'FP_0': np.int64(4), 'FN_0': np.int64(16), 'TP_1': np.int64(10), 'TN_1': np.int64(88), 'FP_1': np.int64(18), 'FN_1': np.int64(22), 'TP_2': np.int64(4), 'TN_2': np.int64(108), 'FP_2': np.int64(6), 'FN_2': np.int64(20), 'TP_3': np.int64(8), 'TN_3': np.int64(85), 'FP_3': np.int64(28), 'FN_3': np.int64(17), 'TP_4': np.int64(21), 'TN_4': np.int64(79), 'FP_4': np.int64(27), 'FN_4': np.int64(11), 'TP_5': np.int64(1), 'TN_5': np.int64(130), 'FP_5': np.int64(5), 'FN_5': np.int64(2)} -[2025-05-25 22:03:48,186][__main__][INFO] - Inference results saved to jbcs2025_bertimbau-large-C5-encoder_classification-C5_inference_results.jsonl -[2025-05-25 22:03:48,191][__main__][INFO] - Inference results: {'accuracy': 0.36231884057971014, 'RMSE': 61.10100926607787, 'QWK': 0.4785241515279538, 'HDIV': 0.14492753623188404, 'Macro_F1': 0.3255241258616379, 'Micro_F1': 0.36231884057971014, 'Weighted_F1': 0.3520852841017614, 'TP_0': np.int64(6), 'TN_0': np.int64(112), 'FP_0': np.int64(4), 'FN_0': np.int64(16), 'TP_1': np.int64(10), 'TN_1': np.int64(88), 'FP_1': np.int64(18), 'FN_1': np.int64(22), 'TP_2': np.int64(4), 'TN_2': np.int64(108), 'FP_2': np.int64(6), 'FN_2': np.int64(20), 'TP_3': np.int64(8), 'TN_3': np.int64(85), 'FP_3': np.int64(28), 'FN_3': np.int64(17), 'TP_4': np.int64(21), 'TN_4': np.int64(79), 'FP_4': np.int64(27), 'FN_4': np.int64(11), 'TP_5': np.int64(1), 'TN_5': np.int64(130), 'FP_5': np.int64(5), 'FN_5': np.int64(2)} -[2025-05-25 22:03:48,191][__main__][INFO] - Inference experiment completed +[2025-06-28 18:12:57,972][transformers.trainer][INFO] - Num examples = 138 +[2025-06-28 18:12:57,972][transformers.trainer][INFO] - Batch size = 16 +[2025-06-28 18:13:08,066][__main__][INFO] - Inference results saved to jbcs2025_bertimbau-large-C5-encoder_classification-C5_inference_results.jsonl +[2025-06-28 18:13:08,069][__main__][INFO] - Computing bootstrap confidence intervals for metrics: ['QWK', 'Macro_F1', 'Weighted_F1'] +[2025-06-28 18:13:59,131][__main__][INFO] - Bootstrap CI results saved to bootstrap_confidence_intervals.csv +[2025-06-28 18:13:59,131][__main__][INFO] - Bootstrap Confidence Intervals (95%): +[2025-06-28 18:13:59,131][__main__][INFO] - QWK: 0.4766 [0.3498, 0.5988] +[2025-06-28 18:13:59,132][__main__][INFO] - Macro_F1: 0.3186 [0.2331, 0.4147] +[2025-06-28 18:13:59,132][__main__][INFO] - Weighted_F1: 0.3519 [0.2710, 0.4369] +[2025-06-28 18:13:59,132][__main__][INFO] - Inference results: {'accuracy': 0.36231884057971014, 'RMSE': 61.10100926607787, 'QWK': 0.4785241515279538, 'HDIV': 0.14492753623188404, 'Macro_F1': 0.3255241258616379, 'Micro_F1': 0.36231884057971014, 'Weighted_F1': 0.3520852841017614, 'TP_0': np.int64(6), 'TN_0': np.int64(112), 'FP_0': np.int64(4), 'FN_0': np.int64(16), 'TP_1': np.int64(10), 'TN_1': np.int64(88), 'FP_1': np.int64(18), 'FN_1': np.int64(22), 'TP_2': np.int64(4), 'TN_2': np.int64(108), 'FP_2': np.int64(6), 'FN_2': np.int64(20), 'TP_3': np.int64(8), 'TN_3': np.int64(85), 'FP_3': np.int64(28), 'FN_3': np.int64(17), 'TP_4': 
np.int64(21), 'TN_4': np.int64(79), 'FP_4': np.int64(27), 'FN_4': np.int64(11), 'TP_5': np.int64(1), 'TN_5': np.int64(130), 'FP_5': np.int64(5), 'FN_5': np.int64(2)} +[2025-06-28 18:13:59,132][__main__][INFO] - Inference experiment completed
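
Note (editorial, not part of the patch): the run logs above report 95% bootstrap confidence intervals for QWK, Macro_F1 and Weighted_F1 alongside the point metrics, using the logged settings n_bootstrap: 10000 and bootstrap_seed: 42 over the 138 test essays. The sketch below is an illustrative percentile-bootstrap computation only; the function names and the synthetic labels are hypothetical and this is not the code used in the jbcs2025 repository.

import numpy as np
from sklearn.metrics import cohen_kappa_score, f1_score


def percentile_bootstrap_ci(y_true, y_pred, metric_fn, n_bootstrap=10000, seed=42, alpha=0.05):
    # Resample essays with replacement and recompute the metric on each resample,
    # then take the (alpha/2, 1 - alpha/2) percentiles as the confidence interval.
    rng = np.random.default_rng(seed)
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    n = len(y_true)
    stats = np.empty(n_bootstrap)
    for i in range(n_bootstrap):
        idx = rng.integers(0, n, size=n)
        stats[i] = metric_fn(y_true[idx], y_pred[idx])
    lower, upper = np.percentile(stats, [100 * alpha / 2, 100 * (1 - alpha / 2)])
    return stats.mean(), lower, upper


if __name__ == "__main__":
    # Synthetic 0-5 grade labels standing in for the 138 test essays (hypothetical data).
    rng = np.random.default_rng(0)
    y_true = rng.integers(0, 6, size=138)
    y_pred = np.clip(y_true + rng.integers(-1, 2, size=138), 0, 5)
    metrics = {
        "QWK": lambda t, p: cohen_kappa_score(t, p, weights="quadratic"),
        "Macro_F1": lambda t, p: f1_score(t, p, average="macro"),
        "Weighted_F1": lambda t, p: f1_score(t, p, average="weighted"),
    }
    for name, fn in metrics.items():
        mean, lo, hi = percentile_bootstrap_ci(y_true, y_pred, fn)
        print(f"{name}: {mean:.4f} [{lo:.4f}, {hi:.4f}]")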