Model save

Files changed:
- README.md +3 -4
- adapter_config.json +3 -3
- adapter_model.safetensors +1 -1
- training_args.bin +1 -1
README.md CHANGED

@@ -74,7 +74,7 @@ hub_model_id: EmilRyd/gpt-oss-20b-olympiads-ground-truth-false-on-policy-with-at
 
 gradient_accumulation_steps: 1
 micro_batch_size: 4 # x 8 gpus
-num_epochs:
+num_epochs: 25
 
 optimizer: adamw_torch_8bit
 lr_scheduler: constant_with_warmup
@@ -102,7 +102,7 @@ eot_tokens:
 
 </details><br>
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/mats-low-stakes/low-stakes-control-sft/runs/
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/mats-low-stakes/low-stakes-control-sft/runs/fcdrh2bn)
 # gpt-oss-20b-olympiads-ground-truth-false-on-policy-with-attack-100-10
 
 This model is a fine-tuned version of [openai/gpt-oss-20b](https://huggingface.co/openai/gpt-oss-20b) on an unknown dataset.
@@ -134,8 +134,7 @@ The following hyperparameters were used during training:
 - total_eval_batch_size: 32
 - optimizer: Use OptimizerNames.ADAMW_TORCH_8BIT with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: constant_with_warmup
--
-- training_steps: 138
+- training_steps: 86
 
 ### Framework versions
 
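For readers who want to mirror this setup with the Hugging Face Trainer directly, here is a minimal sketch of the equivalent `TrainingArguments`. The batch, optimizer, scheduler, and epoch values come from the README diff above (micro-batch 4 per device across 8 GPUs gives the effective batch size of 32 reported as `total_eval_batch_size`); `output_dir`, `learning_rate`, and `warmup_steps` are illustrative placeholders not recorded in this diff.

```python
from transformers import TrainingArguments

# Sketch of the hyperparameters listed in the README; placeholder
# values are marked as such and are NOT taken from this repository.
args = TrainingArguments(
    output_dir="gpt-oss-20b-olympiads-sft",   # placeholder path
    per_device_train_batch_size=4,            # micro_batch_size: 4 (x 8 GPUs -> effective 32)
    gradient_accumulation_steps=1,
    num_train_epochs=25,                      # the run itself stopped at 86 recorded steps
    optim="adamw_torch_8bit",                 # AdamW (8-bit), betas=(0.9, 0.999), eps=1e-8
    lr_scheduler_type="constant_with_warmup",
    learning_rate=2e-4,                       # placeholder
    warmup_steps=10,                          # placeholder
)
```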
adapter_config.json CHANGED

@@ -25,10 +25,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
-    "q_proj",
     "v_proj",
-    "
+    "k_proj",
+    "o_proj",
+    "q_proj"
   ],
   "target_parameters": [],
   "task_type": "CAUSAL_LM",
adapter_model.safetensors CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:77c0943a7a2df093475c156a2d7c4146b301527248903c679901f8607394c71d
 size 63726760
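`adapter_model.safetensors` holds the LoRA weights themselves (about 64 MB). A minimal sketch of attaching this adapter to the base model with `peft`, assuming the repository id is the `EmilRyd/...` hub_model_id from the README combined with the model name in its heading:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "openai/gpt-oss-20b"
# Assumed repo id, inferred from hub_model_id and the README heading.
adapter_id = "EmilRyd/gpt-oss-20b-olympiads-ground-truth-false-on-policy-with-attack-100-10"

# Load the base model, then attach the LoRA adapter stored in
# adapter_model.safetensors / adapter_config.json from this repo.
tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)
```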
training_args.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d555ef1049e8a02d9af99f4129eef6f7d40b7deb40b14886c89e569819b7d6a0
 size 7032
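Both binary files are stored as Git LFS pointers: the repository tracks only the `oid sha256:` digest and `size`, while the payload lives in LFS storage. A minimal sketch of verifying a downloaded file against its pointer, using the digest from the `training_args.bin` pointer above:

```python
import hashlib

def lfs_sha256(path: str, chunk_size: int = 1 << 20) -> str:
    """Compute the sha256 digest that Git LFS records as the pointer oid."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

# Digest taken from the training_args.bin pointer in this commit.
expected = "d555ef1049e8a02d9af99f4129eef6f7d40b7deb40b14886c89e569819b7d6a0"
assert lfs_sha256("training_args.bin") == expected, "LFS payload does not match pointer oid"
```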