Training in progress, step 100, checkpoint
checkpoint-100/adapter_config.json
CHANGED
@@ -20,10 +20,10 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "o_proj",
     "v_proj",
-    "
-    "k_proj"
+    "o_proj",
+    "k_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
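The updated adapter_config.json targets all four attention projections (v_proj, o_proj, k_proj, q_proj) for a causal-LM LoRA adapter with DoRA disabled. A minimal sketch of an equivalent peft config follows; the rank, alpha, and dropout values are assumptions, since they are not visible in this diff.

# Sketch only, not the training script from this repo: a LoraConfig matching the
# target_modules, task_type, and use_dora values in the new adapter_config.json.
# r, lora_alpha, and lora_dropout are assumed values for illustration.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                 # assumed rank
    lora_alpha=16,       # assumed scaling factor
    lora_dropout=0.05,   # assumed dropout
    target_modules=["v_proj", "o_proj", "k_proj", "q_proj"],
    task_type="CAUSAL_LM",
    use_dora=False,
)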
checkpoint-100/adapter_model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:6072b6642115fd4383ff47af572051e28f499a366efad2d6af9e21576673d0c0
 size 54560368
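adapter_model.safetensors (like optimizer.pt and training_args.bin below) is stored as a Git LFS pointer: the tracked text holds only the spec version, the SHA-256 of the real payload, and its size in bytes. A quick way to check a downloaded file against its pointer (sketch; the local path is an assumption):

# Sketch: verify that a downloaded checkpoint file matches the oid in its
# Git LFS pointer. The local path is assumed for illustration.
import hashlib

EXPECTED_OID = "6072b6642115fd4383ff47af572051e28f499a366efad2d6af9e21576673d0c0"

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

assert sha256_of("checkpoint-100/adapter_model.safetensors") == EXPECTED_OID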
checkpoint-100/optimizer.pt
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:8677aaa3491cf25746d02e89ba51b5f2cb7001c8cc9e27de8cdb551d46e668aa
 size 109267450
checkpoint-100/trainer_state.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "best_metric": 0.4496666491031647,
-  "best_model_checkpoint": "./zephyr/
+  "best_model_checkpoint": "./zephyr/09-04-24-Weni-WeniGPT-Agents-Zephyr-1.0.11-KTO_Hyperparameter search, altering desired and undesired weights for KTO task.-2_max_steps-145_batch_16_2024-04-09_ppid_10/checkpoint-100",
   "epoch": 0.684931506849315,
   "eval_steps": 50,
   "global_step": 100,
@@ -13,12 +13,12 @@
   "grad_norm": 0.0,
   "kl": 0.0,
   "learning_rate": 0.00018142857142857142,
-  "logps/chosen": -
-  "logps/rejected": -
-  "loss": 0.
-  "rewards/chosen": -
-  "rewards/margins":
-  "rewards/rejected": -
+  "logps/chosen": -1014.4324340820312,
+  "logps/rejected": -961.4172973632812,
+  "loss": 0.4278,
+  "rewards/chosen": -71.9658432006836,
+  "rewards/margins": -3.0392403602600098,
+  "rewards/rejected": -67.96768188476562,
   "step": 20
 },
 {
@@ -26,26 +26,26 @@
   "grad_norm": 0.0,
   "kl": 0.0,
   "learning_rate": 0.00015285714285714287,
-  "logps/chosen": -
-  "logps/rejected": -
-  "loss": 0.
-  "rewards/chosen": -
-  "rewards/margins":
-  "rewards/rejected": -
+  "logps/chosen": -2804.0458984375,
+  "logps/rejected": -2825.398193359375,
+  "loss": 0.4513,
+  "rewards/chosen": -251.50927734375,
+  "rewards/margins": -4.426294326782227,
+  "rewards/rejected": -251.947265625,
   "step": 40
 },
 {
   "epoch": 0.34,
   "eval_kl": 0.0,
-  "eval_logps/chosen": -
-  "eval_logps/rejected": -
+  "eval_logps/chosen": -2748.7060546875,
+  "eval_logps/rejected": -2395.84228515625,
   "eval_loss": 0.4496666491031647,
-  "eval_rewards/chosen": -
-  "eval_rewards/margins":
-  "eval_rewards/rejected": -
-  "eval_runtime":
-  "eval_samples_per_second": 2.
-  "eval_steps_per_second": 0.
+  "eval_rewards/chosen": -246.0056915283203,
+  "eval_rewards/margins": -31.47684669494629,
+  "eval_rewards/rejected": -213.32154846191406,
+  "eval_runtime": 140.7571,
+  "eval_samples_per_second": 2.131,
+  "eval_steps_per_second": 0.533,
   "step": 50
 },
 {
@@ -53,12 +53,12 @@
   "grad_norm": 0.0,
   "kl": 0.0,
   "learning_rate": 0.00012428571428571428,
-  "logps/chosen": -
-  "logps/rejected": -
-  "loss": 0.
-  "rewards/chosen": -
-  "rewards/margins":
-  "rewards/rejected": -
+  "logps/chosen": -2966.9404296875,
+  "logps/rejected": -2732.59423828125,
+  "loss": 0.4483,
+  "rewards/chosen": -268.0592041015625,
+  "rewards/margins": -19.579919815063477,
+  "rewards/rejected": -244.43467712402344,
   "step": 60
 },
 {
@@ -66,12 +66,12 @@
   "grad_norm": 0.0,
   "kl": 0.0,
   "learning_rate": 9.571428571428573e-05,
-  "logps/chosen": -
-  "logps/rejected": -
-  "loss": 0.
-  "rewards/chosen": -
-  "rewards/margins":
-  "rewards/rejected": -
+  "logps/chosen": -2559.466064453125,
+  "logps/rejected": -2662.142578125,
+  "loss": 0.4572,
+  "rewards/chosen": -229.73390197753906,
+  "rewards/margins": 8.422286987304688,
+  "rewards/rejected": -237.15330505371094,
   "step": 80
 },
 {
@@ -79,26 +79,26 @@
   "grad_norm": 0.0,
   "kl": 0.0,
   "learning_rate": 6.714285714285714e-05,
-  "logps/chosen": -
-  "logps/rejected": -
-  "loss": 0.
-  "rewards/chosen": -
-  "rewards/margins":
-  "rewards/rejected": -
+  "logps/chosen": -2944.9951171875,
+  "logps/rejected": -2686.48046875,
+  "loss": 0.475,
+  "rewards/chosen": -264.62896728515625,
+  "rewards/margins": -24.418039321899414,
+  "rewards/rejected": -238.73057556152344,
   "step": 100
 },
 {
   "epoch": 0.68,
   "eval_kl": 0.0,
-  "eval_logps/chosen": -
-  "eval_logps/rejected": -
+  "eval_logps/chosen": -2736.132568359375,
+  "eval_logps/rejected": -2383.06005859375,
   "eval_loss": 0.4496666491031647,
-  "eval_rewards/chosen": -
-  "eval_rewards/margins":
-  "eval_rewards/rejected": -
-  "eval_runtime":
-  "eval_samples_per_second": 2.
-  "eval_steps_per_second": 0.
+  "eval_rewards/chosen": -244.74835205078125,
+  "eval_rewards/margins": -31.5240421295166,
+  "eval_rewards/rejected": -212.04331970214844,
+  "eval_runtime": 140.895,
+  "eval_samples_per_second": 2.129,
+  "eval_steps_per_second": 0.532,
   "step": 100
 }
 ],
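trainer_state.json now records the full KTO log history for this checkpoint: training metrics every 20 steps (logps, loss, rewards/chosen, rewards/margins, rewards/rejected) and evaluation metrics every 50 steps. A small sketch for reading those series back out of the checkpoint, for example to compare reward margins across steps; the file path comes from this commit, everything else is illustrative.

# Sketch: load the log_history from this checkpoint's trainer_state.json and
# print loss and reward margin for each logged training or evaluation step.
import json

with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "rewards/margins" in entry:            # training log entries (every 20 steps)
        print(entry["step"], entry["loss"], entry["rewards/margins"])
    elif "eval_rewards/margins" in entry:     # evaluation log entries (every 50 steps)
        print(entry["step"], entry["eval_loss"], entry["eval_rewards/margins"])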
checkpoint-100/training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:3e692c85e2382863d47509bc470768505f535684608d322bb3d14aa5f9ed78ae
 size 5688