Training in progress, step 200
- .gitattributes +1 -0
- adapter_config.json +17 -0
- adapter_model.safetensors +3 -0
- added_tokens.json +3 -0
- special_tokens_map.json +42 -0
- tokenizer.json +3 -0
- tokenizer.model +3 -0
- tokenizer_config.json +0 -0
- trainer_log.jsonl +43 -0
- training_args.bin +3 -0
.gitattributes
CHANGED

@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
adapter_config.json
ADDED

@@ -0,0 +1,17 @@
{
  "auto_mapping": null,
  "base_model_name_or_path": "google/gemma-3-1b-it",
  "inference_mode": true,
  "num_attention_heads": 4,
  "num_layers": 26,
  "num_transformer_submodules": 1,
  "num_virtual_tokens": 100,
  "peft_type": "PROMPT_TUNING",
  "prompt_tuning_init": "RANDOM",
  "prompt_tuning_init_text": null,
  "revision": null,
  "task_type": "CAUSAL_LM",
  "token_dim": 1152,
  "tokenizer_kwargs": null,
  "tokenizer_name_or_path": null
}
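adapter_config.json is the adapter description that the PEFT library writes for prompt tuning: 100 trainable virtual-token embeddings of width 1152 (the base model's hidden size) are learned and prepended to the input of the frozen google/gemma-3-1b-it model, starting from random initialization. As a minimal sketch only (the training script is not part of this commit, so everything beyond the values shown above is an assumption), an equivalent adapter could be set up with PEFT like this:

# Hypothetical reconstruction from adapter_config.json; not the repo's actual training code.
from peft import PromptTuningConfig, PromptTuningInit, TaskType, get_peft_model
from transformers import AutoModelForCausalLM

base = AutoModelForCausalLM.from_pretrained("google/gemma-3-1b-it")

peft_config = PromptTuningConfig(
    task_type=TaskType.CAUSAL_LM,                # "task_type": "CAUSAL_LM"
    prompt_tuning_init=PromptTuningInit.RANDOM,  # "prompt_tuning_init": "RANDOM"
    num_virtual_tokens=100,                      # "num_virtual_tokens": 100
)
model = get_peft_model(base, peft_config)
model.print_trainable_parameters()  # only the virtual-token embeddings are trainable

Saving the wrapped model with model.save_pretrained(...) is what produces adapter_config.json (model-derived fields such as token_dim, num_layers and num_attention_heads are filled in automatically) and adapter_model.safetensors, the two adapter files in this commit.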
adapter_model.safetensors
ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:98c29494f2c0a43dba566be11df7bee0838ba5b458a7cf8afc74ba99808a3273
size 460928
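As a consistency check, the checkpoint size matches the config: 100 virtual tokens × 1152 dimensions × 4 bytes per parameter (assuming float32 storage) is 460,800 bytes of embedding weights, which together with the safetensors header accounts for the 460,928 bytes recorded in the LFS pointer above.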
added_tokens.json
ADDED

@@ -0,0 +1,3 @@
{
  "<image_soft_token>": 262144
}
special_tokens_map.json
ADDED

@@ -0,0 +1,42 @@
{
  "additional_special_tokens": [
    {
      "content": "<end_of_turn>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false
    }
  ],
  "boi_token": "<start_of_image>",
  "bos_token": {
    "content": "<bos>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eoi_token": "<end_of_image>",
  "eos_token": {
    "content": "<eos>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "image_token": "<image_soft_token>",
  "pad_token": {
    "content": "<pad>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
size 33384568
tokenizer.model
ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
size 4689074
tokenizer_config.json
ADDED

The diff for this file is too large to render. See the raw diff.
trainer_log.jsonl
ADDED

@@ -0,0 +1,43 @@
{"current_steps": 5, "total_steps": 40000, "loss": 9.1493, "lr": 0.29999999259779675, "epoch": 0.024213075060532687, "percentage": 0.01, "elapsed_time": "0:00:03", "remaining_time": "6:53:44", "throughput": 2588.07, "total_tokens": 8032}
{"current_steps": 10, "total_steps": 40000, "loss": 3.9374, "lr": 0.29999996252634736, "epoch": 0.048426150121065374, "percentage": 0.03, "elapsed_time": "0:00:05", "remaining_time": "5:39:42", "throughput": 3270.87, "total_tokens": 16672}
{"current_steps": 15, "total_steps": 40000, "loss": 1.673, "lr": 0.2999999093230187, "epoch": 0.07263922518159806, "percentage": 0.04, "elapsed_time": "0:00:07", "remaining_time": "5:14:18", "throughput": 3559.85, "total_tokens": 25184}
{"current_steps": 20, "total_steps": 40000, "loss": 0.8702, "lr": 0.299999832987819, "epoch": 0.09685230024213075, "percentage": 0.05, "elapsed_time": "0:00:09", "remaining_time": "5:01:24", "throughput": 3788.23, "total_tokens": 34272}
{"current_steps": 25, "total_steps": 40000, "loss": 0.6104, "lr": 0.29999973352076004, "epoch": 0.12106537530266344, "percentage": 0.06, "elapsed_time": "0:00:11", "remaining_time": "4:53:38", "throughput": 3853.99, "total_tokens": 42464}
{"current_steps": 30, "total_steps": 40000, "loss": 0.4351, "lr": 0.2999996109218572, "epoch": 0.14527845036319612, "percentage": 0.07, "elapsed_time": "0:00:12", "remaining_time": "4:48:25", "throughput": 3924.48, "total_tokens": 50976}
{"current_steps": 35, "total_steps": 40000, "loss": 0.2699, "lr": 0.2999994651911293, "epoch": 0.1694915254237288, "percentage": 0.09, "elapsed_time": "0:00:14", "remaining_time": "4:45:14", "throughput": 3973.21, "total_tokens": 59552}
{"current_steps": 40, "total_steps": 40000, "loss": 0.2699, "lr": 0.2999992963285989, "epoch": 0.1937046004842615, "percentage": 0.1, "elapsed_time": "0:00:16", "remaining_time": "4:42:39", "throughput": 4030.11, "total_tokens": 68416}
{"current_steps": 45, "total_steps": 40000, "loss": 0.2844, "lr": 0.29999910433429194, "epoch": 0.2179176755447942, "percentage": 0.11, "elapsed_time": "0:00:18", "remaining_time": "4:40:27", "throughput": 4062.3, "total_tokens": 76992}
{"current_steps": 50, "total_steps": 40000, "loss": 0.1694, "lr": 0.29999888920823814, "epoch": 0.24213075060532688, "percentage": 0.12, "elapsed_time": "0:00:20", "remaining_time": "4:38:44", "throughput": 4098.53, "total_tokens": 85792}
{"current_steps": 55, "total_steps": 40000, "loss": 0.2117, "lr": 0.29999865095047057, "epoch": 0.26634382566585957, "percentage": 0.14, "elapsed_time": "0:00:22", "remaining_time": "4:37:18", "throughput": 4107.97, "total_tokens": 94112}
{"current_steps": 60, "total_steps": 40000, "loss": 0.1858, "lr": 0.29999838956102604, "epoch": 0.29055690072639223, "percentage": 0.15, "elapsed_time": "0:00:24", "remaining_time": "4:36:04", "throughput": 4133.0, "total_tokens": 102848}
{"current_steps": 65, "total_steps": 40000, "loss": 0.2456, "lr": 0.29999810503994484, "epoch": 0.31476997578692495, "percentage": 0.16, "elapsed_time": "0:00:26", "remaining_time": "4:35:01", "throughput": 4150.97, "total_tokens": 111488}
{"current_steps": 70, "total_steps": 40000, "loss": 0.2, "lr": 0.29999779738727084, "epoch": 0.3389830508474576, "percentage": 0.18, "elapsed_time": "0:00:28", "remaining_time": "4:34:07", "throughput": 4160.65, "total_tokens": 119968}
{"current_steps": 75, "total_steps": 40000, "loss": 0.1579, "lr": 0.29999746660305154, "epoch": 0.36319612590799033, "percentage": 0.19, "elapsed_time": "0:00:30", "remaining_time": "4:33:31", "throughput": 4180.99, "total_tokens": 128896}
{"current_steps": 80, "total_steps": 40000, "loss": 0.2211, "lr": 0.2999971126873379, "epoch": 0.387409200968523, "percentage": 0.2, "elapsed_time": "0:00:32", "remaining_time": "4:32:50", "throughput": 4188.47, "total_tokens": 137408}
{"current_steps": 85, "total_steps": 40000, "loss": 0.1993, "lr": 0.2999967356401845, "epoch": 0.4116222760290557, "percentage": 0.21, "elapsed_time": "0:00:34", "remaining_time": "4:32:13", "throughput": 4200.82, "total_tokens": 146112}
{"current_steps": 90, "total_steps": 40000, "loss": 0.1887, "lr": 0.29999633546164944, "epoch": 0.4358353510895884, "percentage": 0.22, "elapsed_time": "0:00:36", "remaining_time": "4:31:39", "throughput": 4205.95, "total_tokens": 154592}
{"current_steps": 95, "total_steps": 40000, "loss": 0.1806, "lr": 0.29999591215179444, "epoch": 0.4600484261501211, "percentage": 0.24, "elapsed_time": "0:00:38", "remaining_time": "4:31:07", "throughput": 4209.92, "total_tokens": 163040}
{"current_steps": 100, "total_steps": 40000, "loss": 0.1803, "lr": 0.2999954657106849, "epoch": 0.48426150121065376, "percentage": 0.25, "elapsed_time": "0:00:40", "remaining_time": "4:30:39", "throughput": 4216.66, "total_tokens": 171616}
{"current_steps": 105, "total_steps": 40000, "loss": 0.2159, "lr": 0.2999949961383896, "epoch": 0.5084745762711864, "percentage": 0.26, "elapsed_time": "0:00:42", "remaining_time": "4:30:13", "throughput": 4232.52, "total_tokens": 180608}
{"current_steps": 110, "total_steps": 40000, "loss": 0.197, "lr": 0.2999945034349809, "epoch": 0.5326876513317191, "percentage": 0.27, "elapsed_time": "0:00:44", "remaining_time": "4:29:49", "throughput": 4235.57, "total_tokens": 189088}
{"current_steps": 115, "total_steps": 40000, "loss": 0.2267, "lr": 0.2999939876005348, "epoch": 0.5569007263922519, "percentage": 0.29, "elapsed_time": "0:00:46", "remaining_time": "4:29:31", "throughput": 4241.93, "total_tokens": 197792}
{"current_steps": 120, "total_steps": 40000, "loss": 0.2161, "lr": 0.29999344863513094, "epoch": 0.5811138014527845, "percentage": 0.3, "elapsed_time": "0:00:48", "remaining_time": "4:29:12", "throughput": 4246.02, "total_tokens": 206368}
{"current_steps": 125, "total_steps": 40000, "loss": 0.1718, "lr": 0.2999928865388523, "epoch": 0.6053268765133172, "percentage": 0.31, "elapsed_time": "0:00:50", "remaining_time": "4:28:54", "throughput": 4246.51, "total_tokens": 214784}
{"current_steps": 130, "total_steps": 40000, "loss": 0.1906, "lr": 0.29999230131178567, "epoch": 0.6295399515738499, "percentage": 0.33, "elapsed_time": "0:00:52", "remaining_time": "4:28:38", "throughput": 4256.1, "total_tokens": 223680}
{"current_steps": 135, "total_steps": 40000, "loss": 0.1869, "lr": 0.2999916929540212, "epoch": 0.6537530266343826, "percentage": 0.34, "elapsed_time": "0:00:54", "remaining_time": "4:28:21", "throughput": 4257.72, "total_tokens": 232160}
{"current_steps": 140, "total_steps": 40000, "loss": 0.1952, "lr": 0.29999106146565285, "epoch": 0.6779661016949152, "percentage": 0.35, "elapsed_time": "0:00:56", "remaining_time": "4:28:05", "throughput": 4256.45, "total_tokens": 240480}
{"current_steps": 145, "total_steps": 40000, "loss": 0.1758, "lr": 0.29999040684677786, "epoch": 0.7021791767554479, "percentage": 0.36, "elapsed_time": "0:00:58", "remaining_time": "4:27:51", "throughput": 4256.69, "total_tokens": 248896}
{"current_steps": 150, "total_steps": 40000, "loss": 0.1947, "lr": 0.2999897290974972, "epoch": 0.7263922518159807, "percentage": 0.38, "elapsed_time": "0:01:00", "remaining_time": "4:27:42", "throughput": 4258.51, "total_tokens": 257472}
{"current_steps": 155, "total_steps": 40000, "loss": 0.1838, "lr": 0.2999890282179155, "epoch": 0.7506053268765133, "percentage": 0.39, "elapsed_time": "0:01:02", "remaining_time": "4:27:29", "throughput": 4255.76, "total_tokens": 265696}
{"current_steps": 160, "total_steps": 40000, "loss": 0.1891, "lr": 0.29998830420814077, "epoch": 0.774818401937046, "percentage": 0.4, "elapsed_time": "0:01:04", "remaining_time": "4:27:17", "throughput": 4255.52, "total_tokens": 274080}
{"current_steps": 165, "total_steps": 40000, "loss": 0.2118, "lr": 0.2999875570682846, "epoch": 0.7990314769975787, "percentage": 0.41, "elapsed_time": "0:01:06", "remaining_time": "4:27:04", "throughput": 4256.02, "total_tokens": 282496}
{"current_steps": 170, "total_steps": 40000, "loss": 0.1865, "lr": 0.2999867867984623, "epoch": 0.8232445520581114, "percentage": 0.43, "elapsed_time": "0:01:08", "remaining_time": "4:26:53", "throughput": 4256.87, "total_tokens": 290944}
{"current_steps": 175, "total_steps": 40000, "loss": 0.1724, "lr": 0.29998599339879267, "epoch": 0.847457627118644, "percentage": 0.44, "elapsed_time": "0:01:10", "remaining_time": "4:26:42", "throughput": 4264.59, "total_tokens": 299872}
{"current_steps": 180, "total_steps": 40000, "loss": 0.1813, "lr": 0.29998517686939796, "epoch": 0.8716707021791767, "percentage": 0.45, "elapsed_time": "0:01:12", "remaining_time": "4:26:32", "throughput": 4267.58, "total_tokens": 308512}
{"current_steps": 185, "total_steps": 40000, "loss": 0.1593, "lr": 0.29998433721040413, "epoch": 0.8958837772397095, "percentage": 0.46, "elapsed_time": "0:01:14", "remaining_time": "4:26:22", "throughput": 4272.83, "total_tokens": 317312}
{"current_steps": 190, "total_steps": 40000, "loss": 0.1487, "lr": 0.29998347442194073, "epoch": 0.9200968523002422, "percentage": 0.47, "elapsed_time": "0:01:16", "remaining_time": "4:26:17", "throughput": 4272.05, "total_tokens": 325760}
{"current_steps": 195, "total_steps": 40000, "loss": 0.1437, "lr": 0.2999825885041407, "epoch": 0.9443099273607748, "percentage": 0.49, "elapsed_time": "0:01:18", "remaining_time": "4:26:09", "throughput": 4274.82, "total_tokens": 334432}
{"current_steps": 200, "total_steps": 40000, "loss": 0.161, "lr": 0.29998167945714077, "epoch": 0.9685230024213075, "percentage": 0.5, "elapsed_time": "0:01:20", "remaining_time": "4:26:00", "throughput": 4271.5, "total_tokens": 342592}
{"current_steps": 200, "total_steps": 40000, "eval_loss": 0.1907040923833847, "epoch": 0.9685230024213075, "percentage": 0.5, "elapsed_time": "0:01:24", "remaining_time": "4:41:20", "throughput": 4038.78, "total_tokens": 342592}
{"current_steps": 205, "total_steps": 40000, "loss": 0.1786, "lr": 0.2999807472810811, "epoch": 0.9927360774818402, "percentage": 0.51, "elapsed_time": "0:01:28", "remaining_time": "4:45:31", "throughput": 3978.76, "total_tokens": 351136}
{"current_steps": 210, "total_steps": 40000, "loss": 0.2003, "lr": 0.29997979197610536, "epoch": 1.0193704600484261, "percentage": 0.53, "elapsed_time": "0:01:30", "remaining_time": "4:46:29", "throughput": 3968.82, "total_tokens": 360064}
training_args.bin
ADDED

@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9298989aab1783ff78f9fae9a0932f15ce837aaf793d49b07b7606290d448ff0
size 5752