Training in progress, step 8000, checkpoint
- checkpoint-8000/README.md +21 -0
- checkpoint-8000/adapter_config.json +21 -0
- checkpoint-8000/adapter_model.bin +3 -0
- checkpoint-8000/optimizer.pt +3 -0
- checkpoint-8000/rng_state.pth +3 -0
- checkpoint-8000/scheduler.pt +3 -0
- checkpoint-8000/trainer_state.json +105 -0
- checkpoint-8000/training_args.bin +3 -0
checkpoint-8000/README.md
ADDED
@@ -0,0 +1,21 @@
+---
+library_name: peft
+---
+## Training procedure
+
+
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: False
+- load_in_4bit: True
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: nf4
+- bnb_4bit_use_double_quant: True
+- bnb_4bit_compute_dtype: bfloat16
+### Framework versions
+
+
+- PEFT 0.6.0.dev0
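For reference, the quantization settings listed in the README correspond to a `transformers.BitsAndBytesConfig` along these lines. This is a sketch only: the actual training script is not part of this checkpoint commit, and the variable name `bnb_config` is illustrative.

```python
# Sketch: rebuilds the bitsandbytes config described in checkpoint-8000/README.md.
# The original training script is not included in this commit.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                    # load_in_8bit stays False
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
    llm_int8_threshold=6.0,               # default value, listed in the README for completeness
)
```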
checkpoint-8000/adapter_config.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "codellama/CodeLlama-7b-Instruct-hf",
+  "bias": "lora_only",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "lora_alpha": 8,
+  "lora_dropout": 0.1,
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "revision": null,
+  "target_modules": [
+    "q_proj",
+    "v_proj"
+  ],
+  "task_type": "CAUSAL_LM"
+}
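The adapter config above maps onto a `peft.LoraConfig` roughly as follows; a sketch, assuming the PEFT 0.6.x API listed in the README.

```python
# Sketch: the LoraConfig equivalent of checkpoint-8000/adapter_config.json.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,
    lora_alpha=8,
    lora_dropout=0.1,
    bias="lora_only",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
```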
checkpoint-8000/adapter_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:885b3701b22efc2c97101487782c0c78935d9a115d8cee1ff8562e30b08d36fc
+size 16822989
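`adapter_model.bin` is a Git LFS pointer to the LoRA adapter weights only (about 16 MB), not the base model. A minimal loading sketch, assuming the `checkpoint-8000` folder has been downloaded locally and the same 4-bit setup as in the README:

```python
# Sketch: attach the saved adapter to the quantized CodeLlama base model.
# The local path "checkpoint-8000" is an assumption about where the files live.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)
base = AutoModelForCausalLM.from_pretrained(
    "codellama/CodeLlama-7b-Instruct-hf",
    quantization_config=bnb_config,
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "checkpoint-8000")
model.eval()
```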
checkpoint-8000/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc4dee53ea5158ce3add8dd997df220ee1c64f4fac3185f382b0c8ad6a1ade0d
+size 8555781
checkpoint-8000/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b51906177ab45dca5274dbba530559cdb1531e72d00ed12acff96b04aec61c4b
+size 14575
checkpoint-8000/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:956defb6e7acdc294b02a0946f2b07d80ea0e28c197c919727ee9f58a7c8114e
+size 627
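`optimizer.pt`, `rng_state.pth`, and `scheduler.pt` exist so the run can resume mid-training rather than restart. A sketch of resuming with the Hugging Face `Trainer`; the `trainer` object here is a placeholder for a Trainer built with the original model, datasets, and arguments, none of which ship in this commit.

```python
# Sketch: resume from this checkpoint with an already-constructed Trainer.
# "trainer" is hypothetical; the original training setup is not in this commit.
trainer.train(
    resume_from_checkpoint="./text2sql/codellama_instruct_pt_text2sql/checkpoint-8000"
)
```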
checkpoint-8000/trainer_state.json
ADDED
@@ -0,0 +1,105 @@
+{
+  "best_metric": 0.027806701138615608,
+  "best_model_checkpoint": "./text2sql/codellama_instruct_pt_text2sql/checkpoint-8000",
+  "epoch": 0.8911787231079858,
+  "eval_steps": 2000,
+  "global_step": 8000,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.0,
+      "learning_rate": 1.1139992573338283e-07,
+      "loss": 1.2191,
+      "step": 1
+    },
+    {
+      "epoch": 0.11,
+      "learning_rate": 0.00011139992573338284,
+      "loss": 0.2684,
+      "step": 1000
+    },
+    {
+      "epoch": 0.22,
+      "learning_rate": 0.00022279985146676567,
+      "loss": 0.0693,
+      "step": 2000
+    },
+    {
+      "epoch": 0.22,
+      "eval_loss": 0.05887996032834053,
+      "eval_runtime": 171.127,
+      "eval_samples_per_second": 11.687,
+      "eval_steps_per_second": 1.461,
+      "step": 2000
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 0.00029781686301467276,
+      "loss": 0.0565,
+      "step": 3000
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 0.0002907056676227274,
+      "loss": 0.047,
+      "step": 4000
+    },
+    {
+      "epoch": 0.45,
+      "eval_loss": 0.03957173973321915,
+      "eval_runtime": 171.3682,
+      "eval_samples_per_second": 11.671,
+      "eval_steps_per_second": 1.459,
+      "step": 4000
+    },
+    {
+      "epoch": 0.56,
+      "learning_rate": 0.00028359447223078195,
+      "loss": 0.0403,
+      "step": 5000
+    },
+    {
+      "epoch": 0.67,
+      "learning_rate": 0.0002764832768388366,
+      "loss": 0.0364,
+      "step": 6000
+    },
+    {
+      "epoch": 0.67,
+      "eval_loss": 0.030724667012691498,
+      "eval_runtime": 170.9405,
+      "eval_samples_per_second": 11.7,
+      "eval_steps_per_second": 1.462,
+      "step": 6000
+    },
+    {
+      "epoch": 0.78,
+      "learning_rate": 0.0002693720814468912,
+      "loss": 0.0341,
+      "step": 7000
+    },
+    {
+      "epoch": 0.89,
+      "learning_rate": 0.00026226088605494583,
+      "loss": 0.0311,
+      "step": 8000
+    },
+    {
+      "epoch": 0.89,
+      "eval_loss": 0.027806701138615608,
+      "eval_runtime": 171.064,
+      "eval_samples_per_second": 11.692,
+      "eval_steps_per_second": 1.461,
+      "step": 8000
+    }
+  ],
+  "logging_steps": 1000,
+  "max_steps": 44880,
+  "num_train_epochs": 5,
+  "save_steps": 2000,
+  "total_flos": 3.896521841666949e+18,
+  "trial_name": null,
+  "trial_params": null
+}
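The cadence recorded in `trainer_state.json` (logging every 1,000 steps, eval and save every 2,000, 5 epochs, 44,880 max steps) implies `TrainingArguments` roughly like the sketch below. The full arguments live in the binary `training_args.bin` that follows, so treat every value not present in the JSON above as an assumption.

```python
# Sketch reconstructed from trainer_state.json; values not present there
# (strategy strings, load_best_model_at_end) are assumptions.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="./text2sql/codellama_instruct_pt_text2sql",
    num_train_epochs=5,
    logging_steps=1000,
    evaluation_strategy="steps",
    eval_steps=2000,
    save_steps=2000,
    load_best_model_at_end=True,  # consistent with best_model_checkpoint being tracked
)
```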
checkpoint-8000/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0c3233085912f12f47eb0b59979d9c767ddd5d0fc4cd96a7ebdcd9b12e9ba2c6
+size 4219