Commit e88c052 · Parent(s): dcfb4ed

update newest
Files changed:
- README.md (+1, -22)
- adapter_config.json (+2, -2)
- checkpoint-200/README.md (+0, -220)
- checkpoint-200/adapter_config.json (+0, -23)
- checkpoint-200/adapter_model.safetensors (+0, -3)
- checkpoint-200/optimizer.pt (+0, -3)
- checkpoint-200/rng_state.pth (+0, -3)
- checkpoint-200/scheduler.pt (+0, -3)
- checkpoint-200/trainer_state.json (+0, -147)
- checkpoint-200/training_args.bin (+0, -3)
    	
README.md CHANGED

@@ -1,8 +1,6 @@
 ---
 library_name: peft
 base_model: meta-llama/Llama-2-7b-chat-hf
-language:
-- vi
 ---
 
 # Model Card for Model ID
@@ -25,7 +23,7 @@ language:
 - **Model type:** [More Information Needed]
 - **Language(s) (NLP):** [More Information Needed]
 - **License:** [More Information Needed]
-- **Finetuned from model [optional]:** [
+- **Finetuned from model [optional]:** [More Information Needed]
 
 ### Model Sources [optional]
 
@@ -220,22 +218,3 @@ The following `bitsandbytes` quantization config was used during training:
 
 
 - PEFT 0.6.2.dev0
-## Training procedure
-
-
-The following `bitsandbytes` quantization config was used during training:
-- quant_method: bitsandbytes
-- load_in_8bit: True
-- load_in_4bit: False
-- llm_int8_threshold: 6.0
-- llm_int8_skip_modules: None
-- llm_int8_enable_fp32_cpu_offload: False
-- llm_int8_has_fp16_weight: False
-- bnb_4bit_quant_type: fp4
-- bnb_4bit_use_double_quant: False
-- bnb_4bit_compute_dtype: float32
-
-### Framework versions
-
-
-- PEFT 0.6.2.dev0
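The section removed at the end of the README records the 8-bit `bitsandbytes` setup used during training. As a minimal sketch (not part of this commit), the same values could be expressed with `transformers.BitsAndBytesConfig` when loading the base model; the field values mirror the removed list, while the loading call itself is illustrative:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the bitsandbytes values recorded in the removed README section.
# The 4-bit fields are inert here since load_in_4bit is False.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    load_in_4bit=False,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
    llm_int8_has_fp16_weight=False,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)

base_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-chat-hf",
    quantization_config=bnb_config,
    device_map="auto",  # illustrative; requires the accelerate package
)
```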
    	
adapter_config.json CHANGED

@@ -8,11 +8,11 @@
   "init_lora_weights": true,
   "layers_pattern": null,
   "layers_to_transform": null,
-  "lora_alpha": 
+  "lora_alpha": 16,
   "lora_dropout": 0.05,
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 
+  "r": 8,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
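The change moves the adapter to `r: 8` with `lora_alpha: 16` (the deleted checkpoint-200/adapter_config.json further down still shows the older `r: 16` / `lora_alpha: 32`). A minimal sketch of the updated settings as a `peft.LoraConfig` — the `target_modules` list is truncated in this view, so `q_proj`/`v_proj` are assumed from the deleted checkpoint config:

```python
from peft import LoraConfig

# r=8 and lora_alpha=16 are the new values from the diff above.
# target_modules is assumed from the deleted checkpoint-200/adapter_config.json.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    target_modules=["v_proj", "q_proj"],
    task_type="CAUSAL_LM",
)
```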
    	
checkpoint-200/README.md DELETED

@@ -1,220 +0,0 @@
----
-library_name: peft
-base_model: meta-llama/Llama-2-7b-chat-hf
----
-
-# Model Card for Model ID
-
-<!-- Provide a quick summary of what the model is/does. -->
-
-
-
-## Model Details
-
-### Model Description
-
-<!-- Provide a longer summary of what this model is. -->
-
-
-
-- **Developed by:** [More Information Needed]
-- **Funded by [optional]:** [More Information Needed]
-- **Shared by [optional]:** [More Information Needed]
-- **Model type:** [More Information Needed]
-- **Language(s) (NLP):** [More Information Needed]
-- **License:** [More Information Needed]
-- **Finetuned from model [optional]:** [More Information Needed]
-
-### Model Sources [optional]
-
-<!-- Provide the basic links for the model. -->
-
-- **Repository:** [More Information Needed]
-- **Paper [optional]:** [More Information Needed]
-- **Demo [optional]:** [More Information Needed]
-
-## Uses
-
-<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
-
-### Direct Use
-
-<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
-
-[More Information Needed]
-
-### Downstream Use [optional]
-
-<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
-
-[More Information Needed]
-
-### Out-of-Scope Use
-
-<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
-
-[More Information Needed]
-
-## Bias, Risks, and Limitations
-
-<!-- This section is meant to convey both technical and sociotechnical limitations. -->
-
-[More Information Needed]
-
-### Recommendations
-
-<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
-
-Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
-
-## How to Get Started with the Model
-
-Use the code below to get started with the model.
-
-[More Information Needed]
-
-## Training Details
-
-### Training Data
-
-<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
-
-[More Information Needed]
-
-### Training Procedure
-
-<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
-
-#### Preprocessing [optional]
-
-[More Information Needed]
-
-
-#### Training Hyperparameters
-
-- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
-
-#### Speeds, Sizes, Times [optional]
-
-<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
-
-[More Information Needed]
-
-## Evaluation
-
-<!-- This section describes the evaluation protocols and provides the results. -->
-
-### Testing Data, Factors & Metrics
-
-#### Testing Data
-
-<!-- This should link to a Dataset Card if possible. -->
-
-[More Information Needed]
-
-#### Factors
-
-<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
-
-[More Information Needed]
-
-#### Metrics
-
-<!-- These are the evaluation metrics being used, ideally with a description of why. -->
-
-[More Information Needed]
-
-### Results
-
-[More Information Needed]
-
-#### Summary
-
-
-
-## Model Examination [optional]
-
-<!-- Relevant interpretability work for the model goes here -->
-
-[More Information Needed]
-
-## Environmental Impact
-
-<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
-
-Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
-
-- **Hardware Type:** [More Information Needed]
-- **Hours used:** [More Information Needed]
-- **Cloud Provider:** [More Information Needed]
-- **Compute Region:** [More Information Needed]
-- **Carbon Emitted:** [More Information Needed]
-
-## Technical Specifications [optional]
-
-### Model Architecture and Objective
-
-[More Information Needed]
-
-### Compute Infrastructure
-
-[More Information Needed]
-
-#### Hardware
-
-[More Information Needed]
-
-#### Software
-
-[More Information Needed]
-
-## Citation [optional]
-
-<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
-
-**BibTeX:**
-
-[More Information Needed]
-
-**APA:**
-
-[More Information Needed]
-
-## Glossary [optional]
-
-<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
-
-[More Information Needed]
-
-## More Information [optional]
-
-[More Information Needed]
-
-## Model Card Authors [optional]
-
-[More Information Needed]
-
-## Model Card Contact
-
-[More Information Needed]
-
-
-## Training procedure
-
-
-The following `bitsandbytes` quantization config was used during training:
-- quant_method: bitsandbytes
-- load_in_8bit: True
-- load_in_4bit: False
-- llm_int8_threshold: 6.0
-- llm_int8_skip_modules: None
-- llm_int8_enable_fp32_cpu_offload: False
-- llm_int8_has_fp16_weight: False
-- bnb_4bit_quant_type: fp4
-- bnb_4bit_use_double_quant: False
-- bnb_4bit_compute_dtype: float32
-
-### Framework versions
-
-
-- PEFT 0.6.2.dev0
    	
checkpoint-200/adapter_config.json DELETED

@@ -1,23 +0,0 @@
-{
-  "alpha_pattern": {},
-  "auto_mapping": null,
-  "base_model_name_or_path": "meta-llama/Llama-2-7b-chat-hf",
-  "bias": "none",
-  "fan_in_fan_out": false,
-  "inference_mode": true,
-  "init_lora_weights": true,
-  "layers_pattern": null,
-  "layers_to_transform": null,
-  "lora_alpha": 32,
-  "lora_dropout": 0.05,
-  "modules_to_save": null,
-  "peft_type": "LORA",
-  "r": 16,
-  "rank_pattern": {},
-  "revision": null,
-  "target_modules": [
-    "v_proj",
-    "q_proj"
-  ],
-  "task_type": "CAUSAL_LM"
-}
    	
checkpoint-200/adapter_model.safetensors DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e44ce263e6fd885f50d82ca515b9325375b43ee36ededb75acf161ce88bc2e41
-size 48
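This and the remaining deleted checkpoint binaries are stored via Git LFS: the repository tracks only a three-line pointer stub (spec version, sha256 object id, byte size) while the actual blob lives in LFS storage. A minimal, hypothetical sketch of reading such a pointer, assuming nothing beyond the "key value" line format shown above:

```python
# Sketch: split a git-lfs v1 pointer file into its fields.
# parse_lfs_pointer is an illustrative helper, not a git-lfs API.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")  # first space separates key/value
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:e44ce263e6fd885f50d82ca515b9325375b43ee36ededb75acf161ce88bc2e41
size 48"""

info = parse_lfs_pointer(pointer)
assert info["size"] == "48"
print(info["oid"])  # sha256:e44ce263...
```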
    	
checkpoint-200/optimizer.pt DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:28d37d8f6447c575a2561e21f372a616859ab32505c10202500519c4b2123b29
-size 67217018
    	
checkpoint-200/rng_state.pth DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a8829d0a25ff97021072fb843b073b1d71220c37836574d3373d3869c786b07f
-size 14244
    	
checkpoint-200/scheduler.pt DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:00a06e30b6154c24b7c037908cdfc128b1f68e0c9d24c9b07f6913a932ed0d12
-size 1064
    	
checkpoint-200/trainer_state.json DELETED

@@ -1,147 +0,0 @@
-{
-  "best_metric": 0.5213077664375305,
-  "best_model_checkpoint": "finetuned/Llama-2-7b-chat-hf_vn/checkpoint-200",
-  "epoch": 6.042296072507553,
-  "eval_steps": 200,
-  "global_step": 200,
-  "is_hyper_param_search": false,
-  "is_local_process_zero": true,
-  "is_world_process_zero": true,
-  "log_history": [
-    {
-      "epoch": 0.3,
-      "learning_rate": 2.9999999999999997e-05,
-      "loss": 1.2545,
-      "step": 10
-    },
-    {
-      "epoch": 0.6,
-      "learning_rate": 5.9999999999999995e-05,
-      "loss": 1.122,
-      "step": 20
-    },
-    {
-      "epoch": 0.91,
-      "learning_rate": 8.999999999999999e-05,
-      "loss": 0.9456,
-      "step": 30
-    },
-    {
-      "epoch": 1.21,
-      "learning_rate": 0.00011999999999999999,
-      "loss": 0.795,
-      "step": 40
-    },
-    {
-      "epoch": 1.51,
-      "learning_rate": 0.00015,
-      "loss": 0.6801,
-      "step": 50
-    },
-    {
-      "epoch": 1.81,
-      "learning_rate": 0.00017999999999999998,
-      "loss": 0.6285,
-      "step": 60
-    },
-    {
-      "epoch": 2.11,
-      "learning_rate": 0.00020999999999999998,
-      "loss": 0.5934,
-      "step": 70
-    },
-    {
-      "epoch": 2.42,
-      "learning_rate": 0.00023999999999999998,
-      "loss": 0.5605,
-      "step": 80
-    },
-    {
-      "epoch": 2.72,
-      "learning_rate": 0.00027,
-      "loss": 0.5303,
-      "step": 90
-    },
-    {
-      "epoch": 3.02,
-      "learning_rate": 0.0003,
-      "loss": 0.5363,
-      "step": 100
-    },
-    {
-      "epoch": 3.32,
-      "learning_rate": 0.0002869565217391304,
-      "loss": 0.5326,
-      "step": 110
-    },
-    {
-      "epoch": 3.63,
-      "learning_rate": 0.00027391304347826085,
-      "loss": 0.5291,
-      "step": 120
-    },
-    {
-      "epoch": 3.93,
-      "learning_rate": 0.0002608695652173913,
-      "loss": 0.5139,
-      "step": 130
-    },
-    {
-      "epoch": 4.23,
-      "learning_rate": 0.0002478260869565217,
-      "loss": 0.4972,
-      "step": 140
-    },
-    {
-      "epoch": 4.53,
-      "learning_rate": 0.00023478260869565215,
-      "loss": 0.5095,
-      "step": 150
-    },
-    {
-      "epoch": 4.83,
-      "learning_rate": 0.00022173913043478256,
-      "loss": 0.5085,
-      "step": 160
-    },
-    {
-      "epoch": 5.14,
-      "learning_rate": 0.00020869565217391303,
-      "loss": 0.499,
-      "step": 170
-    },
-    {
-      "epoch": 5.44,
-      "learning_rate": 0.00019565217391304346,
-      "loss": 0.4926,
-      "step": 180
-    },
-    {
-      "epoch": 5.74,
-      "learning_rate": 0.0001826086956521739,
-      "loss": 0.4789,
-      "step": 190
-    },
-    {
-      "epoch": 6.04,
-      "learning_rate": 0.00016956521739130433,
-      "loss": 0.4882,
-      "step": 200
-    },
-    {
-      "epoch": 6.04,
-      "eval_loss": 0.5213077664375305,
-      "eval_runtime": 243.3322,
-      "eval_samples_per_second": 8.219,
-      "eval_steps_per_second": 0.686,
-      "step": 200
-    }
-  ],
-  "logging_steps": 10,
-  "max_steps": 330,
-  "num_train_epochs": 10,
-  "save_steps": 200,
-  "total_flos": 6.452136280530616e+17,
-  "trial_name": null,
-  "trial_params": null
-}
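The learning-rate column in the deleted log traces a linear warmup-then-decay shape: it climbs to its 3e-4 peak at step 100, then falls linearly toward zero at `max_steps: 330`. A minimal sketch reproducing those values with `transformers.get_linear_schedule_with_warmup`, assuming 100 warmup steps (inferred from the log; the actual `training_args.bin` is an opaque binary here):

```python
import torch
from transformers import get_linear_schedule_with_warmup

# Dummy parameter/optimizer just to drive the scheduler; the 3e-4 peak
# matches the learning rate logged at step 100 above.
param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW([param], lr=3e-4)

# Assumption: 100 warmup steps and 330 total steps, inferred from the
# log_history (LR peaks at step 100) and "max_steps": 330.
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=100, num_training_steps=330
)

for step in range(1, 201):
    optimizer.step()
    scheduler.step()
    if step in (10, 100, 110, 200):
        print(step, scheduler.get_last_lr()[0])
# step 10  -> 3e-05        (matches the first log entry)
# step 100 -> 3e-04        (peak)
# step 110 -> ~2.8696e-04  (3e-4 * (330-110)/(330-100))
# step 200 -> ~1.6957e-04  (matches the last logged learning rate)
```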
    	
checkpoint-200/training_args.bin DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:26df47623533f55dbad8435cf04308e41ffbbe7aa5197b46eb90f78705948ad8
-size 4600
