Upload Gemma2ForCausalLM
Files changed:
- config.json +58 -0
- generation_config.json +8 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +0 -0
    	
config.json ADDED

@@ -0,0 +1,58 @@
+{
+  "_name_or_path": "Gemma-2-9B-Instruct-4Bit-GPTQ",
+  "architectures": [
+    "Gemma2ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "attn_logit_softcapping": 50.0,
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "eos_token_id": 1,
+  "final_logit_softcapping": 30.0,
+  "head_dim": 256,
+  "hidden_act": "gelu_pytorch_tanh",
+  "hidden_activation": "gelu_pytorch_tanh",
+  "hidden_size": 3584,
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "max_position_embeddings": 8192,
+  "model_type": "gemma2",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 42,
+  "num_key_value_heads": 8,
+  "pad_token_id": 0,
+  "quantization_config": {
+    "batch_size": 1,
+    "bits": 4,
+    "block_name_to_quantize": null,
+    "cache_block_outputs": true,
+    "damp_percent": 0.1,
+    "dataset": "c4",
+    "desc_act": false,
+    "exllama_config": {
+      "version": 1
+    },
+    "group_size": 128,
+    "max_input_length": null,
+    "model_seqlen": null,
+    "module_name_preceding_first_block": null,
+    "modules_in_block_to_quantize": null,
+    "pad_token_id": null,
+    "quant_method": "gptq",
+    "sym": true,
+    "tokenizer": null,
+    "true_sequential": true,
+    "use_cuda_fp16": false,
+    "use_exllama": true
+  },
+  "query_pre_attn_scalar": 224,
+  "rms_norm_eps": 1e-06,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
+  "sliding_window_size": 4096,
+  "torch_dtype": "float16",
+  "transformers_version": "4.43.0.dev0",
+  "use_cache": false,
+  "vocab_size": 256000
+}
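The quantization_config block marks this checkpoint as 4-bit GPTQ (group size 128, symmetric quantization, ExLlama v1 kernels), which transformers detects at load time, so no separate quantization step is needed. A minimal loading sketch, assuming the optimum and auto-gptq packages are installed and using a placeholder repo id (the actual Hub path of this upload is not shown in the diff):

    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Placeholder repo id; substitute the actual Hub path of this upload.
    repo_id = "Gemma-2-9B-Instruct-4Bit-GPTQ"

    tokenizer = AutoTokenizer.from_pretrained(repo_id)

    # quantization_config in config.json identifies the checkpoint as GPTQ,
    # so from_pretrained loads the 4-bit weights directly; device_map="auto"
    # spreads the ~6 GB of shards across the available devices.
    model = AutoModelForCausalLM.from_pretrained(repo_id, device_map="auto")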
    	
generation_config.json ADDED

@@ -0,0 +1,8 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 2,
+  "cache_implementation": "hybrid",
+  "eos_token_id": 1,
+  "pad_token_id": 0,
+  "transformers_version": "4.43.0.dev0"
+}
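generation_config.json only pins the special-token ids (bos 2, eos 1, pad 0) and the hybrid KV cache, so generate() picks these defaults up without an explicit GenerationConfig. A short usage sketch continuing from the loading example above; the prompt is illustrative:

    import torch

    prompt = "Explain GPTQ quantization in one sentence."
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # eos_token_id and pad_token_id come from generation_config.json,
    # so a plain generate() call stops and pads correctly by default.
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=64)
    print(tokenizer.decode(output[0], skip_special_tokens=True))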
    	
model-00001-of-00002.safetensors ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bd43562b13cec941284ecab02604ba9ee3d0e401905c8c2e9f7d721534fd8ba0
+size 4978866392
    	
model-00002-of-00002.safetensors ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:942953c50a2ec6dd3f7abb304a4318ef293fa242fd6ae34cd66e9f551cfe266e
+size 1187980416
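Both safetensors entries are Git LFS pointers rather than the weights themselves: each records only the SHA-256 and byte size of the real shard (about 5.0 GB and 1.2 GB here). A minimal integrity-check sketch, assuming the first shard has already been downloaded to the current directory:

    import hashlib
    import os

    # Values copied from the LFS pointer for model-00001-of-00002.safetensors.
    expected_oid = "bd43562b13cec941284ecab02604ba9ee3d0e401905c8c2e9f7d721534fd8ba0"
    expected_size = 4978866392
    path = "model-00001-of-00002.safetensors"  # assumed local download path

    assert os.path.getsize(path) == expected_size, "size mismatch"

    sha = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            sha.update(chunk)
    assert sha.hexdigest() == expected_oid, "checksum mismatch"
    print("shard matches its LFS pointer")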
    	
model.safetensors.index.json ADDED

(The diff for this file is too large to render; see the raw file.)

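model.safetensors.index.json is the standard sharded-checkpoint index: a metadata.total_size field plus a weight_map from each tensor name to the shard file that stores it. Its contents are not rendered in this diff, but a sketch of how such an index is typically inspected (key names follow the safetensors sharding convention, not this specific file):

    import json
    from collections import Counter

    with open("model.safetensors.index.json") as f:  # assumed local path
        index = json.load(f)

    # weight_map maps tensor names, e.g. "model.layers.0.self_attn.q_proj.qweight",
    # to one of the two shard files uploaded in this commit.
    print("total bytes:", index["metadata"]["total_size"])
    print("tensors per shard:", Counter(index["weight_map"].values()))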