Upload LlamaForCausalLM
- config.json +13 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +0 -0
config.json CHANGED
@@ -23,6 +23,19 @@
   "num_hidden_layers": 32,
   "num_key_value_heads": 8,
   "pretraining_tp": 1,
+  "quantization_config": {
+    "backward_dtype": "bf16",
+    "forward_dtype": "nvfp4",
+    "forward_method": "abs_max",
+    "hadamard_group_size": 16,
+    "modules_to_not_convert": [
+      "lm_head"
+    ],
+    "pseudoquantization": false,
+    "quant_method": "fp_quant",
+    "store_master_weights": false,
+    "transform_init": "hadamard"
+  },
   "rms_norm_eps": 1e-05,
   "rope_scaling": {
     "factor": 8.0,
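Because quant_method is set to fp_quant, recent transformers versions that ship the fp_quant backend pick this quantization_config up directly from config.json at load time; no extra arguments are needed. A minimal loading sketch, assuming a hypothetical repo id and an environment where the fp_quant kernels are installed:

# Minimal loading sketch. "org/llama-nvfp4" is a placeholder repo id,
# not this repository. from_pretrained reads the quantization_config
# embedded in config.json, so the nvfp4 forward dtype and the lm_head
# exclusion above are applied automatically.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("org/llama-nvfp4", device_map="auto")
tokenizer = AutoTokenizer.from_pretrained("org/llama-nvfp4")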
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14ed96e68ce9efab0e4d1d98b42eb3a65ea1a25658786642e11a34bc4954e04d
+size 4977475024
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f70af9efce273f2dbb03522ea4532a75dea16add2fa30bb07e8040d5a183b252
+size 1050673280
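The two shard entries above are git-lfs pointer files: the repository itself stores only the spec version, the sha256 oid, and the byte size, while the tensor data lives in LFS storage. A sketch of fetching one shard with huggingface_hub, again with a placeholder repo id:

# Sketch of downloading a single shard. "org/llama-nvfp4" is a
# placeholder repo id. hf_hub_download resolves the LFS pointer and
# returns a local path to the ~4.98 GB shard file.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="org/llama-nvfp4",
    filename="model-00001-of-00002.safetensors",
)
print(path)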
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff.
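model.safetensors.index.json is the sharding index: its weight_map object maps every parameter name to the shard file that contains it, which is why the index is rewritten in the same commit that adds the two shards. A sketch of inspecting a locally downloaded copy:

# Sketch of inspecting the shard index after downloading it; the
# filename matches the file in this commit. "weight_map" is the
# standard key mapping parameter names to shard filenames.
import json

with open("model.safetensors.index.json") as f:
    index = json.load(f)

# Print which shard holds each of the first few parameters.
for name, shard in list(index["weight_map"].items())[:5]:
    print(name, "->", shard)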