Update README.md
README.md
@@ -18,9 +18,6 @@ pipeline_tag: text-generation
 # Alpie-Core: 4-bit Quantized Reasoning Model
 
 ---
-<p align="center">
-<img src="./Frame%202018777151.png" alt="Alpie-Core Architecture" width="700"/>
-</p>
 *[Space reserved for blog paper, technical report links]*
 ---
 
@@ -205,7 +202,7 @@ from peft import PeftModel, PeftConfig
 import torch
 
 # Load LoRA adapter configuration to find the base model
-peft_model_id = "169Pi/Alpie-
+peft_model_id = "169Pi/Alpie-Core-4-bit"
 config = PeftConfig.from_pretrained(peft_model_id)
 
 # Load the base model
@@ -242,7 +239,7 @@ from peft import PeftModel, PeftConfig
 import torch
 
 # Load LoRA adapter configuration to find the base model
-peft_model_id = "169Pi/Alpie-
+peft_model_id = "169Pi/Alpie-Core-4-bit"
 config = PeftConfig.from_pretrained(peft_model_id)
 
 # Load the base model
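Both hunks apply the same fix: the previous README had a truncated repository ID (`"169Pi/Alpie-`), which would make the snippet fail at `PeftConfig.from_pretrained`. For reference, here is a minimal end-to-end sketch of the corrected loading flow, assuming the adapter config on the Hub names a downloadable base model; the `torch_dtype` and `device_map` settings are illustrative choices, not part of the README:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel, PeftConfig

# Load LoRA adapter configuration to find the base model (the corrected repo ID)
peft_model_id = "169Pi/Alpie-Core-4-bit"
config = PeftConfig.from_pretrained(peft_model_id)

# Load the base model named in the adapter config
# (fp16 + device_map="auto" are illustrative, not mandated by the README)
base_model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path,
    torch_dtype=torch.float16,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Attach the LoRA adapter weights on top of the base model
model = PeftModel.from_pretrained(base_model, peft_model_id)
model.eval()
```

Reading `PeftConfig` first means the base checkpoint never has to be hard-coded: `config.base_model_name_or_path` resolves it from the adapter repo itself.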