Update README.md
Browse files
README.md
CHANGED
|
@@ -37,7 +37,13 @@ model_id = "reaperdoesntknow/Qemma-GEI"
|
|
| 37 |
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
|
| 38 |
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16).eval()
|
| 39 |
|
| 40 |
-
text =
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
inputs = tokenizer(text, return_tensors="pt", max_length=64, padding='max_length', truncation=True)
|
| 42 |
inputs = {k: v.to(model.device) for k, v in inputs.items()}
|
| 43 |
|
|
|
|
| 37 |
tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=True)
|
| 38 |
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16).eval()
|
| 39 |
|
| 40 |
+
text = (
|
| 41 |
+
"<|user|>"
|
| 42 |
+
"What makes the sky blue?"
|
| 43 |
+
"<|assistant|>"
|
| 44 |
+
"<think><reasoning_step>"
|
| 45 |
+
)
|
| 46 |
+
|
| 47 |
inputs = tokenizer(text, return_tensors="pt", max_length=64, padding='max_length', truncation=True)
|
| 48 |
inputs = {k: v.to(model.device) for k, v in inputs.items()}
|
| 49 |
|