Upload README.md
Browse files

README.md — CHANGED
@@ -53,24 +53,17 @@ dtype: bfloat16

Removed usage section:

## 💻 Usage

```python
!pip install -qU transformers accelerate

from transformers import AutoTokenizer
import transformers
import torch

# ... (pipeline setup lines removed in this commit; content not captured in the extraction)
)

outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
|
Added usage section:

## 💻 Usage

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

torch.set_default_device("cuda")

model = AutoModelForCausalLM.from_pretrained("vince62s/phi-2-psy", torch_dtype="auto", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("vince62s/phi-2-psy", trust_remote_code=True)

inputs = tokenizer('''def print_prime(n):
   """
   Print all primes between 1 and n
   """''', return_tensors="pt", return_attention_mask=False)

outputs = model.generate(**inputs, max_length=200)
text = tokenizer.batch_decode(outputs)[0]
print(text)
```