shanearora committed on
Commit
c58f920
·
verified ·
1 Parent(s): c9c2937

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +6 -5
README.md CHANGED
@@ -28,16 +28,17 @@ This information and more can also be found:
28
 
29
  # Use
30
 
31
- Install `transformers` **from [this source](https://github.com/swj0419/transformers_flexolmo)** and run:
32
  ```python
33
- from transformers import Olmoe2ForCausalLM, AutoTokenizer
34
  import torch
35
 
36
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
37
 
38
- MODEL_NAME = "allenai/FlexOlmo-7x7B-1T"
39
- model = Olmoe2ForCausalLM.from_pretrained(MODEL_NAME).to(DEVICE)
40
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
 
41
  inputs = tokenizer("Bitcoin is", return_tensors="pt")
42
  inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
43
  out = model.generate(**inputs, max_length=64)
 
28
 
29
  # Use
30
 
31
+ Install `transformers` with version `4.57.0` or newer and run:
32
  ```python
33
+ from transformers import AutoModelForCausalLM, AutoTokenizer
34
  import torch
35
 
36
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
37
 
38
+ MODEL_NAME = "allenai/Flex-code-2x7B-1T"
39
+ TOKENIZER_NAME = "allenai/dolma2-tokenizer"
40
+ model = AutoModelForCausalLM.from_pretrained(MODEL_NAME).to(DEVICE)
41
+ tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_NAME)
42
  inputs = tokenizer("Bitcoin is", return_tensors="pt")
43
  inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
44
  out = model.generate(**inputs, max_length=64)