pandora-s Rocketknight1 HF Staff committed on
Commit
e17a136
·
verified ·
1 Parent(s): 7d30db4

Clean up tool use snippet (#75)

Browse files

- Clean up tool use snippet (897a9a2cb569478d9809939ba60de59e32421f01)


Co-authored-by: Matthew Carrigan <[email protected]>

Files changed (1) hide show
  1. README.md +5 -5
README.md CHANGED
@@ -235,18 +235,18 @@ def get_current_weather(location: str, format: str):
235
  conversation = [{"role": "user", "content": "What's the weather like in Paris?"}]
236
  tools = [get_current_weather]
237
 
238
- # render the tool use prompt as a string:
239
- tool_use_prompt = tokenizer.apply_chat_template(
240
  conversation,
241
  tools=tools,
242
- tokenize=False,
243
  add_generation_prompt=True,
 
 
244
  )
245
 
246
- inputs = tokenizer(tool_use_prompt, return_tensors="pt")
247
-
248
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
249
 
 
250
  outputs = model.generate(**inputs, max_new_tokens=1000)
251
  print(tokenizer.decode(outputs[0], skip_special_tokens=True))
252
  ```
 
235
  conversation = [{"role": "user", "content": "What's the weather like in Paris?"}]
236
  tools = [get_current_weather]
237
 
238
+ # format and tokenize the tool use prompt
239
+ inputs = tokenizer.apply_chat_template(
240
  conversation,
241
  tools=tools,
 
242
  add_generation_prompt=True,
243
+ return_dict=True,
244
+ return_tensors="pt",
245
  )
246
 
 
 
247
  model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")
248
 
249
+ inputs.to(model.device)
250
  outputs = model.generate(**inputs, max_new_tokens=1000)
251
  print(tokenizer.decode(outputs[0], skip_special_tokens=True))
252
  ```