Update app.py
app.py
CHANGED
@@ -46,7 +46,7 @@ def inference(query):
     tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to("cuda")
     outputs = model.generate(tokenized_chat, **generation_params)
     decoded_outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
-    assistant_response = decoded_outputs[0].split("<|im_start|>assistant
+    assistant_response = decoded_outputs[0].split("<|im_start|>assistant")[-1].strip()
     response_ = assistant_response.replace('<|im_end|>', "")
     return response_
     # outputs = model.generate(tokenized_chat, **generation_params, streamer=streamer)
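For context, the removed line appears to contain a literal line break inside the string passed to split(), which would break app.py at load time; the replacement keeps the whole "<|im_start|>assistant" marker on one line and takes everything after the last occurrence. Below is a minimal runnable sketch of the fixed inference() function. The model_id and generation_params values are assumptions for illustration only; the Space's actual model, tokenizer, and generation settings are defined elsewhere in app.py.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: any chat model whose template uses ChatML-style markers
# (<|im_start|> / <|im_end|>) and leaves them in the decoded text.
model_id = "some-org/some-chatml-model"  # hypothetical placeholder
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
generation_params = {"max_new_tokens": 256, "do_sample": True, "temperature": 0.7}  # assumed values

def inference(query):
    messages = [{"role": "user", "content": query}]
    # Build the prompt with the model's chat template and move it to the GPU.
    tokenized_chat = tokenizer.apply_chat_template(
        messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    ).to("cuda")
    outputs = model.generate(tokenized_chat, **generation_params)
    decoded_outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    # The decoded text still contains the ChatML role markers, so take the text
    # after the last "<|im_start|>assistant" marker (the model's reply) and
    # strip the trailing end-of-turn token.
    assistant_response = decoded_outputs[0].split("<|im_start|>assistant")[-1].strip()
    response_ = assistant_response.replace('<|im_end|>', "")
    return response_

Splitting on the full marker in a single string literal is what the commit changes: the extraction logic itself (last assistant turn, then drop "<|im_end|>") is unchanged from the original lines.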