Text Generation
Transformers
Safetensors
qwen3
Not-For-All-Audiences
conversational
text-generation-inference
Abhaykoul committed on
Commit 513dca4 · verified · 1 Parent(s): 5496097

Update README.md

Files changed (1)
  1. README.md +10 -21
README.md CHANGED
@@ -68,36 +68,25 @@ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 
 ```python
 # Example for complex adult scenario generation
-prompt = "Create an intimate scene with emotional depth and explicit content"
-
-messages = [
-    {"role": "user", "content": prompt}
+chat = [
+    {"role": "user", "content": "hlo"}
 ]
 
-text = tokenizer.apply_chat_template(
-    messages,
-    tokenize=False,
-    add_generation_prompt=True
+inputs = tokenizer.apply_chat_template(
+    chat,
+    add_generation_prompt=True,
+    return_tensors="pt"
 )
 
-model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
-
-# Generate with intermediate thinking enabled
-generated_ids = model.generate(
-    **model_inputs,
+outputs = model.generate(
+    inputs,
     max_new_tokens=2048,
     temperature=0.7,
     top_p=0.9,
-    do_sample=True,
-    repetition_penalty=1.1
+    do_sample=True
 )
 
-response = tokenizer.decode(
-    generated_ids[0][len(model_inputs.input_ids[0]):],
-    skip_special_tokens=True
-)
-print(response)
-```
+print(tokenizer.decode(outputs[0], skip_special_tokens=True))
 
 ---
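
For reference, here is the post-commit example assembled into a self-contained sketch. The imports, the placeholder model id, the dtype/device settings, and the `.to(model.device)` call are assumptions added for illustration; they are not part of this commit, which only edits the chat-template block shown in the diff above.

```python
# Minimal runnable sketch of the updated README example.
# Assumptions (not in the diff): imports, the placeholder model id,
# dtype/device_map settings, and moving inputs onto model.device.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "<this-model-repo-id>"  # placeholder: substitute the actual repo id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumption; pick what your hardware supports
    device_map="auto",
)

chat = [
    {"role": "user", "content": "hlo"}
]

# With tokenize=True (the default) and return_tensors="pt",
# apply_chat_template returns the input ids directly.
inputs = tokenizer.apply_chat_template(
    chat,
    add_generation_prompt=True,
    return_tensors="pt"
).to(model.device)  # added here: the committed snippet leaves the inputs on CPU

outputs = model.generate(
    inputs,
    max_new_tokens=2048,
    temperature=0.7,
    top_p=0.9,
    do_sample=True
)

# Note: outputs[0] includes the prompt tokens. The removed version of the
# example sliced them off before decoding; to do the same here, decode
# outputs[0][inputs.shape[-1]:] instead.
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```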