# app.py — "Watermark Your Chats" Gradio Space.
# Author: meg (HF Staff). Last update: commit 16090ab (verified).
import gradio as gr
from huggingface_hub import InferenceClient
# Shared Hugging Face inference client used by the chat handler below.
# No model or token is passed here, so it presumably relies on ambient
# configuration (e.g. an HF_TOKEN secret) — confirm in the Space settings.
client = InferenceClient()
# Build the demo UI. All components, the chat handler, and the event wiring
# live inside the Blocks context so Gradio registers them against `demo`.
with gr.Blocks(theme="ocean") as demo:
    gr.HTML("""<h1><center> Watermark Your Chats🔒💬</center></h1>
<br><center>Append the <i>Watermark</i> text <b>"-- AI Generated --"</b> to the end of messages copied from the chatbot. You can set this to any useful value.
<br>This helps indicate that the message was generated by an AI model.</center>""")
    with gr.Row():
        # `watermark` appends the given text to anything copied out of the
        # chatbot via the per-message or copy-all buttons.
        chatbot = gr.Chatbot(type="messages",
                             label="openai/gpt-oss-20b",
                             watermark="-- AI Generated --",
                             show_copy_button=True,
                             show_copy_all_button=True)
        gr.HTML("""
<div style="display: flex; flex-direction: column; align-items: center; justify-content: center; height: 100%; min-height: 400px;">
<div style="font-size: 18px; font-weight: bold; line-height: 1.5;
border: 2px dotted #7de6b7; padding: 25px 35px;
background-color: #edfdf5; text-align: center;">
⬅️ Copy the text from the chatbot using the copy button ⿻ below the message <br><br>And then <i>Paste</i> it into the <i>Clipboard</i> below ⬇️
</div>
</div>
""")
    with gr.Row():
        with gr.Column():
            msg = gr.Textbox(label="User Input", placeholder="Type your message here, then press enter.")
        with gr.Column():
            # Scratch area for the user to paste copied (watermarked) text.
            clipboard_textbox = gr.TextArea(label="Clipboard", placeholder="Paste the copied text here!", interactive=True)
    with gr.Row():
        clear = gr.ClearButton([msg, chatbot, clipboard_textbox], value="Clear")
    with gr.Row():
        gr.HTML('<i>Space based on <a href="https://huggingface.co/spaces/ysharma/chatbot_watermark_demo" target="_blank">earlier chatbot watermarking demo from Yuvraj Sharma.</a></i>')

    def respond(message, chat_history):
        """Chat handler for `msg.submit`.

        Args:
            message: the new user message from the textbox.
            chat_history: list of ``{"role", "content"}`` dicts
                (Chatbot ``type="messages"`` format); mutated in place.

        Returns:
            ("", updated chat_history) — the empty string clears the textbox.
        """
        try:
            # Replay prior turns, then append the new user message.
            # NOTE: loop variable renamed from `msg` to avoid shadowing the
            # module-level Textbox component of the same name.
            messages = [{"role": turn["role"], "content": turn["content"]}
                        for turn in chat_history]
            messages.append({"role": "user", "content": message})
            response = client.chat_completion(
                messages=messages,
                model="openai/gpt-oss-20b",
                max_tokens=500,
                temperature=0.7,
            )
            bot_message = response.choices[0].message.content
        except Exception as e:
            # Degrade gracefully: surface quota/rate-limit problems with a
            # targeted hint, anything else with a generic one. Never crash
            # the UI from the handler.
            error_msg = str(e).lower()
            if "quota" in error_msg or "rate limit" in error_msg or "429" in error_msg or "insufficient" in error_msg:
                bot_message = "⚠️ The inference quota has been exhausted. Kindly duplicate this space and configure your HF_TOKEN to continue using the app."
            else:
                bot_message = f"⚠️ An error occurred: {str(e)}. If this persists, please duplicate the space and configure your HF_TOKEN."
        # Record both sides of the turn (error text doubles as the reply).
        chat_history.append({"role": "user", "content": message})
        chat_history.append({"role": "assistant", "content": bot_message})
        return "", chat_history

    # Enter in the textbox -> respond(); outputs clear the box and
    # refresh the chatbot.
    msg.submit(respond, [msg, chatbot], [msg, chatbot])

if __name__ == "__main__":
    demo.launch(debug=False)