# app.py (or your main Gradio script)
import gradio as gr
from backend import chatbot
from langchain_core.messages import HumanMessage, AIMessage
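
# The `backend` import above is assumed to expose a *compiled* LangGraph graph;
# a checkpointer is required for the thread_id config used below to persist
# conversation state across turns. A minimal sketch of such a backend.py
# (model choice and node names are assumptions, not taken from this project):
#
#     from langgraph.graph import StateGraph, START, MessagesState
#     from langgraph.checkpoint.memory import MemorySaver
#     from langchain_openai import ChatOpenAI
#
#     llm = ChatOpenAI(model="gpt-4o-mini")
#
#     def call_model(state: MessagesState):
#         # Append the model's reply to the conversation state
#         return {"messages": [llm.invoke(state["messages"])]}
#
#     builder = StateGraph(MessagesState)
#     builder.add_node("model", call_model)
#     builder.add_edge(START, "model")
#     chatbot = builder.compile(checkpointer=MemorySaver())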


def respond_stream(message, history):
    # Convert Gradio's [user, assistant] history pairs to LangChain messages
    messages = []
    for human, ai in history:
        messages.append(HumanMessage(content=human))
        messages.append(AIMessage(content=ai))

    # Add the current user message
    messages.append(HumanMessage(content=message))

    # Stream the response; the thread_id keys this conversation in the checkpointer
    config = {"configurable": {"thread_id": "1"}}
    full_response = ""

    # stream_mode="messages" yields (message_chunk, metadata) tuples
    for chunk, metadata in chatbot.stream(
        {"messages": messages},
        config=config,
        stream_mode="messages",
    ):
        # Extract content from the chunk, accumulating the partial response
        if hasattr(chunk, "content"):
            full_response += chunk.content
            yield full_response
        elif isinstance(chunk, dict) and "content" in chunk:
            full_response += chunk["content"]
            yield full_response
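
# A quick way to exercise the generator outside Gradio (assumes the backend
# and any required API keys are configured):
#
#     for partial in respond_stream("Tell me a joke.", []):
#         print(partial)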


def main():
    # Create the Gradio ChatInterface around the streaming generator.
    # Note: by default, ChatInterface passes history as [user, assistant] pairs,
    # which is what respond_stream expects; passing type="messages" would switch
    # history to role/content dicts and the conversion loop would need updating.
    demo = gr.ChatInterface(
        fn=respond_stream,  # a generator function, so Gradio streams the output
        title="Streaming ChatBot",
        description="Chatbot using langgraph backend with streaming output.",
        examples=[["Hello"], ["How are you?"], ["Tell me a joke."]],
    )
    # Launch with share=True to get a public link if needed
    demo.launch(share=True)
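
# Run with `python app.py`; Gradio serves locally (http://localhost:7860 by
# default), and share=True additionally creates a temporary public link.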


if __name__ == "__main__":
    main()