|
|
|
|
|
import gradio as gr |
|
|
from backend import chatbot |
|
|
from langchain_core.messages import HumanMessage, BaseMessage, AIMessage |
|
|
|
|
|
def respond_stream(message, history):
    """Stream an assistant reply for a Gradio ChatInterface.

    Args:
        message: The latest user message as a plain string.
        history: Prior turns from Gradio as (user, assistant) string pairs.
            The assistant part may be None while a reply is pending.

    Yields:
        str: The accumulated assistant response after each streamed chunk,
        so Gradio renders a progressively growing message.
    """
    # Rebuild the conversation as LangChain message objects.
    messages = []
    for human, ai in history:
        messages.append(HumanMessage(content=human))
        # FIX: the assistant turn is a bare string in Gradio history; it must
        # be wrapped in AIMessage (the original appended the raw string, which
        # the chat model cannot interpret as an assistant turn).
        if ai is not None:
            messages.append(AIMessage(content=ai))

    messages.append(HumanMessage(content=message))

    # Fixed thread id: every session shares one langgraph checkpointer thread.
    # NOTE(review): consider a per-session id if isolation is desired.
    config = {"configurable": {"thread_id": "1"}}

    full_response = ""

    # stream_mode="messages" yields (message_chunk, metadata) pairs of tokens.
    for chunk, metadata in chatbot.stream(
        {"messages": messages},
        config=config,
        stream_mode="messages"
    ):
        # Chunks may be message objects (with .content) or plain dicts,
        # depending on the backend graph; handle both shapes.
        content = getattr(chunk, 'content', None)
        if content is None and isinstance(chunk, dict):
            content = chunk.get('content')
        if content:
            full_response += content
            yield full_response
|
|
|
|
|
|
|
|
def main():
    """Build the Gradio chat interface and launch it with a public link."""
    sample_prompts = [["Hello"], ["How are you?"], ["Tell me a joke."]]

    chat_ui = gr.ChatInterface(
        fn=respond_stream,
        title="Streaming ChatBot",
        description="Chatbot using langgraph backend with streaming output.",
        examples=sample_prompts,
    )

    # share=True serves the app through a temporary public gradio.live URL.
    chat_ui.launch(share=True)
|
|
|
|
|
# Launch the app only when run as a script, not when imported as a module.
if __name__ == "__main__":
    main()