# backend.py
from typing import TypedDict, Annotated, List

from dotenv import load_dotenv
from langchain_core.messages import BaseMessage, HumanMessage
from langchain_google_genai import ChatGoogleGenerativeAI
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages

load_dotenv()

THREAD_ID = "1"

gen_llm = ChatGoogleGenerativeAI(model='gemini-2.5-flash')


class ChatBotState(TypedDict):
    # add_messages is a reducer: messages returned by a node are appended to
    # the existing list (matched by message id) instead of replacing it.
    messages: Annotated[List[BaseMessage], add_messages]


def chat_node(state: ChatBotState) -> ChatBotState:
    messages = state['messages']
    # invoke() on a list of messages returns a single AIMessage; wrap it in
    # a list so the add_messages reducer can append it to the state.
    response = gen_llm.invoke(messages)
    return {'messages': [response]}


# build graph: START -> chat_node -> END
graph = StateGraph(ChatBotState)
graph.add_node('chat_node', chat_node)
graph.add_edge(START, 'chat_node')
graph.add_edge('chat_node', END)

checkpointer = InMemorySaver()
chatbot = graph.compile(checkpointer=checkpointer)


def backend_chat(history: List[BaseMessage], user_text: str) -> List[BaseMessage]:
    """Run one user turn through the graph and return the full conversation."""
    if history is None:
        history = []
    # The checkpointer already stores earlier turns for this thread, so pass
    # only the new human message; re-sending the whole history would let
    # add_messages append duplicate copies of id-less messages.
    state = {'messages': [HumanMessage(content=user_text)]}
    config = {"configurable": {"thread_id": THREAD_ID}}
    result_state = chatbot.invoke(state, config=config)
    # result_state['messages'] is already the complete conversation (prior
    # history + new human message + assistant reply). Return it directly;
    # extending history with it would duplicate every message.
    return result_state.get('messages', history)
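
# A minimal sketch of how a frontend could reload this thread's stored
# conversation on startup instead of keeping its own copy. load_history is a
# hypothetical helper (not part of the original file); it assumes the
# compiled graph's get_state() API for reading a thread's checkpointed state.
def load_history(thread_id: str = THREAD_ID) -> List[BaseMessage]:
    config = {"configurable": {"thread_id": thread_id}}
    snapshot = chatbot.get_state(config)
    # snapshot.values is the ChatBotState dict; a thread that has never been
    # invoked has no 'messages' key yet, so fall back to an empty list.
    return snapshot.values.get('messages', [])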
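
# Quick smoke test, assuming GOOGLE_API_KEY is available via .env. Because
# both calls share THREAD_ID, the checkpointer carries the first turn into
# the second, so the model can resolve "that" in the follow-up question.
if __name__ == "__main__":
    history = backend_chat([], "What is LangGraph?")
    print(history[-1].content)
    history = backend_chat(history, "Summarize that in one sentence.")
    print(history[-1].content)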