import streamlit as st
import time
import os
import sys
from datetime import datetime
from pathlib import Path

# Make sibling packages (utils/, core/, services/) importable regardless of
# the directory the app is launched from.
sys.path.append(str(Path(__file__).parent))
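
# Project-local modules: app configuration, the two model backends, session
# persistence, and the Redis health check surfaced in the debug panel.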
from utils.config import config
from core.llm import send_to_ollama, send_to_hf
from core.session import session_manager
from core.memory import check_redis_health

st.set_page_config(page_title="AI Life Coach", page_icon="🧠", layout="wide")
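
# Seed st.session_state once per browser session so reruns keep the chat
# transcript and the per-backend call telemetry.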
if "messages" not in st.session_state:
    st.session_state.messages = []
if "last_error" not in st.session_state:
    st.session_state.last_error = ""
if "last_ollama_call_success" not in st.session_state:
    st.session_state.last_ollama_call_success = None
if "last_ollama_call_time" not in st.session_state:
    st.session_state.last_ollama_call_time = ""
if "last_ollama_response_preview" not in st.session_state:
    st.session_state.last_ollama_response_preview = ""
if "last_hf_call_success" not in st.session_state:
    st.session_state.last_hf_call_success = None
if "last_hf_call_time" not in st.session_state:
    st.session_state.last_hf_call_time = ""
if "last_hf_response_preview" not in st.session_state:
    st.session_state.last_hf_response_preview = ""
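
# Sidebar: branding, model selection, Ollama server URL, and history controls.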
with st.sidebar:
    st.title("AI Life Coach")
    st.markdown("Your personal AI-powered life development assistant")

    model_options = {
        "Mistral 7B (Local)": "mistral:latest",
        "Llama 2 7B (Local)": "llama2:latest",
        "OpenChat 3.5 (Local)": "openchat:latest",
    }
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        index=0,
    )
    st.session_state.selected_model = model_options[selected_model_name]

    st.session_state.ngrok_url = st.text_input(
        "Ollama Server URL",
        value=st.session_state.get("ngrok_url", "http://localhost:11434"),
        help="Enter the URL to your Ollama server",
    )

    st.subheader("Conversation History")
    if st.button("Clear History"):
        # Clears only the on-screen transcript; the conversation persisted
        # via session_manager is left intact.
        st.session_state.messages = []
        st.success("History cleared!")
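
# Collapsible debug panel: environment detection, backend health, and the
# outcome of the most recent Ollama / Hugging Face calls.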
with st.sidebar.expander("🔧 Debug Info"):
    st.write(f"**OLLAMA_HOST**: {st.session_state.ngrok_url}")
    st.write(f"**Selected Model**: {st.session_state.selected_model}")
    st.write(f"Fallback Mode: {'✅ On' if config.use_fallback else '❌ Off'}")
    st.write(f"Redis Status: {'✅ Healthy' if check_redis_health() else '⚠️ Unavailable'}")
    st.write(f"Env Detected As: {'☁️ HF Space' if config.is_hf_space else '🏠 Local'}")
    st.write(f"HF Token Set: {'✅ Yes' if config.hf_token else '❌ No'}")

    if st.session_state.last_error:
        st.warning(f"Last Error: {st.session_state.last_error}")

    if st.session_state.last_ollama_call_success is not None:
        status_icon = "✅ Success" if st.session_state.last_ollama_call_success else "❌ Failed"
        st.write(f"Last Ollama Call: {status_icon}")
        st.write(f"At: {st.session_state.last_ollama_call_time}")
        if st.session_state.last_ollama_response_preview:
            preview = st.session_state.last_ollama_response_preview
            st.code(preview[:200] + ("..." if len(preview) > 200 else ""), language="text")

    if st.session_state.last_hf_call_success is not None:
        status_icon = "✅ Success" if st.session_state.last_hf_call_success else "❌ Failed"
        st.write(f"Last HF Call: {status_icon}")
        st.write(f"At: {st.session_state.last_hf_call_time}")
        if st.session_state.last_hf_response_preview:
            preview = st.session_state.last_hf_response_preview
            st.code(preview[:200] + ("..." if len(preview) > 200 else ""), language="text")

    if st.button("🔄 Refresh Ollama Status"):
        # Imported lazily so the monitor service is only required on demand.
        from services.ollama_monitor import check_ollama_status

        status = check_ollama_status()
        st.sidebar.info(f"Ollama Status: {'Running' if status['running'] else 'Unavailable'}")
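
# Main chat area: header plus a replay of the stored transcript so the
# conversation survives Streamlit reruns.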
st.title("🧠 AI Life Coach")
st.markdown("Ask me anything about personal development, goal setting, or life advice!")

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
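
# Input row: a wide text box next to a narrow Send button.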
col1, col2 = st.columns([4, 1])
with col1:
    user_input = st.text_input(
        "Your message...",
        key="user_message_input",
        placeholder="Type your message here...",
        label_visibility="collapsed",
    )
with col2:
    send_button = st.button("Send", key="send_message_button", use_container_width=True)
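
# Send handler: record the user turn, query the backends, and persist the reply.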
if send_button and user_input.strip():
    # Echo the user's message immediately and record it in the transcript.
    with st.chat_message("user"):
        st.markdown(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Reset the error indicator for this attempt.
    st.session_state.last_error = ""

    # Build the model context: the last five persisted turns plus the new message.
    user_session = session_manager.get_session("default_user")
    conversation = user_session.get("conversation", [])
    conversation_history = conversation[-5:]
    conversation_history.append({"role": "user", "content": user_input})
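
    # Ask the backends for a reply: Ollama first, then the Hugging Face fallback.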
    with st.chat_message("assistant"):
        with st.spinner("AI Coach is thinking..."):
            ai_response = None
            backend_used = ""
            error_msg = ""

            # Primary backend: Ollama, skipped entirely when fallback mode is on.
            if not config.use_fallback:
                try:
                    ai_response = send_to_ollama(
                        user_input,
                        conversation_history,
                        st.session_state.ngrok_url,
                        st.session_state.selected_model,
                    )
                    backend_used = "Ollama"
                    st.session_state.last_ollama_call_success = True
                    st.session_state.last_ollama_call_time = str(datetime.utcnow())
                    st.session_state.last_ollama_response_preview = ai_response[:200] if ai_response else ""
                except Exception as e:
                    error_msg = f"Ollama error: {str(e)}"
                    st.session_state.last_ollama_call_success = False
                    st.session_state.last_ollama_call_time = str(datetime.utcnow())
                    st.session_state.last_ollama_response_preview = str(e)[:200]
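
            # Fallback: route the prompt to Hugging Face Inference when the
            # Ollama call failed or was skipped and an HF token is configured.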
            if not ai_response and config.hf_token:
                try:
                    ai_response = send_to_hf(user_input, conversation_history)
                    backend_used = "Hugging Face"
                    st.session_state.last_hf_call_success = True
                    st.session_state.last_hf_call_time = str(datetime.utcnow())
                    st.session_state.last_hf_response_preview = ai_response[:200] if ai_response else ""
                except Exception as e:
                    error_msg = f"Hugging Face error: {str(e)}"
                    st.session_state.last_hf_call_success = False
                    st.session_state.last_hf_call_time = str(datetime.utcnow())
                    st.session_state.last_hf_response_preview = str(e)[:200]
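
            # Render and persist the reply, or surface the most recent error.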
            if ai_response:
                st.markdown(ai_response)

                # Persist the exchange so later turns can use it as context.
                conversation.append({"role": "user", "content": user_input})
                conversation.append({"role": "assistant", "content": ai_response})
                user_session["conversation"] = conversation
                session_manager.update_session("default_user", user_session)

                st.session_state.messages.append({"role": "assistant", "content": ai_response})
            else:
                st.error("Failed to get response from both providers.")
                st.session_state.last_error = error_msg or "No response from either provider"

    # Rerun so the transcript loop above picks up the new messages.
    st.rerun()