"""AI Life Coach — Streamlit chat front-end.

Wires a chat UI to two LLM back-ends (a local Ollama server and a Hugging
Face endpoint) with an advanced sidebar monitor for provider/service health.
Ollama is always the primary provider; HF is the fallback.
"""

import streamlit as st
import time
import os
import sys
from datetime import datetime, timezone
from pathlib import Path

# Make sibling packages (utils/, core/, services/) importable when run as a script.
sys.path.append(str(Path(__file__).parent))

from utils.config import config
from core.llm import send_to_ollama, send_to_hf
from core.session import session_manager
from core.memory import check_redis_health
import logging

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

st.set_page_config(page_title="AI Life Coach", page_icon="๐Ÿง ", layout="wide")

# Initialize session state.
# `messages` holds the visible chat transcript; the remaining keys capture
# per-provider debug metadata (success flag, timestamp, response preview).
_SESSION_DEFAULTS = {
    "messages": [],
    "last_error": "",
    "last_ollama_call_success": None,
    "last_ollama_call_time": "",
    "last_ollama_response_preview": "",
    "last_hf_call_success": None,
    "last_hf_call_time": "",
    "last_hf_response_preview": "",
}
for _key, _default in _SESSION_DEFAULTS.items():
    if _key not in st.session_state:
        st.session_state[_key] = _default


def _utc_now_str() -> str:
    """Return the current UTC time as a string for debug metadata.

    Uses timezone-aware datetime; datetime.utcnow() is deprecated (3.12+).
    """
    return str(datetime.now(timezone.utc))


# Sidebar
with st.sidebar:
    st.title("AI Life Coach")
    st.markdown("Your personal AI-powered life development assistant")

    # Model selection: display name -> Ollama model tag
    model_options = {
        "Mistral 7B (Local)": "mistral:latest",
        "Llama 2 7B (Local)": "llama2:latest",
        "OpenChat 3.5 (Local)": "openchat:latest",
    }
    selected_model_name = st.selectbox(
        "Select Model",
        options=list(model_options.keys()),
        index=0,
    )
    st.session_state.selected_model = model_options[selected_model_name]

    # Ollama URL input (defaults to a local server; may be an ngrok tunnel)
    st.session_state.ngrok_url = st.text_input(
        "Ollama Server URL",
        value=st.session_state.get("ngrok_url", "http://localhost:11434"),
        help="Enter the URL to your Ollama server",
    )

    # Conversation history
    st.subheader("Conversation History")
    if st.button("Clear History"):
        st.session_state.messages = []
        st.success("History cleared!")

# Enhanced Debug Panel
with st.sidebar.expander("๐Ÿ” Advanced System Monitor", expanded=False):
    st.subheader("๐ŸŽ›๏ธ System Controls")

    # Fallback Mode Toggle
    fallback_mode = st.checkbox(
        "Enable Provider Fallback",
        value=config.use_fallback,
        help="Enable automatic fallback between AI providers",
    )

    # HF Deep Analysis Toggle
    hf_analysis = st.checkbox(
        "Enable HF Deep Analysis",
        value=bool(config.hf_token),
        help="Enable Hugging Face endpoint for deep analysis",
    )

    # Web Search Toggle
    web_search = st.checkbox(
        "Enable Web Research",
        value=bool(os.getenv("TAVILY_API_KEY")),
        help="Enable Tavily/DDG web search integration",
    )

    st.divider()
    st.subheader("๐Ÿ“Š Provider Status")

    # Ollama Status with Detailed Info
    try:
        from services.ollama_monitor import check_ollama_status

        ollama_status = check_ollama_status()
        if ollama_status.get("running"):
            st.success("๐Ÿฆ™ Ollama: Running")
            if ollama_status.get("model_loaded"):
                st.caption(f"Model: {ollama_status['model_loaded']}")
            st.caption(f"URL: {ollama_status.get('ngrok_url', 'N/A')}")
        else:
            st.error("๐Ÿฆ™ Ollama: Unavailable")
        if st.button("๐Ÿ”„ Refresh Ollama Status", key="refresh_ollama"):
            st.experimental_rerun()
    except Exception as e:
        logger.warning("Ollama status check failed: %s", e)
        st.warning("๐Ÿฆ™ Ollama: Status check failed")

    # HF Endpoint Status with Scale-to-Zero Handling
    try:
        from services.hf_endpoint_monitor import hf_monitor

        hf_status_detail = hf_monitor.check_endpoint_status()
        if hf_status_detail['available']:
            if hf_status_detail.get('initialized', False):
                st.success("๐Ÿค— HF Endpoint: Available & Initialized")
            else:
                st.warning("๐Ÿค— HF Endpoint: Available (Initializing)")
        else:
            st.error("๐Ÿค— HF Endpoint: Scaled to Zero")

        # Show detailed status
        st.caption(f"Status Code: {hf_status_detail.get('status_code', 'N/A')}")
        if 'response_time' in hf_status_detail:
            st.caption(f"Response Time: {hf_status_detail['response_time']:.2f}s")

        # Wake-up button for scaled-to-zero endpoints
        if not hf_status_detail['available'] and config.hf_token:
            if st.button("โšก Wake Up HF Endpoint", key="wake_hf"):
                with st.spinner("Waking up HF endpoint... (2-4 minutes)"):
                    success = hf_monitor.handle_scale_to_zero()
                    if success:
                        st.success("โœ… HF endpoint activated!")
                        time.sleep(2)
                        st.experimental_rerun()
                    else:
                        st.error("โŒ Failed to activate HF endpoint")
    except Exception as e:
        logger.warning("HF endpoint monitor unavailable: %s", e)
        st.warning("๐Ÿค— HF Endpoint: Monitor unavailable")
        st.caption(f"Error: {str(e)[:50]}...")

    # Redis Status
    redis_healthy = check_redis_health()
    if redis_healthy:
        st.success("๐Ÿ’พ Redis: Connected")
    else:
        st.error("๐Ÿ’พ Redis: Disconnected")

    st.divider()
    st.subheader("๐ŸŒ External Services")

    # Web Search Status
    tavily_key = os.getenv("TAVILY_API_KEY")
    if tavily_key:
        st.success("๐Ÿ” Web Search: Tavily API Active")
        # Test search button
        if st.button("๐Ÿงช Test Web Search", key="test_web_search"):
            try:
                from tavily import TavilyClient

                tavily = TavilyClient(api_key=tavily_key)
                with st.spinner("Testing web search..."):
                    tavily.search("AI life coach benefits", max_results=1)
                    st.success("โœ… Web search working!")
            except Exception as e:
                st.error(f"โŒ Web search test failed: {str(e)[:30]}...")
    else:
        st.info("๐Ÿ” Web Search: Not configured")

    # Weather Service
    if config.openweather_api_key:
        st.success("๐ŸŒค๏ธ Weather: API Active")
        if st.button("๐ŸŒก๏ธ Test Weather", key="test_weather"):
            try:
                from services.weather import weather_service

                with st.spinner("Testing weather service..."):
                    test_weather = weather_service.get_current_weather("New York")
                    if test_weather:
                        st.success(f"โœ… Weather working! {test_weather['temperature']}ยฐC in New York")
                    else:
                        st.warning("โš ๏ธ Weather service returned no data")
            except Exception as e:
                st.error(f"โŒ Weather test failed: {str(e)[:30]}...")
    else:
        st.info("๐ŸŒค๏ธ Weather: Not configured")

    st.divider()
    st.subheader("๐Ÿ“ˆ Session Statistics")

    # Session Stats
    try:
        user_session = session_manager.get_session("default_user")
        conversation = user_session.get("conversation", [])
        st.caption(f"๐Ÿ’ฌ Messages: {len(conversation)}")

        # AI Coordination Stats (if available)
        coord_stats = user_session.get('ai_coordination', {})
        if coord_stats:
            st.caption(f"๐Ÿค– AI Requests: {coord_stats.get('requests_processed', 0)}")
            st.caption(f"๐Ÿฆ™ Ollama Responses: {coord_stats.get('ollama_responses', 0)}")
            st.caption(f"๐Ÿค— HF Responses: {coord_stats.get('hf_responses', 0)}")
        else:
            st.caption("๐Ÿค– AI Coordination: Not active")
    except Exception as e:
        logger.warning("Session stats unavailable: %s", e)
        st.caption("๐Ÿ’ฌ Session: Not initialized")

    st.divider()
    st.subheader("โš™๏ธ Configuration")
    st.caption(f"**Environment**: {'โ˜๏ธ HF Space' if config.is_hf_space else '๐Ÿ  Local'}")
    st.caption(f"**Primary Model**: {config.local_model_name or 'Not set'}")

    # Feature Flags Summary
    features = []
    if fallback_mode:
        features.append("Fallback")
    if hf_analysis and config.hf_token:
        features.append("HF Deep Analysis")
    if web_search and tavily_key:
        features.append("Web Search")
    if config.openweather_api_key:
        features.append("Weather")
    if features:
        st.caption(f"**Active Features**: {', '.join(features)}")
    else:
        st.caption("**Active Features**: None")

# Main chat interface
st.title("๐Ÿง  AI Life Coach")
st.markdown("Ask me anything about personal development, goal setting, or life advice!")

# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Chat input and send button
col1, col2 = st.columns([4, 1])
with col1:
    user_input = st.text_input(
        "Your message...",
        key="user_message_input",
        placeholder="Type your message here...",
        label_visibility="collapsed",
    )
with col2:
    send_button = st.button("Send", key="send_message_button", use_container_width=True)

if send_button and user_input.strip():
    # Display user message
    with st.chat_message("user"):
        st.markdown(user_input)

    # Add user message to history
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Reset error state
    st.session_state.last_error = ""

    # Get conversation history (slice copies, so appending does not mutate
    # the stored session conversation)
    user_session = session_manager.get_session("default_user")
    conversation = user_session.get("conversation", [])
    conversation_history = conversation[-5:]  # Last 5 messages
    conversation_history.append({"role": "user", "content": user_input})

    # Send to backend
    with st.chat_message("assistant"):
        with st.spinner("AI Coach is thinking..."):
            ai_response = None
            backend_used = ""
            error_msg = ""

            # BUG FIX: the previous guard (`if not config.use_fallback:`)
            # skipped the primary Ollama provider entirely whenever fallback
            # was enabled, and still fell back to HF when fallback was
            # disabled. Ollama is now always tried first.
            try:
                ai_response = send_to_ollama(
                    user_input,
                    conversation_history,
                    st.session_state.ngrok_url,
                    st.session_state.selected_model,
                )
                backend_used = "Ollama"
                # Capture success metadata
                st.session_state.last_ollama_call_success = True
                st.session_state.last_ollama_call_time = _utc_now_str()
                st.session_state.last_ollama_response_preview = ai_response[:200] if ai_response else ""
            except Exception as e:
                error_msg = f"Ollama error: {str(e)}"
                # Capture failure metadata
                st.session_state.last_ollama_call_success = False
                st.session_state.last_ollama_call_time = _utc_now_str()
                st.session_state.last_ollama_response_preview = str(e)[:200]

            # Fall back to Hugging Face when Ollama produced no response
            if not ai_response and config.hf_token:
                try:
                    ai_response = send_to_hf(user_input, conversation_history)
                    backend_used = "Hugging Face"
                    # Capture success metadata
                    st.session_state.last_hf_call_success = True
                    st.session_state.last_hf_call_time = _utc_now_str()
                    st.session_state.last_hf_response_preview = ai_response[:200] if ai_response else ""
                except Exception as e:
                    error_msg = f"Hugging Face error: {str(e)}"
                    # Capture failure metadata
                    st.session_state.last_hf_call_success = False
                    st.session_state.last_hf_call_time = _utc_now_str()
                    st.session_state.last_hf_response_preview = str(e)[:200]

            if ai_response:
                st.markdown(f"{ai_response}")
                # Update conversation history
                conversation.append({"role": "user", "content": user_input})
                conversation.append({"role": "assistant", "content": ai_response})
                # Update session using the correct method
                user_session["conversation"] = conversation
                session_manager.update_session("default_user", user_session)
                # Add assistant response to history
                st.session_state.messages.append({"role": "assistant", "content": ai_response})
            else:
                st.error("Failed to get response from both providers.")
                st.session_state.last_error = error_msg or "No response from either provider"

    # Force a rerun so the transcript re-renders from session state.
    # NOTE(review): this does not actually clear the keyed text_input value —
    # confirm intended behavior; clearing would require resetting the key
    # before the widget is instantiated.
    st.experimental_rerun()