Implement enhanced UI with better feedback and detailed AI coordination tracking
app.py
CHANGED

@@ -10,6 +10,7 @@ from utils.config import config
 from core.llm import send_to_ollama, send_to_hf
 from core.session import session_manager
 from core.memory import check_redis_health
+from core.coordinator import coordinator
 import logging

 # Set up logging
@@ -23,22 +24,14 @@ if "messages" not in st.session_state:
     st.session_state.messages = []
 if "last_error" not in st.session_state:
     st.session_state.last_error = ""
-if "last_ollama_call_success" not in st.session_state:
-    st.session_state.last_ollama_call_success = None
-if "last_ollama_call_time" not in st.session_state:
-    st.session_state.last_ollama_call_time = ""
-if "last_ollama_response_preview" not in st.session_state:
-    st.session_state.last_ollama_response_preview = ""
-if "last_hf_call_success" not in st.session_state:
-    st.session_state.last_hf_call_success = None
-if "last_hf_call_time" not in st.session_state:
-    st.session_state.last_hf_call_time = ""
-if "last_hf_response_preview" not in st.session_state:
-    st.session_state.last_hf_response_preview = ""
+if "is_sending" not in st.session_state:
+    st.session_state.is_sending = False
+if "current_coordination" not in st.session_state:
+    st.session_state.current_coordination = None

-# Sidebar
+# Enhanced Sidebar
 with st.sidebar:
-    st.title("AI Life Coach")
+    st.title("AI Life Coach 🧠")
     st.markdown("Your personal AI-powered life development assistant")

     # Model selection
@@ -61,272 +54,205 @@ with st.sidebar:
         help="Enter the URL to your Ollama server"
     )

-    # Conversation
+    # Enhanced Conversation History
     st.subheader("Conversation History")
-    if st.button("Clear History"):
+    if st.button("Clear History 🗑️"):
         st.session_state.messages = []
         st.success("History cleared!")

+    # Show conversation stats
+    if st.session_state.messages:
+        user_msgs = len([m for m in st.session_state.messages if m["role"] == "user"])
+        ai_msgs = len([m for m in st.session_state.messages if m["role"] == "assistant"])
+        st.caption(f"💬 {user_msgs} user messages, {ai_msgs} AI responses")
+
     # Enhanced Debug Panel
-    with st.
+    with st.expander("🔍 Advanced System Monitor", expanded=True):
         st.subheader("🎛️ System Controls")

-        #
-        # HF Deep Analysis Toggle
-        hf_analysis = st.checkbox(
-            "Enable HF Deep Analysis",
-            value=bool(config.hf_token),
-            help="Enable Hugging Face endpoint for deep analysis"
-        )
-
-        # Web Search Toggle
-        web_search = st.checkbox(
-            "Enable Web Research",
-            value=bool(os.getenv("TAVILY_API_KEY")),
-            help="Enable Tavily/DDG web search integration"
-        )
-
-        st.divider()
-
-        st.subheader("📊 Provider Status")
-
-        # Ollama Status with Detailed Info
-        try:
-            from services.ollama_monitor import check_ollama_status
-            ollama_status = check_ollama_status()
-            if ollama_status.get("running"):
-                st.success(f"🦙 Ollama: Running")
-                if ollama_status.get("model_loaded"):
-                    st.caption(f"Model: {ollama_status['model_loaded']}")
-                st.caption(f"URL: {ollama_status.get('ngrok_url', 'N/A')}")
-            else:
-                st.error("🦙 Ollama: Unavailable")
-                if st.button("🔄 Refresh Ollama Status", key="refresh_ollama"):
-                    st.experimental_rerun()
-        except Exception as e:
-            st.warning(f"🦙 Ollama: Status check failed")
+        # Real-time coordination status
+        if st.session_state.current_coordination:
+            coord = st.session_state.current_coordination
+            st.info(f"🔄 Coordination in progress...")
+            st.caption(f"Phase: {coord.get('phase', 'Unknown')}")
+            st.caption(f"Status: {coord.get('status', 'Unknown')}")

-        # HF Endpoint Status with
+        # HF Endpoint Status with detailed monitoring
         try:
             from services.hf_endpoint_monitor import hf_monitor
-
+            hf_status = hf_monitor.check_endpoint_status()

-            if
-                if
+            if hf_status['available']:
+                if hf_status.get('initialized'):
                     st.success("🤗 HF Endpoint: Available & Initialized")
                 else:
                     st.warning("🤗 HF Endpoint: Available (Initializing)")
+                    if st.button("⚡ Wake Up HF Endpoint"):
+                        with st.spinner("Waking up HF endpoint... (2-4 minutes)"):
+                            success = hf_monitor.handle_scale_to_zero()
+                            if success:
+                                st.success("✅ HF endpoint activated!")
+                                time.sleep(1)
+                                st.experimental_rerun()
             else:
                 st.error("🤗 HF Endpoint: Scaled to Zero")
+                st.caption(f"Status Code: {hf_status.get('status_code', 'N/A')}")
-
-            # Show detailed status
-            st.caption(f"Status Code: {hf_status_detail.get('status_code', 'N/A')}")
-            if 'response_time' in hf_status_detail:
-                st.caption(f"Response Time: {hf_status_detail['response_time']:.2f}s")
-
-            # Wake-up button for scaled-to-zero endpoints
-            if not hf_status_detail['available'] and config.hf_token:
-                if st.button("⚡ Wake Up HF Endpoint", key="wake_hf"):
-                    with st.spinner("Waking up HF endpoint... (2-4 minutes)"):
-                        success = hf_monitor.handle_scale_to_zero()
-                        if success:
-                            st.success("✅ HF endpoint activated!")
-                            time.sleep(2)
-                            st.experimental_rerun()
-                        else:
-                            st.error("❌ Failed to activate HF endpoint")

         except Exception as e:
-            st.warning(f"🤗 HF Endpoint: Monitor unavailable")
-            st.caption(f"Error: {str(e)[:50]}...")
+            st.warning("🤗 HF Endpoint: Monitor unavailable")

-        # Redis Status
-        redis_healthy = check_redis_health()
-        if redis_healthy:
-            st.success("💾 Redis: Connected")
-        else:
-            st.error("💾 Redis: Disconnected")
-
-        st.divider()
-
-        st.subheader("🌐 External Services")
-
-        # Web Search Status
-        tavily_key = os.getenv("TAVILY_API_KEY")
-        if tavily_key:
-            st.success("🔍 Web Search: Tavily API Active")
-            # Test search button
-            if st.button("🧪 Test Web Search", key="test_web_search"):
-                try:
-                    from tavily import TavilyClient
-                    tavily = TavilyClient(api_key=tavily_key)
-                    with st.spinner("Testing web search..."):
-                        test_result = tavily.search("AI life coach benefits", max_results=1)
-                    st.success("✅ Web search working!")
-                except Exception as e:
-                    st.error(f"❌ Web search test failed: {str(e)[:30]}...")
-        else:
-            st.info("🔍 Web Search: Not configured")
-
-        # Weather Service
-        if config.openweather_api_key:
-            st.success("🌤️ Weather: API Active")
-            if st.button("🌡️ Test Weather", key="test_weather"):
-                try:
-                    from services.weather import weather_service
-                    with st.spinner("Testing weather service..."):
-                        test_weather = weather_service.get_current_weather("New York")
-                    if test_weather:
-                        st.success(f"✅ Weather working! {test_weather['temperature']}°C in New York")
-                    else:
-                        st.warning("⚠️ Weather service returned no data")
-                except Exception as e:
-                    st.error(f"❌ Weather test failed: {str(e)[:30]}...")
-        else:
-            st.info("🌤️ Weather: Not configured")
-
-        st.divider()
-
-        st.subheader("📈 Session Statistics")
-
-        # Session Stats
-        try:
-            user_session = session_manager.get_session("default_user")
-            conversation = user_session.get("conversation", [])
-            st.caption(f"💬 Messages: {len(conversation)}")
-
-            # AI Coordination Stats (if available)
-            coord_stats = user_session.get('ai_coordination', {})
-            if coord_stats:
-                st.caption(f"🤖 AI Requests: {coord_stats.get('requests_processed', 0)}")
-                st.caption(f"🦙 Ollama Responses: {coord_stats.get('ollama_responses', 0)}")
-                st.caption(f"🤗 HF Responses: {coord_stats.get('hf_responses', 0)}")
-            else:
-                st.caption("🤖 AI Coordination: Not active")
-        except Exception as e:
-            st.
-
-        st.divider()
-
-        st.subheader("⚙️ Configuration")
-        st.caption(f"**Environment**: {'☁️ HF Space' if config.is_hf_space else '🏠 Local'}")
-        st.caption(f"**Primary Model**: {config.local_model_name or 'Not set'}")
-
-        # Feature Flags Summary
-        features = []
-        if fallback_mode:
-            features.append("Fallback")
-        if hf_analysis and config.hf_token:
-            features.append("HF Deep Analysis")
-        if web_search and tavily_key:
-            features.append("Web Search")
-        if config.openweather_api_key:
-            features.append("Weather")
-
-        if features:
-            st.caption(f"**Active Features**: {', '.join(features)}")
-        else:
-            st.caption("**Active Features**: None")

-# Main
+# Enhanced Main Interface
 st.title("🧠 AI Life Coach")
 st.markdown("Ask me anything about personal development, goal setting, or life advice!")

-# Display chat messages
-for message in st.session_state.messages:
+# Display chat messages with enhanced formatting
+for i, message in enumerate(st.session_state.messages):
     with st.chat_message(message["role"]):
+        # Add message metadata
+        if message["role"] == "assistant":
+            # Show which model responded
+            if "Note: A more comprehensive analysis" in message["content"]:
+                st.caption("🦙 Ollama (initial response)")
+            elif "🎯 HF analysis complete" in str(message.get("metadata", "")):
+                st.caption("🤗 HF Endpoint (deep analysis)")
+
         st.markdown(message["content"])
+
+        # Show message timestamp
+        if "timestamp" in message:
+            st.caption(f"🕒 {message['timestamp']}")

-#
-col1, col2 = st.columns([
+# Enhanced chat input with better feedback
+col1, col2, col3 = st.columns([3, 1, 1])
 with col1:
     user_input = st.text_input(
         "Your message...",
         key="user_message_input",
         placeholder="Type your message here...",
-        label_visibility="collapsed"
+        label_visibility="collapsed",
+        disabled=st.session_state.is_sending
     )
 with col2:
-    send_button = st.button(
+    send_button = st.button(
+        "Send 🚀" if not st.session_state.is_sending else "Sending...",
+        key="send_message_button",
+        use_container_width=True,
+        disabled=st.session_state.is_sending or not user_input.strip()
+    )
+with col3:
+    # Add a "thinking" indicator
+    if st.session_state.is_sending:
+        st.info("🧠 Thinking...")

+# Enhanced message sending with coordination feedback
+if send_button and user_input.strip() and not st.session_state.is_sending:
+    st.session_state.is_sending = True
+
+    # Display user message immediately
     with st.chat_message("user"):
         st.markdown(user_input)

-    # Add
-    st.session_state.messages.append({
+    # Add to message history with timestamp
+    st.session_state.messages.append({
+        "role": "user",
+        "content": user_input,
+        "timestamp": datetime.now().strftime("%H:%M:%S")
+    })

     # Reset error state
     st.session_state.last_error = ""

-    #
-    user_session = session_manager.get_session("default_user")
-    conversation = user_session.get("conversation", [])
-    conversation_history = conversation[-5:]  # Last 5 messages
-    conversation_history.append({"role": "user", "content": user_input})
-
-    # Send to backend
+    # Enhanced coordination processing
     with st.chat_message("assistant"):
+        # Create containers for different response components
+        ollama_container = st.empty()
+        coordination_status = st.empty()
+        hf_thinking_container = st.empty()
+        final_response_container = st.empty()
+
+        try:
+            # Use enhanced coordinator with real-time feedback
+            import asyncio
+
+            async def process_with_feedback():
+                user_id = "default_user"
+                full_response = ""
+                ollama_response = ""
+                hf_response = ""
+
+                # Track coordination phases
+                st.session_state.current_coordination = {
+                    "phase": "starting",
+                    "status": "Initializing coordination..."
+                }
+
+                async for response_chunk in coordinator.coordinate_hierarchical_conversation(user_id, user_input):
+                    st.session_state.current_coordination = {
+                        "phase": response_chunk['type'],
+                        "status": response_chunk['content'][:100]
+                    }
+
+                    if response_chunk['type'] == 'initial_response':
+                        ollama_response = response_chunk['content']
+                        ollama_container.markdown(f"**🦙 Ollama (Fast Response):**\n\n{ollama_response}")
+                        full_response = ollama_response
+
+                    elif response_chunk['type'] == 'coordination_status':
+                        coordination_status.info(f"🤖 {response_chunk['content']}")
+
+                    elif response_chunk['type'] == 'hf_thinking':
+                        hf_response += response_chunk['content']
+                        with hf_thinking_container.container():
+                            st.markdown(f"**🤗 HF Endpoint (Deep Analysis):**")
+                            st.info(hf_response)
+                        full_response = hf_response
+
+                    elif response_chunk['type'] == 'final_response':
+                        hf_response = response_chunk['content']
+                        final_response_container.markdown(f"**🎯 Final Response:**\n\n{hf_response}")
+                        full_response = hf_response
+                        coordination_status.success("✅ Coordination complete!")
+
+                return full_response

-        #
-
-        try:
-            ai_response = send_to_hf(user_input, conversation_history)
-            backend_used = "Hugging Face"
-            # Capture success metadata
-            st.session_state.last_hf_call_success = True
-            st.session_state.last_hf_call_time = str(datetime.utcnow())
-            st.session_state.last_hf_response_preview = ai_response[:200] if ai_response else ""
-        except Exception as e:
-            error_msg = f"Hugging Face error: {str(e)}"
-            # Capture failure metadata
-            st.session_state.last_hf_call_success = False
-            st.session_state.last_hf_call_time = str(datetime.utcnow())
-            st.session_state.last_hf_response_preview = str(e)[:200]
+            # Run the enhanced processing
+            full_response = asyncio.run(process_with_feedback())
+
+            # Update session with complete conversation
+            user_session = session_manager.get_session("default_user")
+            conversation = user_session.get("conversation", [])
+            conversation.append({"role": "user", "content": user_input})
+            conversation.append({"role": "assistant", "content": full_response})
+            user_session["conversation"] = conversation
+            session_manager.update_session("default_user", user_session)
+
+            # Add to message history
+            st.session_state.messages.append({
+                "role": "assistant",
+                "content": full_response,
+                "timestamp": datetime.now().strftime("%H:%M:%S"),
+                "metadata": {"source": "coordinated_response"}
+            })
+
+        except Exception as e:
+            st.error(f"❌ Error: {str(e)}")
+            st.session_state.last_error = str(e)
+        finally:
+            st.session_state.is_sending = False
+            st.session_state.current_coordination = None

-    #
+    # Force rerun to update UI
     st.experimental_rerun()
+
+# Add conversation export feature
+if st.session_state.messages and st.sidebar.button("📤 Export Conversation"):
+    conversation_text = ""
+    for msg in st.session_state.messages:
+        conversation_text += f"{msg['role'].upper()}: {msg['content']}\n\n"
+
+    st.sidebar.download_button(
+        label="💾 Download Conversation",
+        data=conversation_text,
+        file_name=f"ai_life_coach_conversation_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt",
+        mime="text/plain"
+    )
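One fragile spot in the new app.py is the bare asyncio.run(process_with_feedback()) call: asyncio.run() raises RuntimeError when the calling thread already has a running event loop. Streamlit normally executes the script in a worker thread without one, so the call usually succeeds, but a small defensive wrapper keeps the handler working in environments that do run a loop. A minimal sketch (the run_async helper is hypothetical, not part of this commit):

import asyncio
import threading

def run_async(coro):
    """Run a coroutine to completion from synchronous Streamlit code."""
    try:
        # Usual case: the Streamlit script thread has no running event loop.
        return asyncio.run(coro)
    except RuntimeError:
        # A loop is already running in this thread, so drive the coroutine
        # on a fresh loop inside a dedicated worker thread instead.
        result = {}

        def _worker():
            result["value"] = asyncio.run(coro)

        t = threading.Thread(target=_worker)
        t.start()
        t.join()
        return result.get("value")

# Usage in the send handler: full_response = run_async(process_with_feedback())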
core/__pycache__/coordinator.cpython-313.pyc
CHANGED

Binary files a/core/__pycache__/coordinator.cpython-313.pyc and b/core/__pycache__/coordinator.cpython-313.pyc differ

core/__pycache__/session.cpython-313.pyc
CHANGED

Binary files a/core/__pycache__/session.cpython-313.pyc and b/core/__pycache__/session.cpython-313.pyc differ
core/coordinator.py
CHANGED

@@ -52,66 +52,116 @@ Your role is to:

    async def coordinate_hierarchical_conversation(self, user_id: str, user_query: str) -> AsyncGenerator[Dict, None]:
        """
-        Yields:
-            Dict with 'type' and 'content' fields:
-            - {'type': 'initial_response', 'content': str}
-            - {'type': 'coordination_status', 'content': str}
-            - {'type': 'hf_thinking', 'content': str}  # Streaming HF response
-            - {'type': 'final_response', 'content': str}
+        Enhanced coordination with detailed tracking and feedback
        """
        try:
            # Get conversation history
            session = session_manager.get_session(user_id)
            conversation_history = session.get("conversation", []).copy()

-            yield {
+            yield {
+                'type': 'coordination_status',
+                'content': '🚀 Initiating hierarchical AI coordination...',
+                'details': {
+                    'conversation_length': len(conversation_history),
+                    'user_query_length': len(user_query)
+                }
+            }

-            # Step 1: Gather external data
-            yield {
+            # Step 1: Gather external data with detailed logging
+            yield {
+                'type': 'coordination_status',
+                'content': '🔍 Gathering external context...',
+                'details': {'phase': 'external_data_gathering'}
+            }
            external_data = await self._gather_external_data(user_query)

+            # Log what external data was gathered
+            if external_data:
+                data_summary = []
+                if 'search_results' in external_data:
+                    data_summary.append(f"Web search: {len(external_data['search_results'])} results")
+                if 'weather' in external_data:
+                    data_summary.append("Weather data: available")
+                if 'current_datetime' in external_data:
+                    data_summary.append(f"Time: {external_data['current_datetime']}")
+
+                yield {
+                    'type': 'coordination_status',
+                    'content': f'📊 External data gathered: {", ".join(data_summary)}',
+                    'details': {'external_data_summary': data_summary}
+                }
+
-            #
+            # Step 2: Get initial Ollama response
+            yield {
+                'type': 'coordination_status',
+                'content': '🦙 Getting initial response from Ollama...',
+                'details': {'phase': 'ollama_response'}
+            }
            ollama_response = await self._get_hierarchical_ollama_response(
                user_query, conversation_history, external_data
            )

-            # Send initial response
-            yield {
+            # Send initial response with context info
+            yield {
+                'type': 'initial_response',
+                'content': ollama_response,
+                'details': {
+                    'response_length': len(ollama_response),
+                    'external_data_injected': bool(external_data)
+                }
+            }

-            # Step 3: Coordinate with
-            yield {
+            # Step 3: Coordinate with HF endpoint
+            yield {
+                'type': 'coordination_status',
+                'content': '🤗 Engaging HF endpoint for deep analysis...',
+                'details': {'phase': 'hf_coordination'}
+            }

            # Check HF availability
            hf_available = self._check_hf_availability()
            if hf_available:
+                # Show what context will be sent to HF
+                context_summary = {
+                    'conversation_turns': len(conversation_history),
+                    'ollama_response_length': len(ollama_response),
+                    'external_data_items': len(external_data) if external_data else 0
+                }
+
+                yield {
+                    'type': 'coordination_status',
+                    'content': f'📋 HF context: {len(conversation_history)} conversation turns, Ollama response ({len(ollama_response)} chars)',
+                    'details': context_summary
+                }
+
+                # Coordinate with HF
                async for hf_chunk in self._coordinate_hierarchical_hf_response(
                    user_id, user_query, conversation_history,
                    external_data, ollama_response
                ):
                    yield hf_chunk
            else:
-                yield {
+                yield {
+                    'type': 'coordination_status',
+                    'content': 'ℹ️ HF endpoint not available - using Ollama response',
+                    'details': {'hf_available': False}
+                }

            # Final coordination status
-            yield {
+            yield {
+                'type': 'coordination_status',
+                'content': '✅ Hierarchical coordination complete',
+                'details': {'status': 'complete'}
+            }
+
        except Exception as e:
            logger.error(f"Hierarchical coordination failed: {e}")
-            yield {
-            conversation_history = session.get("conversation", [])
-            fallback_response = await self._get_hierarchical_ollama_response(
-                user_query, conversation_history, {}
-            )
-            yield {'type': 'initial_response', 'content': fallback_response}
-        except:
-            yield {'type': 'initial_response', 'content': "I'm here to help! What would you like to discuss?"}
+            yield {
+                'type': 'coordination_status',
+                'content': f'❌ Coordination error: {str(e)}',
+                'details': {'error': str(e)}
+            }

    async def _coordinate_hierarchical_hf_response(self, user_id: str, query: str,
                                                   history: List, external_data: Dict,
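The removed docstring above remains the clearest statement of the chunk protocol: every yielded dict carries a 'type' ('coordination_status', 'initial_response', 'hf_thinking', or 'final_response') and a 'content' string, and the new code adds an optional 'details' dict. A minimal consumer, sketched under those assumptions and mirroring the dispatch logic in app.py (the collect_final_answer helper is hypothetical, not part of this commit):

import asyncio

from core.coordinator import coordinator

async def collect_final_answer(user_id: str, query: str) -> str:
    """Walk the coordinator's chunk stream and keep the strongest answer so far."""
    answer = ""
    hf_stream = ""
    async for chunk in coordinator.coordinate_hierarchical_conversation(user_id, query):
        if chunk['type'] == 'coordination_status':
            print(f"[status] {chunk['content']}")  # progress only, not part of the answer
        elif chunk['type'] == 'initial_response':
            answer = chunk['content']              # fast Ollama draft
        elif chunk['type'] == 'hf_thinking':
            hf_stream += chunk['content']          # streamed partial HF output
            answer = hf_stream
        elif chunk['type'] == 'final_response':
            answer = chunk['content']              # authoritative final result
    return answer

# Example: answer = asyncio.run(collect_final_answer("default_user", "How do I build better habits?"))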
core/providers/__pycache__/huggingface.cpython-313.pyc
CHANGED

Binary files a/core/providers/__pycache__/huggingface.cpython-313.pyc and b/core/providers/__pycache__/huggingface.cpython-313.pyc differ

services/__pycache__/hf_endpoint_monitor.cpython-313.pyc
CHANGED

Binary files a/services/__pycache__/hf_endpoint_monitor.cpython-313.pyc and b/services/__pycache__/hf_endpoint_monitor.cpython-313.pyc differ
test_enhanced_features.py
ADDED

@@ -0,0 +1,65 @@
+import sys
+import asyncio
+from pathlib import Path
+
+# Add project root to path
+project_root = Path(__file__).parent
+sys.path.append(str(project_root))
+
+from core.coordinator import coordinator
+from core.session import session_manager
+
+async def test_enhanced_features():
+    """Test the enhanced UI and coordination features"""
+    print("=== Enhanced Features Test ===")
+    print()
+
+    # Test user query
+    user_query = "What are the benefits of meditation for stress management?"
+    user_id = "test_user"
+
+    print(f"User Query: {user_query}")
+    print()
+
+    # Test enhanced coordination with detailed tracking
+    print("1. Testing Enhanced Coordination with Detailed Tracking:")
+    try:
+        print("   Starting enhanced coordination...")
+        response_count = 0
+
+        async for response_chunk in coordinator.coordinate_hierarchical_conversation(user_id, user_query):
+            response_count += 1
+            print(f"   Chunk {response_count}:")
+            print(f"     Type: {response_chunk['type']}")
+            print(f"     Content: {response_chunk['content'][:100]}...")
+            if 'details' in response_chunk:
+                print(f"     Details: {response_chunk['details']}")
+            print()
+
+            # Limit output for readability
+            if response_count >= 8:
+                print("   ... (truncated for brevity)")
+                break
+
+        print("   ✅ Enhanced Coordination Test Passed")
+    except Exception as e:
+        print(f"   ❌ Enhanced Coordination Test Failed: {e}")
+
+    print()
+
+    # Test coordination status
+    print("2. Testing Coordination Status:")
+    try:
+        coord_status = coordinator.get_coordination_status()
+        print(f"   Tavily Available: {coord_status.get('tavily_available', False)}")
+        print(f"   Weather Available: {coord_status.get('weather_available', False)}")
+        print(f"   Web Search Enabled: {coord_status.get('web_search_enabled', False)}")
+        print("   ✅ Coordination Status Check Passed")
+    except Exception as e:
+        print(f"   ❌ Coordination Status Check Failed: {e}")
+
+    print()
+    print("🎉 Enhanced Features Test Completed!")
+
+if __name__ == "__main__":
+    asyncio.run(test_enhanced_features())
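This script runs standalone (python test_enhanced_features.py) rather than under a test runner, and it calls coordinator.get_coordination_status(), whose definition sits outside this diff. If the suite later moves to pytest, an equivalent protocol check could look like the following sketch; pytest and the pytest-asyncio plugin are assumed to be installed, and neither is part of this commit:

# Hypothetical pytest port of the standalone script's first check.
import pytest

from core.coordinator import coordinator

@pytest.mark.asyncio
async def test_chunks_follow_protocol():
    """Every yielded chunk should carry a known 'type' and a string 'content'."""
    allowed = {"coordination_status", "initial_response", "hf_thinking", "final_response"}
    async for chunk in coordinator.coordinate_hierarchical_conversation("test_user", "hello"):
        assert chunk["type"] in allowed
        assert isinstance(chunk["content"], str)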