rdune71 commited on
Commit
0e216c6
·
1 Parent(s): e9b4a9e
Files changed (3) hide show
  1. app.py +93 -1
  2. core/coordinator.py +79 -0
  3. core/session.py +24 -0
app.py CHANGED
@@ -2,6 +2,7 @@ import streamlit as st
2
  import time
3
  import os
4
  import sys
 
5
  from datetime import datetime
6
  from pathlib import Path
7
  sys.path.append(str(Path(__file__).parent))
@@ -27,6 +28,8 @@ if "is_processing" not in st.session_state:
27
  st.session_state.is_processing = False
28
  if "ngrok_url_temp" not in st.session_state:
29
  st.session_state.ngrok_url_temp = st.session_state.get("ngrok_url", "https://7bcc180dffd1.ngrok-free.app")
 
 
30
 
31
  # Sidebar
32
  with st.sidebar:
@@ -130,10 +133,99 @@ st.markdown("Ask me anything about personal development, goal setting, or life a
130
  # Display messages
131
  for message in st.session_state.messages:
132
  with st.chat_message(message["role"]):
133
- st.markdown(message["content"])
 
 
 
 
 
134
  if "timestamp" in message:
135
  st.caption(f"🕒 {message['timestamp']}")
136
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
  # Chat input - FIXED VERSION
138
  user_input = st.chat_input("Type your message here...", disabled=st.session_state.is_processing)
139
 
 
2
  import time
3
  import os
4
  import sys
5
+ import json
6
  from datetime import datetime
7
  from pathlib import Path
8
  sys.path.append(str(Path(__file__).parent))
 
28
  st.session_state.is_processing = False
29
  if "ngrok_url_temp" not in st.session_state:
30
  st.session_state.ngrok_url_temp = st.session_state.get("ngrok_url", "https://7bcc180dffd1.ngrok-free.app")
31
+ if "manual_hf_requested" not in st.session_state:
32
+ st.session_state.manual_hf_requested = False
33
 
34
  # Sidebar
35
  with st.sidebar:
 
133
  # Display messages
134
  for message in st.session_state.messages:
135
  with st.chat_message(message["role"]):
136
+ # Format HF expert messages differently
137
+ if message.get("source") == "hf_expert":
138
+ st.markdown("**🤖 HF Expert Analysis:**")
139
+ st.markdown(message["content"])
140
+ else:
141
+ st.markdown(message["content"])
142
  if "timestamp" in message:
143
  st.caption(f"🕒 {message['timestamp']}")
144
 
145
+ # Manual HF Analysis Button
146
+ if st.session_state.messages and len(st.session_state.messages) > 0:
147
+ st.divider()
148
+ st.subheader("🎯 Deep Analysis")
149
+
150
+ col1, col2 = st.columns([1, 3])
151
+ with col1:
152
+ if st.button("🧠 Bring HF Expert Into Conversation",
153
+ help="Send entire conversation to HF endpoint for deep analysis",
154
+ key="manual_hf_button",
155
+ disabled=st.session_state.is_processing):
156
+ # Trigger HF analysis
157
+ st.session_state.manual_hf_requested = True
158
+
159
+ # Show HF analysis if requested
160
+ if st.session_state.get("manual_hf_requested", False):
161
+ with st.spinner("🧠 HF Expert joining conversation..."):
162
+ try:
163
+ # Get conversation history
164
+ user_session = session_manager.get_session("default_user")
165
+ conversation_history = user_session.get("conversation", [])
166
+
167
+ # Create HF analysis request
168
+ analysis_prompt = f"""
169
+ You are joining an ongoing conversation as a deep analysis expert.
170
+ Please analyze the entire conversation history and provide:
171
+ 1. Key themes and patterns
172
+ 2. Deeper insights on topics discussed
173
+ 3. Suggestions for next steps
174
+ 4. Any research needs (specify if web search needed)
175
+
176
+ Conversation History:
177
+ """
178
+
179
+ # Get HF provider
180
+ from core.llm_factory import llm_factory
181
+ hf_provider = llm_factory.get_provider('huggingface')
182
+
183
+ if hf_provider:
184
+ # Prepare messages for HF
185
+ hf_messages = [
186
+ {"role": "system", "content": analysis_prompt}
187
+ ]
188
+
189
+ # Add conversation history
190
+ for msg in conversation_history[-10:]: # Last 10 messages for context
191
+ hf_messages.append({
192
+ "role": msg["role"],
193
+ "content": msg["content"]
194
+ })
195
+
196
+ # Generate deep analysis
197
+ hf_response = hf_provider.generate(
198
+ analysis_prompt,
199
+ hf_messages
200
+ )
201
+
202
+ if hf_response:
203
+ # Display HF expert response
204
+ with st.chat_message("assistant"):
205
+ st.markdown("**🤖 HF Expert Analysis:**")
206
+ st.markdown(hf_response)
207
+
208
+ # Add to message history
209
+ st.session_state.messages.append({
210
+ "role": "assistant",
211
+ "content": hf_response,
212
+ "timestamp": datetime.now().strftime("%H:%M:%S"),
213
+ "source": "hf_expert"
214
+ })
215
+
216
+ st.session_state.manual_hf_requested = False
217
+ else:
218
+ st.error("❌ HF analysis failed - no response")
219
+ else:
220
+ st.error("❌ HF provider not available")
221
+
222
+ except Exception as e:
223
+ st.error(f"❌ HF analysis failed: {str(e)}")
224
+ finally:
225
+ st.session_state.manual_hf_requested = False
226
+ time.sleep(0.5)
227
+ st.experimental_rerun()
228
+
229
  # Chat input - FIXED VERSION
230
  user_input = st.chat_input("Type your message here...", disabled=st.session_state.is_processing)
231
 
core/coordinator.py CHANGED
@@ -50,6 +50,85 @@ Your role is to:
50
  7. Provide authoritative answers that take precedence"""
51
  }
52
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  async def coordinate_hierarchical_conversation(self, user_id: str, user_query: str) -> AsyncGenerator[Dict, None]:
54
  """
55
  Enhanced coordination with detailed tracking and feedback
 
50
  7. Provide authoritative answers that take precedence"""
51
  }
52
 
53
def determine_web_search_needs(self, conversation_history: List[Dict]) -> Dict:
    """Decide whether the conversation needs a web search for current information.

    Args:
        conversation_history: Conversation messages; each dict may carry a
            "content" string (missing keys are treated as empty).

    Returns:
        Dict with:
            needs_search (bool): True if any time-sensitive topic was found.
            search_topics (list[str]): Matched topic keywords, in scan order.
            reasoning (str): Human-readable summary of the decision.
    """
    import re

    # Topics that typically require up-to-date information.
    current_info_topics = [
        "news", "current events", "latest", "recent", "today",
        "weather", "stock", "price", "trend", "update",
        "breaking", "new development", "recent study",
    ]

    # Flatten all message contents into one lowercase haystack.
    conversation_text = " ".join(
        msg.get("content", "") for msg in conversation_history
    ).lower()

    # Match on word boundaries so e.g. "renews" does not falsely trigger
    # the "news" topic (plain substring matching did).
    search_topics = [
        topic
        for topic in current_info_topics
        if re.search(r"\b" + re.escape(topic) + r"\b", conversation_text)
    ]
    needs_search = bool(search_topics)

    return {
        "needs_search": needs_search,
        "search_topics": search_topics,
        "reasoning": (
            f"Found topics requiring current info: {', '.join(search_topics)}"
            if search_topics
            else "No current info needed"
        ),
    }
79
+
80
async def manual_hf_analysis(self, user_id: str, conversation_history: List[Dict]) -> str:
    """Perform manual HF analysis with web search integration.

    Builds a deep-analysis system prompt (annotated with the web-search
    decision from determine_web_search_needs), attaches the most recent
    conversation turns, and asks the HuggingFace provider for a response.

    Args:
        user_id: Identifier of the requesting user (currently unused in the
            body; kept for interface symmetry with other coordinator methods).
        conversation_history: Conversation messages; each dict is expected to
            carry "role" and "content" keys — a missing key raises KeyError,
            which is caught and reported below.

    Returns:
        The provider's analysis text, or an "❌ ..."-prefixed error string on
        failure (this method never raises).
    """
    # NOTE(review): declared async but nothing in the body is awaited —
    # confirm whether hf_provider.generate is sync or should be awaited.
    try:
        # Determine if web search is needed
        search_decision = self.determine_web_search_needs(conversation_history)

        # Prepare enhanced prompt for HF
        prompt = f"""
        You are a deep analysis expert joining an ongoing conversation.
        Conversation participants want your expert insights.

        Web Search Decision: {search_decision['reasoning']}

        Please provide:
        1. Deep insights on the conversation themes
        2. Any research needs (specify if web search needed)
        3. Strategic recommendations
        4. Questions to explore further

        Conversation History:
        """

        # Add web search if needed
        # NOTE(review): only a note is appended — no actual web search is
        # performed anywhere in this method; confirm intended behavior.
        if search_decision["needs_search"]:
            prompt += "\n\n[Note: Web search will be performed for current information]"

        # Get HF provider (local import avoids a module-level import cycle)
        from core.llm_factory import llm_factory
        hf_provider = llm_factory.get_provider('huggingface')

        if hf_provider:
            # Prepare messages for HF with full context: system prompt first
            hf_messages = [
                {"role": "system", "content": prompt}
            ]

            # Add conversation history
            for msg in conversation_history[-15:]:  # Last 15 messages for rich context
                hf_messages.append({
                    "role": msg["role"],
                    "content": msg["content"]
                })

            # Generate response with full 8192 token capacity
            # NOTE(review): prompt is passed both as the positional argument
            # and as the system message inside hf_messages — confirm the
            # provider's generate() signature to avoid duplicated context.
            response = hf_provider.generate(prompt, hf_messages)
            return response
        else:
            return "❌ HF provider not available for deep analysis"

    except Exception as e:
        return f"❌ HF analysis failed: {str(e)}"
131
+
132
  async def coordinate_hierarchical_conversation(self, user_id: str, user_query: str) -> AsyncGenerator[Dict, None]:
133
  """
134
  Enhanced coordination with detailed tracking and feedback
core/session.py CHANGED
@@ -182,6 +182,30 @@ class SessionManager:
182
  logger.error(f"Error updating hierarchical coordination for user {user_id}: {e}")
183
  return False
184
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
185
  def get_hierarchical_stats(self, user_id: str) -> Dict:
186
  """Get hierarchical coordination statistics"""
187
  try:
 
182
  logger.error(f"Error updating hierarchical coordination for user {user_id}: {e}")
183
  return False
184
 
185
def add_hf_expert_to_conversation(self, user_id: str, hf_analysis: str) -> bool:
    """Append an HF expert analysis entry to a user's conversation history.

    Args:
        user_id: Session owner whose conversation is extended.
        hf_analysis: Analysis text produced by the HF expert.

    Returns:
        True when the session update succeeded, False on any error
        (errors are logged, never raised).
    """
    try:
        session = self.get_session(user_id)

        # Record the analysis as an assistant turn, tagged so consumers can
        # tell it apart from regular assistant replies.
        entry = {
            "role": "assistant",
            "content": hf_analysis,
            "timestamp": datetime.now().isoformat(),
            "type": "hf_expert_analysis",
        }

        # Create the conversation list on first use, then append in place.
        session.setdefault("conversation", []).append(entry)

        # Persist the modified session.
        return self.update_session(user_id, session)

    except Exception as e:
        logger.error(f"Error adding HF expert to conversation: {e}")
        return False
208
+
209
  def get_hierarchical_stats(self, user_id: str) -> Dict:
210
  """Get hierarchical coordination statistics"""
211
  try: