Implement AI Mentor/Sidekick architecture with HF Expert and Ollama Mentor

Files changed:
- src/llm/factory.py         +8   -8
- src/llm/mentor_provider.py +269  -0
- src/ui/chat_handler.py     +17  -17
src/llm/factory.py (CHANGED)

@@ -1,7 +1,7 @@
 import logging
 from typing import Optional
 from src.llm.base_provider import LLMProvider
-from src.llm.
+from src.llm.mentor_provider import MentorProvider
 from src.llm.hf_provider import HuggingFaceProvider
 from src.llm.ollama_provider import OllamaProvider
 from utils.config import config

@@ -14,7 +14,7 @@ class ProviderNotAvailableError(Exception):
     pass

 class LLMFactory:
-    """Factory for creating LLM providers with
+    """Factory for creating LLM providers with mentor approach"""

     _instance = None

@@ -26,16 +26,16 @@ class LLMFactory:
     def get_provider(self, preferred_provider: Optional[str] = None) -> LLMProvider:
         """
         Get an LLM provider based on preference and availability.
-        Default:
+        Default: Mentor approach (HF expert + Ollama mentor)
         """
         try:
-            #
-            logger.info("Initializing
-            return
-                model_name="
+            # Try mentor provider first (HF expert + Ollama mentor)
+            logger.info("Initializing Mentor Provider (HF Expert + Ollama Mentor)")
+            return MentorProvider(
+                model_name="mentor_model"
             )
         except Exception as e:
-            logger.warning(f"Failed to initialize
+            logger.warning(f"Failed to initialize Mentor provider: {e}")

         # Fallback to individual providers
         if config.hf_token:
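With this change, callers get the mentor pipeline by default and only fall back to a single provider when its initialization fails. A minimal usage sketch, assuming the module exposes a singleton named llm_factory (chat_handler.py below refers to one; the exact import path and question text are assumptions, not part of the commit):

    # Sketch only: import path and prompt are assumptions.
    from src.llm.factory import llm_factory

    provider = llm_factory.get_provider()  # MentorProvider unless its init raised
    reply = provider.generate("How should I structure my experiment?", [])
    # reply holds the HF expert answer, with a "Mentor Insights" section
    # appended whenever the Ollama mentor is reachable.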
src/llm/mentor_provider.py (ADDED, +269 lines)
import time
import logging
from typing import List, Dict, Optional, Union, Tuple
from src.llm.base_provider import LLMProvider
from src.llm.hf_provider import HuggingFaceProvider
from src.llm.ollama_provider import OllamaProvider
from core.session import session_manager
from utils.config import config

logger = logging.getLogger(__name__)

class MentorProvider(LLMProvider):
    """Mentor provider that uses HF as expert and Ollama as mentor/coach"""

    def __init__(self, model_name: str, timeout: int = 120, max_retries: int = 2):
        super().__init__(model_name, timeout, max_retries)
        self.hf_provider = None
        self.ollama_provider = None
        self.conversation_analyzer = ConversationAnalyzer()

        # Initialize providers
        try:
            if config.hf_token:
                self.hf_provider = HuggingFaceProvider(
                    model_name="DavidAU/OpenAi-GPT-oss-20b-abliterated-uncensored-NEO-Imatrix-gguf",
                    timeout=120
                )
        except Exception as e:
            logger.warning(f"Failed to initialize HF provider: {e}")

        try:
            if config.ollama_host:
                self.ollama_provider = OllamaProvider(
                    model_name=config.local_model_name,
                    timeout=60
                )
        except Exception as e:
            logger.warning(f"Failed to initialize Ollama provider: {e}")

    def generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[str]:
        """Generate response using mentor approach"""
        try:
            # Step 1: Get expert response from HF Endpoint
            hf_response = self._get_expert_response(prompt, conversation_history)

            if not hf_response:
                raise Exception("HF Endpoint expert failed to provide response")

            # Step 2: Get mentor analysis from Ollama
            mentor_insights = self._get_mentor_analysis(
                prompt,
                hf_response,
                conversation_history
            )

            # Step 3: Combine expert response with mentor insights
            combined_response = self._combine_responses(hf_response, mentor_insights)

            # Step 4: Store interaction for learning
            self._store_interaction(prompt, hf_response, mentor_insights)

            return combined_response

        except Exception as e:
            logger.error(f"Mentor generation failed: {e}")

            # Fallback to HF only
            if self.hf_provider:
                try:
                    logger.info("Falling back to HF Endpoint only")
                    return self.hf_provider.generate(prompt, conversation_history)
                except Exception as fallback_error:
                    logger.error(f"HF fallback also failed: {fallback_error}")

            raise Exception(f"All providers failed: {str(e)}")

    def stream_generate(self, prompt: str, conversation_history: List[Dict]) -> Optional[Union[str, List[str]]]:
        """Stream response using mentor approach"""
        try:
            # For streaming, return the HF expert stream directly;
            # mentor insights are not appended in streaming mode
            if self.hf_provider:
                hf_stream = self.hf_provider.stream_generate(prompt, conversation_history)
                return hf_stream
            else:
                raise Exception("No HF provider available for streaming")
        except Exception as e:
            logger.error(f"Mentor stream generation failed: {e}")
            raise

    def _get_expert_response(self, prompt: str, conversation_history: List[Dict]) -> Optional[str]:
        """Get expert response from HF Endpoint"""
        if not self.hf_provider:
            return None

        try:
            logger.info("🤖 Getting expert response from HF Endpoint...")
            response = self.hf_provider.generate(prompt, conversation_history)
            logger.info("✅ HF Endpoint expert response received")
            return response
        except Exception as e:
            logger.error(f"HF Endpoint expert failed: {e}")
            return None

    def _get_mentor_analysis(self, user_prompt: str, hf_response: str, conversation_history: List[Dict]) -> Dict:
        """Get mentor analysis and suggestions from Ollama"""
        if not self.ollama_provider:
            return {}

        try:
            logger.info("🌱 Getting mentor analysis from Ollama...")

            # Create mentor prompt for analysis
            mentor_prompt = self._create_mentor_prompt(user_prompt, hf_response, conversation_history)

            # Get mentor insights
            mentor_response = self.ollama_provider.generate(mentor_prompt, [])

            # Parse mentor response into structured insights
            insights = self.conversation_analyzer.parse_mentor_response(mentor_response)

            logger.info("✅ Ollama mentor analysis completed")
            return insights

        except Exception as e:
            logger.warning(f"Ollama mentor analysis failed: {e}")
            return {}

    def _create_mentor_prompt(self, user_prompt: str, hf_response: str, conversation_history: List[Dict]) -> str:
        """Create prompt for Ollama mentor to analyze interaction"""
        conversation_context = "\n".join([
            f"{msg['role']}: {msg['content']}"
            for msg in conversation_history[-5:]  # Last 5 messages for context
        ])

        prompt = f"""
You are an AI mentor and conversation analyst. Your job is to analyze the interaction between a user and an expert AI, then provide insightful guidance.

ANALYZE THIS INTERACTION:
User Question: "{user_prompt}"
Expert Response: "{hf_response}"

Recent Conversation Context:
{conversation_context}

PROVIDE YOUR ANALYSIS IN THIS FORMAT:

<thinking_analysis>
Analyze the expert's reasoning approach, depth of analysis, and problem-solving methodology.
</thinking_analysis>

<goal_progress>
Assess how well this response advances toward the user's likely goals based on conversation history.
</goal_progress>

<follow_up_suggestions>
Provide 2-3 thoughtful follow-up questions or research directions that would deepen understanding.
</follow_up_suggestions>

<data_gathering>
Suggest what additional information or data would be valuable to collect.
</data_gathering>

<critical_insights>
Highlight any key insights, potential blind spots, or areas needing further exploration.
</critical_insights>

Keep your analysis concise but insightful. Focus on helping the user achieve their goals through better questioning and information gathering.
"""
        return prompt

    def _combine_responses(self, hf_response: str, mentor_insights: Dict) -> str:
        """Combine expert response with mentor insights"""
        if not mentor_insights:
            return hf_response

        # Format mentor insights nicely
        insights_section = "\n\n--- 🎓 Mentor Insights ---\n"

        if mentor_insights.get('thinking_analysis'):
            insights_section += f"\n🧠 **Thinking Analysis**\n{mentor_insights['thinking_analysis']}"

        if mentor_insights.get('goal_progress'):
            insights_section += f"\n\n🎯 **Goal Progress**\n{mentor_insights['goal_progress']}"

        if mentor_insights.get('follow_up_suggestions'):
            insights_section += f"\n\n🤔 **Follow-up Suggestions**\n{mentor_insights['follow_up_suggestions']}"

        if mentor_insights.get('data_gathering'):
            insights_section += f"\n\n📊 **Data to Gather**\n{mentor_insights['data_gathering']}"

        if mentor_insights.get('critical_insights'):
            insights_section += f"\n\n💡 **Critical Insights**\n{mentor_insights['critical_insights']}"

        return f"{hf_response}{insights_section}"

    def _store_interaction(self, user_prompt: str, hf_response: str, mentor_insights: Dict):
        """Store interaction for learning and pattern recognition"""
        try:
            user_session = session_manager.get_session("default_user")
            interaction_log = user_session.get("interaction_log", [])

            # Create interaction record
            interaction = {
                "timestamp": time.time(),
                "user_prompt": user_prompt,
                "expert_response": hf_response,
                "mentor_insights": mentor_insights,
                "conversation_length": len(interaction_log)
            }

            # Keep last 20 interactions
            interaction_log.append(interaction)
            if len(interaction_log) > 20:
                interaction_log = interaction_log[-20:]

            user_session["interaction_log"] = interaction_log
            session_manager.update_session("default_user", user_session)

        except Exception as e:
            logger.warning(f"Failed to store interaction: {e}")

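To make the combining step concrete, here is roughly what _combine_responses yields for a hypothetical insights dict (all insight text below is invented for illustration, not part of the commit):

    # Illustration only; values are invented.
    insights = {
        "thinking_analysis": "Reasons from first principles but skips cost trade-offs.",
        "follow_up_suggestions": "What is the latency budget? Which metric defines success?",
    }
    # Given hf_response = "Use a two-stage retriever.", the combined result is:
    #
    # Use a two-stage retriever.
    #
    # --- 🎓 Mentor Insights ---
    #
    # 🧠 **Thinking Analysis**
    # Reasons from first principles but skips cost trade-offs.
    #
    # 🤔 **Follow-up Suggestions**
    # What is the latency budget? Which metric defines success?

The ConversationAnalyzer below is the other half of that contract: it pulls the tagged sections out of the raw mentor reply.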
class ConversationAnalyzer:
    """Analyzes conversation patterns and provides insights"""

    def parse_mentor_response(self, mentor_response: str) -> Dict:
        """Parse mentor response into structured insights"""
        if not mentor_response:
            return {}

        insights = {}

        # Extract sections using simple parsing
        sections = {
            'thinking_analysis': self._extract_section(mentor_response, 'thinking_analysis'),
            'goal_progress': self._extract_section(mentor_response, 'goal_progress'),
            'follow_up_suggestions': self._extract_section(mentor_response, 'follow_up_suggestions'),
            'data_gathering': self._extract_section(mentor_response, 'data_gathering'),
            'critical_insights': self._extract_section(mentor_response, 'critical_insights')
        }

        # Keep only sections that still contain text after stripping whitespace
        for key, value in sections.items():
            if value:
                cleaned = value.strip()
                if cleaned:
                    insights[key] = cleaned

        return insights

    def _extract_section(self, text: str, section_name: str) -> Optional[str]:
        """Extract specific section from mentor response"""
        start_tag = f"<{section_name}>"
        end_tag = f"</{section_name}>"

        start_idx = text.find(start_tag)
        if start_idx == -1:
            return None

        start_idx += len(start_tag)
        end_idx = text.find(end_tag, start_idx)

        if end_idx == -1:
            return None

        return text[start_idx:end_idx].strip()

# Global instance
mentor_provider = MentorProvider("mentor_model")
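The tag-based contract between _create_mentor_prompt and this parser can be sanity-checked directly; a small sketch with an invented mentor reply:

    # Sketch: sample reply text is invented.
    analyzer = ConversationAnalyzer()
    sample = (
        "<thinking_analysis>Step-by-step, but assumes clean data.</thinking_analysis>\n"
        "<data_gathering>Collect per-query latency numbers.</data_gathering>"
    )
    insights = analyzer.parse_mentor_response(sample)
    assert insights["thinking_analysis"] == "Step-by-step, but assumes clean data."
    assert "goal_progress" not in insights  # missing or unclosed tags are dropped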
src/ui/chat_handler.py (CHANGED)

@@ -8,7 +8,7 @@ from core.session import session_manager
 logger = logging.getLogger(__name__)

 class ChatHandler:
-    """Handles chat interactions with
+    """Handles chat interactions with mentor AI approach"""

     def __init__(self):
         self.is_processing = False

@@ -53,7 +53,7 @@ class ChatHandler:
         st.session_state.last_processed_message = ""

     def process_ai_response(self, user_input: str, selected_model: str):
-        """Process AI response with
+        """Process AI response with mentor approach"""
         if not user_input or not user_input.strip():
             return

@@ -64,15 +64,15 @@ class ChatHandler:
         response_placeholder = st.empty()

         try:
-            # Get
-            status_placeholder.info("
+            # Get mentor provider
+            status_placeholder.info("🎓 Activating AI Mentor System...")
             provider = llm_factory.get_provider()

-            # Show
+            # Show system status
             if hasattr(provider, 'hf_provider') and provider.hf_provider:
-                status_placeholder.info("
+                status_placeholder.info("🤖 Consulting HF Expert + 🌱 Getting Ollama Mentor Insights...")
             else:
-                status_placeholder.info("🦙
+                status_placeholder.info("🦙 Consulting Local Ollama...")

             # Get response
             response = None

@@ -88,16 +88,16 @@
             raise

         if response and response.strip():
-            status_placeholder.success("✅ Response
+            status_placeholder.success("✅ Expert Response + Mentor Insights Received!")
             response_placeholder.markdown(response)

             # Add to session history with provider info
             timestamp = time.strftime("%H:%M:%S")
-            provider_info = "
+            provider_info = "mentor_system"
             if hasattr(provider, 'hf_provider') and provider.hf_provider:
-                provider_info = "
+                provider_info = "mentor_hf"
             elif hasattr(provider, 'ollama_provider') and provider.ollama_provider:
-                provider_info = "
+                provider_info = "ollama_only"

             st.session_state.messages.append({
                 "role": "assistant",

@@ -132,10 +132,10 @@

         # User-friendly error messages
         if "timeout" in str(e).lower() or "500" in str(e):
-            error_message = ("⏰ Request timed out. The AI
+            error_message = ("⏰ Request timed out. The AI is taking too long to respond.\n\n"
                              "**Current setup:**\n"
-                             "• 🤖 HF
-                             "•
+                             "• 🤖 HF Expert: Providing deep analysis\n"
+                             "• 🌱 Ollama Mentor: Analyzing thinking patterns\n\n"
                              "Please try again or simplify your question.")
         else:
             error_message = f"Sorry, I encountered an error: {str(e)}"

@@ -161,9 +161,9 @@
     def _get_provider_display_name(self, provider_name: str) -> str:
         """Get display name for provider"""
         display_names = {
-            "
-            "
-            "
+            "ollama_only": "🦙 Ollama (Local)",
+            "mentor_hf": "🎓 Mentor System (HF Expert + Ollama Mentor)",
+            "mentor_system": "🎓 Mentor System Active"
         }
         return display_names.get(provider_name, provider_name)
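For completeness, a sketch of how the stored provider_info string maps back to a label when history is rendered (the message key that carries provider_info is not shown in these hunks, so "provider" below is an assumption):

    # Sketch; the "provider" key name is an assumption.
    handler = ChatHandler()
    for msg in st.session_state.messages:
        if msg["role"] == "assistant":
            st.caption(handler._get_provider_display_name(msg.get("provider", "")))
            # e.g. "🎓 Mentor System (HF Expert + Ollama Mentor)"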