JatsTheAIGen commited on
Commit
a5d9083
·
1 Parent(s): 29048d9

cumulative upgrade - context + safety + response length v2

Browse files
Files changed (6) hide show
  1. agent_stubs.py +570 -8
  2. app.py +13 -5
  3. llm_router.py +100 -43
  4. orchestrator_engine.py +345 -5
  5. src/llm_router.py +100 -43
  6. src/orchestrator_engine.py +344 -5
agent_stubs.py CHANGED
@@ -2,15 +2,23 @@
2
  """
3
  Agent implementations for the orchestrator
4
 
5
- NOTE: Intent Recognition Agent has been fully implemented in src/agents/intent_agent.py
6
- This file serves as the stub for other agents
7
  """
8
 
9
- # Import the fully implemented agents
10
- from src.agents.intent_agent import IntentRecognitionAgent
11
- from src.agents.synthesis_agent import ResponseSynthesisAgent
12
- from src.agents.safety_agent import SafetyCheckAgent
13
 
 
 
 
 
 
 
 
 
 
14
  class IntentRecognitionAgentStub(IntentRecognitionAgent):
15
  """
16
  Wrapper for the fully implemented Intent Recognition Agent
@@ -18,9 +26,9 @@ class IntentRecognitionAgentStub(IntentRecognitionAgent):
18
  """
19
  pass
20
 
21
- class ResponseSynthesisAgentStub(ResponseSynthesisAgent):
22
  """
23
- Wrapper for the fully implemented Response Synthesis Agent
24
  Maintains compatibility with orchestrator expectations
25
  """
26
  pass
@@ -32,3 +40,557 @@ class SafetyCheckAgentStub(SafetyCheckAgent):
32
  """
33
  pass
34
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  """
3
  Agent implementations for the orchestrator
4
 
5
+ Core agents are fully implemented in src/agents/
6
+ Task-specific execution agents are implemented here
7
  """
8
 
9
+ import logging
10
+ from typing import Dict, Any, Optional
11
+ import asyncio
 
12
 
13
+ logger = logging.getLogger(__name__)
14
+
15
+ # Import the fully implemented core agents
16
+ from src.agents.intent_agent import IntentRecognitionAgent, create_intent_agent
17
+ from src.agents.synthesis_agent import EnhancedSynthesisAgent, create_synthesis_agent
18
+ from src.agents.safety_agent import SafetyCheckAgent, create_safety_agent
19
+ from src.agents.skills_identification_agent import SkillsIdentificationAgent, create_skills_identification_agent
20
+
21
+ # Compatibility wrappers for core agents
22
  class IntentRecognitionAgentStub(IntentRecognitionAgent):
23
  """
24
  Wrapper for the fully implemented Intent Recognition Agent
 
26
  """
27
  pass
28
 
29
+ class ResponseSynthesisAgentStub(EnhancedSynthesisAgent):
30
  """
31
+ Wrapper for the fully implemented Enhanced Synthesis Agent
32
  Maintains compatibility with orchestrator expectations
33
  """
34
  pass
 
40
  """
41
  pass
42
 
43
+ class SkillsIdentificationAgentStub(SkillsIdentificationAgent):
44
+ """
45
+ Wrapper for the fully implemented Skills Identification Agent
46
+ Maintains compatibility with orchestrator expectations
47
+ """
48
+ pass
49
+
50
+
51
+ # ============================================================================
52
+ # Task-Specific Execution Agents
53
+ # These agents handle specialized tasks in the execution plan
54
+ # ============================================================================
55
+
56
+ class TaskExecutionAgent:
57
+ """
58
+ Base class for task-specific execution agents
59
+ Provides common functionality for all task agents
60
+ """
61
+
62
+ def __init__(self, llm_router, agent_id: str, task_name: str, specialization: str = ""):
63
+ """
64
+ Initialize task execution agent
65
+
66
+ Args:
67
+ llm_router: LLMRouter instance for making inference calls
68
+ agent_id: Unique identifier for this agent
69
+ task_name: Name of the task this agent handles
70
+ specialization: Description of what this agent specializes in
71
+ """
72
+ self.llm_router = llm_router
73
+ self.agent_id = agent_id
74
+ self.task_name = task_name
75
+ self.specialization = specialization or f"Specialized in {task_name} tasks"
76
+ logger.info(f"Initialized {self.agent_id}: {self.specialization}")
77
+
78
+ async def execute(self, user_input: str, context: Dict[str, Any] = None,
79
+ previous_results: Dict[str, Any] = None, **kwargs) -> Dict[str, Any]:
80
+ """
81
+ Execute the agent's task
82
+
83
+ Args:
84
+ user_input: Original user query
85
+ context: Conversation context
86
+ previous_results: Results from previous sequential tasks
87
+ **kwargs: Additional parameters
88
+
89
+ Returns:
90
+ Dict with task execution results
91
+ """
92
+ try:
93
+ logger.info(f"{self.agent_id} executing task: {self.task_name}")
94
+
95
+ # Build task-specific prompt
96
+ prompt = self._build_execution_prompt(user_input, context, previous_results, **kwargs)
97
+
98
+ # Execute via LLM router
99
+ logger.debug(f"{self.agent_id} calling LLM router for {self.task_name}")
100
+ result = await self.llm_router.route_inference(
101
+ task_type="general_reasoning",
102
+ prompt=prompt,
103
+ max_tokens=kwargs.get('max_tokens', 2000),
104
+ temperature=kwargs.get('temperature', 0.7)
105
+ )
106
+
107
+ if result:
108
+ return {
109
+ "agent_id": self.agent_id,
110
+ "task": self.task_name,
111
+ "status": "completed",
112
+ "content": result,
113
+ "content_length": len(str(result)),
114
+ "method": "llm_enhanced"
115
+ }
116
+ else:
117
+ logger.warning(f"{self.agent_id} returned empty result")
118
+ return {
119
+ "agent_id": self.agent_id,
120
+ "task": self.task_name,
121
+ "status": "empty",
122
+ "content": "",
123
+ "content_length": 0,
124
+ "method": "llm_enhanced"
125
+ }
126
+
127
+ except Exception as e:
128
+ logger.error(f"{self.agent_id} execution failed: {e}", exc_info=True)
129
+ return {
130
+ "agent_id": self.agent_id,
131
+ "task": self.task_name,
132
+ "status": "error",
133
+ "error": str(e),
134
+ "content": "",
135
+ "method": "llm_enhanced"
136
+ }
137
+
138
+ def _build_execution_prompt(self, user_input: str, context: Dict[str, Any] = None,
139
+ previous_results: Dict[str, Any] = None, **kwargs) -> str:
140
+ """
141
+ Build task-specific execution prompt
142
+ Override in subclasses for custom prompt building
143
+ """
144
+ # Build context summary
145
+ context_summary = self._build_context_summary(context)
146
+
147
+ # Base prompt structure
148
+ prompt = f"""User Query: {user_input}
149
+
150
+ Context: {context_summary}
151
+ """
152
+
153
+ # Add previous results if sequential execution
154
+ if previous_results:
155
+ prompt += f"\nPrevious Task Results:\n{self._format_previous_results(previous_results)}\n"
156
+
157
+ # Add task-specific instructions
158
+ prompt += f"\n{self._get_task_instructions()}"
159
+
160
+ return prompt
161
+
162
+ def _build_context_summary(self, context: Dict[str, Any] = None) -> str:
163
+ """Build concise context summary"""
164
+ if not context:
165
+ return "No prior context"
166
+
167
+ summary_parts = []
168
+
169
+ # Extract interaction contexts
170
+ interaction_contexts = context.get('interaction_contexts', [])
171
+ if interaction_contexts:
172
+ recent_summaries = [ic.get('summary', '') for ic in interaction_contexts[-3:]]
173
+ if recent_summaries:
174
+ summary_parts.append(f"Recent topics: {', '.join(recent_summaries)}")
175
+
176
+ # Extract user context
177
+ user_context = context.get('user_context', '')
178
+ if user_context:
179
+ summary_parts.append(f"User background: {user_context[:200]}")
180
+
181
+ return " | ".join(summary_parts) if summary_parts else "No prior context"
182
+
183
+ def _format_previous_results(self, previous_results: Dict[str, Any]) -> str:
184
+ """Format previous task results for inclusion in prompt"""
185
+ formatted = []
186
+ for task_name, result in previous_results.items():
187
+ if isinstance(result, dict):
188
+ content = result.get('content', result.get('result', ''))
189
+ if content:
190
+ formatted.append(f"- {task_name}: {str(content)[:500]}")
191
+ return "\n".join(formatted) if formatted else "No previous results"
192
+
193
+ def _get_task_instructions(self) -> str:
194
+ """
195
+ Get task-specific instructions
196
+ Override in subclasses
197
+ """
198
+ return f"Task: Execute {self.task_name} based on the user query and context."
199
+
200
+
201
+ # ============================================================================
202
+ # Specific Task Execution Agents
203
+ # ============================================================================
204
+
205
+ class InformationGatheringAgent(TaskExecutionAgent):
206
+ """Agent specialized in gathering comprehensive information"""
207
+
208
+ def __init__(self, llm_router):
209
+ super().__init__(
210
+ llm_router,
211
+ agent_id="INFO_GATH_001",
212
+ task_name="information_gathering",
213
+ specialization="Comprehensive information gathering and fact verification"
214
+ )
215
+
216
+ def _get_task_instructions(self) -> str:
217
+ return """Task: Gather comprehensive, accurate information relevant to the user's query.
218
+ - Focus on facts, definitions, explanations, and verified information
219
+ - Structure the information clearly with key points
220
+ - Cite important details and provide context
221
+ - Ensure accuracy and completeness"""
222
+
223
+
224
+ class ContentResearchAgent(TaskExecutionAgent):
225
+ """Agent specialized in researching and compiling content"""
226
+
227
+ def __init__(self, llm_router):
228
+ super().__init__(
229
+ llm_router,
230
+ agent_id="CONTENT_RESEARCH_001",
231
+ task_name="content_research",
232
+ specialization="Detailed content research and compilation"
233
+ )
234
+
235
+ def _get_task_instructions(self) -> str:
236
+ return """Task: Research and compile detailed content about the topic.
237
+ - Include multiple perspectives and viewpoints
238
+ - Gather current information and relevant examples
239
+ - Organize findings logically with clear sections
240
+ - Provide comprehensive coverage of the topic"""
241
+
242
+
243
+ class TaskPlanningAgent(TaskExecutionAgent):
244
+ """Agent specialized in creating detailed execution plans"""
245
+
246
+ def __init__(self, llm_router):
247
+ super().__init__(
248
+ llm_router,
249
+ agent_id="TASK_PLAN_001",
250
+ task_name="task_planning",
251
+ specialization="Detailed task planning and execution strategy"
252
+ )
253
+
254
+ def _get_task_instructions(self) -> str:
255
+ return """Task: Create a detailed execution plan for the requested task.
256
+ - Break down into clear, actionable steps
257
+ - Identify requirements and dependencies
258
+ - Outline expected outcomes and success criteria
259
+ - Consider potential challenges and solutions
260
+ - Provide timeline and resource estimates"""
261
+
262
+
263
+ class ExecutionStrategyAgent(TaskExecutionAgent):
264
+ """Agent specialized in developing strategic approaches"""
265
+
266
+ def __init__(self, llm_router):
267
+ super().__init__(
268
+ llm_router,
269
+ agent_id="EXEC_STRAT_001",
270
+ task_name="execution_strategy",
271
+ specialization="Strategic execution methodology development"
272
+ )
273
+
274
+ def _get_task_instructions(self) -> str:
275
+ return """Task: Develop a strategic approach for task execution.
276
+ - Define methodology and best practices
277
+ - Identify implementation considerations
278
+ - Provide actionable guidance with clear priorities
279
+ - Consider efficiency and effectiveness
280
+ - Address risk mitigation strategies"""
281
+
282
+
283
+ class CreativeBrainstormingAgent(TaskExecutionAgent):
284
+ """Agent specialized in creative ideation"""
285
+
286
+ def __init__(self, llm_router):
287
+ super().__init__(
288
+ llm_router,
289
+ agent_id="CREATIVE_BS_001",
290
+ task_name="creative_brainstorming",
291
+ specialization="Creative ideas generation and brainstorming"
292
+ )
293
+
294
+ def _get_task_instructions(self) -> str:
295
+ return """Task: Generate creative ideas and approaches for content creation.
296
+ - Explore different angles, styles, and formats
297
+ - Provide diverse creative options
298
+ - Include implementation suggestions
299
+ - Encourage innovative thinking
300
+ - Balance creativity with practicality"""
301
+
302
+
303
+ class ContentIdeationAgent(TaskExecutionAgent):
304
+ """Agent specialized in content concept development"""
305
+
306
+ def __init__(self, llm_router):
307
+ super().__init__(
308
+ llm_router,
309
+ agent_id="CONTENT_IDEATION_001",
310
+ task_name="content_ideation",
311
+ specialization="Content concepts and ideation development"
312
+ )
313
+
314
+ def _get_task_instructions(self) -> str:
315
+ return """Task: Develop content concepts and detailed ideation.
316
+ - Create outlines and structural frameworks
317
+ - Define themes and key messaging
318
+ - Suggest variations and refinement paths
319
+ - Provide detailed development paths
320
+ - Consider audience and purpose"""
321
+
322
+
323
+ class ResearchAnalysisAgent(TaskExecutionAgent):
324
+ """Agent specialized in research analysis"""
325
+
326
+ def __init__(self, llm_router):
327
+ super().__init__(
328
+ llm_router,
329
+ agent_id="RESEARCH_ANALYSIS_001",
330
+ task_name="research_analysis",
331
+ specialization="Thorough research analysis and insights"
332
+ )
333
+
334
+ def _get_task_instructions(self) -> str:
335
+ return """Task: Conduct thorough research analysis on the topic.
336
+ - Identify key findings, trends, and patterns
337
+ - Analyze different perspectives and methodologies
338
+ - Provide comprehensive insights
339
+ - Evaluate evidence and sources
340
+ - Synthesize complex information"""
341
+
342
+
343
+ class DataCollectionAgent(TaskExecutionAgent):
344
+ """Agent specialized in data collection and organization"""
345
+
346
+ def __init__(self, llm_router):
347
+ super().__init__(
348
+ llm_router,
349
+ agent_id="DATA_COLLECT_001",
350
+ task_name="data_collection",
351
+ specialization="Data point collection and evidence gathering"
352
+ )
353
+
354
+ def _get_task_instructions(self) -> str:
355
+ return """Task: Collect and organize relevant data points and evidence.
356
+ - Gather statistics, examples, and case studies
357
+ - Compile supporting information
358
+ - Structure data for easy analysis and reference
359
+ - Verify data quality and relevance
360
+ - Organize systematically"""
361
+
362
+
363
+ class PatternIdentificationAgent(TaskExecutionAgent):
364
+ """Agent specialized in pattern recognition and analysis"""
365
+
366
+ def __init__(self, llm_router):
367
+ super().__init__(
368
+ llm_router,
369
+ agent_id="PATTERN_ID_001",
370
+ task_name="pattern_identification",
371
+ specialization="Pattern recognition and correlation analysis"
372
+ )
373
+
374
+ def _get_task_instructions(self) -> str:
375
+ return """Task: Identify patterns, correlations, and significant relationships.
376
+ - Analyze trends and cause-effect relationships
377
+ - Discover underlying structures
378
+ - Provide insights based on pattern recognition
379
+ - Identify anomalies and exceptions
380
+ - Connect disparate information"""
381
+
382
+
383
+ class ProblemAnalysisAgent(TaskExecutionAgent):
384
+ """Agent specialized in problem analysis"""
385
+
386
+ def __init__(self, llm_router):
387
+ super().__init__(
388
+ llm_router,
389
+ agent_id="PROBLEM_ANALYSIS_001",
390
+ task_name="problem_analysis",
391
+ specialization="Detailed problem analysis and root cause identification"
392
+ )
393
+
394
+ def _get_task_instructions(self) -> str:
395
+ return """Task: Analyze the problem in detail.
396
+ - Identify root causes and contributing factors
397
+ - Understand constraints and limitations
398
+ - Break down the problem into components
399
+ - Map problem relationships
400
+ - Prioritize issues for systematic resolution"""
401
+
402
+
403
+ class SolutionResearchAgent(TaskExecutionAgent):
404
+ """Agent specialized in solution research and evaluation"""
405
+
406
+ def __init__(self, llm_router):
407
+ super().__init__(
408
+ llm_router,
409
+ agent_id="SOLUTION_RESEARCH_001",
410
+ task_name="solution_research",
411
+ specialization="Solution research and evaluation"
412
+ )
413
+
414
+ def _get_task_instructions(self) -> str:
415
+ return """Task: Research and evaluate potential solutions.
416
+ - Compare different approaches and methodologies
417
+ - Assess pros and cons of each option
418
+ - Recommend best practices
419
+ - Consider implementation feasibility
420
+ - Evaluate effectiveness and efficiency"""
421
+
422
+
423
+ class CurriculumPlanningAgent(TaskExecutionAgent):
424
+ """Agent specialized in educational curriculum design"""
425
+
426
+ def __init__(self, llm_router):
427
+ super().__init__(
428
+ llm_router,
429
+ agent_id="CURRICULUM_PLAN_001",
430
+ task_name="curriculum_planning",
431
+ specialization="Educational curriculum and learning path design"
432
+ )
433
+
434
+ def _get_task_instructions(self) -> str:
435
+ return """Task: Design educational curriculum and learning path.
436
+ - Structure content progressively
437
+ - Define clear learning objectives
438
+ - Suggest appropriate resources
439
+ - Create comprehensive learning framework
440
+ - Ensure pedagogical effectiveness"""
441
+
442
+
443
+ class EducationalContentAgent(TaskExecutionAgent):
444
+ """Agent specialized in educational content generation"""
445
+
446
+ def __init__(self, llm_router):
447
+ super().__init__(
448
+ llm_router,
449
+ agent_id="EDUC_CONTENT_001",
450
+ task_name="educational_content",
451
+ specialization="Educational content with clear explanations"
452
+ )
453
+
454
+ def _get_task_instructions(self) -> str:
455
+ return """Task: Generate educational content with clear explanations.
456
+ - Use effective teaching methods
457
+ - Provide examples and analogies
458
+ - Manage progressive complexity
459
+ - Make content accessible and engaging
460
+ - Support learning objectives"""
461
+
462
+
463
+ class TechnicalResearchAgent(TaskExecutionAgent):
464
+ """Agent specialized in technical research"""
465
+
466
+ def __init__(self, llm_router):
467
+ super().__init__(
468
+ llm_router,
469
+ agent_id="TECH_RESEARCH_001",
470
+ task_name="technical_research",
471
+ specialization="Technical aspects and solutions research"
472
+ )
473
+
474
+ def _get_task_instructions(self) -> str:
475
+ return """Task: Research technical aspects and solutions.
476
+ - Gather technical documentation
477
+ - Identify best practices and standards
478
+ - Compile implementation details
479
+ - Structure technical information clearly
480
+ - Provide practical guidance"""
481
+
482
+
483
+ class GuidanceGenerationAgent(TaskExecutionAgent):
484
+ """Agent specialized in step-by-step guidance"""
485
+
486
+ def __init__(self, llm_router):
487
+ super().__init__(
488
+ llm_router,
489
+ agent_id="GUIDANCE_GEN_001",
490
+ task_name="guidance_generation",
491
+ specialization="Step-by-step guidance and instructions"
492
+ )
493
+
494
+ def _get_task_instructions(self) -> str:
495
+ return """Task: Generate step-by-step guidance and instructions.
496
+ - Create clear, actionable steps
497
+ - Provide detailed explanations
498
+ - Include troubleshooting tips
499
+ - Ensure comprehensiveness
500
+ - Make guidance easy to follow"""
501
+
502
+
503
+ class ContextEnrichmentAgent(TaskExecutionAgent):
504
+ """Agent specialized in context enrichment"""
505
+
506
+ def __init__(self, llm_router):
507
+ super().__init__(
508
+ llm_router,
509
+ agent_id="CONTEXT_ENRICH_001",
510
+ task_name="context_enrichment",
511
+ specialization="Conversation context enrichment"
512
+ )
513
+
514
+ def _get_task_instructions(self) -> str:
515
+ return """Task: Enrich the conversation with relevant context and insights.
516
+ - Add helpful background information
517
+ - Connect to previous topics
518
+ - Include engaging details
519
+ - Enhance understanding
520
+ - Maintain conversation flow"""
521
+
522
+
523
+ class GeneralResearchAgent(TaskExecutionAgent):
524
+ """Agent for general research tasks"""
525
+
526
+ def __init__(self, llm_router):
527
+ super().__init__(
528
+ llm_router,
529
+ agent_id="GENERAL_RESEARCH_001",
530
+ task_name="general_research",
531
+ specialization="General research and information gathering"
532
+ )
533
+
534
+ def _get_task_instructions(self) -> str:
535
+ return """Task: Conduct general research and information gathering.
536
+ - Compile relevant information
537
+ - Gather insights and useful details
538
+ - Organize findings clearly
539
+ - Provide comprehensive coverage
540
+ - Structure for easy reference"""
541
+
542
+
543
+ # ============================================================================
544
+ # Factory Functions for Task Execution Agents
545
+ # ============================================================================
546
+
547
+ def create_task_execution_agent(task_name: str, llm_router) -> TaskExecutionAgent:
548
+ """
549
+ Factory function to create task-specific execution agents
550
+
551
+ Args:
552
+ task_name: Name of the task to create an agent for
553
+ llm_router: LLMRouter instance
554
+
555
+ Returns:
556
+ Appropriate TaskExecutionAgent instance
557
+ """
558
+ agent_map = {
559
+ "information_gathering": InformationGatheringAgent,
560
+ "content_research": ContentResearchAgent,
561
+ "task_planning": TaskPlanningAgent,
562
+ "execution_strategy": ExecutionStrategyAgent,
563
+ "creative_brainstorming": CreativeBrainstormingAgent,
564
+ "content_ideation": ContentIdeationAgent,
565
+ "research_analysis": ResearchAnalysisAgent,
566
+ "data_collection": DataCollectionAgent,
567
+ "pattern_identification": PatternIdentificationAgent,
568
+ "problem_analysis": ProblemAnalysisAgent,
569
+ "solution_research": SolutionResearchAgent,
570
+ "curriculum_planning": CurriculumPlanningAgent,
571
+ "educational_content": EducationalContentAgent,
572
+ "technical_research": TechnicalResearchAgent,
573
+ "guidance_generation": GuidanceGenerationAgent,
574
+ "context_enrichment": ContextEnrichmentAgent,
575
+ "general_research": GeneralResearchAgent,
576
+ }
577
+
578
+ agent_class = agent_map.get(task_name, GeneralResearchAgent)
579
+ return agent_class(llm_router)
580
+
581
+
582
+ def create_task_execution_agents(task_names: list, llm_router) -> Dict[str, TaskExecutionAgent]:
583
+ """
584
+ Factory function to create multiple task execution agents
585
+
586
+ Args:
587
+ task_names: List of task names to create agents for
588
+ llm_router: LLMRouter instance
589
+
590
+ Returns:
591
+ Dictionary mapping task names to agent instances
592
+ """
593
+ agents = {}
594
+ for task_name in task_names:
595
+ agents[task_name] = create_task_execution_agent(task_name, llm_router)
596
+ return agents
app.py CHANGED
@@ -45,7 +45,8 @@ try:
45
  orchestrator_available = True
46
  except ImportError as e:
47
  logger.warning(f"Could not import orchestration components: {e}")
48
- logger.info("Will use placeholder mode")
 
49
 
50
  try:
51
  from spaces import GPU
@@ -483,8 +484,9 @@ def setup_event_handlers(demo, event_handlers):
483
  outputs=[components.get('message_input'), components.get('chatbot')]
484
  )
485
  except Exception as e:
486
- print(f"Could not setup event handlers: {e}")
487
- # Fallback to basic functionality
 
488
 
489
  return demo
490
 
@@ -711,12 +713,18 @@ async def process_message_async(message: str, history: Optional[List], session_i
711
  else:
712
  response = str(result) if result else "Processing complete."
713
 
714
- # Final safety check - ensure response is not empty
715
  # Handle both string and dict types
716
  if isinstance(response, dict):
717
  response = str(response.get('content', response))
718
  if not response or (isinstance(response, str) and len(response.strip()) == 0):
719
- response = f"I understand you said: '{message}'. I'm here to assist you!"
 
 
 
 
 
 
720
 
721
  logger.info(f"Orchestrator returned response (length: {len(response)})")
722
 
 
45
  orchestrator_available = True
46
  except ImportError as e:
47
  logger.warning(f"Could not import orchestration components: {e}")
48
+ # Note: System will gracefully degrade if orchestrator unavailable
49
+ # This is handled in process_message_async with proper user-facing messages
50
 
51
  try:
52
  from spaces import GPU
 
484
  outputs=[components.get('message_input'), components.get('chatbot')]
485
  )
486
  except Exception as e:
487
+ logger.error(f"Could not setup event handlers: {e}", exc_info=True)
488
+ # Event handlers setup failure is logged but won't affect core chat functionality
489
+ # Gradio interface will still work with default handlers
490
 
491
  return demo
492
 
 
713
  else:
714
  response = str(result) if result else "Processing complete."
715
 
716
+ # Final safety check - ensure response is not empty (only for actual errors)
717
  # Handle both string and dict types
718
  if isinstance(response, dict):
719
  response = str(response.get('content', response))
720
  if not response or (isinstance(response, str) and len(response.strip()) == 0):
721
+ # This should only happen if LLM API completely fails - log it
722
+ logger.warning(f"Empty response received from orchestrator for message: {message[:50]}...")
723
+ response = (
724
+ f"I received your message about '{message[:50]}...'. "
725
+ f"I'm processing your request and working on providing you with a comprehensive answer. "
726
+ f"Please wait a moment and try again if needed."
727
+ )
728
 
729
  logger.info(f"Orchestrator returned response (length: {len(response)})")
730
 
llm_router.py CHANGED
@@ -1,5 +1,6 @@
1
  # llm_router.py - FIXED VERSION
2
  import logging
 
3
  from models_config import LLM_CONFIG
4
 
5
  logger = logging.getLogger(__name__)
@@ -73,12 +74,19 @@ class LLMRouter:
73
  async def _call_hf_endpoint(self, model_config: dict, prompt: str, task_type: str, **kwargs):
74
  """
75
  FIXED: Make actual call to Hugging Face Chat Completions API
76
- Uses the correct chat completions protocol
77
 
78
  IMPORTANT: task_type parameter is now properly included in the method signature
79
  """
 
 
 
 
 
 
80
  try:
81
  import requests
 
82
 
83
  model_id = model_config["model_id"]
84
 
@@ -125,51 +133,100 @@ class LLMRouter:
125
  "Content-Type": "application/json"
126
  }
127
 
128
- logger.info(f"Sending request to: {api_url}")
129
- logger.debug(f"Payload: {payload}")
130
-
131
- response = requests.post(api_url, json=payload, headers=headers, timeout=30)
132
-
133
- if response.status_code == 200:
134
- result = response.json()
135
- logger.debug(f"Raw response: {result}")
136
-
137
- if 'choices' in result and len(result['choices']) > 0:
138
- generated_text = result['choices'][0]['message']['content']
139
 
140
- if not generated_text or generated_text.strip() == "":
141
- logger.warning(f"Empty or invalid response, using fallback")
142
- return None
143
 
144
- logger.info(f"HF API returned response (length: {len(generated_text)})")
145
- logger.info("=" * 80)
146
- logger.info("COMPLETE LLM API RESPONSE:")
147
- logger.info("=" * 80)
148
- logger.info(f"Model: {model_id}")
149
 
150
- # FIXED: task_type is now properly available
151
- logger.info(f"Task Type: {task_type}")
152
- logger.info(f"Response Length: {len(generated_text)} characters")
153
- logger.info("-" * 40)
154
- logger.info("FULL RESPONSE CONTENT:")
155
- logger.info("-" * 40)
156
- logger.info(generated_text)
157
- logger.info("-" * 40)
158
- logger.info("END OF LLM RESPONSE")
159
- logger.info("=" * 80)
160
- return generated_text
161
- else:
162
- logger.error(f"Unexpected response format: {result}")
163
- return None
164
- elif response.status_code == 503:
165
- # Model is loading, retry with simpler model
166
- logger.warning(f"Model loading (503), trying fallback")
167
- fallback_config = self._get_fallback_model("response_synthesis")
168
-
169
- # FIXED: Ensure task_type is passed in recursive call
170
- return await self._call_hf_endpoint(fallback_config, prompt, task_type, **kwargs)
171
- else:
172
- logger.error(f"HF API error: {response.status_code} - {response.text}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
  return None
174
 
175
  except ImportError:
 
1
  # llm_router.py - FIXED VERSION
2
  import logging
3
+ import asyncio
4
  from models_config import LLM_CONFIG
5
 
6
  logger = logging.getLogger(__name__)
 
74
  async def _call_hf_endpoint(self, model_config: dict, prompt: str, task_type: str, **kwargs):
75
  """
76
  FIXED: Make actual call to Hugging Face Chat Completions API
77
+ Uses the correct chat completions protocol with retry logic and exponential backoff
78
 
79
  IMPORTANT: task_type parameter is now properly included in the method signature
80
  """
81
+ # Retry configuration
82
+ max_retries = kwargs.get('max_retries', 3)
83
+ initial_delay = kwargs.get('initial_delay', 1.0) # Start with 1 second
84
+ max_delay = kwargs.get('max_delay', 16.0) # Cap at 16 seconds
85
+ timeout = kwargs.get('timeout', 30)
86
+
87
  try:
88
  import requests
89
+ from requests.exceptions import Timeout, RequestException, ConnectionError as RequestsConnectionError
90
 
91
  model_id = model_config["model_id"]
92
 
 
133
  "Content-Type": "application/json"
134
  }
135
 
136
+ # Retry logic with exponential backoff
137
+ last_exception = None
138
+ for attempt in range(max_retries + 1):
139
+ try:
140
+ if attempt > 0:
141
+ # Calculate exponential backoff delay
142
+ delay = min(initial_delay * (2 ** (attempt - 1)), max_delay)
143
+ logger.warning(f"Retry attempt {attempt}/{max_retries} after {delay:.1f}s delay (exponential backoff)")
144
+ await asyncio.sleep(delay)
 
 
145
 
146
+ logger.info(f"Sending request to: {api_url} (attempt {attempt + 1}/{max_retries + 1})")
147
+ logger.debug(f"Payload: {payload}")
 
148
 
149
+ response = requests.post(api_url, json=payload, headers=headers, timeout=timeout)
 
 
 
 
150
 
151
+ if response.status_code == 200:
152
+ result = response.json()
153
+ logger.debug(f"Raw response: {result}")
154
+
155
+ if 'choices' in result and len(result['choices']) > 0:
156
+ generated_text = result['choices'][0]['message']['content']
157
+
158
+ if not generated_text or generated_text.strip() == "":
159
+ logger.warning(f"Empty or invalid response, using fallback")
160
+ return None
161
+
162
+ if attempt > 0:
163
+ logger.info(f"Successfully retrieved response after {attempt} retry attempts")
164
+
165
+ logger.info(f"HF API returned response (length: {len(generated_text)})")
166
+ logger.info("=" * 80)
167
+ logger.info("COMPLETE LLM API RESPONSE:")
168
+ logger.info("=" * 80)
169
+ logger.info(f"Model: {model_id}")
170
+
171
+ # FIXED: task_type is now properly available
172
+ logger.info(f"Task Type: {task_type}")
173
+ logger.info(f"Response Length: {len(generated_text)} characters")
174
+ logger.info("-" * 40)
175
+ logger.info("FULL RESPONSE CONTENT:")
176
+ logger.info("-" * 40)
177
+ logger.info(generated_text)
178
+ logger.info("-" * 40)
179
+ logger.info("END OF LLM RESPONSE")
180
+ logger.info("=" * 80)
181
+ return generated_text
182
+ else:
183
+ logger.error(f"Unexpected response format: {result}")
184
+ return None
185
+ elif response.status_code == 503:
186
+ # Model is loading - this is retryable
187
+ if attempt < max_retries:
188
+ logger.warning(f"Model loading (503), will retry (attempt {attempt + 1}/{max_retries + 1})")
189
+ last_exception = Exception(f"Model loading (503)")
190
+ continue
191
+ else:
192
+ # After max retries, try fallback model
193
+ logger.warning(f"Model loading (503) after {max_retries} retries, trying fallback model")
194
+ fallback_config = self._get_fallback_model(task_type)
195
+
196
+ # FIXED: Ensure task_type is passed in recursive call
197
+ return await self._call_hf_endpoint(fallback_config, prompt, task_type, **kwargs)
198
+ else:
199
+ # Non-retryable HTTP errors
200
+ logger.error(f"HF API error: {response.status_code} - {response.text}")
201
+ return None
202
+
203
+ except Timeout as e:
204
+ last_exception = e
205
+ if attempt < max_retries:
206
+ logger.warning(f"Request timeout (attempt {attempt + 1}/{max_retries + 1}): {str(e)}")
207
+ continue
208
+ else:
209
+ logger.error(f"Request timeout after {max_retries} retries: {str(e)}")
210
+ # Try fallback model on final timeout
211
+ logger.warning("Attempting fallback model due to persistent timeout")
212
+ fallback_config = self._get_fallback_model(task_type)
213
+ return await self._call_hf_endpoint(fallback_config, prompt, task_type, **kwargs)
214
+
215
+ except (RequestsConnectionError, RequestException) as e:
216
+ last_exception = e
217
+ if attempt < max_retries:
218
+ logger.warning(f"Connection error (attempt {attempt + 1}/{max_retries + 1}): {str(e)}")
219
+ continue
220
+ else:
221
+ logger.error(f"Connection error after {max_retries} retries: {str(e)}")
222
+ # Try fallback model on final connection error
223
+ logger.warning("Attempting fallback model due to persistent connection error")
224
+ fallback_config = self._get_fallback_model(task_type)
225
+ return await self._call_hf_endpoint(fallback_config, prompt, task_type, **kwargs)
226
+
227
+ # If we exhausted all retries and didn't return
228
+ if last_exception:
229
+ logger.error(f"Failed after {max_retries} retries. Last error: {last_exception}")
230
  return None
231
 
232
  except ImportError:
orchestrator_engine.py CHANGED
@@ -262,20 +262,360 @@ class MVPOrchestrator:
262
  async def _create_execution_plan(self, intent_result: dict, context: dict) -> dict:
263
  """
264
  Create execution plan based on intent recognition
 
265
  """
266
- # TODO: Implement agent selection and sequencing logic
267
- return {
268
- "agents_to_execute": [],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
269
  "execution_order": "parallel",
270
  "priority": "normal"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
271
  }
272
 
273
  async def _execute_agents(self, execution_plan: dict, user_input: str, context: dict) -> dict:
274
  """
275
  Execute agents in parallel or sequential order based on plan
 
276
  """
277
- # TODO: Implement parallel/sequential agent execution
278
- return {}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
279
 
280
  def _format_final_output(self, response: dict, interaction_id: str, additional_metadata: dict = None) -> dict:
281
  """
 
262
  async def _create_execution_plan(self, intent_result: dict, context: dict) -> dict:
263
  """
264
  Create execution plan based on intent recognition
265
+ Maps intent types to specific execution tasks
266
  """
267
+ primary_intent = intent_result.get('primary_intent', 'casual_conversation')
268
+ secondary_intents = intent_result.get('secondary_intents', [])
269
+ confidence = intent_result.get('confidence_scores', {}).get(primary_intent, 0.7)
270
+
271
+ # Map intent types to execution tasks
272
+ intent_task_mapping = {
273
+ "information_request": {
274
+ "tasks": ["information_gathering", "content_research"],
275
+ "execution_order": "sequential",
276
+ "priority": "high"
277
+ },
278
+ "task_execution": {
279
+ "tasks": ["task_planning", "execution_strategy"],
280
+ "execution_order": "sequential",
281
+ "priority": "high"
282
+ },
283
+ "creative_generation": {
284
+ "tasks": ["creative_brainstorming", "content_ideation"],
285
+ "execution_order": "parallel",
286
+ "priority": "normal"
287
+ },
288
+ "analysis_research": {
289
+ "tasks": ["research_analysis", "data_collection", "pattern_identification"],
290
+ "execution_order": "sequential",
291
+ "priority": "high"
292
+ },
293
+ "troubleshooting": {
294
+ "tasks": ["problem_analysis", "solution_research"],
295
+ "execution_order": "sequential",
296
+ "priority": "high"
297
+ },
298
+ "education_learning": {
299
+ "tasks": ["curriculum_planning", "educational_content"],
300
+ "execution_order": "sequential",
301
+ "priority": "normal"
302
+ },
303
+ "technical_support": {
304
+ "tasks": ["technical_research", "guidance_generation"],
305
+ "execution_order": "sequential",
306
+ "priority": "high"
307
+ },
308
+ "casual_conversation": {
309
+ "tasks": ["context_enrichment"],
310
+ "execution_order": "parallel",
311
+ "priority": "low"
312
+ }
313
+ }
314
+
315
+ # Get task plan for primary intent
316
+ plan = intent_task_mapping.get(primary_intent, {
317
+ "tasks": ["general_research"],
318
  "execution_order": "parallel",
319
  "priority": "normal"
320
+ })
321
+
322
+ # Add secondary intent tasks if confidence is high
323
+ if confidence > 0.7 and secondary_intents:
324
+ for secondary_intent in secondary_intents[:2]: # Limit to 2 secondary intents
325
+ secondary_plan = intent_task_mapping.get(secondary_intent)
326
+ if secondary_plan:
327
+ # Merge tasks, avoiding duplicates
328
+ existing_tasks = set(plan["tasks"])
329
+ for task in secondary_plan["tasks"]:
330
+ if task not in existing_tasks:
331
+ plan["tasks"].append(task)
332
+ existing_tasks.add(task)
333
+
334
+ logger.info(f"Execution plan created for intent '{primary_intent}': {len(plan['tasks'])} tasks, order={plan['execution_order']}")
335
+
336
+ return {
337
+ "agents_to_execute": plan["tasks"],
338
+ "execution_order": plan["execution_order"],
339
+ "priority": plan["priority"],
340
+ "primary_intent": primary_intent,
341
+ "secondary_intents": secondary_intents
342
  }
343
 
344
  async def _execute_agents(self, execution_plan: dict, user_input: str, context: dict) -> dict:
345
  """
346
  Execute agents in parallel or sequential order based on plan
347
+ Actually executes task-specific LLM calls based on intent
348
  """
349
+ tasks = execution_plan.get("agents_to_execute", [])
350
+ execution_order = execution_plan.get("execution_order", "parallel")
351
+ primary_intent = execution_plan.get("primary_intent", "casual_conversation")
352
+
353
+ if not tasks:
354
+ logger.warning("No tasks to execute in execution plan")
355
+ return {}
356
+
357
+ logger.info(f"Executing {len(tasks)} tasks in {execution_order} order for intent '{primary_intent}'")
358
+
359
+ results = {}
360
+
361
+ # Build context summary for task execution
362
+ context_summary = self._build_context_summary(context)
363
+
364
+ # Task prompt templates
365
+ task_prompts = self._build_task_prompts(user_input, context_summary, primary_intent)
366
+
367
+ if execution_order == "parallel":
368
+ # Execute all tasks in parallel
369
+ import asyncio
370
+ task_coroutines = []
371
+ for task in tasks:
372
+ if task in task_prompts:
373
+ coro = self._execute_single_task(task, task_prompts[task])
374
+ task_coroutines.append((task, coro))
375
+ else:
376
+ logger.warning(f"No prompt template for task: {task}")
377
+
378
+ # Execute all tasks concurrently
379
+ if task_coroutines:
380
+ task_results = await asyncio.gather(
381
+ *[coro for _, coro in task_coroutines],
382
+ return_exceptions=True
383
+ )
384
+
385
+ # Map results back to task names
386
+ for (task, _), result in zip(task_coroutines, task_results):
387
+ if isinstance(result, Exception):
388
+ logger.error(f"Task {task} failed: {result}")
389
+ results[task] = {"error": str(result), "status": "failed"}
390
+ else:
391
+ results[task] = result
392
+ logger.info(f"Task {task} completed: {len(str(result))} chars")
393
+ else:
394
+ # Execute tasks sequentially
395
+ previous_results = {}
396
+ for task in tasks:
397
+ if task in task_prompts:
398
+ # Pass previous results to sequential tasks for context
399
+ enhanced_prompt = task_prompts[task]
400
+ if previous_results:
401
+ enhanced_prompt += f"\n\nPrevious task results: {str(previous_results)}"
402
+
403
+ try:
404
+ result = await self._execute_single_task(task, enhanced_prompt)
405
+ results[task] = result
406
+ previous_results[task] = result
407
+ logger.info(f"Task {task} completed: {len(str(result))} chars")
408
+ except Exception as e:
409
+ logger.error(f"Task {task} failed: {e}")
410
+ results[task] = {"error": str(e), "status": "failed"}
411
+ previous_results[task] = results[task]
412
+ else:
413
+ logger.warning(f"No prompt template for task: {task}")
414
+
415
+ logger.info(f"Agent execution complete: {len(results)} results collected")
416
+ return results
417
+
418
+ def _build_context_summary(self, context: dict) -> str:
419
+ """Build a concise summary of context for task execution"""
420
+ summary_parts = []
421
+
422
+ # Extract interaction contexts
423
+ interaction_contexts = context.get('interaction_contexts', [])
424
+ if interaction_contexts:
425
+ recent_summaries = [ic.get('summary', '') for ic in interaction_contexts[-3:]]
426
+ if recent_summaries:
427
+ summary_parts.append(f"Recent conversation topics: {', '.join(recent_summaries)}")
428
+
429
+ # Extract user context
430
+ user_context = context.get('user_context', '')
431
+ if user_context:
432
+ summary_parts.append(f"User background: {user_context[:200]}")
433
+
434
+ return " | ".join(summary_parts) if summary_parts else "No prior context"
435
+
436
    def _build_task_prompts(self, user_input: str, context_summary: str, primary_intent: str) -> dict:
        """Build task-specific prompts for execution.

        Every known task name is mapped to a prompt that embeds the user's
        query plus the condensed context, followed by task-specific
        instructions. _execute_agents looks planned task names up in the
        returned dict and skips any task without a template.

        Args:
            user_input: Raw user query, interpolated into every prompt.
            context_summary: One-line context string from _build_context_summary.
            primary_intent: Recognized primary intent. NOTE(review): currently
                unused in this method — prompts do not vary by intent; kept
                for interface stability.

        Returns:
            dict mapping task name (str) -> fully rendered prompt (str).
        """

        # Shared header prepended to every task template below.
        base_context = f"User Query: {user_input}\nContext: {context_summary}"

        prompts = {
            "information_gathering": f"""
            {base_context}

            Task: Gather comprehensive, accurate information relevant to the user's query.
            Focus on facts, definitions, explanations, and verified information.
            Structure the information clearly and cite key points.
            """,

            "content_research": f"""
            {base_context}

            Task: Research and compile detailed content about the topic.
            Include multiple perspectives, current information, and relevant examples.
            Organize findings logically with clear sections.
            """,

            "task_planning": f"""
            {base_context}

            Task: Create a detailed execution plan for the requested task.
            Break down into clear steps, identify requirements, and outline expected outcomes.
            Consider potential challenges and solutions.
            """,

            "execution_strategy": f"""
            {base_context}

            Task: Develop a strategic approach for task execution.
            Define methodology, best practices, and implementation considerations.
            Provide actionable guidance with clear priorities.
            """,

            "creative_brainstorming": f"""
            {base_context}

            Task: Generate creative ideas and approaches for content creation.
            Explore different angles, styles, and formats.
            Provide diverse creative options with implementation suggestions.
            """,

            "content_ideation": f"""
            {base_context}

            Task: Develop content concepts and detailed ideation.
            Create outlines, themes, and structural frameworks.
            Suggest variations and refinement paths.
            """,

            "research_analysis": f"""
            {base_context}

            Task: Conduct thorough research analysis on the topic.
            Identify key findings, trends, patterns, and insights.
            Analyze different perspectives and methodologies.
            """,

            "data_collection": f"""
            {base_context}

            Task: Collect and organize relevant data points and evidence.
            Gather statistics, examples, case studies, and supporting information.
            Structure data for easy analysis and reference.
            """,

            "pattern_identification": f"""
            {base_context}

            Task: Identify patterns, correlations, and significant relationships.
            Analyze trends, cause-effect relationships, and underlying structures.
            Provide insights based on pattern recognition.
            """,

            "problem_analysis": f"""
            {base_context}

            Task: Analyze the problem in detail.
            Identify root causes, contributing factors, and constraints.
            Break down the problem into components for systematic resolution.
            """,

            "solution_research": f"""
            {base_context}

            Task: Research and evaluate potential solutions.
            Compare approaches, assess pros/cons, and recommend best practices.
            Consider implementation feasibility and effectiveness.
            """,

            "curriculum_planning": f"""
            {base_context}

            Task: Design educational curriculum and learning path.
            Structure content progressively, define learning objectives, and suggest resources.
            Create a comprehensive learning framework.
            """,

            "educational_content": f"""
            {base_context}

            Task: Generate educational content with clear explanations.
            Use teaching methods, examples, analogies, and progressive complexity.
            Make content accessible and engaging for learning.
            """,

            "technical_research": f"""
            {base_context}

            Task: Research technical aspects and solutions.
            Gather technical documentation, best practices, and implementation details.
            Structure technical information clearly with practical guidance.
            """,

            "guidance_generation": f"""
            {base_context}

            Task: Generate step-by-step guidance and instructions.
            Create clear, actionable steps with explanations and troubleshooting tips.
            Ensure guidance is comprehensive and easy to follow.
            """,

            "context_enrichment": f"""
            {base_context}

            Task: Enrich the conversation with relevant context and insights.
            Add helpful background information, connections to previous topics, and engaging details.
            Enhance understanding and engagement.
            """,

            "general_research": f"""
            {base_context}

            Task: Conduct general research and information gathering.
            Compile relevant information, insights, and useful details about the topic.
            Organize findings for clear presentation.
            """
        }

        return prompts
580
+
581
+ async def _execute_single_task(self, task_name: str, prompt: str) -> dict:
582
+ """Execute a single task using the LLM router"""
583
+ try:
584
+ logger.debug(f"Executing task: {task_name}")
585
+ logger.debug(f"Task prompt length: {len(prompt)}")
586
+
587
+ # Use general reasoning for task execution
588
+ result = await self.llm_router.route_inference(
589
+ task_type="general_reasoning",
590
+ prompt=prompt,
591
+ max_tokens=2000,
592
+ temperature=0.7
593
+ )
594
+
595
+ if result:
596
+ return {
597
+ "task": task_name,
598
+ "status": "completed",
599
+ "content": result,
600
+ "content_length": len(str(result))
601
+ }
602
+ else:
603
+ logger.warning(f"Task {task_name} returned empty result")
604
+ return {
605
+ "task": task_name,
606
+ "status": "empty",
607
+ "content": "",
608
+ "content_length": 0
609
+ }
610
+
611
+ except Exception as e:
612
+ logger.error(f"Error executing task {task_name}: {e}", exc_info=True)
613
+ return {
614
+ "task": task_name,
615
+ "status": "error",
616
+ "error": str(e),
617
+ "content": ""
618
+ }
619
 
620
  def _format_final_output(self, response: dict, interaction_id: str, additional_metadata: dict = None) -> dict:
621
  """
src/llm_router.py CHANGED
@@ -1,5 +1,6 @@
1
  # llm_router.py - FIXED VERSION
2
  import logging
 
3
  from .models_config import LLM_CONFIG
4
 
5
  logger = logging.getLogger(__name__)
@@ -73,12 +74,19 @@ class LLMRouter:
73
  async def _call_hf_endpoint(self, model_config: dict, prompt: str, task_type: str, **kwargs):
74
  """
75
  FIXED: Make actual call to Hugging Face Chat Completions API
76
- Uses the correct chat completions protocol
77
 
78
  IMPORTANT: task_type parameter is now properly included in the method signature
79
  """
 
 
 
 
 
 
80
  try:
81
  import requests
 
82
 
83
  model_id = model_config["model_id"]
84
 
@@ -125,51 +133,100 @@ class LLMRouter:
125
  "Content-Type": "application/json"
126
  }
127
 
128
- logger.info(f"Sending request to: {api_url}")
129
- logger.debug(f"Payload: {payload}")
130
-
131
- response = requests.post(api_url, json=payload, headers=headers, timeout=30)
132
-
133
- if response.status_code == 200:
134
- result = response.json()
135
- logger.debug(f"Raw response: {result}")
136
-
137
- if 'choices' in result and len(result['choices']) > 0:
138
- generated_text = result['choices'][0]['message']['content']
139
 
140
- if not generated_text or generated_text.strip() == "":
141
- logger.warning(f"Empty or invalid response, using fallback")
142
- return None
143
 
144
- logger.info(f"HF API returned response (length: {len(generated_text)})")
145
- logger.info("=" * 80)
146
- logger.info("COMPLETE LLM API RESPONSE:")
147
- logger.info("=" * 80)
148
- logger.info(f"Model: {model_id}")
149
 
150
- # FIXED: task_type is now properly available
151
- logger.info(f"Task Type: {task_type}")
152
- logger.info(f"Response Length: {len(generated_text)} characters")
153
- logger.info("-" * 40)
154
- logger.info("FULL RESPONSE CONTENT:")
155
- logger.info("-" * 40)
156
- logger.info(generated_text)
157
- logger.info("-" * 40)
158
- logger.info("END OF LLM RESPONSE")
159
- logger.info("=" * 80)
160
- return generated_text
161
- else:
162
- logger.error(f"Unexpected response format: {result}")
163
- return None
164
- elif response.status_code == 503:
165
- # Model is loading, retry with simpler model
166
- logger.warning(f"Model loading (503), trying fallback")
167
- fallback_config = self._get_fallback_model("response_synthesis")
168
-
169
- # FIXED: Ensure task_type is passed in recursive call
170
- return await self._call_hf_endpoint(fallback_config, prompt, task_type, **kwargs)
171
- else:
172
- logger.error(f"HF API error: {response.status_code} - {response.text}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
173
  return None
174
 
175
  except ImportError:
 
1
  # llm_router.py - FIXED VERSION
2
  import logging
3
+ import asyncio
4
  from .models_config import LLM_CONFIG
5
 
6
  logger = logging.getLogger(__name__)
 
74
  async def _call_hf_endpoint(self, model_config: dict, prompt: str, task_type: str, **kwargs):
75
  """
76
  FIXED: Make actual call to Hugging Face Chat Completions API
77
+ Uses the correct chat completions protocol with retry logic and exponential backoff
78
 
79
  IMPORTANT: task_type parameter is now properly included in the method signature
80
  """
81
+ # Retry configuration
82
+ max_retries = kwargs.get('max_retries', 3)
83
+ initial_delay = kwargs.get('initial_delay', 1.0) # Start with 1 second
84
+ max_delay = kwargs.get('max_delay', 16.0) # Cap at 16 seconds
85
+ timeout = kwargs.get('timeout', 30)
86
+
87
  try:
88
  import requests
89
+ from requests.exceptions import Timeout, RequestException, ConnectionError as RequestsConnectionError
90
 
91
  model_id = model_config["model_id"]
92
 
 
133
  "Content-Type": "application/json"
134
  }
135
 
136
+ # Retry logic with exponential backoff
137
+ last_exception = None
138
+ for attempt in range(max_retries + 1):
139
+ try:
140
+ if attempt > 0:
141
+ # Calculate exponential backoff delay
142
+ delay = min(initial_delay * (2 ** (attempt - 1)), max_delay)
143
+ logger.warning(f"Retry attempt {attempt}/{max_retries} after {delay:.1f}s delay (exponential backoff)")
144
+ await asyncio.sleep(delay)
 
 
145
 
146
+ logger.info(f"Sending request to: {api_url} (attempt {attempt + 1}/{max_retries + 1})")
147
+ logger.debug(f"Payload: {payload}")
 
148
 
149
+ response = requests.post(api_url, json=payload, headers=headers, timeout=timeout)
 
 
 
 
150
 
151
+ if response.status_code == 200:
152
+ result = response.json()
153
+ logger.debug(f"Raw response: {result}")
154
+
155
+ if 'choices' in result and len(result['choices']) > 0:
156
+ generated_text = result['choices'][0]['message']['content']
157
+
158
+ if not generated_text or generated_text.strip() == "":
159
+ logger.warning(f"Empty or invalid response, using fallback")
160
+ return None
161
+
162
+ if attempt > 0:
163
+ logger.info(f"Successfully retrieved response after {attempt} retry attempts")
164
+
165
+ logger.info(f"HF API returned response (length: {len(generated_text)})")
166
+ logger.info("=" * 80)
167
+ logger.info("COMPLETE LLM API RESPONSE:")
168
+ logger.info("=" * 80)
169
+ logger.info(f"Model: {model_id}")
170
+
171
+ # FIXED: task_type is now properly available
172
+ logger.info(f"Task Type: {task_type}")
173
+ logger.info(f"Response Length: {len(generated_text)} characters")
174
+ logger.info("-" * 40)
175
+ logger.info("FULL RESPONSE CONTENT:")
176
+ logger.info("-" * 40)
177
+ logger.info(generated_text)
178
+ logger.info("-" * 40)
179
+ logger.info("END OF LLM RESPONSE")
180
+ logger.info("=" * 80)
181
+ return generated_text
182
+ else:
183
+ logger.error(f"Unexpected response format: {result}")
184
+ return None
185
+ elif response.status_code == 503:
186
+ # Model is loading - this is retryable
187
+ if attempt < max_retries:
188
+ logger.warning(f"Model loading (503), will retry (attempt {attempt + 1}/{max_retries + 1})")
189
+ last_exception = Exception(f"Model loading (503)")
190
+ continue
191
+ else:
192
+ # After max retries, try fallback model
193
+ logger.warning(f"Model loading (503) after {max_retries} retries, trying fallback model")
194
+ fallback_config = self._get_fallback_model(task_type)
195
+
196
+ # FIXED: Ensure task_type is passed in recursive call
197
+ return await self._call_hf_endpoint(fallback_config, prompt, task_type, **kwargs)
198
+ else:
199
+ # Non-retryable HTTP errors
200
+ logger.error(f"HF API error: {response.status_code} - {response.text}")
201
+ return None
202
+
203
+ except Timeout as e:
204
+ last_exception = e
205
+ if attempt < max_retries:
206
+ logger.warning(f"Request timeout (attempt {attempt + 1}/{max_retries + 1}): {str(e)}")
207
+ continue
208
+ else:
209
+ logger.error(f"Request timeout after {max_retries} retries: {str(e)}")
210
+ # Try fallback model on final timeout
211
+ logger.warning("Attempting fallback model due to persistent timeout")
212
+ fallback_config = self._get_fallback_model(task_type)
213
+ return await self._call_hf_endpoint(fallback_config, prompt, task_type, **kwargs)
214
+
215
+ except (RequestsConnectionError, RequestException) as e:
216
+ last_exception = e
217
+ if attempt < max_retries:
218
+ logger.warning(f"Connection error (attempt {attempt + 1}/{max_retries + 1}): {str(e)}")
219
+ continue
220
+ else:
221
+ logger.error(f"Connection error after {max_retries} retries: {str(e)}")
222
+ # Try fallback model on final connection error
223
+ logger.warning("Attempting fallback model due to persistent connection error")
224
+ fallback_config = self._get_fallback_model(task_type)
225
+ return await self._call_hf_endpoint(fallback_config, prompt, task_type, **kwargs)
226
+
227
+ # If we exhausted all retries and didn't return
228
+ if last_exception:
229
+ logger.error(f"Failed after {max_retries} retries. Last error: {last_exception}")
230
  return None
231
 
232
  except ImportError:
src/orchestrator_engine.py CHANGED
@@ -412,20 +412,359 @@ This response has been flagged for potential safety concerns:
412
  async def _create_execution_plan(self, intent_result: dict, context: dict) -> dict:
413
  """
414
  Create execution plan based on intent recognition
 
415
  """
416
- # TODO: Implement agent selection and sequencing logic
417
- return {
418
- "agents_to_execute": [],
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
419
  "execution_order": "parallel",
420
  "priority": "normal"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
421
  }
422
 
423
  async def _execute_agents(self, execution_plan: dict, user_input: str, context: dict) -> dict:
424
  """
425
  Execute agents in parallel or sequential order based on plan
 
426
  """
427
- # TODO: Implement parallel/sequential agent execution
428
- return {}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
429
 
430
  def _format_final_output(self, response: dict, interaction_id: str, additional_metadata: dict = None) -> dict:
431
  """
 
412
  async def _create_execution_plan(self, intent_result: dict, context: dict) -> dict:
413
  """
414
  Create execution plan based on intent recognition
415
+ Maps intent types to specific execution tasks
416
  """
417
+ primary_intent = intent_result.get('primary_intent', 'casual_conversation')
418
+ secondary_intents = intent_result.get('secondary_intents', [])
419
+ confidence = intent_result.get('confidence_scores', {}).get(primary_intent, 0.7)
420
+
421
+ # Map intent types to execution tasks
422
+ intent_task_mapping = {
423
+ "information_request": {
424
+ "tasks": ["information_gathering", "content_research"],
425
+ "execution_order": "sequential",
426
+ "priority": "high"
427
+ },
428
+ "task_execution": {
429
+ "tasks": ["task_planning", "execution_strategy"],
430
+ "execution_order": "sequential",
431
+ "priority": "high"
432
+ },
433
+ "creative_generation": {
434
+ "tasks": ["creative_brainstorming", "content_ideation"],
435
+ "execution_order": "parallel",
436
+ "priority": "normal"
437
+ },
438
+ "analysis_research": {
439
+ "tasks": ["research_analysis", "data_collection", "pattern_identification"],
440
+ "execution_order": "sequential",
441
+ "priority": "high"
442
+ },
443
+ "troubleshooting": {
444
+ "tasks": ["problem_analysis", "solution_research"],
445
+ "execution_order": "sequential",
446
+ "priority": "high"
447
+ },
448
+ "education_learning": {
449
+ "tasks": ["curriculum_planning", "educational_content"],
450
+ "execution_order": "sequential",
451
+ "priority": "normal"
452
+ },
453
+ "technical_support": {
454
+ "tasks": ["technical_research", "guidance_generation"],
455
+ "execution_order": "sequential",
456
+ "priority": "high"
457
+ },
458
+ "casual_conversation": {
459
+ "tasks": ["context_enrichment"],
460
+ "execution_order": "parallel",
461
+ "priority": "low"
462
+ }
463
+ }
464
+
465
+ # Get task plan for primary intent
466
+ plan = intent_task_mapping.get(primary_intent, {
467
+ "tasks": ["general_research"],
468
  "execution_order": "parallel",
469
  "priority": "normal"
470
+ })
471
+
472
+ # Add secondary intent tasks if confidence is high
473
+ if confidence > 0.7 and secondary_intents:
474
+ for secondary_intent in secondary_intents[:2]: # Limit to 2 secondary intents
475
+ secondary_plan = intent_task_mapping.get(secondary_intent)
476
+ if secondary_plan:
477
+ # Merge tasks, avoiding duplicates
478
+ existing_tasks = set(plan["tasks"])
479
+ for task in secondary_plan["tasks"]:
480
+ if task not in existing_tasks:
481
+ plan["tasks"].append(task)
482
+ existing_tasks.add(task)
483
+
484
+ logger.info(f"Execution plan created for intent '{primary_intent}': {len(plan['tasks'])} tasks, order={plan['execution_order']}")
485
+
486
+ return {
487
+ "agents_to_execute": plan["tasks"],
488
+ "execution_order": plan["execution_order"],
489
+ "priority": plan["priority"],
490
+ "primary_intent": primary_intent,
491
+ "secondary_intents": secondary_intents
492
  }
493
 
494
  async def _execute_agents(self, execution_plan: dict, user_input: str, context: dict) -> dict:
495
  """
496
  Execute agents in parallel or sequential order based on plan
497
+ Actually executes task-specific LLM calls based on intent
498
  """
499
+ tasks = execution_plan.get("agents_to_execute", [])
500
+ execution_order = execution_plan.get("execution_order", "parallel")
501
+ primary_intent = execution_plan.get("primary_intent", "casual_conversation")
502
+
503
+ if not tasks:
504
+ logger.warning("No tasks to execute in execution plan")
505
+ return {}
506
+
507
+ logger.info(f"Executing {len(tasks)} tasks in {execution_order} order for intent '{primary_intent}'")
508
+
509
+ results = {}
510
+
511
+ # Build context summary for task execution
512
+ context_summary = self._build_context_summary(context)
513
+
514
+ # Task prompt templates
515
+ task_prompts = self._build_task_prompts(user_input, context_summary, primary_intent)
516
+
517
+ if execution_order == "parallel":
518
+ # Execute all tasks in parallel
519
+ task_coroutines = []
520
+ for task in tasks:
521
+ if task in task_prompts:
522
+ coro = self._execute_single_task(task, task_prompts[task])
523
+ task_coroutines.append((task, coro))
524
+ else:
525
+ logger.warning(f"No prompt template for task: {task}")
526
+
527
+ # Execute all tasks concurrently
528
+ if task_coroutines:
529
+ task_results = await asyncio.gather(
530
+ *[coro for _, coro in task_coroutines],
531
+ return_exceptions=True
532
+ )
533
+
534
+ # Map results back to task names
535
+ for (task, _), result in zip(task_coroutines, task_results):
536
+ if isinstance(result, Exception):
537
+ logger.error(f"Task {task} failed: {result}")
538
+ results[task] = {"error": str(result), "status": "failed"}
539
+ else:
540
+ results[task] = result
541
+ logger.info(f"Task {task} completed: {len(str(result))} chars")
542
+ else:
543
+ # Execute tasks sequentially
544
+ previous_results = {}
545
+ for task in tasks:
546
+ if task in task_prompts:
547
+ # Pass previous results to sequential tasks for context
548
+ enhanced_prompt = task_prompts[task]
549
+ if previous_results:
550
+ enhanced_prompt += f"\n\nPrevious task results: {str(previous_results)}"
551
+
552
+ try:
553
+ result = await self._execute_single_task(task, enhanced_prompt)
554
+ results[task] = result
555
+ previous_results[task] = result
556
+ logger.info(f"Task {task} completed: {len(str(result))} chars")
557
+ except Exception as e:
558
+ logger.error(f"Task {task} failed: {e}")
559
+ results[task] = {"error": str(e), "status": "failed"}
560
+ previous_results[task] = results[task]
561
+ else:
562
+ logger.warning(f"No prompt template for task: {task}")
563
+
564
+ logger.info(f"Agent execution complete: {len(results)} results collected")
565
+ return results
566
+
567
+ def _build_context_summary(self, context: dict) -> str:
568
+ """Build a concise summary of context for task execution"""
569
+ summary_parts = []
570
+
571
+ # Extract interaction contexts
572
+ interaction_contexts = context.get('interaction_contexts', [])
573
+ if interaction_contexts:
574
+ recent_summaries = [ic.get('summary', '') for ic in interaction_contexts[-3:]]
575
+ if recent_summaries:
576
+ summary_parts.append(f"Recent conversation topics: {', '.join(recent_summaries)}")
577
+
578
+ # Extract user context
579
+ user_context = context.get('user_context', '')
580
+ if user_context:
581
+ summary_parts.append(f"User background: {user_context[:200]}")
582
+
583
+ return " | ".join(summary_parts) if summary_parts else "No prior context"
584
+
585
+ def _build_task_prompts(self, user_input: str, context_summary: str, primary_intent: str) -> dict:
586
+ """Build task-specific prompts for execution"""
587
+
588
+ base_context = f"User Query: {user_input}\nContext: {context_summary}"
589
+
590
+ prompts = {
591
+ "information_gathering": f"""
592
+ {base_context}
593
+
594
+ Task: Gather comprehensive, accurate information relevant to the user's query.
595
+ Focus on facts, definitions, explanations, and verified information.
596
+ Structure the information clearly and cite key points.
597
+ """,
598
+
599
+ "content_research": f"""
600
+ {base_context}
601
+
602
+ Task: Research and compile detailed content about the topic.
603
+ Include multiple perspectives, current information, and relevant examples.
604
+ Organize findings logically with clear sections.
605
+ """,
606
+
607
+ "task_planning": f"""
608
+ {base_context}
609
+
610
+ Task: Create a detailed execution plan for the requested task.
611
+ Break down into clear steps, identify requirements, and outline expected outcomes.
612
+ Consider potential challenges and solutions.
613
+ """,
614
+
615
+ "execution_strategy": f"""
616
+ {base_context}
617
+
618
+ Task: Develop a strategic approach for task execution.
619
+ Define methodology, best practices, and implementation considerations.
620
+ Provide actionable guidance with clear priorities.
621
+ """,
622
+
623
+ "creative_brainstorming": f"""
624
+ {base_context}
625
+
626
+ Task: Generate creative ideas and approaches for content creation.
627
+ Explore different angles, styles, and formats.
628
+ Provide diverse creative options with implementation suggestions.
629
+ """,
630
+
631
+ "content_ideation": f"""
632
+ {base_context}
633
+
634
+ Task: Develop content concepts and detailed ideation.
635
+ Create outlines, themes, and structural frameworks.
636
+ Suggest variations and refinement paths.
637
+ """,
638
+
639
+ "research_analysis": f"""
640
+ {base_context}
641
+
642
+ Task: Conduct thorough research analysis on the topic.
643
+ Identify key findings, trends, patterns, and insights.
644
+ Analyze different perspectives and methodologies.
645
+ """,
646
+
647
+ "data_collection": f"""
648
+ {base_context}
649
+
650
+ Task: Collect and organize relevant data points and evidence.
651
+ Gather statistics, examples, case studies, and supporting information.
652
+ Structure data for easy analysis and reference.
653
+ """,
654
+
655
+ "pattern_identification": f"""
656
+ {base_context}
657
+
658
+ Task: Identify patterns, correlations, and significant relationships.
659
+ Analyze trends, cause-effect relationships, and underlying structures.
660
+ Provide insights based on pattern recognition.
661
+ """,
662
+
663
+ "problem_analysis": f"""
664
+ {base_context}
665
+
666
+ Task: Analyze the problem in detail.
667
+ Identify root causes, contributing factors, and constraints.
668
+ Break down the problem into components for systematic resolution.
669
+ """,
670
+
671
+ "solution_research": f"""
672
+ {base_context}
673
+
674
+ Task: Research and evaluate potential solutions.
675
+ Compare approaches, assess pros/cons, and recommend best practices.
676
+ Consider implementation feasibility and effectiveness.
677
+ """,
678
+
679
+ "curriculum_planning": f"""
680
+ {base_context}
681
+
682
+ Task: Design educational curriculum and learning path.
683
+ Structure content progressively, define learning objectives, and suggest resources.
684
+ Create a comprehensive learning framework.
685
+ """,
686
+
687
+ "educational_content": f"""
688
+ {base_context}
689
+
690
+ Task: Generate educational content with clear explanations.
691
+ Use teaching methods, examples, analogies, and progressive complexity.
692
+ Make content accessible and engaging for learning.
693
+ """,
694
+
695
+ "technical_research": f"""
696
+ {base_context}
697
+
698
+ Task: Research technical aspects and solutions.
699
+ Gather technical documentation, best practices, and implementation details.
700
+ Structure technical information clearly with practical guidance.
701
+ """,
702
+
703
+ "guidance_generation": f"""
704
+ {base_context}
705
+
706
+ Task: Generate step-by-step guidance and instructions.
707
+ Create clear, actionable steps with explanations and troubleshooting tips.
708
+ Ensure guidance is comprehensive and easy to follow.
709
+ """,
710
+
711
+ "context_enrichment": f"""
712
+ {base_context}
713
+
714
+ Task: Enrich the conversation with relevant context and insights.
715
+ Add helpful background information, connections to previous topics, and engaging details.
716
+ Enhance understanding and engagement.
717
+ """,
718
+
719
+ "general_research": f"""
720
+ {base_context}
721
+
722
+ Task: Conduct general research and information gathering.
723
+ Compile relevant information, insights, and useful details about the topic.
724
+ Organize findings for clear presentation.
725
+ """
726
+ }
727
+
728
+ return prompts
729
+
730
+ async def _execute_single_task(self, task_name: str, prompt: str) -> dict:
731
+ """Execute a single task using the LLM router"""
732
+ try:
733
+ logger.debug(f"Executing task: {task_name}")
734
+ logger.debug(f"Task prompt length: {len(prompt)}")
735
+
736
+ # Use general reasoning for task execution
737
+ result = await self.llm_router.route_inference(
738
+ task_type="general_reasoning",
739
+ prompt=prompt,
740
+ max_tokens=2000,
741
+ temperature=0.7
742
+ )
743
+
744
+ if result:
745
+ return {
746
+ "task": task_name,
747
+ "status": "completed",
748
+ "content": result,
749
+ "content_length": len(str(result))
750
+ }
751
+ else:
752
+ logger.warning(f"Task {task_name} returned empty result")
753
+ return {
754
+ "task": task_name,
755
+ "status": "empty",
756
+ "content": "",
757
+ "content_length": 0
758
+ }
759
+
760
+ except Exception as e:
761
+ logger.error(f"Error executing task {task_name}: {e}", exc_info=True)
762
+ return {
763
+ "task": task_name,
764
+ "status": "error",
765
+ "error": str(e),
766
+ "content": ""
767
+ }
768
 
769
  def _format_final_output(self, response: dict, interaction_id: str, additional_metadata: dict = None) -> dict:
770
  """