Update app.py
app.py CHANGED
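In short, this commit touches two areas of app.py. Inside LLMAgent.__init__ it moves the canvas-artifact state (canvas_artifacts, canvas_lock) ahead of the pending-request tracking and relocates the _register_event_handlers() call to just after the TTS setup, while normalizing blank lines between methods throughout the class. At the end of the file it appends roughly 370 new lines: an AI_Agent convenience wrapper, a CanvasArtifact dataclass, and an EnhancedAIAgent wrapper that keeps a per-conversation canvas of extracted artifacts.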
@@ -131,6 +131,7 @@ def RaiseEvent(event: str, data: Any):
 def UnregisterEvent(event: str, handler: Callable):
     EVENT_MANAGER.unregister(event, handler)
 
+
 class LLMAgent:
     """Main Agent Driver !
     Agent For Multiple messages at once ,
@@ -157,6 +158,7 @@ class LLMAgent:
         self.is_running = False
         self._stop_event = Event()
         self.processing_thread = None
+
         # Conversation tracking
         self.conversations: Dict[str, List[LLMMessage]] = {}
         self.max_history_length = 20
@@ -166,17 +168,12 @@ class LLMAgent:
         self.max_tokens = max_tokens
         self.temperature = temperature
         self.async_client = self.CreateClient(base_url, api_key)
+        # Canvas Artifacts - NEW
+        self.canvas_artifacts: Dict[str, List[CanvasArtifact]] = {}
+        self.canvas_lock = Lock()
         # Active requests waiting for responses
         self.pending_requests: Dict[str, LLMRequest] = {}
         self.pending_requests_lock = Lock()
-
-        # Canvas Artifacts - NEW
-        self.canvas_artifacts: Dict[str, List[CanvasArtifact]] = {}
-        self.canvas_lock = Lock()
-
-        # Register internal event handlers
-        self._register_event_handlers()
-
         # Speech synthesis
         try:
             self.tts_engine = pyttsx3.init()
@@ -184,11 +181,12 @@ class LLMAgent:
             self.speech_enabled = True
         except Exception as e:
             console.log(f"[yellow]TTS not available: {e}[/yellow]")
-            self.speech_enabled = False
-
+            self.speech_enabled = False
+        # Register internal event handlers
+        self._register_event_handlers()
+
         # Start the processing thread immediately
         self.start()
-
     def setup_tts(self):
         """Configure text-to-speech engine"""
         if hasattr(self, 'tts_engine'):
@@ -218,18 +216,19 @@ class LLMAgent:
                 console.log(f"[red]TTS Error: {e}[/red]")
         thread = threading.Thread(target=_speak, daemon=True)
         thread.start()
-
+
     async def _default_generate(self, messages: List[Dict[str, str]]) -> str:
         """Default generate function if none provided"""
         return await self.openai_generate(messages)
-
+
     def _register_event_handlers(self):
         """Register internal event handlers for response routing"""
         RegisterEvent("llm_internal_response", self._handle_internal_response)
-
+
     def _handle_internal_response(self, response: LLMResponse):
         """Route responses to the appropriate request handlers"""
         console.log(f"[bold cyan]Handling internal response for: {response.request_id}[/bold cyan]")
+
         request = None
         with self.pending_requests_lock:
             if response.request_id in self.pending_requests:
@@ -239,10 +238,12 @@ class LLMAgent:
             else:
                 console.log(f"No pending request found for: {response.request_id}", style="yellow")
                 return
+
         # Raise the specific response event
         if request.response_event:
             console.log(f"[bold green]Raising event: {request.response_event}[/bold green]")
             RaiseEvent(request.response_event, response)
+
         # Call callback if provided
         if request.callback:
             try:
@@ -250,30 +251,36 @@ class LLMAgent:
                 request.callback(response)
             except Exception as e:
                 console.log(f"Error in callback: {e}", style="bold red")
-
+
     def _add_to_conversation_history(self, conversation_id: str, message: LLMMessage):
         """Add message to conversation history"""
         if conversation_id not in self.conversations:
             self.conversations[conversation_id] = []
+
         self.conversations[conversation_id].append(message)
+
         # Trim history if too long
         if len(self.conversations[conversation_id]) > self.max_history_length * 2:
             self.conversations[conversation_id] = self.conversations[conversation_id][-(self.max_history_length * 2):]
-
+
     def _build_messages_from_conversation(self, conversation_id: str, new_message: LLMMessage) -> List[Dict[str, str]]:
         """Build message list from conversation history"""
         messages = []
+
         # Add system prompt
         if self.system_prompt:
             messages.append({"role": "system", "content": self.system_prompt})
+
         # Add conversation history
         if conversation_id in self.conversations:
             for msg in self.conversations[conversation_id][-self.max_history_length:]:
                 messages.append({"role": msg.role, "content": msg.content})
+
         # Add the new message
         messages.append({"role": new_message.role, "content": new_message.content})
+
         return messages
-
+
     def _process_llm_request(self, request: LLMRequest):
         """Process a single LLM request"""
         console.log(f"[bold green]Processing LLM request: {request.message.message_id}[/bold green]")
@@ -283,10 +290,14 @@ class LLMAgent:
                 request.message.conversation_id or "default",
                 request.message
             )
+
             console.log(f"Calling LLM with {len(messages)} messages")
+
             # Call LLM - Use sync call for thread compatibility
             response_content = self._call_llm_sync(messages)
+
             console.log(f"[bold green]LLM response received: {response_content}...[/bold green]")
+
             # Create response message
             response_message = LLMMessage(
                 role="assistant",
@@ -294,6 +305,7 @@ class LLMAgent:
                 conversation_id=request.message.conversation_id,
                 metadata={"request_id": request.message.message_id}
             )
+
             # Update conversation history
             self._add_to_conversation_history(
                 request.message.conversation_id or "default",
@@ -303,14 +315,17 @@ class LLMAgent:
                 request.message.conversation_id or "default",
                 response_message
             )
+
             # Create and send response
             response = LLMResponse(
                 message=response_message,
                 request_id=request.message.message_id,
                 success=True
             )
+
             console.log(f"[bold blue]Sending internal response for: {request.message.message_id}[/bold blue]")
             RaiseEvent("llm_internal_response", response)
+
         except Exception as e:
             console.log(f"[bold red]Error processing LLM request: {e}[/bold red]")
             traceback.print_exc()
@@ -325,8 +340,9 @@ class LLMAgent:
                 success=False,
                 error=str(e)
             )
+
             RaiseEvent("llm_internal_response", error_response)
-
+
     def _call_llm_sync(self, messages: List[Dict[str, str]]) -> str:
         """Sync call to the LLM with retry logic"""
         console.log(f"Making LLM call to {self.model_id}")
@@ -345,8 +361,8 @@ class LLMAgent:
                 console.log(f"LLM call attempt {attempt + 1} failed: {e}")
                 if attempt == self.max_retries - 1:
                     raise e
-
-
+                # Wait before retry
+
     def _process_queue(self):
         """Main queue processing loop"""
         console.log("[bold cyan]LLM Agent queue processor started[/bold cyan]")
@@ -363,7 +379,7 @@ class LLMAgent:
                 console.log(f"Error in queue processing: {e}", style="bold red")
                 traceback.print_exc()
         console.log("[bold cyan]LLM Agent queue processor stopped[/bold cyan]")
-
+
     def send_message(
         self,
         content: str,
@@ -376,6 +392,7 @@ class LLMAgent:
         """Send a message to the LLM and get response via events"""
         if not self.is_running:
             raise RuntimeError("LLM Agent is not running. Call start() first.")
+
         # Create message
         message = LLMMessage(
             role=role,
@@ -383,16 +400,19 @@ class LLMAgent:
             conversation_id=conversation_id,
             metadata=metadata or {}
         )
+
         # Create request
         request = LLMRequest(
             message=message,
             response_event=response_event,
             callback=callback
         )
+
         # Store in pending requests BEFORE adding to queue
         with self.pending_requests_lock:
             self.pending_requests[message.message_id] = request
             console.log(f"Added to pending requests: {message.message_id}")
+
         # Add to queue
         try:
             self.request_queue.put(request, timeout=5.0)
@@ -404,7 +424,7 @@ class LLMAgent:
                 if message.message_id in self.pending_requests:
                     del self.pending_requests[message.message_id]
             raise RuntimeError("LLM Agent queue is full")
-
+
     async def chat(self, messages: List[Dict[str, str]]) -> str:
         """
         Async chat method that sends message via queue and returns response string.
@@ -413,9 +433,11 @@ class LLMAgent:
         # Create future for the response
         loop = asyncio.get_event_loop()
         response_future = loop.create_future()
+
         def chat_callback(response: LLMResponse):
             """Callback when LLM responds - thread-safe"""
             console.log(f"[bold yellow]✓ CHAT CALLBACK TRIGGERED![/bold yellow]")
+
             if not response_future.done():
                 if response.success:
                     content = response.message.content
@@ -428,15 +450,19 @@ class LLMAgent:
                     loop.call_soon_threadsafe(response_future.set_result, error_msg)
             else:
                 console.log(f"[bold red]Future already done, ignoring callback[/bold red]")
+
         console.log(f"Sending message to LLM agent...")
+
         # Extract the actual message content from the messages list
         user_message = ""
         for msg in messages:
             if msg.get("role") == "user":
                 user_message = msg.get("content", "")
                 break
+
         if not user_message.strip():
             return ""
+
         # Send message with callback using the queue system
         try:
             message_id = self.send_message(
@@ -444,12 +470,15 @@ class LLMAgent:
                 conversation_id="default",
                 callback=chat_callback
             )
+
             console.log(f"Message sent with ID: {message_id}, waiting for response...")
+
             # Wait for the response and return it
             try:
                 response = await asyncio.wait_for(response_future, timeout=self.timeout)
                 console.log(f"[bold green]✓ Chat complete! Response length: {len(response)}[/bold green]")
                 return response
+
             except asyncio.TimeoutError:
                 console.log("[bold red]Response timeout[/bold red]")
                 # Clean up the pending request
@@ -457,11 +486,12 @@ class LLMAgent:
                     if message_id in self.pending_requests:
                         del self.pending_requests[message_id]
                 return "❌ Response timeout - check if LLM server is running"
+
         except Exception as e:
             console.log(f"[bold red]Error sending message: {e}[/bold red]")
             traceback.print_exc()
             return f"❌ Error sending message: {e}"
-
+
     def start(self):
         """Start the LLM agent"""
         if not self.is_running:
@@ -470,7 +500,7 @@ class LLMAgent:
             self.processing_thread = Thread(target=self._process_queue, daemon=True)
             self.processing_thread.start()
             console.log("[bold green]LLM Agent started[/bold green]")
-
+
     def stop(self):
         """Stop the LLM agent"""
         console.log("Stopping LLM Agent...")
@@ -479,19 +509,20 @@ class LLMAgent:
             self.processing_thread.join(timeout=10)
         self.is_running = False
         console.log("LLM Agent stopped")
-
+
     def get_conversation_history(self, conversation_id: str = "default") -> List[LLMMessage]:
         """Get conversation history"""
         return self.conversations.get(conversation_id, [])[:]
-
+
     def clear_conversation(self, conversation_id: str = "default"):
         """Clear conversation history"""
         if conversation_id in self.conversations:
             del self.conversations[conversation_id]
 
+
     async def _chat(self, messages: List[Dict[str, str]]) -> str:
         return await self._generate(messages)
-
+
     @staticmethod
     async def openai_generate(messages: List[Dict[str, str]], max_tokens: int = 8096, temperature: float = 0.4, model: str = BASEMODEL_ID, tools=None) -> str:
         """Static method for generating responses using OpenAI API"""
@@ -508,7 +539,7 @@ class LLMAgent:
         except Exception as e:
             console.log(f"[bold red]Error in openai_generate: {e}[/bold red]")
             return f"[LLM_Agent Error - openai_generate: {str(e)}]"
-
+
     async def _call_(self, messages: List[Dict[str, str]]) -> str:
         """Internal call method using instance client"""
         try:
@@ -523,7 +554,7 @@ class LLMAgent:
         except Exception as e:
             console.log(f"[bold red]Error in _call_: {e}[/bold red]")
             return f"[LLM_Agent Error - _call_: {str(e)}]"
-
+
     @staticmethod
     def CreateClient(base_url: str, api_key: str) -> AsyncOpenAI:
         '''Create async OpenAI Client required for multi tasking'''
@@ -531,7 +562,7 @@ class LLMAgent:
             base_url=base_url,
             api_key=api_key
         )
-
+
     @staticmethod
     async def fetch_available_models(base_url: str, api_key: str) -> List[str]:
         """Fetches available models from the OpenAI API."""
@@ -543,20 +574,21 @@ class LLMAgent:
         except Exception as e:
             console.log(f"[bold red]LLM_Agent Error fetching models: {e}[/bold red]")
             return ["LLM_Agent Error fetching models"]
-
+
     def get_models(self) -> List[str]:
         """Get available models using instance credentials"""
         return asyncio.run(self.fetch_available_models(self.base_url, self.api_key))
+
 
     def get_queue_size(self) -> int:
         """Get current queue size"""
         return self.request_queue.qsize()
-
+
     def get_pending_requests_count(self) -> int:
         """Get number of pending requests"""
         with self.pending_requests_lock:
             return len(self.pending_requests)
-
+
     def get_status(self) -> Dict[str, Any]:
         """Get agent status information"""
         return {
@@ -625,6 +657,376 @@ class LLMAgent:
 
         return await self.chat(messages)
 
+
+class AI_Agent:
+    def __init__(self, model_id: str, system_prompt: str = "You are a helpful assistant. Respond concisely in 1-2 sentences.", history: List[Dict] = None):
+        self.model_id = model_id
+        self.system_prompt = system_prompt
+        self.history = history or []
+        self.conversation_id = f"conv_{uuid.uuid4().hex[:8]}"
+
+        # Create agent instance
+        self.client = LLMAgent(
+            model_id=model_id,
+            system_prompt=self.system_prompt,
+            generate_fn=LLMAgent.openai_generate
+        )
+
+        console.log(f"[bold green]✓ MyAgent initialized with model: {model_id}[/bold green]")
+
+    async def call_llm(self, messages: List[Dict], use_history: bool = True) -> str:
+        """
+        Send messages to LLM and get response
+        Args:
+            messages: List of message dicts with 'role' and 'content'
+            use_history: Whether to include conversation history
+        Returns:
+            str: LLM response
+        """
+        try:
+            console.log(f"[bold yellow]Sending {len(messages)} messages to LLM (use_history: {use_history})...[/bold yellow]")
+
+            # Enhance messages based on history setting
+            enhanced_messages = await self._enhance_messages(messages, use_history)
+
+            response = await self.client.chat(enhanced_messages)
+            console.log(f"[bold green]✓ Response received ({len(response)} chars)[/bold green]")
+
+            # Update conversation history ONLY if we're using history
+            if use_history:
+                self._update_history(messages, response)
+
+            return response
+
+        except Exception as e:
+            console.log(f"[bold red]✗ ERROR: {e}[/bold red]")
+            traceback.print_exc()
+            return f"Error: {str(e)}"
+
+    async def _enhance_messages(self, messages: List[Dict], use_history: bool) -> List[Dict]:
+        """Enhance messages with system prompt and optional history"""
+        enhanced = []
+
+        # Add system prompt if not already in messages
+        has_system = any(msg.get('role') == 'system' for msg in messages)
+        if not has_system and self.system_prompt:
+            enhanced.append({"role": "system", "content": self.system_prompt})
+
+        # Add conversation history only if requested
+        if use_history and self.history:
+            enhanced.extend(self.history[-10:])  # Last 10 messages for context
+
+        # Add current messages
+        enhanced.extend(messages)
+
+        return enhanced
+
+    def _update_history(self, messages: List[Dict], response: str):
+        """Update conversation history with new exchange"""
+        # Add user messages to history
+        for msg in messages:
+            if msg.get('role') in ['user', 'assistant']:
+                self.history.append(msg)
+
+        # Add assistant response to history
+        self.history.append({"role": "assistant", "content": response})
+
+        # Keep history manageable (last 20 exchanges)
+        if len(self.history) > 40:  # 20 user + 20 assistant messages
+            self.history = self.history[-40:]
+
+    async def simple_query(self, query: str) -> str:
+        """Simple one-shot query method - NO history/context"""
+        messages = [{"role": "user", "content": query}]
+        return await self.call_llm(messages, use_history=False)
+
+    async def multi_turn_chat(self, user_input: str) -> str:
+        """Multi-turn chat that maintains context across calls"""
+        messages = [{"role": "user", "content": user_input}]
+        response = await self.call_llm(messages, use_history=True)
+        return response
+
+
+    def get_conversation_summary(self) -> Dict:
+        """Get conversation summary"""
+        return {
+            "conversation_id": self.conversation_id,
+            "total_messages": len(self.history),
+            "user_messages": len([msg for msg in self.history if msg.get('role') == 'user']),
+            "assistant_messages": len([msg for msg in self.history if msg.get('role') == 'assistant']),
+            "recent_exchanges": self.history[-4:] if self.history else []
+        }
+
+    def clear_history(self):
+        """Clear conversation history"""
+        self.history.clear()
+        console.log("[bold yellow]Conversation history cleared[/bold yellow]")
+
+    def update_system_prompt(self, new_prompt: str):
+        """Update the system prompt"""
+        self.system_prompt = new_prompt
+        console.log(f"[bold blue]System prompt updated[/bold blue]")
+
+    def stop(self):
+        """Stop the client gracefully"""
+        if hasattr(self, 'client') and self.client:
+            self.client.stop()
+            console.log("[bold yellow]MyAgent client stopped[/bold yellow]")
+    async def contextual_query(self, query: str, context_messages: List[Dict] = None,
+                               context_text: str = None, context_files: List[str] = None) -> str:
+        """
+        Query with specific context but doesn't update main history
+
+        Args:
+            query: The user question
+            context_messages: List of message dicts for context
+            context_text: Plain text context (will be converted to system message)
+            context_files: List of file paths to read and include as context
+        """
+        messages = []
+
+        # Add system prompt
+        if self.system_prompt:
+            messages.append({"role": "system", "content": self.system_prompt})
+
+        # Handle different context types
+        if context_messages:
+            messages.extend(context_messages)
+
+        if context_text:
+            messages.append({"role": "system", "content": f"Additional context: {context_text}"})
+
+        if context_files:
+            file_context = await self._read_files_context(context_files)
+            if file_context:
+                messages.append({"role": "system", "content": f"File contents:\n{file_context}"})
+
+        # Add the actual query
+        messages.append({"role": "user", "content": query})
+
+        return await self.call_llm(messages, use_history=False)
+
+    async def _read_files_context(self, file_paths: List[str]) -> str:
+        """Read multiple files and return as context string"""
+        contexts = []
+        for file_path in file_paths:
+            try:
+                if os.path.exists(file_path):
+                    with open(file_path, 'r', encoding='utf-8') as f:
+                        content = f.read()
+                        contexts.append(f"--- {os.path.basename(file_path)} ---\n{content}")
+                else:
+                    console.log(f"[bold yellow]File not found: {file_path}[/bold yellow]")
+            except Exception as e:
+                console.log(f"[bold red]Error reading file {file_path}: {e}[/bold red]")
+
+        return "\n\n".join(contexts) if contexts else ""
+
+
+    async def query_with_code_context(self, query: str, code_snippets: List[str] = None,
+                                      code_files: List[str] = None) -> str:
+        """
+        Specialized contextual query for code-related questions
+        """
+        code_context = "CODE CONTEXT:\n"
+
+        if code_snippets:
+            for i, snippet in enumerate(code_snippets, 1):
+                code_context += f"\nSnippet {i}:\n```\n{snippet}\n```\n"
+
+        if code_files:
+            # Read code files and include them
+            for file_path in code_files:
+                if file_path.endswith(('.py', '.js', '.java', '.cpp', '.c', '.html', '.css')):
+                    code_context += f"\nFile: {file_path}\n```\n"
+                    try:
+                        with open(file_path, 'r') as f:
+                            code_context += f.read()
+                    except Exception as e:
+                        code_context += f"Error reading file: {e}"
+                    code_context += "\n```\n"
+
+        return await self.contextual_query(query, context_text=code_context)
+
+    async def multi_context_query(self, query: str, contexts: Dict[str, Any]) -> str:
+        """
+        Advanced contextual query with multiple context types
+
+        Args:
+            query: The user question
+            contexts: Dict with various context types
+                - 'messages': List of message dicts
+                - 'text': Plain text context
+                - 'files': List of file paths
+                - 'urls': List of URLs
+                - 'code': List of code snippets or files
+                - 'metadata': Any additional metadata
+        """
+        all_context_messages = []
+
+        # Build context from different sources
+        if contexts.get('text'):
+            all_context_messages.append({"role": "system", "content": f"Context: {contexts['text']}"})
+
+        if contexts.get('messages'):
+            all_context_messages.extend(contexts['messages'])
+
+        if contexts.get('files'):
+            file_context = await self._read_files_context(contexts['files'])
+            if file_context:
+                all_context_messages.append({"role": "system", "content": f"File Contents:\n{file_context}"})
+
+        if contexts.get('code'):
+            code_context = "\n".join([f"Code snippet {i}:\n```\n{code}\n```"
+                                      for i, code in enumerate(contexts['code'], 1)])
+            all_context_messages.append({"role": "system", "content": f"Code Context:\n{code_context}"})
+
+        if contexts.get('metadata'):
+            all_context_messages.append({"role": "system", "content": f"Metadata: {contexts['metadata']}"})
+
+        return await self.contextual_query(query, context_messages=all_context_messages)
+
+
+console = Console()
+
+# --- Canvas Artifact Support ---
+@dataclass
+class CanvasArtifact:
+    id: str
+    type: str  # 'code', 'diagram', 'text', 'image'
+    content: str
+    title: str
+    timestamp: float
+    metadata: Dict[str, Any]
+
+class EnhancedAIAgent:
+    """
+    Wrapper around your AI_Agent that adds canvas/artifact management
+    without modifying the original agent.
+    """
+    def __init__(self, ai_agent):
+        self.agent = ai_agent
+        self.canvas_artifacts: Dict[str, List[CanvasArtifact]] = {}
+        self.max_canvas_artifacts = 50
+        console.log("[bold green]✓ Enhanced AI Agent wrapper initialized[/bold green]")
+
+    def add_artifact_to_canvas(self, conversation_id: str, content: str,
+                               artifact_type: str = "code", title: str = None):
+        """Add artifacts to the collaborative canvas"""
+        if conversation_id not in self.canvas_artifacts:
+            self.canvas_artifacts[conversation_id] = []
+
+        artifact = CanvasArtifact(
+            id=str(uuid.uuid4())[:8],
+            type=artifact_type,
+            content=content,
+            title=title or f"{artifact_type}_{len(self.canvas_artifacts[conversation_id]) + 1}",
+            timestamp=time.time(),
+            metadata={"conversation_id": conversation_id}
+        )
+
+        self.canvas_artifacts[conversation_id].append(artifact)
+
+        # Keep only recent artifacts
+        if len(self.canvas_artifacts[conversation_id]) > self.max_canvas_artifacts:
+            self.canvas_artifacts[conversation_id] = self.canvas_artifacts[conversation_id][-self.max_canvas_artifacts:]
+
+        console.log(f"[green]Added artifact to canvas: {artifact.title}[/green]")
+        return artifact
+
+    def get_canvas_context(self, conversation_id: str) -> str:
+        """Get formatted canvas context for LLM prompts"""
+        if conversation_id not in self.canvas_artifacts or not self.canvas_artifacts[conversation_id]:
+            return ""
+
+        context_lines = ["\n=== COLLABORATIVE CANVAS ARTIFACTS ==="]
+        for artifact in self.canvas_artifacts[conversation_id][-10:]:  # Last 10 artifacts
+            context_lines.append(f"\n--- {artifact.title} [{artifact.type.upper()}] ---")
+            preview = artifact.content[:500] + "..." if len(artifact.content) > 500 else artifact.content
+            context_lines.append(preview)
+
+        return "\n".join(context_lines) + "\n=================================\n"
+
+    async def chat_with_canvas(self, message: str, conversation_id: str = "default",
+                               include_canvas: bool = True) -> str:
+        """Enhanced chat that includes canvas context"""
+        # Build context with canvas artifacts if requested
+        full_message = message
+        if include_canvas:
+            canvas_context = self.get_canvas_context(conversation_id)
+            if canvas_context:
+                full_message = f"{canvas_context}\n\nUser Query: {message}"
+
+        try:
+            # Use your original agent's multi_turn_chat method
+            response = await self.agent.multi_turn_chat(full_message)
+
+            # Auto-extract and add code artifacts to canvas
+            self._extract_artifacts_to_canvas(response, conversation_id)
+
+            return response
+
+        except Exception as e:
+            error_msg = f"Error in chat_with_canvas: {str(e)}"
+            console.log(f"[red]{error_msg}[/red]")
+            return error_msg
+
+    def _extract_artifacts_to_canvas(self, response: str, conversation_id: str):
+        """Automatically extract code blocks and add to canvas"""
+        # Find all code blocks with optional language specification
+        code_blocks = re.findall(r'```(?:(\w+)\n)?(.*?)```', response, re.DOTALL)
+        for i, (lang, code_block) in enumerate(code_blocks):
+            if len(code_block.strip()) > 10:  # Only add substantial code blocks
+                self.add_artifact_to_canvas(
+                    conversation_id,
+                    code_block.strip(),
+                    "code",
+                    f"code_snippet_{lang or 'unknown'}_{len(self.canvas_artifacts.get(conversation_id, [])) + 1}"
+                )
+
+    def get_canvas_summary(self, conversation_id: str) -> List[Dict]:
+        """Get summary of canvas artifacts for display"""
+        if conversation_id not in self.canvas_artifacts:
+            return []
+
+        artifacts = []
+        for artifact in reversed(self.canvas_artifacts[conversation_id]):  # Newest first
+            artifacts.append({
+                "id": artifact.id,
+                "type": artifact.type.upper(),
+                "title": artifact.title,
+                "preview": artifact.content[:100] + "..." if len(artifact.content) > 100 else artifact.content,
+                "timestamp": time.strftime("%H:%M:%S", time.localtime(artifact.timestamp))
+            })
+
+        return artifacts
+
+    def get_artifact_by_id(self, conversation_id: str, artifact_id: str) -> Optional[CanvasArtifact]:
+        """Get specific artifact by ID"""
+        if conversation_id not in self.canvas_artifacts:
+            return None
+
+        for artifact in self.canvas_artifacts[conversation_id]:
+            if artifact.id == artifact_id:
+                return artifact
+        return None
+
+    def clear_canvas(self, conversation_id: str = "default"):
+        """Clear canvas artifacts"""
+        if conversation_id in self.canvas_artifacts:
+            self.canvas_artifacts[conversation_id] = []
+            console.log(f"[yellow]Cleared canvas: {conversation_id}[/yellow]")
+
+    def get_latest_code_artifact(self, conversation_id: str) -> Optional[str]:
+        """Get the most recent code artifact content"""
+        if conversation_id not in self.canvas_artifacts:
+            return None
+
+        for artifact in reversed(self.canvas_artifacts[conversation_id]):
+            if artifact.type == "code":
+                return artifact.content
+        return None
+
 
 console = Console()
 
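For orientation, here is a minimal usage sketch of the newly added classes. It is an illustration only, not part of the commit: it assumes app.py is importable as a module named app, that BASEMODEL_ID and an OpenAI-compatible endpoint are configured as elsewhere in the file, and the main() helper is hypothetical.

import asyncio

from app import AI_Agent, EnhancedAIAgent, BASEMODEL_ID  # hypothetical import path

async def main():
    # One-shot query without history, then a multi-turn exchange that keeps context.
    agent = AI_Agent(model_id=BASEMODEL_ID)
    print(await agent.simple_query("What does a message queue do?"))
    print(await agent.multi_turn_chat("Summarize that in one sentence."))

    # Canvas wrapper: code blocks found in responses are captured as artifacts.
    canvas_agent = EnhancedAIAgent(agent)
    await canvas_agent.chat_with_canvas("Write a Python function that reverses a string.")
    print(canvas_agent.get_canvas_summary("default"))

    agent.stop()

if __name__ == "__main__":
    asyncio.run(main())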