Carlex22222 committed · verified
Commit 530ace3 · 1 Parent(s): 736db8d

Update aduc_framework/engineers/composer.py

aduc_framework/engineers/composer.py CHANGED
@@ -2,7 +2,7 @@
 #
 # Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
 #
-# Version 2.4.0 (JSON Serializable Schemas)
+# Version 2.4.2 (Corrected Translator Call)
 
 import logging
 import json
@@ -38,8 +38,6 @@ def robust_json_parser(raw_text: str) -> dict:
 class Composer:
     def __init__(self, model_map_name: str = "llama_3_2_vision"):
         self.task_templates = self._load_task_templates()
-
-        # FIX: Replaced '...' with valid, JSON-serializable examples.
         self.json_schemas = {
             "PREPROD_01_CATALOG_ASSETS": {
                 "scenarios": [{"id": "string (ex: scenario_01)", "description": "string", "tags": ["exemplo_tag"], "best_photo_id": 0}],
@@ -92,7 +90,12 @@ class Composer:
         return templates
 
     def _talk_to_llama(self, generic_prompt: str, images: Optional[List[Image.Image]] = None, expected_format="text") -> Any:
-        final_model_prompt = prompt_engine_singleton.translate(generic_prompt)
+        # FIX: Pass the 'has_image' flag to the translator.
+        final_model_prompt = prompt_engine_singleton.translate(
+            generic_prompt_content=generic_prompt,
+            has_image=bool(images)
+        )
+
         logger.info(f"COMPOSER: Enviando tarefa para o Llama (Esperando {expected_format}).")
         response_raw = llama_multimodal_manager_singleton.process_turn(prompt_text=final_model_prompt, image_list=images)
         if expected_format == "json":
@@ -110,7 +113,6 @@ class Composer:
         callback: Optional[Callable] = None
     ) -> Generator[Dict[str, Any], None, Dict[str, Any]]:
 
-
         dna = {
             "global_prompt": initial_data["global_prompt"],
             "initial_media_paths": initial_data["user_media_paths"],
 
2
  #
3
  # Copyright (C) August 4, 2025 Carlos Rodrigues dos Santos
4
  #
5
+ # Versão 2.4.2 (Corrected Translator Call)
6
 
7
  import logging
8
  import json
 
38
  class Composer:
39
  def __init__(self, model_map_name: str = "llama_3_2_vision"):
40
  self.task_templates = self._load_task_templates()
 
 
41
  self.json_schemas = {
42
  "PREPROD_01_CATALOG_ASSETS": {
43
  "scenarios": [{"id": "string (ex: scenario_01)", "description": "string", "tags": ["exemplo_tag"], "best_photo_id": 0}],
 
90
  return templates
91
 
92
  def _talk_to_llama(self, generic_prompt: str, images: Optional[List[Image.Image]] = None, expected_format="text") -> Any:
93
+ # CORREÇÃO: Passa a flag 'has_image' para o tradutor.
94
+ final_model_prompt = prompt_engine_singleton.translate(
95
+ generic_prompt_content=generic_prompt,
96
+ has_image=bool(images)
97
+ )
98
+
99
  logger.info(f"COMPOSER: Enviando tarefa para o Llama (Esperando {expected_format}).")
100
  response_raw = llama_multimodal_manager_singleton.process_turn(prompt_text=final_model_prompt, image_list=images)
101
  if expected_format == "json":
 
113
  callback: Optional[Callable] = None
114
  ) -> Generator[Dict[str, Any], None, Dict[str, Any]]:
115
 
 
116
  dna = {
117
  "global_prompt": initial_data["global_prompt"],
118
  "initial_media_paths": initial_data["user_media_paths"],