| from .base import MetaItem, BasePipe, Segment | |
| from llama_cpp import Llama | |
| from ..helpers.translator import QwenTranslator | |
| from config import LLM_MODEL_PATH, LLM_SYS_PROMPT_EN, LLM_SYS_PROMPT_ZH, LLM_LARGE_MODEL_PATH, ALL_MARKERS | |
class TranslatePipe(BasePipe):
    """Pipeline stage that translates transcribed text with a Qwen LLM.

    The translator model is loaded lazily, once per class, via ``init`` and
    shared by all instances (class-level singleton cache).
    """

    # Class-level cache for the (expensive) QwenTranslator instance.
    translator = None

    @classmethod
    def init(cls):
        """Lazily load the translator model into the class-level cache.

        ``@classmethod`` is required here: without it, calling ``init`` on an
        instance would bind ``cls`` to the instance, and the assignment below
        would create an *instance* attribute — the class cache would stay
        ``None`` and every instance would reload the model.
        """
        if cls.translator is None:
            cls.translator = QwenTranslator(LLM_MODEL_PATH, LLM_SYS_PROMPT_EN, LLM_SYS_PROMPT_ZH)

    def process(self, in_data: MetaItem) -> MetaItem:
        """Translate ``in_data.transcribe_content`` and store the result.

        Empty/whitespace-only input, or input consisting solely of characters
        from ``ALL_MARKERS`` (punctuation/marker noise), yields an empty
        translation instead of invoking the model.

        Args:
            in_data: item carrying the transcript plus source/destination
                language codes.

        Returns:
            The same ``MetaItem`` with ``translate_content`` filled in.
        """
        context = in_data.transcribe_content
        stripped = context.strip()
        if not stripped:
            result = ""
        # Skip the LLM call when the text is nothing but marker characters.
        # Generator form avoids building a throwaway list.
        elif all(ch in ALL_MARKERS for ch in stripped):
            result = ""
        else:
            result = self.translator.translate(
                context, src_lang=in_data.source_language, dst_lang=in_data.destination_language)
        in_data.translate_content = result
        return in_data
class Translate7BPipe(TranslatePipe):
    """Variant of ``TranslatePipe`` backed by the larger (7B) model.

    Inherits ``process`` unchanged; only the model path used to build the
    cached translator differs.
    """

    # Own class-level cache so the large model does not clobber the
    # parent class's translator.
    translator = None

    @classmethod
    def init(cls):
        """Lazily load the 7B translator into this class's cache.

        ``@classmethod`` mirrors the parent's fix: without it, an instance
        call would store the translator on the instance and the class cache
        would never be populated.
        """
        if cls.translator is None:
            cls.translator = QwenTranslator(LLM_LARGE_MODEL_PATH, LLM_SYS_PROMPT_EN, LLM_SYS_PROMPT_ZH)