daihui.zhang committed on
Commit
4d568e6
·
1 Parent(s): 9ce0bfb

update add fix

Browse files
.gitignore CHANGED
@@ -68,7 +68,7 @@ instance/
68
 
69
  # Sphinx documentation
70
  docs/_build/
71
-
72
  # PyBuilder
73
  .pybuilder/
74
  target/
 
68
 
69
  # Sphinx documentation
70
  docs/_build/
71
+ test/
72
  # PyBuilder
73
  .pybuilder/
74
  target/
main.py ADDED
File without changes
moyoyo_asr_models/Qwen2.5-3B.Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35a1eec88348fd963b06bb2e3ba8d4d27d4f7a79d9ed4bab645f931d7b2b7ac7
3
+ size 2169666016
moyoyo_asr_models/qwen2.5-7b-instruct-q5_0-00001-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:715a0c76b1b074ed65fe0fabf88b1df6f5f638318f15ba9f62a04d493e236d5f
3
+ size 4001112160
moyoyo_asr_models/qwen2.5-7b-instruct-q5_0-00002-of-00002.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a254c888ebffcddf98b0c277433a339d9a8cbf970f246b0b6e6a9195e5d6cfb
3
+ size 1314064416
pyproject.toml CHANGED
@@ -6,12 +6,15 @@ readme = "README.md"
6
  requires-python = ">=3.11"
7
  dependencies = [
8
  "av>=14.2.0",
 
9
  "librosa>=0.11.0",
10
  "numpy>=2.1.3",
11
  "onnxruntime>=1.21.0",
12
  "pyaudio>=0.2.14",
13
  "pydantic>=2.11.2",
 
14
  "setuptools>=78.1.0",
 
15
  "soundfile>=0.13.1",
16
  "torch>=2.6.0",
17
  "tqdm>=4.67.1",
 
6
  requires-python = ">=3.11"
7
  dependencies = [
8
  "av>=14.2.0",
9
+ "fastapi>=0.115.12",
10
  "librosa>=0.11.0",
11
  "numpy>=2.1.3",
12
  "onnxruntime>=1.21.0",
13
  "pyaudio>=0.2.14",
14
  "pydantic>=2.11.2",
15
+ "pydub>=0.25.1",
16
  "setuptools>=78.1.0",
17
+ "silero-vad>=5.1.2",
18
  "soundfile>=0.13.1",
19
  "torch>=2.6.0",
20
  "tqdm>=4.67.1",
requirements.txt CHANGED
@@ -2,6 +2,8 @@
2
  # uv pip compile pyproject.toml -o requirements.txt
3
  annotated-types==0.7.0
4
  # via pydantic
 
 
5
  audioread==3.0.1
6
  # via librosa
7
  av==14.3.0
@@ -16,8 +18,8 @@ coloredlogs==15.0.1
16
  # via onnxruntime
17
  decorator==5.2.1
18
  # via librosa
19
- diskcache==5.6.3
20
- # via llama-cpp-python
21
  filelock==3.18.0
22
  # via torch
23
  flatbuffers==25.2.10
@@ -27,11 +29,11 @@ fsspec==2025.3.2
27
  humanfriendly==10.0
28
  # via coloredlogs
29
  idna==3.10
30
- # via requests
31
- jinja2==3.1.6
32
  # via
33
- # llama-cpp-python
34
- # torch
 
 
35
  joblib==1.4.2
36
  # via
37
  # librosa
@@ -40,8 +42,6 @@ lazy-loader==0.4
40
  # via librosa
41
  librosa==0.11.0
42
  # via trans (pyproject.toml)
43
- llama-cpp-python==0.3.8
44
- # via trans (pyproject.toml)
45
  llvmlite==0.44.0
46
  # via numba
47
  markupsafe==3.0.2
@@ -58,25 +58,23 @@ numpy==2.1.3
58
  # via
59
  # trans (pyproject.toml)
60
  # librosa
61
- # llama-cpp-python
62
  # numba
63
  # onnxruntime
64
- # pywhispercpp
65
  # scikit-learn
66
  # scipy
67
  # soundfile
68
  # soxr
69
  onnxruntime==1.21.0
70
- # via trans (pyproject.toml)
 
 
71
  packaging==24.2
72
  # via
73
  # lazy-loader
74
  # onnxruntime
75
  # pooch
76
  platformdirs==4.3.7
77
- # via
78
- # pooch
79
- # pywhispercpp
80
  pooch==1.8.2
81
  # via librosa
82
  protobuf==6.30.2
@@ -86,15 +84,15 @@ pyaudio==0.2.14
86
  pycparser==2.22
87
  # via cffi
88
  pydantic==2.11.2
89
- # via trans (pyproject.toml)
 
 
90
  pydantic-core==2.33.1
91
  # via pydantic
92
- pywhispercpp==1.3.0
93
  # via trans (pyproject.toml)
94
  requests==2.32.3
95
- # via
96
- # pooch
97
- # pywhispercpp
98
  scikit-learn==1.6.1
99
  # via librosa
100
  scipy==1.15.2
@@ -103,12 +101,18 @@ scipy==1.15.2
103
  # scikit-learn
104
  setuptools==78.1.0
105
  # via trans (pyproject.toml)
 
 
 
 
106
  soundfile==0.13.1
107
  # via
108
  # trans (pyproject.toml)
109
  # librosa
110
  soxr==0.5.0.post1
111
  # via librosa
 
 
112
  sympy==1.13.1
113
  # via
114
  # onnxruntime
@@ -116,15 +120,19 @@ sympy==1.13.1
116
  threadpoolctl==3.6.0
117
  # via scikit-learn
118
  torch==2.6.0
119
- # via trans (pyproject.toml)
120
- tqdm==4.67.1
121
  # via
122
  # trans (pyproject.toml)
123
- # pywhispercpp
 
 
 
 
 
124
  typing-extensions==4.13.1
125
  # via
 
 
126
  # librosa
127
- # llama-cpp-python
128
  # pydantic
129
  # pydantic-core
130
  # torch
 
2
  # uv pip compile pyproject.toml -o requirements.txt
3
  annotated-types==0.7.0
4
  # via pydantic
5
+ anyio==4.9.0
6
+ # via starlette
7
  audioread==3.0.1
8
  # via librosa
9
  av==14.3.0
 
18
  # via onnxruntime
19
  decorator==5.2.1
20
  # via librosa
21
+ fastapi==0.115.12
22
+ # via trans (pyproject.toml)
23
  filelock==3.18.0
24
  # via torch
25
  flatbuffers==25.2.10
 
29
  humanfriendly==10.0
30
  # via coloredlogs
31
  idna==3.10
 
 
32
  # via
33
+ # anyio
34
+ # requests
35
+ jinja2==3.1.6
36
+ # via torch
37
  joblib==1.4.2
38
  # via
39
  # librosa
 
42
  # via librosa
43
  librosa==0.11.0
44
  # via trans (pyproject.toml)
 
 
45
  llvmlite==0.44.0
46
  # via numba
47
  markupsafe==3.0.2
 
58
  # via
59
  # trans (pyproject.toml)
60
  # librosa
 
61
  # numba
62
  # onnxruntime
 
63
  # scikit-learn
64
  # scipy
65
  # soundfile
66
  # soxr
67
  onnxruntime==1.21.0
68
+ # via
69
+ # trans (pyproject.toml)
70
+ # silero-vad
71
  packaging==24.2
72
  # via
73
  # lazy-loader
74
  # onnxruntime
75
  # pooch
76
  platformdirs==4.3.7
77
+ # via pooch
 
 
78
  pooch==1.8.2
79
  # via librosa
80
  protobuf==6.30.2
 
84
  pycparser==2.22
85
  # via cffi
86
  pydantic==2.11.2
87
+ # via
88
+ # trans (pyproject.toml)
89
+ # fastapi
90
  pydantic-core==2.33.1
91
  # via pydantic
92
+ pydub==0.25.1
93
  # via trans (pyproject.toml)
94
  requests==2.32.3
95
+ # via pooch
 
 
96
  scikit-learn==1.6.1
97
  # via librosa
98
  scipy==1.15.2
 
101
  # scikit-learn
102
  setuptools==78.1.0
103
  # via trans (pyproject.toml)
104
+ silero-vad==5.1.2
105
+ # via trans (pyproject.toml)
106
+ sniffio==1.3.1
107
+ # via anyio
108
  soundfile==0.13.1
109
  # via
110
  # trans (pyproject.toml)
111
  # librosa
112
  soxr==0.5.0.post1
113
  # via librosa
114
+ starlette==0.46.1
115
+ # via fastapi
116
  sympy==1.13.1
117
  # via
118
  # onnxruntime
 
120
  threadpoolctl==3.6.0
121
  # via scikit-learn
122
  torch==2.6.0
 
 
123
  # via
124
  # trans (pyproject.toml)
125
+ # silero-vad
126
+ # torchaudio
127
+ torchaudio==2.6.0
128
+ # via silero-vad
129
+ tqdm==4.67.1
130
+ # via trans (pyproject.toml)
131
  typing-extensions==4.13.1
132
  # via
133
+ # anyio
134
+ # fastapi
135
  # librosa
 
136
  # pydantic
137
  # pydantic-core
138
  # torch
transcribe/helpers/__init__.py ADDED
File without changes
transcribe/{translator.py → helpers/translator.py} RENAMED
File without changes
transcribe/helpers/vadprocessor.py ADDED
@@ -0,0 +1,251 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gc
2
+ import time
3
+ import numpy as np
4
+ import onnxruntime
5
+ from datetime import timedelta
6
+ from pydub import AudioSegment
7
+ from silero_vad import load_silero_vad, get_speech_timestamps
8
+ import os
9
+ import logging
10
+
11
+
12
+ class SileroVADProcessor:
13
+ """
14
+ A class for processing audio files using Silero VAD to detect voice activity
15
+ and extract voice segments from audio files.
16
+ """
17
+
18
+ def __init__(self,
19
+ activate_threshold=0.5,
20
+ fusion_threshold=0.3,
21
+ min_speech_duration=0.25,
22
+ max_speech_duration=20,
23
+ min_silence_duration=250,
24
+ sample_rate=16000,
25
+ ort_providers=None):
26
+ """
27
+ Initialize the SileroVADProcessor.
28
+
29
+ Args:
30
+ activate_threshold (float): Threshold for voice activity detection
31
+ fusion_threshold (float): Threshold for merging close speech segments (seconds)
32
+ min_speech_duration (float): Minimum duration of speech to be considered valid (seconds)
33
+ max_speech_duration (float): Maximum duration of speech (seconds)
34
+ min_silence_duration (int): Minimum silence duration (ms)
35
+ sample_rate (int): Sample rate of the audio (8000 or 16000 Hz)
36
+ ort_providers (list): ONNX Runtime providers for acceleration
37
+ """
38
+ # VAD parameters
39
+ self.activate_threshold = activate_threshold
40
+ self.fusion_threshold = fusion_threshold
41
+ self.min_speech_duration = min_speech_duration
42
+ self.max_speech_duration = max_speech_duration
43
+ self.min_silence_duration = min_silence_duration
44
+ self.sample_rate = sample_rate
45
+ self.ort_providers = ort_providers if ort_providers else []
46
+
47
+ # Initialize logger
48
+ self.logger = logging.getLogger(__name__)
49
+
50
+ # Load Silero VAD model
51
+ self._init_onnx_session()
52
+ self.silero_vad = load_silero_vad(onnx=True)
53
+
54
+ def _init_onnx_session(self):
55
+ """Initialize ONNX Runtime session with appropriate settings."""
56
+ session_opts = onnxruntime.SessionOptions()
57
+ session_opts.log_severity_level = 3
58
+ session_opts.inter_op_num_threads = 0
59
+ session_opts.intra_op_num_threads = 0
60
+ session_opts.enable_cpu_mem_arena = True
61
+ session_opts.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
62
+ session_opts.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
63
+
64
+ session_opts.add_session_config_entry("session.intra_op.allow_spinning", "1")
65
+ session_opts.add_session_config_entry("session.inter_op.allow_spinning", "1")
66
+ session_opts.add_session_config_entry("session.set_denormal_as_zero", "1")
67
+
68
+ # Set the session_opts to be used by silero_vad
69
+ # onnxruntime.capi._pybind_state.get_default_session_options(session_opts)
70
+
71
+ def load_audio(self, audio_path):
72
+ """
73
+ Load audio file and prepare it for VAD processing.
74
+
75
+ Args:
76
+ audio_path (str): Path to the audio file
77
+
78
+ Returns:
79
+ numpy.ndarray: Audio data as numpy array
80
+ """
81
+ self.logger.info(f"Loading audio from {audio_path}")
82
+ audio_segment = AudioSegment.from_file(audio_path)
83
+ audio_segment = audio_segment.set_channels(1).set_frame_rate(self.sample_rate)
84
+
85
+ # Convert to numpy array and normalize
86
+ dtype = np.float16 if self.use_gpu_fp16 else np.float32
87
+ audio_array = np.array(audio_segment.get_array_of_samples(), dtype=dtype) * 0.000030517578 # 1/32768
88
+
89
+ self.audio_segment = audio_segment # Store for later use
90
+ return audio_array
91
+
92
+ @property
93
+ def model(self):
94
+ return self.silero_vad
95
+
96
+ def process_timestamps(self, timestamps):
97
+ """
98
+ Process VAD timestamps: filter short segments and merge close segments.
99
+
100
+ Args:
101
+ timestamps (list): List of (start, end) tuples
102
+
103
+ Returns:
104
+ list: Processed list of (start, end) tuples
105
+ """
106
+ # Filter out short durations
107
+ filtered_timestamps = [(start, end) for start, end in timestamps
108
+ if (end - start) >= self.min_speech_duration]
109
+
110
+ # Fuse timestamps in two passes for better merging
111
+ fused_timestamps_1st = []
112
+ for start, end in filtered_timestamps:
113
+ if fused_timestamps_1st and (start - fused_timestamps_1st[-1][1] <= self.fusion_threshold):
114
+ fused_timestamps_1st[-1] = (fused_timestamps_1st[-1][0], end)
115
+ else:
116
+ fused_timestamps_1st.append((start, end))
117
+
118
+ fused_timestamps_2nd = []
119
+ for start, end in fused_timestamps_1st:
120
+ if fused_timestamps_2nd and (start - fused_timestamps_2nd[-1][1] <= self.fusion_threshold):
121
+ fused_timestamps_2nd[-1] = (fused_timestamps_2nd[-1][0], end)
122
+ else:
123
+ fused_timestamps_2nd.append((start, end))
124
+
125
+ return fused_timestamps_2nd
126
+
127
+ def format_time(self, seconds):
128
+ """
129
+ Convert seconds to VTT time format 'hh:mm:ss.mmm'.
130
+
131
+ Args:
132
+ seconds (float): Time in seconds
133
+
134
+ Returns:
135
+ str: Formatted time string
136
+ """
137
+ td = timedelta(seconds=seconds)
138
+ td_sec = td.total_seconds()
139
+ total_seconds = int(td_sec)
140
+ milliseconds = int((td_sec - total_seconds) * 1000)
141
+ hours = total_seconds // 3600
142
+ minutes = (total_seconds % 3600) // 60
143
+ seconds = total_seconds % 60
144
+ return f"{hours:02}:{minutes:02}:{seconds:02}.{milliseconds:03}"
145
+
146
+ def detect_speech(self, audio:np.array):
147
+ """
148
+ Run VAD on the audio file to detect speech segments.
149
+
150
+ Args:
151
+ audio_path (str): Path to the audio file
152
+
153
+ Returns:
154
+ list: List of processed timestamps as (start, end) tuples
155
+ """
156
+ self.logger.info("Starting VAD process")
157
+ start_time = time.time()
158
+ # Get speech timestamps
159
+ raw_timestamps = get_speech_timestamps(
160
+ audio,
161
+ model=self.silero_vad,
162
+ threshold=self.activate_threshold,
163
+ max_speech_duration_s=self.max_speech_duration,
164
+ min_speech_duration_ms=int(self.min_speech_duration * 1000),
165
+ min_silence_duration_ms=self.min_silence_duration,
166
+ return_seconds=True
167
+ )
168
+
169
+ # Convert to simple format and process
170
+ timestamps = [(item['start'], item['end']) for item in raw_timestamps]
171
+ processed_timestamps = self.process_timestamps(timestamps)
172
+
173
+ # Clean up
174
+ del audio
175
+ gc.collect()
176
+
177
+ self.logger.info(f"VAD completed in {time.time() - start_time:.3f} seconds")
178
+ return processed_timestamps
179
+
180
+ """
181
+ Save timestamps in both second and sample indices formats.
182
+
183
+ Args:
184
+ timestamps (list): List of (start, end) tuples
185
+ output_prefix (str): Prefix for output files
186
+ """
187
+ # Save timestamps in seconds (VTT format)
188
+ seconds_path = f"{output_prefix}_timestamps_second.txt"
189
+ with open(seconds_path, "w", encoding='UTF-8') as file:
190
+ self.logger.info("Saving timestamps in seconds format")
191
+ for start, end in timestamps:
192
+ s_time = self.format_time(start)
193
+ e_time = self.format_time(end)
194
+ line = f"{s_time} --> {e_time}\n"
195
+ file.write(line)
196
+
197
+ # Save timestamps in sample indices
198
+ indices_path = f"{output_prefix}_timestamps_indices.txt"
199
+ with open(indices_path, "w", encoding='UTF-8') as file:
200
+ self.logger.info("Saving timestamps in indices format")
201
+ for start, end in timestamps:
202
+ line = f"{int(start * self.sample_rate)} --> {int(end * self.sample_rate)}\n"
203
+ file.write(line)
204
+
205
+ self.logger.info(f"Timestamps saved to {seconds_path} and {indices_path}")
206
+
207
+ def extract_speech_segments(self, audio_segment, timestamps):
208
+ """
209
+ Extract speech segments from the audio and combine them into a single audio file.
210
+
211
+ Args:
212
+ timestamps (list): List of (start, end) tuples indicating speech segments
213
+
214
+ Returns:
215
+ AudioSegment: The combined speech segments
216
+ """
217
+ audio_segment = audio_segment.numpy()
218
+ combined_speech = np.array([], dtype=np.float32)
219
+
220
+ # Extract and combine each speech segment
221
+ for i, (start, end) in enumerate(timestamps):
222
+ # Convert seconds to milliseconds for pydub
223
+ start_ms = int(start * 1000)
224
+ end_ms = int(end * 1000)
225
+
226
+ # Ensure the end time does not exceed the length of the audio segment
227
+ if end_ms > len(audio_segment):
228
+ end_ms = len(audio_segment)
229
+
230
+ # Extract the segment
231
+ segment = audio_segment[start_ms:end_ms]
232
+
233
+ # Add to combined audio
234
+ combined_speech = np.append(combined_speech, segment)
235
+
236
+ return combined_speech
237
+
238
+ def process_audio(self, audio_array:np.array):
239
+ """
240
+ Complete processing pipeline: detect speech, save timestamps, and optionally extract speech.
241
+
242
+ Returns:
243
+ tuple: (timestamps, output_speech_path if extract_speech else None)
244
+ """
245
+
246
+ # Run VAD to detect speech
247
+ timestamps = self.detect_speech(audio_array)
248
+
249
+ combined_speech = self.extract_speech_segments(audio_array, timestamps)
250
+
251
+ return timestamps, combined_speech
transcribe/{whisper.py → helpers/whisper.py} RENAMED
File without changes
transcribe/pipelines/__init__.py CHANGED
@@ -1,4 +1,5 @@
1
 
2
  from .pipe_translate import TranslatePipe
3
  from .pipe_whisper import WhisperPipe
 
4
  from .base import MetaItem
 
1
 
2
  from .pipe_translate import TranslatePipe
3
  from .pipe_whisper import WhisperPipe
4
+ from .pipe_vad import VadPipe
5
  from .base import MetaItem
transcribe/pipelines/base.py CHANGED
@@ -12,6 +12,7 @@ class Segment:
12
  @dataclass
13
  class MetaItem:
14
  segments: list[Segment] = field(default_factory=list)
 
15
  audio: bytes = b''
16
  transcribe_content: str = ''
17
  translate_content: str = ''
 
12
  @dataclass
13
  class MetaItem:
14
  segments: list[Segment] = field(default_factory=list)
15
+ source_audio: bytes = b""
16
  audio: bytes = b''
17
  transcribe_content: str = ''
18
  translate_content: str = ''
transcribe/pipelines/pipe_translate.py CHANGED
@@ -1,7 +1,7 @@
1
 
2
  from .base import MetaItem, BasePipe, Segment
3
  from llama_cpp import Llama
4
- from ..translator import QwenTranslator
5
  from config import LLM_MODEL_PATH, LLM_SYS_PROMPT
6
 
7
 
 
1
 
2
  from .base import MetaItem, BasePipe, Segment
3
  from llama_cpp import Llama
4
+ from ..helpers.translator import QwenTranslator
5
  from config import LLM_MODEL_PATH, LLM_SYS_PROMPT
6
 
7
 
transcribe/pipelines/pipe_vad.py CHANGED
@@ -1,3 +1,70 @@
1
 
2
-
3
  from .base import MetaItem, BasePipe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
 
 
2
  from .base import MetaItem, BasePipe
3
+ from ..helpers.vadprocessor import SileroVADProcessor
4
+ import numpy as np
5
+ from silero_vad import read_audio, get_speech_timestamps,collect_chunks, VADIterator
6
+ import torch
7
+
8
+ class VadPipe(BasePipe):
9
+ model = None
10
+ sample_rate=16000
11
+ window_size_samples = 512
12
+
13
+ @classmethod
14
+ def init(cls):
15
+ if cls.model is None:
16
+ cls.model = SileroVADProcessor(
17
+ activate_threshold=0.5,
18
+ fusion_threshold=0.3,
19
+ min_speech_duration=0.25,
20
+ max_speech_duration=20,
21
+ min_silence_duration=250,
22
+ sample_rate=cls.sample_rate
23
+ )
24
+
25
+ @property
26
+ def vad_iterator(self):
27
+ return VADIterator(self.model.silero_vad, sampling_rate=self.sample_rate,)
28
+
29
+ def process(self, in_data: MetaItem) -> MetaItem:
30
+ source_audio = in_data.source_audio
31
+ source_audio = np.frombuffer(source_audio, dtype=np.float32)
32
+ speech_segments = []
33
+ is_speech_active = False
34
+ # current_segment_end = len(source_audio)
35
+
36
+ for i in range(0, len(source_audio), self.window_size_samples):
37
+
38
+ window = source_audio[i:i+self.window_size_samples]
39
+
40
+ if len(window) < self.window_size_samples:
41
+ padded_window = np.zeros(self.window_size_samples, dtype=np.float32)
42
+ padded_window[:len(window)] = window
43
+ window = padded_window
44
+
45
+ speech_dict = self.vad_iterator(window, return_seconds=False)
46
+ if not speech_dict:
47
+ continue
48
+
49
+ # 计算当前偏移量
50
+
51
+ if speech_dict and 'start' in speech_dict and not is_speech_active:
52
+ is_speech_active = True
53
+ # current_segment_start = speech_dict['start'] + i
54
+
55
+ if is_speech_active:
56
+ speech_segments.append(window)
57
+ # # 如果检测到语音结束
58
+ # if speech_dict and 'end' in speech_dict and is_speech_active:
59
+ # # 调整语音结束时间,加上窗口偏移
60
+ # current_segment_end = min(speech_dict['end'] + i, current_segment_end)
61
+ # is_speech_active = False
62
+ # speech_audio = source_audio[current_segment_start: current_segment_end]
63
+ # speech_segments.append(speech_audio)
64
+
65
+ self.vad_iterator.reset_states()
66
+ combied_audio = np.concatenate(speech_segments, axis=0).tobytes() if len(speech_segments) else b""
67
+ in_data.audio = combied_audio
68
+ in_data.source_audio = b""
69
+
70
+ return in_data
transcribe/pipelines/pipe_whisper.py CHANGED
@@ -1,7 +1,7 @@
1
 
2
 
3
  from .base import MetaItem, BasePipe, Segment
4
- from ..whisper import WhisperCPP
5
 
6
 
7
  class WhisperPipe(BasePipe):
 
1
 
2
 
3
  from .base import MetaItem, BasePipe, Segment
4
+ from ..helpers.whisper import WhisperCPP
5
 
6
 
7
  class WhisperPipe(BasePipe):
transcribe/translatepipes.py CHANGED
@@ -1,4 +1,4 @@
1
- from transcribe.pipelines import WhisperPipe, TranslatePipe, MetaItem
2
  import multiprocessing as mp
3
  import config
4
 
@@ -10,21 +10,23 @@ class TranslatePipes:
10
  # self.result_queue = mp.Queue()
11
 
12
  # whisper 转录
13
- self._whisper_pipe = WhisperPipe()
14
 
15
  # llm 翻译
16
- self._translate_pipe = TranslatePipe()
17
 
18
-
19
- self._whisper_pipe.daemon = True
20
- self._whisper_pipe.start()
21
-
22
- self._translate_pipe.daemon = True
23
- self._translate_pipe.start()
 
24
 
25
  def wait_ready(self):
26
  self._whisper_pipe.wait()
27
  self._translate_pipe.wait()
 
28
 
29
  def translate(self, text, src_lang, dst_lang) -> MetaItem:
30
  item = MetaItem(
@@ -40,13 +42,20 @@ class TranslatePipes:
40
  self._whisper_pipe.input_queue.put(item)
41
  return self._whisper_pipe.output_queue.get()
42
 
 
 
 
 
 
 
43
 
44
  if __name__ == "__main__":
45
  import soundfile
46
  tp = TranslatePipes()
47
  # result = tp.translate("你好,今天天气怎么样?", src_lang="zh", dst_lang="en")
48
  mel, _, = soundfile.read("assets/jfk.flac")
49
- result = tp.transcrible(mel, 'en')
 
50
  print(result)
51
 
52
 
 
1
+ from transcribe.pipelines import WhisperPipe, TranslatePipe, MetaItem, VadPipe
2
  import multiprocessing as mp
3
  import config
4
 
 
10
  # self.result_queue = mp.Queue()
11
 
12
  # whisper 转录
13
+ self._whisper_pipe = self._launch_process(WhisperPipe())
14
 
15
  # llm 翻译
16
+ self._translate_pipe = self._launch_process(TranslatePipe())
17
 
18
+ # vad
19
+ self._vad_pipe = self._launch_process(VadPipe())
20
+
21
+ def _launch_process(self, process_obj):
22
+ process_obj.daemon = True
23
+ process_obj.start()
24
+ return process_obj
25
 
26
  def wait_ready(self):
27
  self._whisper_pipe.wait()
28
  self._translate_pipe.wait()
29
+ self._vad_pipe.wait()
30
 
31
  def translate(self, text, src_lang, dst_lang) -> MetaItem:
32
  item = MetaItem(
 
42
  self._whisper_pipe.input_queue.put(item)
43
  return self._whisper_pipe.output_queue.get()
44
 
45
+ def voice_detect(self, audio_buffer:bytes) -> MetaItem:
46
+ item = MetaItem(source_audio=audio_buffer)
47
+ self._vad_pipe.input_queue.put(item)
48
+ return self._vad_pipe.output_queue.get()
49
+
50
+
51
 
52
  if __name__ == "__main__":
53
  import soundfile
54
  tp = TranslatePipes()
55
  # result = tp.translate("你好,今天天气怎么样?", src_lang="zh", dst_lang="en")
56
  mel, _, = soundfile.read("assets/jfk.flac")
57
+ # result = tp.transcrible(mel, 'en')
58
+ result = tp.voice_detect(mel)
59
  print(result)
60
 
61
 
transcribe/utils.py CHANGED
@@ -2,6 +2,8 @@ import os
2
  import textwrap
3
  from pathlib import Path
4
  import logging
 
 
5
 
6
 
7
  import av
@@ -86,3 +88,7 @@ def resample(file: str, sr: int = 16000):
86
 
87
  output_container.close()
88
  return resampled_file
 
 
 
 
 
2
  import textwrap
3
  from pathlib import Path
4
  import logging
5
+ import numpy as np
6
+ from scipy.io.wavfile import write
7
 
8
 
9
  import av
 
88
 
89
  output_container.close()
90
  return resampled_file
91
+
92
+
93
+ def save_to_wave(filename, data:np.ndarray, sample_rate=16000):
94
+ write(filename, sample_rate, data)
transcribe/whisper_llm_serve.py CHANGED
@@ -5,13 +5,13 @@ import multiprocessing as mp
5
  import numpy as np
6
  from logging import getLogger
7
 
8
- import config
9
  import time
10
  import json
11
  import threading
12
  from .server import ServeClientBase
13
-
14
- from scipy.io.wavfile import write
15
  from api_model import TransResult, Message
16
  from .utils import log_block
17
  from .translatepipes import TranslatePipes
@@ -23,14 +23,11 @@ translate_pipes = TranslatePipes()
23
  translate_pipes.wait_ready()
24
  logger.info("Pipeline is ready.")
25
 
26
- def save_to_wave(filename, data:np.ndarray, sample_rate=16000):
27
- write(filename, sample_rate, data)
28
 
29
 
30
 
31
  class PyWhiperCppServe(ServeClientBase):
32
 
33
-
34
  def __init__(self, websocket, language=None, dst_lang=None, client_uid=None,):
35
  super().__init__(client_uid, websocket)
36
  self.language = language
@@ -42,16 +39,23 @@ class PyWhiperCppServe(ServeClientBase):
42
 
43
  self.lock = threading.Lock()
44
  self.frames_np = None
 
 
45
  self.sample_rate = 16000
46
 
47
  self.send_ready_state()
48
 
49
- self.trans_thread = threading.Thread(target=self.speech_to_text)
50
- self.trans_thread.daemon = True
51
- self.trans_thread.start()
 
 
 
 
 
 
52
 
53
  def send_ready_state(self):
54
-
55
  self.websocket.send(json.dumps({
56
  "uid": self.client_uid,
57
  "message": self.SERVER_READY,
@@ -63,12 +67,34 @@ class PyWhiperCppServe(ServeClientBase):
63
  self.dst_lang = dst_lang
64
 
65
  def add_frames(self, frame_np):
66
- # self._audio_queue.put(frame_np)
67
- with self.lock:
68
- if self.frames_np is None:
69
- self.frames_np = frame_np.copy()
70
- else:
71
- self.frames_np = np.append(self.frames_np,frame_np)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
72
 
73
 
74
  def update_audio_buffer(self, last_offset):
@@ -99,8 +125,10 @@ class PyWhiperCppServe(ServeClientBase):
99
  log_block("LLM translate input", f"{text}")
100
  start_time = time.perf_counter()
101
  ret = translate_pipes.translate(text, self.language, self.dst_lang)
 
102
  log_block("LLM translate time", f"{(time.perf_counter() - start_time):.3f}", "s")
103
- return ret.translate_content
 
104
 
105
 
106
 
@@ -109,8 +137,8 @@ class PyWhiperCppServe(ServeClientBase):
109
  # 当左边确认后,右边段才会进入观察
110
  # 当左边确认后,会从缓冲区中删除对应的buffer,减少下次输入的数据量
111
  left_watch_idx, left_watch_sequences, right_watch_sequences, is_end_sentence = segments_split(segments, audio_buffer)
112
- left_watch_string = "".join(i.text for i in left_watch_sequences)
113
- right_watch_string = "".join(i.text for i in right_watch_sequences)
114
 
115
  if left_watch_idx != 0:
116
  # 将观察字符串临时存储
@@ -121,8 +149,8 @@ class PyWhiperCppServe(ServeClientBase):
121
 
122
  # 整句消除 后两句之前的内容
123
  left_watch_idx, left_watch_sequences, right_watch_sequences, is_end_sentence = sequences_split(segments, audio_buffer)
124
- left_watch_string = "".join(i.text for i in left_watch_sequences)
125
- right_watch_string = "".join(i.text for i in right_watch_sequences)
126
  if left_watch_idx != 0:
127
  return left_watch_idx, left_watch_string, right_watch_string, is_end_sentence
128
 
@@ -157,7 +185,7 @@ class PyWhiperCppServe(ServeClientBase):
157
  # logger.error(f"{e}")
158
 
159
  def handle_transcription_output(self, segments, audio_buffer):
160
- texts = "".join(i.text for i in segments)
161
  if not len(texts):
162
  return
163
  self._segment_manager.handle(texts)
 
5
  import numpy as np
6
  from logging import getLogger
7
 
8
+ from .utils import save_to_wave
9
  import time
10
  import json
11
  import threading
12
  from .server import ServeClientBase
13
+ import queue
14
+ import collections
15
  from api_model import TransResult, Message
16
  from .utils import log_block
17
  from .translatepipes import TranslatePipes
 
23
  translate_pipes.wait_ready()
24
  logger.info("Pipeline is ready.")
25
 
 
 
26
 
27
 
28
 
29
  class PyWhiperCppServe(ServeClientBase):
30
 
 
31
  def __init__(self, websocket, language=None, dst_lang=None, client_uid=None,):
32
  super().__init__(client_uid, websocket)
33
  self.language = language
 
39
 
40
  self.lock = threading.Lock()
41
  self.frames_np = None
42
+ self._frame_queue = queue.Queue()
43
+ self._previous_frame_queue = collections.deque(maxlen=2)
44
  self.sample_rate = 16000
45
 
46
  self.send_ready_state()
47
 
48
+ self.run_in_thread(self.speech_to_text)
49
+ self.run_in_thread(self.get_frame_from_queue)
50
+
51
+ self.text_sep = "" if self.language == "zh" else " "
52
+
53
+ def run_in_thread(self, func):
54
+ t = threading.Thread(target=func)
55
+ t.daemon = True
56
+ t.start()
57
 
58
  def send_ready_state(self):
 
59
  self.websocket.send(json.dumps({
60
  "uid": self.client_uid,
61
  "message": self.SERVER_READY,
 
67
  self.dst_lang = dst_lang
68
 
69
  def add_frames(self, frame_np):
70
+ self._frame_queue.put(frame_np)
71
+
72
+ def get_prev_frame(self, ):
73
+ if len(self._previous_frame_queue) == 2:
74
+ return self._previous_frame_queue[-1]
75
+
76
+
77
+ def get_frame_from_queue(self,):
78
+ while True:
79
+ try:
80
+ frame_np = self._frame_queue.get(timeout=0.1)
81
+ # frame_np = item.source_audio
82
+ # self._previous_frame_queue.appendleft(frame_np.copy())
83
+ # prev_frame_np = self.get_prev_frame()
84
+ # if prev_frame_np is not None:
85
+ # frame_np = np.concatenate([prev_frame_np[int(-0.05 * self.sample_rate):],frame_np], axis=0)
86
+ # item = translate_pipes.voice_detect(frame_np.tobytes())
87
+ # if item.audio == b"":
88
+ # continue
89
+ # frame_np = np.frombuffer(item.audio, dtype=np.float32)
90
+ with self.lock:
91
+ if self.frames_np is None:
92
+ self.frames_np = frame_np.copy()
93
+ else:
94
+ self.frames_np = np.append(self.frames_np,frame_np)
95
+ except queue.Empty:
96
+ pass
97
+
98
 
99
 
100
  def update_audio_buffer(self, last_offset):
 
125
  log_block("LLM translate input", f"{text}")
126
  start_time = time.perf_counter()
127
  ret = translate_pipes.translate(text, self.language, self.dst_lang)
128
+ translated_text = ret.translate_content
129
  log_block("LLM translate time", f"{(time.perf_counter() - start_time):.3f}", "s")
130
+ log_block("LLM translate out", f"{translated_text}")
131
+ return translated_text
132
 
133
 
134
 
 
137
  # 当左边确认后,右边段才会进入观察
138
  # 当左边确认后,会从缓冲区中删除对应的buffer,减少下次输入的数据量
139
  left_watch_idx, left_watch_sequences, right_watch_sequences, is_end_sentence = segments_split(segments, audio_buffer)
140
+ left_watch_string = self.text_sep.join(i.text for i in left_watch_sequences)
141
+ right_watch_string = self.text_sep.join(i.text for i in right_watch_sequences)
142
 
143
  if left_watch_idx != 0:
144
  # 将观察字符串临时存储
 
149
 
150
  # 整句消除 后两句之前的内容
151
  left_watch_idx, left_watch_sequences, right_watch_sequences, is_end_sentence = sequences_split(segments, audio_buffer)
152
+ left_watch_string = self.text_sep.join(i.text for i in left_watch_sequences)
153
+ right_watch_string = self.text_sep.join(i.text for i in right_watch_sequences)
154
  if left_watch_idx != 0:
155
  return left_watch_idx, left_watch_string, right_watch_string, is_end_sentence
156
 
 
185
  # logger.error(f"{e}")
186
 
187
  def handle_transcription_output(self, segments, audio_buffer):
188
+ texts = self.text_sep.join(i.text for i in segments)
189
  if not len(texts):
190
  return
191
  self._segment_manager.handle(texts)
uv.lock CHANGED
@@ -16,6 +16,20 @@ wheels = [
16
  { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 },
17
  ]
18
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  [[package]]
20
  name = "audioop-lts"
21
  version = "0.2.1"
@@ -223,6 +237,20 @@ wheels = [
223
  { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190 },
224
  ]
225
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
226
  [[package]]
227
  name = "filelock"
228
  version = "3.18.0"
@@ -831,6 +859,15 @@ wheels = [
831
  { url = "https://files.pythonhosted.org/packages/12/6f/5596dc418f2e292ffc661d21931ab34591952e2843e7168ea5a52591f6ff/pydantic_core-2.33.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f995719707e0e29f0f41a8aa3bcea6e761a36c9136104d3189eafb83f5cec5e5", size = 2080951 },
832
  ]
833
 
 
 
 
 
 
 
 
 
 
834
  [[package]]
835
  name = "pyreadline3"
836
  version = "3.5.4"
@@ -944,6 +981,29 @@ wheels = [
944
  { url = "https://files.pythonhosted.org/packages/54/21/f43f0a1fa8b06b32812e0975981f4677d28e0f3271601dc88ac5a5b83220/setuptools-78.1.0-py3-none-any.whl", hash = "sha256:3e386e96793c8702ae83d17b853fb93d3e09ef82ec62722e61da5cd22376dcd8", size = 1256108 },
945
  ]
946
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
947
  [[package]]
948
  name = "soundfile"
949
  version = "0.13.1"
@@ -1018,6 +1078,18 @@ wheels = [
1018
  { url = "https://files.pythonhosted.org/packages/34/ae/e3707f6c1bc6f7aa0df600ba8075bfb8a19252140cd595335be60e25f9ee/standard_sunau-3.13.0-py3-none-any.whl", hash = "sha256:53af624a9529c41062f4c2fd33837f297f3baa196b0cfceffea6555654602622", size = 7364 },
1019
  ]
1020
 
 
 
 
 
 
 
 
 
 
 
 
 
1021
  [[package]]
1022
  name = "sympy"
1023
  version = "1.13.1"
@@ -1081,6 +1153,28 @@ wheels = [
1081
  { url = "https://files.pythonhosted.org/packages/88/8b/d60c0491ab63634763be1537ad488694d316ddc4a20eaadd639cedc53971/torch-2.6.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:ff96f4038f8af9f7ec4231710ed4549da1bdebad95923953a25045dcf6fd87e2", size = 66536783 },
1082
  ]
1083
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1084
  [[package]]
1085
  name = "tqdm"
1086
  version = "4.67.1"
@@ -1099,12 +1193,15 @@ version = "0.1.0"
1099
  source = { virtual = "." }
1100
  dependencies = [
1101
  { name = "av" },
 
1102
  { name = "librosa" },
1103
  { name = "numpy" },
1104
  { name = "onnxruntime" },
1105
  { name = "pyaudio" },
1106
  { name = "pydantic" },
 
1107
  { name = "setuptools" },
 
1108
  { name = "soundfile" },
1109
  { name = "torch" },
1110
  { name = "tqdm" },
@@ -1115,12 +1212,15 @@ dependencies = [
1115
  [package.metadata]
1116
  requires-dist = [
1117
  { name = "av", specifier = ">=14.2.0" },
 
1118
  { name = "librosa", specifier = ">=0.11.0" },
1119
  { name = "numpy", specifier = ">=2.1.3" },
1120
  { name = "onnxruntime", specifier = ">=1.21.0" },
1121
  { name = "pyaudio", specifier = ">=0.2.14" },
1122
  { name = "pydantic", specifier = ">=2.11.2" },
 
1123
  { name = "setuptools", specifier = ">=78.1.0" },
 
1124
  { name = "soundfile", specifier = ">=0.13.1" },
1125
  { name = "torch", specifier = ">=2.6.0" },
1126
  { name = "tqdm", specifier = ">=4.67.1" },
 
16
  { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 },
17
  ]
18
 
19
+ [[package]]
20
+ name = "anyio"
21
+ version = "4.9.0"
22
+ source = { registry = "https://pypi.org/simple" }
23
+ dependencies = [
24
+ { name = "idna" },
25
+ { name = "sniffio" },
26
+ { name = "typing-extensions", marker = "python_full_version < '3.13'" },
27
+ ]
28
+ sdist = { url = "https://files.pythonhosted.org/packages/95/7d/4c1bd541d4dffa1b52bd83fb8527089e097a106fc90b467a7313b105f840/anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028", size = 190949 }
29
+ wheels = [
30
+ { url = "https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c", size = 100916 },
31
+ ]
32
+
33
  [[package]]
34
  name = "audioop-lts"
35
  version = "0.2.1"
 
237
  { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190 },
238
  ]
239
 
240
+ [[package]]
241
+ name = "fastapi"
242
+ version = "0.115.12"
243
+ source = { registry = "https://pypi.org/simple" }
244
+ dependencies = [
245
+ { name = "pydantic" },
246
+ { name = "starlette" },
247
+ { name = "typing-extensions" },
248
+ ]
249
+ sdist = { url = "https://files.pythonhosted.org/packages/f4/55/ae499352d82338331ca1e28c7f4a63bfd09479b16395dce38cf50a39e2c2/fastapi-0.115.12.tar.gz", hash = "sha256:1e2c2a2646905f9e83d32f04a3f86aff4a286669c6c950ca95b5fd68c2602681", size = 295236 }
250
+ wheels = [
251
+ { url = "https://files.pythonhosted.org/packages/50/b3/b51f09c2ba432a576fe63758bddc81f78f0c6309d9e5c10d194313bf021e/fastapi-0.115.12-py3-none-any.whl", hash = "sha256:e94613d6c05e27be7ffebdd6ea5f388112e5e430c8f7d6494a9d1d88d43e814d", size = 95164 },
252
+ ]
253
+
254
  [[package]]
255
  name = "filelock"
256
  version = "3.18.0"
 
859
  { url = "https://files.pythonhosted.org/packages/12/6f/5596dc418f2e292ffc661d21931ab34591952e2843e7168ea5a52591f6ff/pydantic_core-2.33.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:f995719707e0e29f0f41a8aa3bcea6e761a36c9136104d3189eafb83f5cec5e5", size = 2080951 },
860
  ]
861
 
862
+ [[package]]
863
+ name = "pydub"
864
+ version = "0.25.1"
865
+ source = { registry = "https://pypi.org/simple" }
866
+ sdist = { url = "https://files.pythonhosted.org/packages/fe/9a/e6bca0eed82db26562c73b5076539a4a08d3cffd19c3cc5913a3e61145fd/pydub-0.25.1.tar.gz", hash = "sha256:980a33ce9949cab2a569606b65674d748ecbca4f0796887fd6f46173a7b0d30f", size = 38326 }
867
+ wheels = [
868
+ { url = "https://files.pythonhosted.org/packages/a6/53/d78dc063216e62fc55f6b2eebb447f6a4b0a59f55c8406376f76bf959b08/pydub-0.25.1-py2.py3-none-any.whl", hash = "sha256:65617e33033874b59d87db603aa1ed450633288aefead953b30bded59cb599a6", size = 32327 },
869
+ ]
870
+
871
  [[package]]
872
  name = "pyreadline3"
873
  version = "3.5.4"
 
981
  { url = "https://files.pythonhosted.org/packages/54/21/f43f0a1fa8b06b32812e0975981f4677d28e0f3271601dc88ac5a5b83220/setuptools-78.1.0-py3-none-any.whl", hash = "sha256:3e386e96793c8702ae83d17b853fb93d3e09ef82ec62722e61da5cd22376dcd8", size = 1256108 },
982
  ]
983
 
984
+ [[package]]
985
+ name = "silero-vad"
986
+ version = "5.1.2"
987
+ source = { registry = "https://pypi.org/simple" }
988
+ dependencies = [
989
+ { name = "onnxruntime" },
990
+ { name = "torch" },
991
+ { name = "torchaudio" },
992
+ ]
993
+ sdist = { url = "https://files.pythonhosted.org/packages/b1/b4/d0311b2e6220a11f8f4699f4a278cb088131573286cdfe804c87c7eb5123/silero_vad-5.1.2.tar.gz", hash = "sha256:c442971160026d2d7aa0ad83f0c7ee86c89797a65289fe625c8ea59fc6fb828d", size = 5098526 }
994
+ wheels = [
995
+ { url = "https://files.pythonhosted.org/packages/98/f7/5ae11d13fbb733cd3bfd7ff1c3a3902e6f55437df4b72307c1f168146268/silero_vad-5.1.2-py3-none-any.whl", hash = "sha256:93b41953d7774b165407fda6b533c119c5803864e367d5034dc626c82cfdf661", size = 5026737 },
996
+ ]
997
+
998
+ [[package]]
999
+ name = "sniffio"
1000
+ version = "1.3.1"
1001
+ source = { registry = "https://pypi.org/simple" }
1002
+ sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 }
1003
+ wheels = [
1004
+ { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 },
1005
+ ]
1006
+
1007
  [[package]]
1008
  name = "soundfile"
1009
  version = "0.13.1"
 
1078
  { url = "https://files.pythonhosted.org/packages/34/ae/e3707f6c1bc6f7aa0df600ba8075bfb8a19252140cd595335be60e25f9ee/standard_sunau-3.13.0-py3-none-any.whl", hash = "sha256:53af624a9529c41062f4c2fd33837f297f3baa196b0cfceffea6555654602622", size = 7364 },
1079
  ]
1080
 
1081
+ [[package]]
1082
+ name = "starlette"
1083
+ version = "0.46.1"
1084
+ source = { registry = "https://pypi.org/simple" }
1085
+ dependencies = [
1086
+ { name = "anyio" },
1087
+ ]
1088
+ sdist = { url = "https://files.pythonhosted.org/packages/04/1b/52b27f2e13ceedc79a908e29eac426a63465a1a01248e5f24aa36a62aeb3/starlette-0.46.1.tar.gz", hash = "sha256:3c88d58ee4bd1bb807c0d1acb381838afc7752f9ddaec81bbe4383611d833230", size = 2580102 }
1089
+ wheels = [
1090
+ { url = "https://files.pythonhosted.org/packages/a0/4b/528ccf7a982216885a1ff4908e886b8fb5f19862d1962f56a3fce2435a70/starlette-0.46.1-py3-none-any.whl", hash = "sha256:77c74ed9d2720138b25875133f3a2dae6d854af2ec37dceb56aef370c1d8a227", size = 71995 },
1091
+ ]
1092
+
1093
  [[package]]
1094
  name = "sympy"
1095
  version = "1.13.1"
 
1153
  { url = "https://files.pythonhosted.org/packages/88/8b/d60c0491ab63634763be1537ad488694d316ddc4a20eaadd639cedc53971/torch-2.6.0-cp313-none-macosx_11_0_arm64.whl", hash = "sha256:ff96f4038f8af9f7ec4231710ed4549da1bdebad95923953a25045dcf6fd87e2", size = 66536783 },
1154
  ]
1155
 
1156
+ [[package]]
1157
+ name = "torchaudio"
1158
+ version = "2.6.0"
1159
+ source = { registry = "https://pypi.org/simple" }
1160
+ dependencies = [
1161
+ { name = "torch" },
1162
+ ]
1163
+ wheels = [
1164
+ { url = "https://files.pythonhosted.org/packages/a9/30/bba293c8300245a09b7f82d3cfc04aee1950228da49c6cdd637d1145b6f5/torchaudio-2.6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c12fc41241b8dfce3ccc1917f1c81a0f92f532d9917706600046f1eb21d2d765", size = 1815253 },
1165
+ { url = "https://files.pythonhosted.org/packages/3e/00/2c69d436c613043f3051210d2f84a4c9062a815fa609c5f54d25ea8bfd07/torchaudio-2.6.0-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:377b177a3d683a9163e4cab5a06f0346dac9ff96fa527477338fd90fc6a2a4b6", size = 3382518 },
1166
+ { url = "https://files.pythonhosted.org/packages/f5/b8/7d4dbbf6b505caddbfccd38e2882e47a791310b32b347f977a0a66efbf80/torchaudio-2.6.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:0f0db5c997d031c34066d8be1c0ce7d2a1f2b6c016a92885b20b00bfeb17b753", size = 1652980 },
1167
+ { url = "https://files.pythonhosted.org/packages/1f/31/417d6955585be76842e9b0159d3801c0b5f9a4ea0db39db1a72bc262c861/torchaudio-2.6.0-cp311-cp311-win_amd64.whl", hash = "sha256:52182f6de4e7b342d139e54b703185d428de9cce3c4cf914a9b2ab2359d192a3", size = 2454430 },
1168
+ { url = "https://files.pythonhosted.org/packages/ac/4a/d71b932bda4171970bdf4997541b5c778daa0e2967ed5009d207fca86ded/torchaudio-2.6.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7d0e4b08c42325bf4b887de9a25c44ed882997001740e1bd7d901f65581cf1ab", size = 1812899 },
1169
+ { url = "https://files.pythonhosted.org/packages/ed/aa/9082e715a673dd8e22b6a60cec7f301e897406023672b2090f8bcd8a5959/torchaudio-2.6.0-cp312-cp312-manylinux1_x86_64.whl", hash = "sha256:715aa21f6bdbd085454c313ae3a2c7cc07bf2e8cf05752f819afb5b4c57f4e6f", size = 3379510 },
1170
+ { url = "https://files.pythonhosted.org/packages/f2/e7/0bcb2e33f4bdec69477344eccfe25c515b90496888095e99f837ea422089/torchaudio-2.6.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:6291d9507dc1d6b4ffe8843fbfb201e6c8270dd8c42ad70bb76226c0ebdcad56", size = 1653523 },
1171
+ { url = "https://files.pythonhosted.org/packages/80/95/29e917905328337c7b104ce81f3bb5e2ad8dc70af2edf1d43f67eb621513/torchaudio-2.6.0-cp312-cp312-win_amd64.whl", hash = "sha256:86d6239792bf94741a41acd6fe3d549faaf0d50e7275d17d076a190bd007e2f9", size = 2449191 },
1172
+ { url = "https://files.pythonhosted.org/packages/fb/73/861afa5864e95fbf42b693e0359b2bf0177b6b5f4274fa4472fd51e5298e/torchaudio-2.6.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:66f2e0bd5ab56fd81419d2f5afb74a9a70141688594646441756c8c24f424a73", size = 1813188 },
1173
+ { url = "https://files.pythonhosted.org/packages/d2/f0/daffd9afa60bd835a2d7980eddfe44524adcb3ee0837486ceae4cd1f68e2/torchaudio-2.6.0-cp313-cp313-manylinux1_x86_64.whl", hash = "sha256:52f15185349c370fc1faa84e8b8b2782c007472db9d586a16bba314130b322f2", size = 3380706 },
1174
+ { url = "https://files.pythonhosted.org/packages/94/7b/887b91372e34119aa140cf67614e5ba901bf6a0db86f2c39e30ff71eec54/torchaudio-2.6.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:b521ea9618fb4c29a6f8071628170c222291f46a48a3bf424cfeb488f54af714", size = 1653553 },
1175
+ { url = "https://files.pythonhosted.org/packages/55/c8/3010878a5e7f15d89450e22769697173c6dc244a0647ddc5386c28b6dacc/torchaudio-2.6.0-cp313-cp313-win_amd64.whl", hash = "sha256:393fa74ec40d167f0170728ea21c9b5e0f830648fd02df7db2bf7e62f64245ec", size = 2449350 },
1176
+ ]
1177
+
1178
  [[package]]
1179
  name = "tqdm"
1180
  version = "4.67.1"
 
1193
  source = { virtual = "." }
1194
  dependencies = [
1195
  { name = "av" },
1196
+ { name = "fastapi" },
1197
  { name = "librosa" },
1198
  { name = "numpy" },
1199
  { name = "onnxruntime" },
1200
  { name = "pyaudio" },
1201
  { name = "pydantic" },
1202
+ { name = "pydub" },
1203
  { name = "setuptools" },
1204
+ { name = "silero-vad" },
1205
  { name = "soundfile" },
1206
  { name = "torch" },
1207
  { name = "tqdm" },
 
1212
  [package.metadata]
1213
  requires-dist = [
1214
  { name = "av", specifier = ">=14.2.0" },
1215
+ { name = "fastapi", specifier = ">=0.115.12" },
1216
  { name = "librosa", specifier = ">=0.11.0" },
1217
  { name = "numpy", specifier = ">=2.1.3" },
1218
  { name = "onnxruntime", specifier = ">=1.21.0" },
1219
  { name = "pyaudio", specifier = ">=0.2.14" },
1220
  { name = "pydantic", specifier = ">=2.11.2" },
1221
+ { name = "pydub", specifier = ">=0.25.1" },
1222
  { name = "setuptools", specifier = ">=78.1.0" },
1223
+ { name = "silero-vad", specifier = ">=5.1.2" },
1224
  { name = "soundfile", specifier = ">=0.13.1" },
1225
  { name = "torch", specifier = ">=2.6.0" },
1226
  { name = "tqdm", specifier = ">=4.67.1" },