Model save

Changed files:
- README.md: +115 -9
- generation_config.json: +2 -90

README.md
CHANGED
@@ -1,12 +1,118 @@
 ---
 library_name: transformers
-
-
-
-
-
-
-base_model:
-- imvladikon/hebrew_speech_coursera
-pipeline_tag: automatic-speech-recognition
+base_model: ivrit-ai/whisper-large-v3-turbo
+tags:
+- generated_from_trainer
+model-index:
+- name: whisper-large-v3-turbo-ivrit-ai-coursera-fine-tuned
+  results: []
 ---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# whisper-large-v3-turbo-ivrit-ai-coursera-fine-tuned
+
+This model is a fine-tuned version of [ivrit-ai/whisper-large-v3-turbo](https://huggingface.co/ivrit-ai/whisper-large-v3-turbo) on an unknown dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.2829
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 1e-05
+- train_batch_size: 5
+- eval_batch_size: 5
+- seed: 42
+- optimizer: OptimizerNames.ADAMW_TORCH with betas=(0.9, 0.999) and epsilon=1e-08; no additional optimizer arguments
+- lr_scheduler_type: constant_with_warmup
+- lr_scheduler_warmup_ratio: 0.1
+- lr_scheduler_warmup_steps: 500
+- num_epochs: 10
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss |
+|:-------------:|:------:|:-----:|:---------------:|
+| 0.1907 | 0.1641 | 500 | 0.2266 |
+| 0.2283 | 0.3283 | 1000 | 0.2217 |
+| 0.2253 | 0.4924 | 1500 | 0.2154 |
+| 0.2257 | 0.6566 | 2000 | 0.2080 |
+| 0.2138 | 0.8207 | 2500 | 0.2102 |
+| 0.2153 | 0.9849 | 3000 | 0.2056 |
+| 0.1615 | 1.1490 | 3500 | 0.2128 |
+| 0.1588 | 1.3132 | 4000 | 0.1677 |
+| 0.1628 | 1.4773 | 4500 | 0.1656 |
+| 0.168 | 1.6415 | 5000 | 0.1798 |
+| 0.167 | 1.8056 | 5500 | 0.1710 |
+| 0.1663 | 1.9698 | 6000 | 0.1828 |
+| 0.1297 | 2.1339 | 6500 | 0.1722 |
+| 0.1196 | 2.2981 | 7000 | 0.1762 |
+| 0.1336 | 2.4622 | 7500 | 0.1779 |
+| 0.1258 | 2.6264 | 8000 | 0.1821 |
+| 0.1275 | 2.7905 | 8500 | 0.1796 |
+| 0.1331 | 2.9547 | 9000 | 0.1786 |
+| 0.0988 | 3.1188 | 9500 | 0.1982 |
+| 0.0933 | 3.2830 | 10000 | 0.1888 |
+| 0.0963 | 3.4471 | 10500 | 0.1927 |
+| 0.0946 | 3.6113 | 11000 | 0.1979 |
+| 0.1018 | 3.7754 | 11500 | 0.2031 |
+| 0.1027 | 3.9396 | 12000 | 0.1971 |
+| 0.0795 | 4.1037 | 12500 | 0.2016 |
+| 0.0698 | 4.2679 | 13000 | 0.2017 |
+| 0.0736 | 4.4320 | 13500 | 0.2058 |
+| 0.0747 | 4.5962 | 14000 | 0.2033 |
+| 0.0768 | 4.7603 | 14500 | 0.2057 |
+| 0.0801 | 4.9245 | 15000 | 0.2076 |
+| 0.067 | 5.0886 | 15500 | 0.2196 |
+| 0.0539 | 5.2528 | 16000 | 0.2185 |
+| 0.0563 | 5.4169 | 16500 | 0.2220 |
+| 0.0594 | 5.5811 | 17000 | 0.2265 |
+| 0.0651 | 5.7452 | 17500 | 0.2176 |
+| 0.0655 | 5.9094 | 18000 | 0.2227 |
+| 0.0533 | 6.0735 | 18500 | 0.2387 |
+| 0.0441 | 6.2377 | 19000 | 0.2334 |
+| 0.0474 | 6.4018 | 19500 | 0.2343 |
+| 0.0506 | 6.5660 | 20000 | 0.2387 |
+| 0.0504 | 6.7301 | 20500 | 0.2373 |
+| 0.0502 | 6.8943 | 21000 | 0.2318 |
+| 0.0441 | 7.0584 | 21500 | 0.2524 |
+| 0.0375 | 7.2226 | 22000 | 0.2533 |
+| 0.0379 | 7.3867 | 22500 | 0.2491 |
+| 0.0382 | 7.5509 | 23000 | 0.2635 |
+| 0.0427 | 7.7150 | 23500 | 0.2506 |
+| 0.0439 | 7.8792 | 24000 | 0.2430 |
+| 0.043 | 8.0433 | 24500 | 0.2575 |
+| 0.0296 | 8.2075 | 25000 | 0.2617 |
+| 0.0309 | 8.3716 | 25500 | 0.2797 |
+| 0.0366 | 8.5358 | 26000 | 0.2689 |
+| 0.0351 | 8.6999 | 26500 | 0.2687 |
+| 0.0384 | 8.8641 | 27000 | 0.2643 |
+| 0.0365 | 9.0282 | 27500 | 0.2688 |
+| 0.0265 | 9.1924 | 28000 | 0.2903 |
+| 0.0299 | 9.3565 | 28500 | 0.2742 |
+| 0.0347 | 9.5207 | 29000 | 0.2754 |
+| 0.0311 | 9.6848 | 29500 | 0.2744 |
+| 0.0345 | 9.8490 | 30000 | 0.2829 |
+
+
+### Framework versions
+
+- Transformers 4.48.1
+- Pytorch 2.6.0+cu124
+- Datasets 3.4.1
+- Tokenizers 0.21.1
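The "Intended uses & limitations" section of the card is still a stub, so here is a minimal inference sketch for the Hebrew speech recognition task this checkpoint targets. It is not part of the card: the repo id is taken from the model-index name above (the actual namespace is not stated in the diff), and `sample.wav` stands in for any audio file you supply.

```python
# Hedged sketch: the repo id and audio path are placeholders, not confirmed by the card.
import torch
from transformers import pipeline

device = 0 if torch.cuda.is_available() else -1  # GPU if present, otherwise CPU

asr = pipeline(
    "automatic-speech-recognition",
    model="whisper-large-v3-turbo-ivrit-ai-coursera-fine-tuned",  # hypothetical repo id
    device=device,
)

# Long-form audio is handled by chunking; language/task are forced so the model
# transcribes Hebrew rather than translating.
result = asr(
    "sample.wav",
    chunk_length_s=30,
    generate_kwargs={"language": "he", "task": "transcribe"},
)
print(result["text"])
```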
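The hyperparameter list in the card maps almost one-to-one onto `Seq2SeqTrainingArguments`. The sketch below reconstructs that configuration; it is a guess at the original setup, and the `output_dir`, evaluation strategy, and logging cadence are assumptions not stated in the card.

```python
from transformers import Seq2SeqTrainingArguments

# Reconstructed from the hyperparameters listed in the card; output_dir and the
# eval/logging settings are assumptions.
training_args = Seq2SeqTrainingArguments(
    output_dir="whisper-large-v3-turbo-ivrit-ai-coursera-fine-tuned",
    learning_rate=1e-5,
    per_device_train_batch_size=5,
    per_device_eval_batch_size=5,
    seed=42,
    optim="adamw_torch",                    # OptimizerNames.ADAMW_TORCH
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="constant_with_warmup",
    warmup_ratio=0.1,
    warmup_steps=500,                       # when > 0, warmup_steps takes precedence over the ratio
    num_train_epochs=10,
    eval_strategy="steps",                  # assumed: the card reports validation loss every 500 steps
    eval_steps=500,
    logging_steps=500,
)
```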
generation_config.json
CHANGED
@@ -25,6 +25,7 @@
       14
     ]
   ],
+  "attn_implementation": null,
   "begin_suppress_tokens": [
     220,
     50257
@@ -151,96 +152,7 @@
   "pad_token_id": 50257,
   "prev_sot_token_id": 50362,
   "return_timestamps": false,
-  "suppress_tokens": [
-    1,
-    2,
-    7,
-    8,
-    9,
-    10,
-    14,
-    25,
-    26,
-    27,
-    28,
-    29,
-    31,
-    58,
-    59,
-    60,
-    61,
-    62,
-    63,
-    90,
-    91,
-    92,
-    93,
-    359,
-    503,
-    522,
-    542,
-    873,
-    893,
-    902,
-    918,
-    922,
-    931,
-    1350,
-    1853,
-    1982,
-    2460,
-    2627,
-    3246,
-    3253,
-    3268,
-    3536,
-    3846,
-    3961,
-    4183,
-    4667,
-    6585,
-    6647,
-    7273,
-    9061,
-    9383,
-    10428,
-    10929,
-    11938,
-    12033,
-    12331,
-    12562,
-    13793,
-    14157,
-    14635,
-    15265,
-    15618,
-    16553,
-    16604,
-    18362,
-    18956,
-    20075,
-    21675,
-    22520,
-    26130,
-    26161,
-    26435,
-    28279,
-    29464,
-    31650,
-    32302,
-    32470,
-    36865,
-    42863,
-    47425,
-    49870,
-    50254,
-    50258,
-    50359,
-    50360,
-    50361,
-    50362,
-    50363
-  ],
+  "suppress_tokens": [],
   "task_to_id": {
     "transcribe": 50360,
     "translate": 50359
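On the generation side, this commit empties `suppress_tokens` and records an explicit `"attn_implementation": null`, so decoding no longer masks Whisper's stock list of suppressed token ids. If you want to compare against the previous behaviour, one hedged option is to copy the suppression list back from the base model's generation config rather than maintaining the ids by hand; the fine-tuned repo id below is hypothetical, and it is an assumption that the base model still ships the stock list.

```python
from transformers import GenerationConfig, WhisperForConditionalGeneration

repo_id = "whisper-large-v3-turbo-ivrit-ai-coursera-fine-tuned"  # hypothetical repo id

# After this commit the fine-tuned checkpoint suppresses no tokens during decoding.
gen_cfg = GenerationConfig.from_pretrained(repo_id)
print(gen_cfg.suppress_tokens)  # -> []

# Assumption: the base model's generation config still carries the stock
# Whisper suppression list, so it can simply be copied back.
base_cfg = GenerationConfig.from_pretrained("ivrit-ai/whisper-large-v3-turbo")
gen_cfg.suppress_tokens = base_cfg.suppress_tokens

model = WhisperForConditionalGeneration.from_pretrained(repo_id)
model.generation_config = gen_cfg  # generate() now uses the restored config
```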