Commit 3a31e45
1 Parent(s): 182e12e

Update esc-datasets.py

esc-datasets.py CHANGED (+47 -46)
@@ -32,8 +32,6 @@ from pathlib import Path
 from huggingface_hub import HfApi, HfFolder
 import datasets
 
-from .cv_release_stats import STATS as _COMMON_VOICE_STATS
-
 
 _DESCRIPTIONS = {
     "ami": """
@@ -391,7 +389,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
 
             transcriptions[audio_filename] = {
                 "id": _id,
-                "text": text,
+                "text": text if split != "eval" else "",
             }
 
         features = ["id", "text"]
@@ -438,6 +436,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     "local_extracted_archive_paths": local_extracted_archive_paths[subconfig],
                     "archives": [dl_manager.iter_archive(path) for path in archive_paths[subconfig]],
                     "meta_path": meta_path[subconfig],
+                    "is_test": False,
                 },
             ),
             datasets.SplitGenerator(
@@ -446,6 +445,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     "local_extracted_archive_paths": local_extracted_archive_paths["dev"],
                     "archives": [dl_manager.iter_archive(path) for path in archive_paths["dev"]],
                     "meta_path": meta_path["dev"],
+                    "is_test": False,
                 },
             ),
             datasets.SplitGenerator(
@@ -454,11 +454,12 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     "local_extracted_archive_paths": local_extracted_archive_paths["test"],
                     "archives": [dl_manager.iter_archive(path) for path in archive_paths["test"]],
                     "meta_path": meta_path["test"],
+                    "is_test": True,
                 },
             ),
         ]
 
-    def _spgispeech_generate_examples(self, local_extracted_archive_paths, archives, meta_path):
+    def _spgispeech_generate_examples(self, local_extracted_archive_paths, archives, meta_path, is_test):
         # define the expected metadata dict keys,
         # some files have metadata with erroneous entries that we have to filter out
         dict_keys = {"id": "wav_filename", "text": "transcript"}
@@ -482,6 +483,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                 # get the .wav filename by removing the directory path from the audio filename
                 wav_filename = "/".join(audio_filename.split("/")[-2:])
                 example = dict(metadata[wav_filename])
+                if is_test: example["text"] = ""
                 example["audio"] = {"path": path, "bytes": audio_file.read()}
                 example["dataset"] = "spgispeech"
                 yield audio_filename, example
@@ -502,7 +504,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
             split: _VOXPOPULI_METADATA_PATH.format(split=split) for split in splits
         }
 
-        dl_manager.download_config.num_proc = len(audio_urls["train"])
+        dl_manager.download_config.num_proc = len(audio_urls["train"]) // 4
         meta_paths = dl_manager.download_and_extract(meta_urls)
         audio_paths = dl_manager.download(audio_urls)
 
@@ -519,6 +521,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["train"]],
                     "local_extracted_archives_paths": local_extracted_audio_paths["train"],
                     "meta_path": meta_paths["train"],
+                    "is_test": False,
                 }
             ),
             datasets.SplitGenerator(
@@ -527,6 +530,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["dev"]],
                     "local_extracted_archives_paths": local_extracted_audio_paths["dev"],
                     "meta_path": meta_paths["dev"],
+                    "is_test": False,
                 }
             ),
             datasets.SplitGenerator(
@@ -535,11 +539,12 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     "audio_archives": [dl_manager.iter_archive(archive) for archive in audio_paths["test"]],
                     "local_extracted_archives_paths": local_extracted_audio_paths["test"],
                     "meta_path": meta_paths["test"],
+                    "is_test": True,
                 }
             ),
         ]
 
-    def _voxpopuli_generate_examples(self, audio_archives, local_extracted_archives_paths, meta_path):
+    def _voxpopuli_generate_examples(self, audio_archives, local_extracted_archives_paths, meta_path, is_test):
         assert len(audio_archives) == len(local_extracted_archives_paths)
 
         logging.info("Reading voxpopuli metadata.")
@@ -553,7 +558,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
 
             yield audio_id, {
                 "id": audio_id,
-                "text": metadata[audio_id]["normalized_text"].lower(),
+                "text": metadata[audio_id]["normalized_text"].lower() if not is_test else "",
                 "audio": {"path": path, "bytes": audio_file.read()},
                 "dataset": "voxpopuli",
             }
@@ -572,6 +577,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "local_extracted_archives": [local_extracted_archives.get(train_name) for train_name in train_splits],
                     "archives": [dl_manager.iter_archive(archive_paths[train_name]) for train_name in train_splits],
+                    "is_test": False,
                 },
             )
         ]
@@ -581,6 +587,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "local_extracted_archives": [local_extracted_archives.get("dev.clean")],
                     "archives": [dl_manager.iter_archive(archive_paths["dev.clean"])],
+                    "is_test": False,
                 },
             ),
             datasets.SplitGenerator(
@@ -588,6 +595,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "local_extracted_archives": [local_extracted_archives.get("dev.other")],
                     "archives": [dl_manager.iter_archive(archive_paths["dev.other"])],
+                    "is_test": False,
                 },
             ),
         ]
@@ -597,6 +605,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "local_extracted_archives": [local_extracted_archives.get("test.clean")],
                     "archives": [dl_manager.iter_archive(archive_paths["test.clean"])],
+                    "is_test": True,
                 },
             ),
             datasets.SplitGenerator(
@@ -604,12 +613,13 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "local_extracted_archives": [local_extracted_archives.get("test.other")],
                     "archives": [dl_manager.iter_archive(archive_paths["test.other"])],
+                    "is_test": True,
                 },
             ),
         ]
         return train_split + dev_splits + test_splits
 
-    def _librispeech_generate_examples(self, archives, local_extracted_archives):
+    def _librispeech_generate_examples(self, archives, local_extracted_archives, is_test):
         key = 0
         audio_data = {}
         transcripts = []
@@ -637,7 +647,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     {
                         "id": id_,
                         "file": audio_file,
-                        "text": transcript,
+                        "text": transcript if not is_test else "",
                     }
                 )
             if audio_data and len(audio_data) == len(transcripts):
@@ -674,12 +684,11 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                 "Please set use_auth_token=True or use_auth_token='<TOKEN>' to download this dataset"
             )
 
-
-        bundle_version = bundle_url_template.split("/")[0]
+        bundle_version = _COMMON_VOICE_BUNDLE_URL_TEMPLATE.split("/")[0]
         dl_manager.download_config.ignore_url_params = True
 
         self._common_voice_log_download("en", bundle_version, hf_auth_token)
-        archive_path = dl_manager.download(self._common_voice_get_bundle_url("en", bundle_url_template))
+        archive_path = dl_manager.download(self._common_voice_get_bundle_url("en", _COMMON_VOICE_BUNDLE_URL_TEMPLATE))
         local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None
 
         path_to_data = "/".join([bundle_version, "en"])
@@ -693,15 +702,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     "archive_iterator": dl_manager.iter_archive(archive_path),
                     "metadata_filepath": "/".join([path_to_data, "train.tsv"]) if path_to_data else "train.tsv",
                     "path_to_clips": path_to_clips,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "test.tsv"]) if path_to_data else "test.tsv",
-                    "path_to_clips": path_to_clips,
+                    "is_test": False,
                 },
             ),
             datasets.SplitGenerator(
@@ -711,26 +712,17 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     "archive_iterator": dl_manager.iter_archive(archive_path),
                     "metadata_filepath": "/".join([path_to_data, "dev.tsv"]) if path_to_data else "dev.tsv",
                     "path_to_clips": path_to_clips,
+                    "is_test": False,
                 },
             ),
             datasets.SplitGenerator(
-                name="other",
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "other.tsv"]) if path_to_data else "other.tsv",
-                    "path_to_clips": path_to_clips,
-                },
-            ),
-            datasets.SplitGenerator(
-                name="invalidated",
+                name=datasets.Split.TEST,
                 gen_kwargs={
                     "local_extracted_archive": local_extracted_archive,
                     "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "invalidated.tsv"])
-                    if path_to_data
-                    else "invalidated.tsv",
+                    "metadata_filepath": "/".join([path_to_data, "test.tsv"]) if path_to_data else "test.tsv",
                     "path_to_clips": path_to_clips,
+                    "is_test": True,
                 },
             ),
         ]
@@ -741,6 +733,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
         archive_iterator,
         metadata_filepath,
         path_to_clips,
+        is_test,
     ):
         """Yields examples."""
         data_fields = list(self._info().features.keys())
@@ -783,7 +776,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     text = text[1:-1]
                 # replace double quotation marks with single
                 text = text.replace('""', '"')
-            result["text"] = text
+            result["text"] = text if not is_test else ""
 
             yield path, result
 
@@ -850,7 +843,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                 key = "-".join([speaker, start, end, label])
                 example = {
                     "audio": {"path": audio_file, "array": samples, "sampling_rate": sampling_rate},
-                    "text": transcript,
+                    "text": transcript if split_path != "test" else "",
                     "id": key,
                     "dataset": "tedlium",
                 }
@@ -920,7 +913,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
             key = transcript["id"]
             yield key, {
                 "audio": audio,
-                "text": transcript["text"],
+                "text": transcript["text"] if split_path != "test" else "",
                 "dataset": "tedlium",
                 "id": transcript["id"],
             }
@@ -969,7 +962,8 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                         dl_manager.iter_archive(archive_path) for archive_path in audio_archives_paths["train"]
                     ],
                     "local_audio_archives_paths": local_audio_archives_paths.get("train"),
-                    "meta_paths": meta_paths["train"]
+                    "meta_paths": meta_paths["train"],
+                    "is_test": False,
                 },
             ),
             datasets.SplitGenerator(
@@ -979,7 +973,8 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                         dl_manager.iter_archive(archive_path) for archive_path in audio_archives_paths["dev"]
                     ],
                     "local_audio_archives_paths": local_audio_archives_paths.get("dev"),
-                    "meta_paths": meta_paths["dev"]
+                    "meta_paths": meta_paths["dev"],
+                    "is_test": False,
                 },
             ),
             datasets.SplitGenerator(
@@ -989,12 +984,13 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                         dl_manager.iter_archive(archive_path) for archive_path in audio_archives_paths["test"]
                     ],
                     "local_audio_archives_paths": local_audio_archives_paths.get("test"),
-                    "meta_paths": meta_paths["test"]
+                    "meta_paths": meta_paths["test"],
+                    "is_test": True,
                 },
             ),
         ]
 
-    def _gigaspeech_generate_examples(self, audio_archives_iterators, local_audio_archives_paths, meta_paths):
+    def _gigaspeech_generate_examples(self, audio_archives_iterators, local_audio_archives_paths, meta_paths, is_test):
         assert len(audio_archives_iterators) == len(meta_paths)
         if local_audio_archives_paths:
             assert len(audio_archives_iterators) == len(local_audio_archives_paths)
@@ -1031,7 +1027,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                 if len(text) == 0:
                     continue
 
-                audio_meta["text"] = text
+                audio_meta["text"] = text if not is_test else ""
 
                 path = os.path.join(local_audio_archives_paths[i], audio_path_in_archive) if local_audio_archives_paths \
                     else audio_path_in_archive
@@ -1074,6 +1070,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     "local_extracted_archive_paths": local_extracted_archive_paths["train"],
                     "archives": [dl_manager.iter_archive(path) for path in archive_paths["train"]],
                     "metadata": metadata,
+                    "is_test": False,
                 },
             ),
             datasets.SplitGenerator(
@@ -1082,6 +1079,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     "local_extracted_archive_paths": local_extracted_archive_paths["dev"],
                     "archives": [dl_manager.iter_archive(path) for path in archive_paths["dev"]],
                     "metadata": metadata,
+                    "is_test": False,
                 },
             ),
             datasets.SplitGenerator(
@@ -1090,11 +1088,12 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
                     "local_extracted_archive_paths": local_extracted_archive_paths["test"],
                     "archives": [dl_manager.iter_archive(path) for path in archive_paths["test"]],
                     "metadata": metadata,
+                    "is_test": True,
                 },
             ),
         ]
 
-    def _earnings_generate_examples(self, local_extracted_archive_paths, archives, metadata):
+    def _earnings_generate_examples(self, local_extracted_archive_paths, archives, metadata, is_test):
         for local_extracted_archive_path, archive in zip(local_extracted_archive_paths, archives):
             # Here we iterate over all the files within the TAR archive:
             for audio_filename, audio_file in archive:
@@ -1124,7 +1123,7 @@ class ESCDatasets(datasets.GeneratorBasedBuilder):
 
             yield audio_filename, {
                 "id": audio_filename,
-                "text": text,
+                "text": text if not is_test else "",
                 "dataset": "earnings22",
                 "audio": {"path": path, "bytes": audio_file.read()}
             }
@@ -1366,7 +1365,7 @@ _AMI_ANNOTATIONS_ARCHIVE_URL = _AMI_BASE_DATA_URL + "annotations/{split}/text"
 
 _SPGISPEECH_BASE_URL = "https://huggingface.co/datasets/kensho/spgispeech/resolve/main/data/"
 
-_SPGISPEECH_AUDIO_BASE_URL = _SPGISPEECH_BASE_URL + "
+_SPGISPEECH_AUDIO_BASE_URL = _SPGISPEECH_BASE_URL + "audio"
 
 _SPGISPEECH_SUBSET_TO_DIR = {
     "s": ["s"],
@@ -1385,7 +1384,7 @@ _SPGISPEECH_AUDIO_ARCHIVES_NAMES = {
     "test": [f"test_part_{i}.tar.gz" for i in range(0, 3)],
 }
 
-_SPGISPEECH_META_BASE_URL = _SPGISPEECH_BASE_URL + "
+_SPGISPEECH_META_BASE_URL = _SPGISPEECH_BASE_URL + "meta"
 
 _SPGISPEECH_META_FILENAMES = {
     "s": "train_small.csv",
@@ -1417,6 +1416,8 @@ _LIBRISPEECH_DL_URLS = {
 
 _COMMON_VOICE_API_URL = "https://commonvoice.mozilla.org/api/v1"
 
+_COMMON_VOICE_BUNDLE_URL_TEMPLATE = 'cv-corpus-9.0-2022-04-27/cv-corpus-9.0-2022-04-27-{locale}.tar.gz'
+
 _TEDLIUM_BASE_URL = "https://huggingface.co/datasets/LIUM/tedlium/resolve/main/TEDLIUM_release3/legacy/"
 
 _TEDLIUM_URLS = {
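The new _COMMON_VOICE_BUNDLE_URL_TEMPLATE is keyed by {locale}. A minimal sketch of how such a template would expand, assuming the helper simply calls str.format (the body of _common_voice_get_bundle_url is not part of this diff, so get_bundle_url below is a hypothetical stand-in):

_COMMON_VOICE_BUNDLE_URL_TEMPLATE = 'cv-corpus-9.0-2022-04-27/cv-corpus-9.0-2022-04-27-{locale}.tar.gz'

def get_bundle_url(locale: str, url_template: str) -> str:
    # hypothetical stand-in for _common_voice_get_bundle_url: fill in the locale
    return url_template.format(locale=locale)

assert get_bundle_url("en", _COMMON_VOICE_BUNDLE_URL_TEMPLATE) == \
    "cv-corpus-9.0-2022-04-27/cv-corpus-9.0-2022-04-27-en.tar.gz"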
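Every subset in this commit follows the same pattern: _split_generators threads an is_test flag through gen_kwargs, and the matching _generate_examples method blanks the ground-truth transcription for test splits. A minimal self-contained sketch of that mechanism, with hypothetical DemoASR and _ROWS standing in for the real builder and data:

import datasets

_ROWS = {
    "train": [("train_0", "hello world")],
    "test": [("test_0", "goodbye world")],
}

class DemoASR(datasets.GeneratorBasedBuilder):
    """Hypothetical builder illustrating the is_test plumbing used above."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {"id": datasets.Value("string"), "text": datasets.Value("string")}
            )
        )

    def _split_generators(self, dl_manager):
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # every key in gen_kwargs becomes a keyword argument of
                # _generate_examples, which is why each generator signature
                # in the diff gains an is_test parameter
                gen_kwargs={"rows": _ROWS["train"], "is_test": False},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"rows": _ROWS["test"], "is_test": True},
            ),
        ]

    def _generate_examples(self, rows, is_test):
        for _id, text in rows:
            # blank out reference transcriptions on the test split,
            # exactly as each generator in the diff does
            yield _id, {"id": _id, "text": text if not is_test else ""}

Because the library passes gen_kwargs straight into _generate_examples for both regular and streaming loading, the flag adds no runtime cost; it only controls whether the reference text is emitted.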