download parquet from hf datasets (#10)
JGLUE.py CHANGED
@@ -7,6 +7,9 @@ from typing import Dict, List, Optional, Union
 import datasets as ds
 import pandas as pd
 from datasets.tasks import QuestionAnsweringExtractive
+import logging
+
+logger = logging.getLogger(__name__)
 
 _CITATION = """\
 @inproceedings{kurihara-etal-2022-jglue,
@@ -500,12 +503,26 @@ class JGLUE(ds.GeneratorBasedBuilder):
         filter_review_id_list = file_paths["filter_review_id_list"]
         label_conv_review_id_list = file_paths["label_conv_review_id_list"]
 
-        split_dfs = preprocess_for_marc_ja(
-            config=self.config,
-            data_file_path=file_paths["data"],
-            filter_review_id_list_paths=filter_review_id_list,
-            label_conv_review_id_list_paths=label_conv_review_id_list,
-        )
+        try:
+            split_dfs = preprocess_for_marc_ja(
+                config=self.config,
+                data_file_path=file_paths["data"],
+                filter_review_id_list_paths=filter_review_id_list,
+                label_conv_review_id_list_paths=label_conv_review_id_list,
+            )
+        except KeyError as err:
+            from urllib.parse import urljoin
+
+            logger.warning(err)
+
+            base_url = "https://huggingface.co/datasets/shunk031/JGLUE/resolve/refs%2Fconvert%2Fparquet/MARC-ja/"
+            marcja_parquet_urls = {
+                "train": urljoin(base_url, "jglue-train.parquet"),
+                "valid": urljoin(base_url, "jglue-validation.parquet"),
+            }
+            file_paths = dl_manager.download_and_extract(marcja_parquet_urls)
+            split_dfs = {k: pd.read_parquet(v) for k, v in file_paths.items()}
+
         return [
             ds.SplitGenerator(
                 name=ds.Split.TRAIN,
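For reference, the fallback branch above can be exercised on its own. The sketch below is a minimal standalone version of what the except KeyError path does, under two assumptions not stated in the diff: network access is available, and pandas can read remote parquet (it can when fsspec and pyarrow are installed). It bypasses dl_manager entirely, so it illustrates the URL scheme rather than reproducing the loader's actual code path:

    # Minimal sketch of the parquet fallback added in this commit (assumes
    # network access plus pandas with fsspec/pyarrow for remote parquet reads).
    from urllib.parse import urljoin

    import pandas as pd

    # "refs%2Fconvert%2Fparquet" is the URL-encoded refs/convert/parquet branch,
    # where the Hub keeps auto-converted parquet copies of a dataset.
    base_url = "https://huggingface.co/datasets/shunk031/JGLUE/resolve/refs%2Fconvert%2Fparquet/MARC-ja/"

    # Because base_url ends with "/", urljoin simply appends each file name.
    parquet_urls = {
        "train": urljoin(base_url, "jglue-train.parquet"),
        "valid": urljoin(base_url, "jglue-validation.parquet"),
    }

    # pd.read_parquet accepts http(s) URLs directly, so no explicit download
    # step is needed here, unlike dl_manager.download_and_extract in the diff.
    split_dfs = {split: pd.read_parquet(url) for split, url in parquet_urls.items()}
    print({split: df.shape for split, df in split_dfs.items()})

The design choice in the diff itself is narrower: it keeps the existing preprocess_for_marc_ja path as the primary route and only falls back to the pre-converted parquet files when the raw-data key lookup fails, logging the KeyError rather than raising it.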