"""Bernice pretrain data""" |
|
|
|
|
|
|
|
|
import csv |
|
|
import json |
|
|
import os |
|
|
import gzip |
|
|
import datasets |
|
|
|
|
|
|
|
|
|
|
|
_CITATION = """\
Alexandra DeLucia, Shijie Wu, Aaron Mueller, Carlos Aguirre, Philip Resnik, and Mark Dredze. 2022.
Bernice: A Multilingual Pre-trained Encoder for Twitter. In Proceedings of the 2022 Conference on
Empirical Methods in Natural Language Processing, pages 6191–6205, Abu Dhabi, United Arab Emirates.
Association for Computational Linguistics.
"""

_DESCRIPTION = """\
Tweet IDs for the 2.5 billion multilingual tweets used to train Bernice, a Twitter encoder.
The tweets come from the public 1% Twitter API stream from January 2016 to December 2021.
Twitter-provided language metadata accompanies each tweet ID. The data covers 66 unique languages,
identified by ISO 639 language codes, including `und` for undefined languages.
Tweet text is not distributed here and must be re-gathered (hydrated) via the Twitter API.
"""
|
|
|
|
|
_HOMEPAGE = "https://preview.aclanthology.org/emnlp-22-ingestion/2022.emnlp-main.415"

_LICENSE = ""

_BASE_DATA_URL = "https://huggingface.co/datasets/jhu-clsp/bernice-pretrain-data/resolve/main/data"

# Gzipped text files of tweet IDs: one file per month for the full dataset ("all"),
# and a single combined file for the Indic-language subset ("indic").
_URLS = {
    "all": ['2016_01.txt.gz', '2016_02.txt.gz', '2016_03.txt.gz', '2016_04.txt.gz', '2016_05.txt.gz', '2016_06.txt.gz',
            '2016_07.txt.gz', '2016_08.txt.gz', '2016_09.txt.gz', '2016_10.txt.gz', '2016_11.txt.gz', '2016_12.txt.gz',
            '2017_01.txt.gz', '2017_02.txt.gz', '2017_03.txt.gz', '2017_04.txt.gz', '2017_05.txt.gz', '2017_06.txt.gz',
            '2017_07.txt.gz', '2017_09.txt.gz', '2017_10.txt.gz', '2017_11.txt.gz', '2017_12.txt.gz', '2018_01.txt.gz',
            '2018_02.txt.gz', '2018_03.txt.gz', '2018_04.txt.gz', '2018_05.txt.gz', '2018_06.txt.gz', '2018_07.txt.gz',
            '2018_08.txt.gz', '2018_09.txt.gz', '2018_10.txt.gz', '2018_11.txt.gz', '2018_12.txt.gz', '2019_01.txt.gz',
            '2019_02.txt.gz', '2019_03.txt.gz', '2019_04.txt.gz', '2019_05.txt.gz', '2019_06.txt.gz', '2019_07.txt.gz',
            '2019_08.txt.gz', '2019_09.txt.gz', '2019_10.txt.gz', '2019_11.txt.gz', '2019_12.txt.gz', '2020_01.txt.gz',
            '2020_02.txt.gz', '2020_03.txt.gz', '2020_04.txt.gz', '2020_05.txt.gz', '2020_06.txt.gz', '2020_07.txt.gz',
            '2020_08.txt.gz', '2020_09.txt.gz', '2020_10.txt.gz', '2020_11.txt.gz', '2020_12.txt.gz', '2021_01.txt.gz',
            '2021_02.txt.gz', '2021_03.txt.gz', '2021_04.txt.gz', '2021_05.txt.gz', '2021_06.txt.gz', '2021_07.txt.gz',
            '2021_08.txt.gz', '2021_09.txt.gz', '2021_10.txt.gz', '2021_11.txt.gz', '2021_12.txt.gz'],
    "indic": ["indic_tweet_ids.txt.gz"],
}


class BernicePretrainData(datasets.GeneratorBasedBuilder):
    """Tweet IDs for the 2.5 billion multilingual tweets used to train Bernice, a Twitter encoder."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="all", version=VERSION,
                               description="Includes all tweets"),
        datasets.BuilderConfig(name="indic", version=VERSION,
                               description="Only the Indic languages, plus `und` (undefined)"),
    ]

    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tweet_id": datasets.Value("string"),
                    "lang": datasets.Value("string"),
                    "year": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download (and decompress) the per-config gzipped tweet ID files; everything goes into a single train split.
        urls_to_download = [f"{_BASE_DATA_URL}/{self.config.name}/{f}" for f in _URLS[self.config.name]]
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": downloaded_files,
                    "split": "train",
                },
            )
        ]

    def _generate_examples(self, filepaths, split):
        # Each extracted file holds one tab-separated record per line: tweet_id, language code, year.
        for filepath in filepaths:
            with open(filepath, encoding="utf-8") as f:
                for line_number, instance in enumerate(f):
                    tweet_id, lang, year = instance.strip().split("\t")
                    yield tweet_id, {
                        "tweet_id": tweet_id,
                        "lang": lang,
                        "year": year,
                    }
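
# A minimal usage sketch (not part of the builder itself): with a `datasets`
# version that still supports community loading scripts, this file can be passed
# straight to `load_dataset`; the config name picks the full ID list ("all") or
# the Indic-language subset ("indic"). Running it downloads the ID files and
# prints a few (tweet_id, lang, year) records.
if __name__ == "__main__":
    dataset = datasets.load_dataset(__file__, name="indic", split="train")
    for i, record in enumerate(dataset):
        print(record["tweet_id"], record["lang"], record["year"])
        if i == 2:  # only show the first three records
            break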
|
|
|