""" |
|
|
This template serves as a starting point for contributing a dataset to the Nusantara Dataset repo. |
|
|
|
|
|
When modifying it for your dataset, look for TODO items that offer specific instructions. |
|
|
|
|
|
Full documentation on writing dataset loading scripts can be found here: |
|
|
https://huggingface.co/docs/datasets/add_dataset.html |
|
|
|
|
|
To create a dataset loading script you will create a class and implement 3 methods: |
|
|
* `_info`: Establishes the schema for the dataset, and returns a datasets.DatasetInfo object. |
|
|
* `_split_generators`: Downloads and extracts data for each split (e.g. train/val/test) or associate local data with each split. |
|
|
* `_generate_examples`: Creates examples from data on disk that conform to each schema defined in `_info`. |
|
|
|
|
|
TODO: Before submitting your script, delete this doc string and replace it with a description of your dataset. |
|
|
|
|
|
[seacrowd_schema_name] = (kb, pairs, qa, text, t2t, entailment) |
|
|
""" |
|
|

from pathlib import Path
from typing import Dict, List, Tuple

import datasets

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Tasks, DEFAULT_SOURCE_VIEW_NAME, DEFAULT_SEACROWD_VIEW_NAME
_CITATION = """\ |
|
|
@article{DBLP:journals/corr/abs-2011-00677, |
|
|
author = {Fajri Koto and |
|
|
Afshin Rahimi and |
|
|
Jey Han Lau and |
|
|
Timothy Baldwin}, |
|
|
title = {IndoLEM and IndoBERT: {A} Benchmark Dataset and Pre-trained Language |
|
|
Model for Indonesian {NLP}}, |
|
|
journal = {CoRR}, |
|
|
volume = {abs/2011.00677}, |
|
|
year = {2020}, |
|
|
url = {https://arxiv.org/abs/2011.00677}, |
|
|
eprinttype = {arXiv}, |
|
|
eprint = {2011.00677}, |
|
|
timestamp = {Fri, 06 Nov 2020 15:32:47 +0100}, |
|
|
biburl = {https://dblp.org/rec/journals/corr/abs-2011-00677.bib}, |
|
|
bibsource = {dblp computer science bibliography, https://dblp.org} |
|
|
} |
|
|
""" |
|
|
|
|
|
|
|
|
|
|
|
_DATASETNAME = "indolem_sentiment" |
|
|
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME |
|
|
_UNIFIED_VIEW_NAME = DEFAULT_SEACROWD_VIEW_NAME |
|
|
|
|
|
_LANGUAGES = ["ind"] |
|
|
_LOCAL = False |
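# _LANGUAGES uses ISO 639-3 codes ("ind" = Indonesian); _LOCAL = False indicates the data
# is downloaded from public URLs rather than supplied locally by the user.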
_DESCRIPTION = """\ |
|
|
IndoLEM (Indonesian Language Evaluation Montage) is a comprehensive Indonesian benchmark that comprises of seven tasks for the Indonesian language. This benchmark is categorized into three pillars of NLP tasks: morpho-syntax, semantics, and discourse. |
|
|
|
|
|
This dataset is based on binary classification (positive and negative), with distribution: |
|
|
* Train: 3638 sentences |
|
|
* Development: 399 sentences |
|
|
* Test: 1011 sentences |
|
|
|
|
|
The data is sourced from 1) Twitter [(Koto and Rahmaningtyas, 2017)](https://www.researchgate.net/publication/321757985_InSet_Lexicon_Evaluation_of_a_Word_List_for_Indonesian_Sentiment_Analysis_in_Microblogs) |
|
|
and 2) [hotel reviews](https://github.com/annisanurulazhar/absa-playground/). |
|
|
|
|
|
The experiment is based on 5-fold cross validation. |
|
|
""" |
|
|
|
|
|
|
|
|

_HOMEPAGE = "https://indolem.github.io/"

_LICENSE = "Creative Commons Attribution Share-Alike 4.0 International"
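
# The URLs below point to fold 0 (train0/dev0/test0) of the 5-fold cross-validation split
# described in _DESCRIPTION; only this first fold is loaded by this script.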
_URLS = {
    _DATASETNAME: {
        'train': 'https://raw.githubusercontent.com/indolem/indolem/main/sentiment/data/train0.csv',
        'dev': 'https://raw.githubusercontent.com/indolem/indolem/main/sentiment/data/dev0.csv',
        'test': 'https://raw.githubusercontent.com/indolem/indolem/main/sentiment/data/test0.csv',
    }
}

_SUPPORTED_TASKS = [Tasks.SENTIMENT_ANALYSIS]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class IndolemSentimentDataset(datasets.GeneratorBasedBuilder):

    # Integer labels in the data map to these classes: 0 -> negative, 1 -> positive.
    label_classes = ['negative', 'positive']
    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)
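
    # Two configurations are provided:
    #   * "indolem_sentiment_source"        -> original fields: "sentence" (str) and "sentiment" (0/1)
    #   * "indolem_sentiment_seacrowd_text" -> unified SEACrowd text schema: "id", "text", "label"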
    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="indolem_sentiment_source",
            version=SOURCE_VERSION,
            description="indolem_sentiment source schema",
            schema="source",
            subset_id="indolem_sentiment",
        ),
        SEACrowdConfig(
            name="indolem_sentiment_seacrowd_text",
            version=SEACROWD_VERSION,
            description="indolem_sentiment SEACrowd schema",
            schema="seacrowd_text",
            subset_id="indolem_sentiment",
        ),
    ]

    DEFAULT_CONFIG_NAME = "indolem_sentiment_source"

    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features({"sentence": datasets.Value("string"), "sentiment": datasets.Value("int32")})
        elif self.config.schema == "seacrowd_text":
            features = schemas.text_features(self.label_classes)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        train_data = Path(dl_manager.download(urls['train']))
        test_data = Path(dl_manager.download(urls['test']))
        dev_data = Path(dl_manager.download(urls['dev']))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": train_data,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": test_data,
                    "split": "test",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": dev_data,
                    "split": "dev",
                },
            ),
        ]

    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        """Yields examples as (key, example) tuples."""
        with filepath.open('r', encoding='utf-8') as f:
            line = f.readline()  # skip the header row
            id = 0
            while line:
                line = f.readline().strip()
                if len(line) == 0:
                    break

                ex = {}
                id += 1
                sentence = line[:-2].strip('"')
                sentiment = int(line[-1])
                if self.config.schema == 'source':
                    ex = {'sentence': sentence, 'sentiment': sentiment}
                elif self.config.schema == 'seacrowd_text':
                    ex = {'id': str(id), 'text': str(sentence), 'label': self.label_classes[sentiment]}
                else:
                    raise ValueError(f"Invalid config: {self.config.name}")

                yield id, ex
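

# A minimal usage sketch (an illustration only, assuming this file is saved locally as
# "indolem_sentiment.py" and an older `datasets` release that still supports loading scripts):
#
#     import datasets
#     ds = datasets.load_dataset("indolem_sentiment.py", name="indolem_sentiment_seacrowd_text")
#     print(ds["train"][0])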