| | """OCR-IDL: OCR annotations for the Industry Document Library.""" |
| |
|
| |
|
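
# Usage (a minimal sketch; assumes this script is exposed on the Hub as
# "rubentito/OCR-IDL", the repository the validation CSV below is
# downloaded from):
#
#   import datasets
#
#   ocr_idl = datasets.load_dataset("rubentito/OCR-IDL")
#   page = ocr_idl["validation"][0]
#   print(page["page_id"], page["words"][:5], page["boxes"][:5])
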
import csv
import json

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@article{biten2022ocr,
    title = {OCR-IDL: OCR Annotations for Industry Document Library Dataset},
    author = {{Biten}, Ali Furkan and {Tito}, Ruben and {Gomez}, Lluis and {Valveny}, Ernest and {Karatzas}, Dimosthenis},
    journal = {arXiv preprint arXiv:2202.12985},
    year = 2022,
    eid = {arXiv:2202.12985},
    pages = {arXiv:2202.12985},
    archivePrefix = {arXiv},
    eprint = {2202.12985},
}
"""
_DESCRIPTION = """\
The OCR-IDL dataset contains the OCR annotations of 26M pages of the Industry Document Library (IDL). \
It is specially intended for text-layout self-supervised tasks such as Masked Language Modeling or Text De-noising. \
However, we also include the URL of each document so that the documents can be downloaded for image-text alignment tasks.
"""
| | _URL = "http://datasets.cvc.uab.es/UCSF_IDL/" |
| | _PROJECT_URL = "https://github.com/furkanbiten/idl_data" |
| | _URLS = { |
| | "train": _URL + "train-v1.1.json", |
| | "dev": _URL + "dev-v1.1.json", |
| | } |
| |
|
| |
|
class OCRIDLConfig(datasets.BuilderConfig):
    """BuilderConfig for OCR-IDL."""

    def __init__(self, **kwargs):
        """BuilderConfig for OCR-IDL.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(OCRIDLConfig, self).__init__(**kwargs)

class OCR_IDL(datasets.GeneratorBasedBuilder):
    """OCR-IDL: OCR annotations for 26M pages of the Industry Document Library. Version 1.0.0."""

    BUILDER_CONFIGS = [
        OCRIDLConfig(
            name="OCR-IDL",
            version=datasets.Version("1.0.0", ""),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "document_id": datasets.Value("string"),
                    "document_url": datasets.Value("string"),
                    "page_id": datasets.Value("string"),
                    "page_height": datasets.Value("int32"),
                    "page_width": datasets.Value("int32"),
                    # The per-word features below were declared as empty lists
                    # in the original script; the dtypes here are assumptions
                    # based on typical word-level OCR output.
                    "words": datasets.Sequence(datasets.Value("string")),
                    # One [x0, y0, x1, y1] box per word (assumed layout).
                    "boxes": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))),
                    # Index of the text line each word belongs to.
                    "word_lines_id": datasets.Sequence(datasets.Value("int32")),
                    # e.g. printed vs. handwritten.
                    "text_types": datasets.Sequence(datasets.Value("string")),
                    # Recognition confidence per word.
                    "recog_conf": datasets.Sequence(datasets.Value("float32")),
                }
            ),
            supervised_keys=None,
            homepage=_PROJECT_URL,
            citation=_CITATION,
        )
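
    # A single yielded record looks roughly like this (hypothetical values;
    # the field layout is assumed from the features declared above):
    #
    #   {
    #       "document_id": "abcd0123",
    #       "document_url": "https://.../abcd0123.pdf",
    #       "page_id": "abcd0123_p0",
    #       "page_height": 3300,
    #       "page_width": 2550,
    #       "words": ["INDUSTRY", "MEMO"],
    #       "boxes": [[92.0, 108.0, 410.0, 164.0], [430.0, 108.0, 610.0, 164.0]],
    #       "word_lines_id": [0, 0],
    #       "text_types": ["printed", "printed"],
    #       "recog_conf": [99.1, 97.4],
    #   }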

    def _split_generators(self, dl_manager):
        # Only the validation CSV is referenced by the original script, so
        # both splits point at the same downloaded file, as they did in the
        # original (which reused downloaded_files[0] for both).
        filepath = dl_manager.download(
            "https://huggingface.co/datasets/rubentito/OCR-IDL/resolve/main/val.csv"
        )

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": filepath}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": filepath}),
        ]

    def _generate_examples(self, filepath):
        """Yield one example per OCR-annotated page.

        The original script still carried the SQuAD parsing template here.
        This version is a sketch that assumes each CSV row holds one page,
        with the list-valued columns stored as JSON-encoded strings.
        """
        logger.info("generating examples from = %s", filepath)
        key = 0
        with open(filepath, encoding="utf-8", newline="") as f:
            for row in csv.DictReader(f):
                yield key, {
                    "document_id": row["document_id"],
                    "document_url": row["document_url"],
                    "page_id": row["page_id"],
                    "page_height": int(row["page_height"]),
                    "page_width": int(row["page_width"]),
                    # List-valued columns are assumed to be JSON-encoded.
                    "words": json.loads(row["words"]),
                    "boxes": json.loads(row["boxes"]),
                    "word_lines_id": json.loads(row["word_lines_id"]),
                    "text_types": json.loads(row["text_types"]),
                    "recog_conf": json.loads(row["recog_conf"]),
                }
                key += 1