# kb-books / extract_data.py
# /// script
# requires-python = "==3.12.*"
# dependencies = [
# "PyMuPDF>=1.26.0",
# "datasets>=3.5.0",
# ]
# ///
import os
import re
import string
import json
import logging
import shutil
from datetime import datetime
from tqdm import tqdm
import fitz
from datasets import Dataset, load_dataset
logger = logging.getLogger(__name__)
##########################################
###### FILL MANUALLY #####################
#prefix for the parquet file names
source = "kb_books"
#how many years should go in one parquet file (do not change!)
n_chunks = 1
#how many docs in 1 parquet
n_batch = 1
#paths
input_path = os.path.join("..","..","kb-books","raw")
output_path = os.path.join(".","data")
logs = os.path.join(".","log")
#first year to process
start_year = 1876
#last year to process
stop_year = 1880
#misc folders in data
unwanted_folders = ("README.txt","logs")
#demo run for testing; if True, only the first page of each pdf is read
demo = False
#location of reference filenames for public domain documents
ref_pd_location = os.path.join(".","pd_check","public_domain_files.txt")
with open(ref_pd_location, 'r') as pd_files:
ref_pd_list = pd_files.read().splitlines()
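#the reference file is assumed to contain one pdf filename per line, roughly like
#(hypothetical names, shown only for illustration):
#  1820_alma99xxxxxxxxxx.pdf
#  1876_alma99yyyyyyyyyy.pdf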
############################################
def find_author_json(data: dict[str,dict[str,dict[str,str]]]) -> str:
"""
A function for finding the author(s) from various possible locations in the json metadata.
"""
try:
author = data.get("pnx").get("addata")["au"]
except KeyError:
author = []
try:
add_author = data.get("pnx").get("addata")["addau"]
except KeyError:
add_author = []
authors = list(set(author)) + list(set(add_author))
authors = "; ".join(authors)
if len(authors) < 1:
try:
authors = data.get("pnx").get("sort")["author"]
authors = "; ".join(authors)
except KeyError:
pass
if len(authors) < 1:
try:
authors = data.get("pnx").get("display")["creator"]
authors = "; ".join(authors)
except KeyError:
authors = "missing"
    return authors
def find_title_json(data: dict[str,dict[str,dict[str,str]]]) -> str:
"""
A function for finding the title from various possible locations in the json metadata.
"""
try:
title = data.get("pnx").get("display")["title"][0]
except KeyError:
title = []
    if len(title) < 1:
        try:
            title = data.get("pnx").get("addata")["btitle"][0]
        except KeyError:
            pass
if len(title) < 1:
try:
title = data.get("pnx").get("sort")["title"][0]
except KeyError:
pass
if len(title) < 1:
title = "missing"
    return title
def find_digitalization(data: dict[str,dict[str,dict[str,str]]]) -> str:
"""
A function for finding the digitalization date from various possible locations in the json metadata.
"""
    try:
        digit = data.get("pnx").get("display")["creationdate"][0]
        #last 4 digit number in the string
        digit = re.findall(r"\d{4}$",digit)[0]
    except (KeyError, IndexError):
        digit = []
    if len(digit) < 1:
        try:
            digit = data.get("pnx").get("addata")["date"][1]
            digit = re.findall(r"\d{4}$",digit)[0]
        except (KeyError, IndexError):
            digit = "missing"
    return digit
def find_source(data: dict[str,dict[str,dict[str,str]]]) -> str:
"""
A function for finding source of the document from the json metadata.
"""
try:
doc_source = data.get("pnx").get("display")["lds50"]
        #keep the first entry that is not the digitalisation note
        doc_source = [i for i in doc_source if "Digi" not in i][0]
except (KeyError, IndexError):
doc_source = "missing"
return doc_source
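#For reference, the four finder functions above assume the json metadata is shaped
#roughly like the sketch below (keys taken from the accessors above, values are
#illustrative placeholders only):
#  {"pnx": {"display": {"title": [...], "creator": [...], "creationdate": [...], "lds50": [...]},
#           "addata":  {"au": [...], "addau": [...], "btitle": [...], "date": [...]},
#           "sort":    {"author": [...], "title": [...]}}}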
#filter out authors who are still alive or are not confirmed dead for 70+ years
def dead_70_yrs_ago(ds):
    """filter for the scraped authors to find the ones who died at least 70 years ago"""
    birth_miss = False
    death_miss = False
    try:
        birth_yr = int(ds["born"])
        birth = birth_yr <= 1955
    except ValueError:
        birth = False
        birth_miss = True
    try:
        death_yr = int(ds["died"])
        death = death_yr <= 1955
    except ValueError:
        death = False
        death_miss = True
    #dead 70+ years ago if both years confirm it, if only the death year does,
    #or if the death year is missing but the author was born before 1833
    if ((death and birth) or
        (death and birth_miss) or
        (death_miss and not birth_miss and birth_yr < 1833)
        ):
        filtered = True
    else:
        filtered = False
    return filtered
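#illustrative calls (hypothetical author rows):
#  dead_70_yrs_ago({"born": "1800", "died": "1850"})    -> True  (both years known and old enough)
#  dead_70_yrs_ago({"born": "1930", "died": "unknown"}) -> False (death year missing, born after 1833)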
def extract_meta_data(pdf_file:str) -> tuple[str,str,str,str]:
    """
    A function for extracting meta data from the json file next to the pdf,
    includes:
    - author(s)
    - title
    - digitalized
    - source
    """
    try:
        #load the json that shares the pdf's name
        json_file = pdf_file[:-3] + "json"
        with open(json_file) as f:
            data = json.load(f)
        authors = find_author_json(data)
        title = find_title_json(data)
        digitalized = find_digitalization(data)
        doc_source = find_source(data)
    except BaseException:
        authors = "missing"
        title = "missing"
        digitalized = "missing"
        doc_source = "missing"
    return authors, title, digitalized, doc_source
def simplify_name(author:str) -> str:
    """
    function for simplifying a repeated single author name separated by ";"
    eg. "Holck, J.; af J. Holck." -> "Holck, J."
    """
    simp = author
    if ";" in author:
        only_uppercase = [re.findall(r"[A-Z][a-z]*",i) for i in author.split(";")]
        #if both halves contain the same capitalised words, keep only the first half
        if len(only_uppercase)==2 and sorted(only_uppercase[0]) == sorted(only_uppercase[1]):
            simp = re.findall(r"^[^;]*",author)[0]
    return simp
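#example from the docstring above:
#  simplify_name("Holck, J.; af J. Holck.") -> "Holck, J."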
def separate_names(author:str) -> tuple[list[list[str]],int]:
    """
    function for separating the different authors and their name parts, and counting them
    - separates by ";"
    - matches strings starting with uppercase letters
    """
authors = re.findall(r"([^;]*)",author)
authors = list(filter(None,authors))
authors = [re.findall(r"([A-Z]\w*)",i) for i in authors]
n_author = len(authors)
return authors, n_author
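#illustrative example (the second author is hypothetical):
#  separate_names("Holck, J.; Andersen, H. C.") -> ([["Holck", "J"], ["Andersen", "H", "C"]], 2)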
def check_copyright(pub_year:int,
cover_page_text: str,
filename: str,
ref_filenames: list[str]
) -> bool:
"""
Function for checking public domain status based on:
- year published,
- if the digitalising party claims it is copyright free
- if the filename can be matched to a name from an outside source
"""
if pub_year < 1833:
public_domain = True
elif ("free of copyright" in cover_page_text):
public_domain = True
elif filename in ref_filenames:
public_domain = True
else:
public_domain = False
return public_domain
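#illustrative calls (filenames are hypothetical):
#  check_copyright(1820, "", "1820_alma99xxxxxxxxxx.pdf", ref_pd_list)           -> True (published before 1833)
#  check_copyright(1876, "... free of copyright ...", "1876_x.pdf", ref_pd_list) -> True (claimed copyright free)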
def convert_pdf_to_dataset(file_name: str,
                           path_to_file: str,
                           demo: bool = False) -> Dataset | None:
    """Converts a pdf into page images plus text and builds a dataset with one row per page, based on:
    https://thepythoncode.com/article/convert-pdf-files-to-images-in-python
    Returns None if the document could not be confirmed as public domain.
    """
#create path, create id
input_file=os.path.join(path_to_file,file_name)
#whitespaces to underscores, remove: punctuation, alma, pdf
doc_id = re.sub(" ","_",file_name)
doc_id = ''.join(filter(lambda x: x not in string.punctuation, doc_id))
doc_id = re.sub(r"alma|pdf","",doc_id)
#get metadata
pub_year = file_name[:4]
#get metadata (from json)
author, title, digitalized, doc_source = extract_meta_data(input_file)
# Open the document
pdfIn = fitz.open(input_file)
data_list = []
# Iterate throughout the pages, set range for full doc or demo test runs
if demo:
page_range = 1
else:
page_range = pdfIn.page_count
for pg in range(page_range):
page = pdfIn[pg]
#get page text
page_text = page.get_text()
#meta data from frontpage if still missing
if pg == 0:
#remove \n for easier regexing
try:
text_solid = re.sub("\n","",page_text)
except TypeError:
#if no text on frontpage
text_solid = "missing"
if author == "missing":
try:
author = re.search(r"(?<=Author\(s\):)(.*?)(?=Titel)",text_solid)[0]
#trying to clean it a bit
author = simplify_name(author)
#author, n_author = separate_names(author)
except TypeError:
#in case no cover page
author = "missing"
finally:
#in case cover page present, but still no author
if len(author) == 0:
author = "missing"
else:
pass
if title == "missing":
try:
title = re.search(r"(?<=Title:)(.*?)(?=Udgivet)",text_solid)[0]
except TypeError:
title = "missing"
#now that all possible meta data is gathered after first page, see copyright status
copyright_free = check_copyright(int(pub_year),
text_solid,
file_name,
ref_pd_list)
else:
#on other pages
pass
if not copyright_free:
#if public domain was not confirmed, end looking through pages
break
#create page_image
        rotate = 0
# 2, 2 (text should be readable)
zoom_x = 2
zoom_y = 2
# Pre-rotate is to rotate if needed.
mat = fitz.Matrix(zoom_x, zoom_y).prerotate(rotate)
pix = page.get_pixmap(matrix=mat, alpha=False)
page_img = pix.pil_image()
page_id = f"{doc_id}_p{pg+1}"
#assemble data_doc
        if isinstance(author, list):
            author = "; ".join(author)
meta_data ={"doc_id" : doc_id,
"page_id" : page_id,
"page_image" : page_img,
"page_text": page_text,
"author": author,
"title" : title,
"published": pub_year,
"digitalized": digitalized,
"source": doc_source,
"file_name": file_name}
data_list.append(meta_data)
pdfIn.close()
    if copyright_free:
        ds = Dataset.from_list(data_list)
    else:
        #not confirmed public domain: signal the caller to skip this file
        ds = None
    return ds
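#illustrative call (the filename is hypothetical):
#  page_ds = convert_pdf_to_dataset("1876_alma99xxxxxxxxxx.pdf", os.path.join(input_path, "1876"), demo=True)
#  -> Dataset with one row per processed page, or None if public domain status was not confirmed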
def make_year_list(start_year: int, stop_year: int) -> list[str]:
"""make a list of file names based on years"""
year_list = list(range(start_year, stop_year + 1))
year_list = [str(i) for i in year_list]
return year_list
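#e.g. with the settings at the top of the script:
#  make_year_list(1876, 1880) -> ["1876", "1877", "1878", "1879", "1880"]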
#source filter for ADL (they are not scanned pdfs)
adl_filter = lambda ds: ds["source"] != "ADLFBI"
def split(a, n):
"splits list into n roughly equal parts"
k, m = divmod(len(a), n)
return (a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
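#e.g. split([0, 1, 2, 3, 4, 5, 6], 3) yields [0, 1, 2], [3, 4], [5, 6]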
def remove(path):
""" param <path> could either be relative or absolute. """
if os.path.isfile(path) or os.path.islink(path):
os.remove(path) # remove the file
elif os.path.isdir(path):
shutil.rmtree(path) # remove dir and all contains
else:
raise ValueError("file {} is not a file or dir.".format(path))
def reorganize_data(output_path: str, shard_size: str = "5GB"):
    """ Loads the temporary data folders in the data path, saves each year as shards of at most shard_size (default 5GB) and deletes the temporary folders
    """
folders = os.listdir(output_path)
temp_folders = [i for i in folders if "_t" in i]
if len(temp_folders) == 0:
print("DATA ORGANIZED")
return
print("REORGANIZING DATA...")
for t_fold in tqdm(temp_folders):
#load all separate parquets into 1 ds
data_path = os.path.join(output_path,t_fold)
data_set = load_dataset(data_path, split = "train")
#save it to appropriately size chunks
year_str = t_fold[:-2]
new_data_path = os.path.join(output_path,year_str)
        data_set.save_to_disk(new_data_path, max_shard_size=shard_size)
#delete temp_folder
        try:
            remove(data_path)
        except PermissionError as e:
            print(e)
def main():
    sub_folders = os.listdir(input_path)
    #drop non-year entries (README.txt, logs) from the folder list
    sub_folders = [s for s in sub_folders if s not in unwanted_folders]
#select years to process
year_list = make_year_list(start_year,stop_year)
sub_folders = sorted([i for i in sub_folders if i in year_list])
#chunking because there's a lot of data
chunks = [sub_folders[i:i + n_chunks] for i in range(0, len(sub_folders), n_chunks)]
logger.info(f"Extracting from PDFs...{sub_folders[0]}-{sub_folders[-1]}")
for ch in tqdm(chunks):
problem_list =[]
for sfolder in ch:
#sub folder path e.g /raw/1750
sfp = os.path.join(input_path,sfolder)
files = [i for i in os.listdir(sfp) if ".pdf" in i]
#further chunking because even 1 year is too much memory-wise
#batched_files = list(split(files,10))
#limit files in 1 parquet
batched_files = [files[i:i + n_batch] for i in range(0, len(files), n_batch)]
for batch_nr, batch in enumerate(batched_files):
ds=[]
for i in batch:
try:
temporary_ds = convert_pdf_to_dataset(i,sfp,demo)
if temporary_ds is None:
pass
else:
print(temporary_ds[0]["file_name"])
for j in temporary_ds:
ds.append(j)
except BaseException as e:
logger.info(f"FILE ERROR: {os.path.join(sfp,i)}")
logger.info(f"ERROR: {e}")
problem_list.append(i)
logger.info(f"Assembling Dataset: {ch[0]}-{ch[-1]}, BATCH:{batch_nr}")
#if no viable data was saved, do not make a parquet
if len(ds) == 0:
continue
ds = Dataset.from_list(ds)
#filter out certain files
ds = ds.filter(adl_filter)
ds = ds.remove_columns("source")
                #make subfolders by year, _t for temporary, will be reorganized
                save_dir = os.path.join(output_path,f"{sfolder}_t")
                os.makedirs(save_dir, exist_ok=True)
                save_path = os.path.join(save_dir,f"{source}_{ch[0]}-{ch[-1]}_{batch_nr}.parquet")
                ds.to_parquet(save_path)
logger.info(f"FOLDER DONE: {sfolder}")
if len(problem_list) >= 1:
with open(os.path.join(logs,f"problems_{ch[0]}-{ch[-1]}.txt"), 'w') as outfile:
outfile.write('\n'.join(str(i) for i in problem_list))
    #release references to the (potentially large) datasets
    ds = None
    temporary_ds = None
    del ds
    del temporary_ds
    #reorganize the data after running everything
    #reorganize_data(output_path)
if __name__ == "__main__":
    #make sure the log folder exists before the FileHandler and problem logs are written
    os.makedirs(logs, exist_ok=True)
    log_path = os.path.join(logs,"extract.log")
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(levelname)s - %(message)s",
handlers=[
logging.StreamHandler(),
logging.FileHandler(log_path),
],
)
main()