Kenneth Enevoldsen committed
Commit 32e9f98 · Parent(s): 02d1b4a
Added creation scripts
dataset_creation_scripts/annotate.py
ADDED
@@ -0,0 +1,138 @@
import json
import pickle
from pathlib import Path

from spacy.tokens import Span

import dacy
from dacy.datasets import dane


def load_examples():
    """Load DaNE examples with DaCy fine-grained NER predictions attached.

    Results are cached in examples.pkl to avoid re-running the model.
    """
    save_path = Path("examples.pkl")
    if save_path.exists():
        with open(save_path, "rb") as f:
            examples = pickle.load(f)

        return examples

    train, dev, test = dane()

    nlp = dacy.load("da_dacy_large_ner_fine_grained-0.1.0")

    examples = list(test(nlp)) + list(train(nlp)) + list(dev(nlp))

    docs = nlp.pipe([ex.x.text for ex in examples])

    for e in examples:
        e.predicted = next(docs)

    with open(save_path, "wb") as f:
        pickle.dump(examples, f)

    return examples


def normalize_examples(examples):
    """Map the coarse DaNE labels onto the fine-grained label names."""
    label_mapping = {
        "PER": "PERSON",
        "LOC": "LOCATION",
        "ORG": "ORGANIZATION",
        "MISC": "MISC",
    }

    for e in examples:
        old_ents = e.y.ents
        new_ents = []
        for ent in old_ents:
            new_label = label_mapping[ent.label_]
            new_ent = Span(e.y, start=ent.start, end=ent.end, label=new_label)
            new_ents.append(new_ent)

        e.y.ents = new_ents

    return examples


def example_to_review_format(example) -> dict:
    """Convert an example to the review format, with two versions per task:
    the gold reference annotations and the model's predictions.
    """
    ref = example.y

    text = ref.text
    tokens = [
        {"text": t.text, "start": t.idx, "end": t.idx + len(t), "id": i}
        for i, t in enumerate(ref)
    ]
    answer = "accept"

    versions = []

    v_ref_spans = [
        {
            "start": s.start_char,
            "end": s.end_char,
            "label": s.label_,
            "token_start": s.start,
            "token_end": s.end - 1,
        }
        for s in ref.ents
    ]
    v_ref = {
        "text": text,
        "tokens": tokens,
        "spans": v_ref_spans,
        "answer": answer,
        "sessions": ["reference"],
        "default": True,
    }
    versions.append(v_ref)

    v_pred_spans = [
        {
            "start": s.start_char,
            "end": s.end_char,
            "label": s.label_,
            "token_start": s.start,
            "token_end": s.end - 1,
        }
        for s in example.predicted.ents
    ]
    v_pred = {
        "text": text,
        "tokens": tokens,
        "spans": v_pred_spans,
        "answer": answer,
        "sessions": ["da_dacy_large_ner_fine_grained-0.1.0"],
        "default": True,
    }
    versions.append(v_pred)

    return {
        "text": text,
        "tokens": tokens,
        "answer": answer,
        "view_id": "ner_manual",
        "versions": versions,
    }


if __name__ == "__main__":
    examples = load_examples()

    # inspect the label set (result intentionally discarded)
    ",".join(set([ent.label_ for e in examples for ent in e.x.ents]))

    jsonl_data = [example_to_review_format(e) for e in normalize_examples(examples)]

    with open("examples.jsonl", "w") as f:
        for json_dict in jsonl_data:
            line = json.dumps(json_dict)
            f.write(f"{line}\n")

    with open("reference.jsonl", "w") as f:
        for json_dict in jsonl_data:
            line = json.dumps(json_dict["versions"][0])
            f.write(f"{line}\n")

    with open("predictions.jsonl", "w") as f:
        for json_dict in jsonl_data:
            line = json.dumps(json_dict["versions"][1])
            f.write(f"{line}\n")
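The resulting examples.jsonl stores two versions per task (the keys "view_id", "versions", and "sessions" match what Prodigy's review workflow expects). A quick sanity check, as a minimal sketch not part of the commit and assuming the script above has been run in the same directory, is to read the file back and compare reference spans against predicted spans:

import json

with open("examples.jsonl") as f:
    tasks = [json.loads(line) for line in f]

# each task carries the gold annotations first, the model predictions second
for task in tasks[:5]:
    ref, pred = task["versions"]
    print(
        f"{task['text'][:40]!r}: {len(ref['spans'])} reference spans, "
        f"{len(pred['spans'])} predicted spans"
    )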
dataset_creation_scripts/split.py
ADDED
@@ -0,0 +1,118 @@
import spacy
from spacy.tokens import Doc, DocBin

train_dane = "/Users/au561649/Github/DaCy/training/main/corpus/dane/train.spacy"
dev_dane = "/Users/au561649/Github/DaCy/training/main/corpus/dane/dev.spacy"
test_dane = "/Users/au561649/Github/DaCy/training/main/corpus/dane/test.spacy"


nlp = spacy.blank("da")
# train, dev, test = dane()
train_docs = list(DocBin().from_disk(train_dane).get_docs(nlp.vocab))
dev_docs = list(DocBin().from_disk(dev_dane).get_docs(nlp.vocab))
test_docs = list(DocBin().from_disk(test_dane).get_docs(nlp.vocab))

# record which split each document came from
Doc.set_extension("split", default=None)

for split, name in zip([train_docs, dev_docs, test_docs], ["train", "dev", "test"]):
    for doc in split:
        doc._.split = name

# text2doc = {}
# n_duplicates = 0  # all look like non-actual duplicates (e.g. "stk. 2")
# for i, doc in enumerate(test_docs + train_docs + dev_docs):
#     if doc.text in text2doc:
#         print(f"Duplicate found: {doc.text}")
#         print("split:", doc._.split)
#         n_duplicates += 1
#     text2doc[doc.text] = doc


# load DaNE+
path_to_data = "/Users/au561649/Github/DaCy/training/dane_plus/train.spacy"
train_data = DocBin().from_disk(path_to_data)
daneplus_docs = list(train_data.get_docs(nlp.vocab))

text2doc = {}
# No duplicates: Prodigy removed them. This would be problematic when
# reconstructing the documents, which is why we re-annotate the DaNE documents.
n_duplicates = 0
for i, doc in enumerate(daneplus_docs):
    if doc.text in text2doc:
        print(f"Duplicate found: {doc.text}")
        n_duplicates += 1
    text2doc[doc.text] = doc

# Add the DaNE+ annotations to the DaNE documents
docs_to_fix = []
for doc in train_docs + dev_docs + test_docs:
    if doc.text in text2doc:
        _ents_to_add = text2doc[doc.text].ents
        ents_to_add = []
        for ent in _ents_to_add:
            char_span = doc.char_span(ent.start_char, ent.end_char, label=ent.label_)
            if char_span is None:
                print(f"Entity could not be added: {ent.text}")
                docs_to_fix.append((doc, ent))
                continue
            ents_to_add.append(char_span)
        doc.ents = ents_to_add  # type: ignore

# Manual fixes (due to differences in tokenization). For each of the six
# mismatches, re-anchor the entity on a token slice near the end of the
# document: the penultimate token for the first case, the two tokens before
# the last one for the remaining five.
token_slices = [(-2, -1)] + [(-3, -1)] * 5
for (doc, ent), (start, end) in zip(docs_to_fix, token_slices):
    ents = list(doc.ents)
    _ent = doc[start:end]
    new_ent = doc.char_span(_ent.start_char, _ent.end_char, label=ent.label_)
    print("added", new_ent, "to", doc.text)
    ents.append(new_ent)
    doc.ents = ents


# Save the new documents
new_train = DocBin(docs=train_docs)
new_dev = DocBin(docs=dev_docs)
new_test = DocBin(docs=test_docs)

new_train.to_disk("train.spacy")
new_dev.to_disk("dev.spacy")
new_test.to_disk("test.spacy")
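To check the result, a minimal sketch (assuming the script above has been run, so the rewritten train.spacy, dev.spacy, and test.spacy sit in the working directory) that counts entity labels per split:

from collections import Counter

import spacy
from spacy.tokens import DocBin

nlp = spacy.blank("da")
for name in ["train", "dev", "test"]:
    docs = DocBin().from_disk(f"{name}.spacy").get_docs(nlp.vocab)
    labels = Counter(ent.label_ for doc in docs for ent in doc.ents)
    print(name, dict(labels))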
dataset_creation_scripts/upload_to_hf.py
ADDED
@@ -0,0 +1,29 @@
import spacy
from datasets import Dataset, DatasetDict
from spacy.tokens import DocBin


def convert_spacy_docs_to_hf_entry(doc) -> dict:
    """Serialize a spaCy Doc to a JSON-compatible dict (text, tokens, ents, ...)."""
    return doc.to_json()


train = "/Users/au561649/Github/DaCy/training/dane_plus/train.spacy"
dev = "/Users/au561649/Github/DaCy/training/dane_plus/dev.spacy"
test = "/Users/au561649/Github/DaCy/training/dane_plus/test.spacy"

nlp = spacy.blank("da")

train_docs = list(DocBin().from_disk(train).get_docs(nlp.vocab))
dev_docs = list(DocBin().from_disk(dev).get_docs(nlp.vocab))
test_docs = list(DocBin().from_disk(test).get_docs(nlp.vocab))


train_dataset = Dataset.from_list([convert_spacy_docs_to_hf_entry(doc) for doc in train_docs])
dev_dataset = Dataset.from_list([convert_spacy_docs_to_hf_entry(doc) for doc in dev_docs])
test_dataset = Dataset.from_list([convert_spacy_docs_to_hf_entry(doc) for doc in test_docs])

dataset_dict = DatasetDict({"train": train_dataset, "dev": dev_dataset, "test": test_dataset})

dataset_dict.push_to_hub("dane_plus")
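After the push, the splits can be loaded back with datasets.load_dataset. A minimal sketch; the repo id is a placeholder, since push_to_hub("dane_plus") publishes under the authenticated account's namespace:

from datasets import load_dataset

# placeholder repo id; substitute the namespace the dataset was pushed to
repo_id = "your-namespace/dane_plus"
ds = load_dataset(repo_id)
print(ds)
# entity spans come from Doc.to_json(): character offsets with labels
print(ds["train"][0]["ents"][:3])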