Limitless063 linxy commited on
Commit
7d54959
·
verified ·
0 Parent(s):

Duplicate from linxy/LaTeX_OCR

Browse files

Co-authored-by: Lin Xueyuan <linxy@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar filter=lfs diff=lfs merge=lfs -text
30
+ *.tflite filter=lfs diff=lfs merge=lfs -text
31
+ *.tgz filter=lfs diff=lfs merge=lfs -text
32
+ *.wasm filter=lfs diff=lfs merge=lfs -text
33
+ *.xz filter=lfs diff=lfs merge=lfs -text
34
+ *.zip filter=lfs diff=lfs merge=lfs -text
35
+ *.zst filter=lfs diff=lfs merge=lfs -text
36
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
37
+ # Audio files - uncompressed
38
+ *.pcm filter=lfs diff=lfs merge=lfs -text
39
+ *.sam filter=lfs diff=lfs merge=lfs -text
40
+ *.raw filter=lfs diff=lfs merge=lfs -text
41
+ # Audio files - compressed
42
+ *.aac filter=lfs diff=lfs merge=lfs -text
43
+ *.flac filter=lfs diff=lfs merge=lfs -text
44
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
45
+ *.ogg filter=lfs diff=lfs merge=lfs -text
46
+ *.wav filter=lfs diff=lfs merge=lfs -text
47
+ # Image files - uncompressed
48
+ *.bmp filter=lfs diff=lfs merge=lfs -text
49
+ *.gif filter=lfs diff=lfs merge=lfs -text
50
+ *.png filter=lfs diff=lfs merge=lfs -text
51
+ *.tiff filter=lfs diff=lfs merge=lfs -text
52
+ # Image files - compressed
53
+ *.jpg filter=lfs diff=lfs merge=lfs -text
54
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
55
+ *.webp filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ size_categories:
4
+ - 100K<n<1M
5
+ task_categories:
6
+ - image-to-text
7
+ dataset_info:
8
+ - config_name: default
9
+ features:
10
+ - name: image
11
+ dtype: image
12
+ - name: text
13
+ dtype: string
14
+ splits:
15
+ - name: train
16
+ num_bytes: 392473380.05
17
+ num_examples: 76318
18
+ download_size: 383401054
19
+ dataset_size: 392473380.05
20
+ - config_name: full
21
+ features:
22
+ - name: image
23
+ dtype: image
24
+ - name: text
25
+ dtype: string
26
+ splits:
27
+ - name: train
28
+ num_bytes: 385291867
29
+ num_examples: 76318
30
+ - name: validation
31
+ num_bytes: 43364061.55
32
+ num_examples: 8475
33
+ - name: test
34
+ num_bytes: 47643036.303
35
+ num_examples: 9443
36
+ download_size: 473618552
37
+ dataset_size: 483485587.878
38
+ - config_name: human_handwrite
39
+ features:
40
+ - name: image
41
+ dtype: image
42
+ - name: text
43
+ dtype: string
44
+ splits:
45
+ - name: train
46
+ num_bytes: 16181778
47
+ num_examples: 1200
48
+ - name: validation
49
+ num_bytes: 962283
50
+ num_examples: 68
51
+ - name: test
52
+ num_bytes: 906906
53
+ num_examples: 70
54
+ download_size: 18056029
55
+ dataset_size: 18050967
56
+ - config_name: human_handwrite_print
57
+ features:
58
+ - name: image
59
+ dtype: image
60
+ - name: text
61
+ dtype: string
62
+ splits:
63
+ - name: train
64
+ num_bytes: 3152122.8
65
+ num_examples: 1200
66
+ - name: validation
67
+ num_bytes: 182615
68
+ num_examples: 68
69
+ - name: test
70
+ num_bytes: 181698
71
+ num_examples: 70
72
+ download_size: 1336052
73
+ dataset_size: 3516435.8
74
+ - config_name: small
75
+ features:
76
+ - name: image
77
+ dtype: image
78
+ - name: text
79
+ dtype: string
80
+ splits:
81
+ - name: train
82
+ num_bytes: 261296
83
+ num_examples: 50
84
+ - name: validation
85
+ num_bytes: 156489
86
+ num_examples: 30
87
+ - name: test
88
+ num_bytes: 156489
89
+ num_examples: 30
90
+ download_size: 588907
91
+ dataset_size: 574274
92
+ - config_name: synthetic_handwrite
93
+ features:
94
+ - name: image
95
+ dtype: image
96
+ - name: text
97
+ dtype: string
98
+ splits:
99
+ - name: train
100
+ num_bytes: 496610333.066
101
+ num_examples: 76266
102
+ - name: validation
103
+ num_bytes: 63147351.515
104
+ num_examples: 9565
105
+ - name: test
106
+ num_bytes: 62893132.805
107
+ num_examples: 9593
108
+ download_size: 616418996
109
+ dataset_size: 622650817.3859999
110
+ configs:
111
+ - config_name: default
112
+ data_files:
113
+ - split: train
114
+ path: full/train-*
115
+ - config_name: full
116
+ data_files:
117
+ - split: train
118
+ path: full/train-*
119
+ - split: validation
120
+ path: full/validation-*
121
+ - split: test
122
+ path: full/test-*
123
+ - config_name: human_handwrite
124
+ data_files:
125
+ - split: train
126
+ path: human_handwrite/train-*
127
+ - split: validation
128
+ path: human_handwrite/validation-*
129
+ - split: test
130
+ path: human_handwrite/test-*
131
+ - config_name: human_handwrite_print
132
+ data_files:
133
+ - split: train
134
+ path: human_handwrite_print/train-*
135
+ - split: validation
136
+ path: human_handwrite_print/validation-*
137
+ - split: test
138
+ path: human_handwrite_print/test-*
139
+ - config_name: small
140
+ data_files:
141
+ - split: train
142
+ path: small/train-*
143
+ - split: validation
144
+ path: small/validation-*
145
+ - split: test
146
+ path: small/test-*
147
+ - config_name: synthetic_handwrite
148
+ data_files:
149
+ - split: train
150
+ path: synthetic_handwrite/train-*
151
+ - split: validation
152
+ path: synthetic_handwrite/validation-*
153
+ - split: test
154
+ path: synthetic_handwrite/test-*
155
+ tags:
156
+ - code
157
+ ---
158
+
159
+ # LaTeX OCR 的数据仓库
160
+
161
+ 本数据仓库是专为 [LaTeX_OCR](https://github.com/LinXueyuanStdio/LaTeX_OCR) 及 [LaTeX_OCR_PRO](https://github.com/LinXueyuanStdio/LaTeX_OCR_PRO) 制作的数据,来源于 `https://zenodo.org/record/56198#.V2p0KTXT6eA` 以及 `https://www.isical.ac.in/~crohme/` 以及我们自己构建。
162
+
163
+ 如果这个数据仓库有帮助到你的话,请点亮 ❤️like ++
164
+
165
+ 后续追加新的数据也会放在这个仓库 ~~
166
+
167
+ > 原始数据仓库在github [LinXueyuanStdio/Data-for-LaTeX_OCR](https://github.com/LinXueyuanStdio/Data-for-LaTeX_OCR).
168
+
169
+ ## 数据集
170
+
171
+ 本仓库有 5 个数据集
172
+
173
+ 1. `small` 是小数据集,样本数 110 条,用于测试
174
+ 2. `full` 是印刷体约 100k 的完整数据集。实际上样本数略小于 100k,因为用 LaTeX 的抽象语法树剔除了很多不能渲染的 LaTeX。
175
+ 3. `synthetic_handwrite` 是手写体 100k 的完整数据集,基于 `full` 的公式,使用手写字体合成而来,可以视为人类在纸上的手写体。样本数实际上略小于 100k,理由同上。
176
+ 4. `human_handwrite` 是手写体较小数据集,更符合人类在电子屏上的手写体。主要来源于 `CROHME`。我们用 LaTeX 的抽象语法树校验过了。
177
+ 5. `human_handwrite_print` 是来自 `human_handwrite` 的印刷体数据集,公式部分和 `human_handwrite` 相同,图片部分由公式用 LaTeX 渲染而来。
178
+
179
+ ## 使用
180
+
181
+ 加载训练集
182
+
183
+ - name 可选 small, full, synthetic_handwrite, human_handwrite, human_handwrite_print
184
+ - split 可选 train, validation, test
185
+
186
+ ```python
187
+ >>> from datasets import load_dataset
188
+ >>> train_dataset = load_dataset("linxy/LaTeX_OCR", name="small", split="train")
189
+ >>> train_dataset[2]["text"]
190
+ \rho _ { L } ( q ) = \sum _ { m = 1 } ^ { L } \ P _ { L } ( m ) \ { \frac { 1 } { q ^ { m - 1 } } } .
191
+ >>> train_dataset[2]
192
+ {'image': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=200x50 at 0x15A5D6CE210>,
193
+ 'text': '\\rho _ { L } ( q ) = \\sum _ { m = 1 } ^ { L } \\ P _ { L } ( m ) \\ { \\frac { 1 } { q ^ { m - 1 } } } .'}
194
+ >>> len(train_dataset)
195
+ 50
196
+ ```
197
+
198
+ 加载所有
199
+ ```python
200
+ >>> from datasets import load_dataset
201
+ >>> dataset = load_dataset("linxy/LaTeX_OCR", name="small")
202
+ >>> dataset
203
+ DatasetDict({
204
+ train: Dataset({
205
+ features: ['image', 'text'],
206
+ num_rows: 50
207
+ })
208
+ validation: Dataset({
209
+ features: ['image', 'text'],
210
+ num_rows: 30
211
+ })
212
+ test: Dataset({
213
+ features: ['image', 'text'],
214
+ num_rows: 30
215
+ })
216
+ })
217
+ ```
218
+
219
+
full/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:908015e4c891792d36ea8070e9e0d90b61b1f89f2b0ddf7a647b236b5b59496a
3
+ size 47575275
full/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93505f9a92a4860ed5ba247882c89f91118093c862c7cb9d46f496b42222f2f8
3
+ size 383401054
full/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cbef2077fe9518a060e308e714d55f31fe8f7c06f8e9bb8282126e2538daf2f8
3
+ size 42633842
human_handwrite/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56461d611a855d48d2e78b5716bdd26ebe150b12df2619701e4db450cf242497
3
+ size 905665
human_handwrite/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f60c687aa83469552784d2f1c404d848a5e18f126468962e24dca84c54c67e2
3
+ size 16189089
human_handwrite/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a658403d893e25eb9834bc499206702c8881a888f0e1edaac413da07ae827336
3
+ size 961275
human_handwrite_print/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8d0a04b441330589a02095b0984d76bd77984646675c8d1a005b14daf84365d0
3
+ size 78567
human_handwrite_print/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed636f707fba2cebc9b21d29bd65538744d329af837caa9922ded9ca7a00306a
3
+ size 1180941
human_handwrite_print/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a8cbe5e5b63d64062a38be0ff91927089fd1038d15b5c95bda019274c8382d9e
3
+ size 76544
small/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b5c3cad9a0130649e12edd383317ef76525b490b3e75ba37f20dbaa38ccfe6b
3
+ size 161866
small/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5ab0d3488cd4e005587e7c684e7b30b3964f00b3df00cd73ed540c38754763f
3
+ size 265175
small/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b5c3cad9a0130649e12edd383317ef76525b490b3e75ba37f20dbaa38ccfe6b
3
+ size 161866
synthetic_handwrite/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83b47dd05a62bb590e70789e901e6d834489469f3b335024b48eaac0fca68483
3
+ size 61759146
synthetic_handwrite/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5ae0ec1de57f498acf6fc0531d324c38b8547e4f761c081f7ce24d2602262ed
3
+ size 492809356
synthetic_handwrite/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee3ecb13595e4c4e6363865289054bccbf59f6c30e78440d86a61ace444247b7
3
+ size 61850494