CUI03 lgienapp commited on
Commit
998461e
·
verified ·
0 Parent(s):

Duplicate from coral-nlp/german-commons

Browse files

Co-authored-by: Lukas Gienapp <[email protected]>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +59 -0
  2. DATASHEET.md +262 -0
  3. README.md +479 -0
  4. bloom_filter.bin +3 -0
  5. subset=Cultural/source=BLBooks/part-0001.parquet +3 -0
  6. subset=Cultural/source=BLBooks/part-0002.parquet +3 -0
  7. subset=Cultural/source=DiBiLit/part-0001.parquet +3 -0
  8. subset=Cultural/source=DiBiPhil/part-0001.parquet +3 -0
  9. subset=Cultural/source=GermanPD/part-0001.parquet +3 -0
  10. subset=Cultural/source=GermanPD/part-0002.parquet +3 -0
  11. subset=Cultural/source=GermanPD/part-0003.parquet +3 -0
  12. subset=Cultural/source=GermanPD/part-0004.parquet +3 -0
  13. subset=Cultural/source=GermanPD/part-0005.parquet +3 -0
  14. subset=Cultural/source=GermanPD/part-0006.parquet +3 -0
  15. subset=Cultural/source=GermanPD/part-0007.parquet +3 -0
  16. subset=Cultural/source=GermanPD/part-0008.parquet +3 -0
  17. subset=Cultural/source=GermanPD/part-0009.parquet +3 -0
  18. subset=Cultural/source=GermanPD/part-0010.parquet +3 -0
  19. subset=Cultural/source=GermanPD/part-0011.parquet +3 -0
  20. subset=Cultural/source=GermanPD/part-0012.parquet +3 -0
  21. subset=Cultural/source=GermanPD/part-0013.parquet +3 -0
  22. subset=Cultural/source=GermanPD/part-0014.parquet +3 -0
  23. subset=Cultural/source=GermanPD/part-0015.parquet +3 -0
  24. subset=Cultural/source=GermanPD/part-0016.parquet +3 -0
  25. subset=Cultural/source=GermanPD/part-0017.parquet +3 -0
  26. subset=Cultural/source=GermanPD/part-0018.parquet +3 -0
  27. subset=Cultural/source=GermanPD/part-0019.parquet +3 -0
  28. subset=Cultural/source=GermanPD/part-0020.parquet +3 -0
  29. subset=Cultural/source=GermanPD/part-0021.parquet +3 -0
  30. subset=Cultural/source=GermanPD/part-0022.parquet +3 -0
  31. subset=Cultural/source=GermanPD/part-0023.parquet +3 -0
  32. subset=Cultural/source=GermanPD/part-0024.parquet +3 -0
  33. subset=Cultural/source=GermanPD/part-0025.parquet +3 -0
  34. subset=Cultural/source=GermanPD/part-0026.parquet +3 -0
  35. subset=Cultural/source=GermanPD/part-0027.parquet +3 -0
  36. subset=Cultural/source=GermanPD/part-0028.parquet +3 -0
  37. subset=Cultural/source=GermanPD/part-0029.parquet +3 -0
  38. subset=Cultural/source=GermanPD/part-0030.parquet +3 -0
  39. subset=Cultural/source=GermanPD/part-0031.parquet +3 -0
  40. subset=Cultural/source=GermanPD/part-0032.parquet +3 -0
  41. subset=Cultural/source=GermanPD/part-0033.parquet +3 -0
  42. subset=Cultural/source=GermanPD/part-0034.parquet +3 -0
  43. subset=Cultural/source=GermanPD/part-0035.parquet +3 -0
  44. subset=Cultural/source=GermanPD/part-0036.parquet +3 -0
  45. subset=Cultural/source=GermanPD/part-0037.parquet +3 -0
  46. subset=Cultural/source=GermanPD/part-0038.parquet +3 -0
  47. subset=Cultural/source=GermanPD/part-0039.parquet +3 -0
  48. subset=Cultural/source=GermanPD/part-0040.parquet +3 -0
  49. subset=Cultural/source=GermanPD/part-0041.parquet +3 -0
  50. subset=Cultural/source=GermanPD/part-0042.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mds filter=lfs diff=lfs merge=lfs -text
13
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
14
+ *.model filter=lfs diff=lfs merge=lfs -text
15
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
16
+ *.npy filter=lfs diff=lfs merge=lfs -text
17
+ *.npz filter=lfs diff=lfs merge=lfs -text
18
+ *.onnx filter=lfs diff=lfs merge=lfs -text
19
+ *.ot filter=lfs diff=lfs merge=lfs -text
20
+ *.parquet filter=lfs diff=lfs merge=lfs -text
21
+ *.pb filter=lfs diff=lfs merge=lfs -text
22
+ *.pickle filter=lfs diff=lfs merge=lfs -text
23
+ *.pkl filter=lfs diff=lfs merge=lfs -text
24
+ *.pt filter=lfs diff=lfs merge=lfs -text
25
+ *.pth filter=lfs diff=lfs merge=lfs -text
26
+ *.rar filter=lfs diff=lfs merge=lfs -text
27
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
28
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
30
+ *.tar filter=lfs diff=lfs merge=lfs -text
31
+ *.tflite filter=lfs diff=lfs merge=lfs -text
32
+ *.tgz filter=lfs diff=lfs merge=lfs -text
33
+ *.wasm filter=lfs diff=lfs merge=lfs -text
34
+ *.xz filter=lfs diff=lfs merge=lfs -text
35
+ *.zip filter=lfs diff=lfs merge=lfs -text
36
+ *.zst filter=lfs diff=lfs merge=lfs -text
37
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
38
+ # Audio files - uncompressed
39
+ *.pcm filter=lfs diff=lfs merge=lfs -text
40
+ *.sam filter=lfs diff=lfs merge=lfs -text
41
+ *.raw filter=lfs diff=lfs merge=lfs -text
42
+ # Audio files - compressed
43
+ *.aac filter=lfs diff=lfs merge=lfs -text
44
+ *.flac filter=lfs diff=lfs merge=lfs -text
45
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
46
+ *.ogg filter=lfs diff=lfs merge=lfs -text
47
+ *.wav filter=lfs diff=lfs merge=lfs -text
48
+ # Image files - uncompressed
49
+ *.bmp filter=lfs diff=lfs merge=lfs -text
50
+ *.gif filter=lfs diff=lfs merge=lfs -text
51
+ *.png filter=lfs diff=lfs merge=lfs -text
52
+ *.tiff filter=lfs diff=lfs merge=lfs -text
53
+ # Image files - compressed
54
+ *.jpg filter=lfs diff=lfs merge=lfs -text
55
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
56
+ *.webp filter=lfs diff=lfs merge=lfs -text
57
+ # Video files - compressed
58
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
59
+ *.webm filter=lfs diff=lfs merge=lfs -text
DATASHEET.md ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Datasheet: German Commons
2
+
3
+ This is a datasheet compliant with the recommendations of [Gebru et al. (2018)](https://arxiv.org/abs/1803.09010v8), describing the properties of the **German Commons** dataset.
4
+
5
+ ## Motivation
6
+
7
+ ### Why was the dataset created?
8
+
9
+ German Commons addresses the critical gap in large-scale open German
10
+ text for language model training. Existing German corpora either lack
11
+ explicit licensing, contain web-scraped content of uncertain provenance,
12
+ or provide insufficient scale.
13
+
14
+ ### Has the dataset been used already?
15
+
16
+ This represents the initial release of German Commons. No external usage
17
+ has occurred prior to publication. Constituent datasets may have been used previously on their own.
18
+
19
+ ### What (other) tasks could the dataset be used for?
20
+
21
+ Beyond language model pretraining, German Commons supports all German
22
+ NLP research requiring clean, license-compliant text, multilingual model
23
+ development, or linguistic analysis of German text across domains. The
24
+ diverse domain coverage (legal, cultural, scientific, etc.) further
25
+ enables domain-specific model development and cross-domain evaluation
26
+ studies.
27
+
28
+ ### Who funded the creation of the dataset?
29
+
30
+ Dataset compilation was supported by German and European research
31
+ grants: German Federal Ministry of Research, Technology, and Space
32
+ (BMFTR) under Grants  `01IS24077A`,  `01IS24077B`, and  `01IS24077D`, by
33
+ the ScaDS.AI Center for Scalable Data Analytics and Artificial
34
+ Intelligence, funded by the BMFTR and by the Sächsische
35
+ Staatsministerium für Wissenschaft, Kultur und Tourismus under Grant
36
+  `ScaDS.AI`, and by the OpenWeb-Search.eu project, funded by the
37
+ European Union under Grant  `GA 101070014`. Constituent datasets
38
+ originate primarily from state-funded institutions across Germany and
39
+ Austria.
40
+
41
+ ## Dataset Composition
42
+
43
+ ### What are the instances?
44
+
45
+ Each instance represents a single German-language document with
46
+ associated metadata and licensing information.
47
+
48
+ ### How many instances are there in total?
49
+
50
+ The dataset contains 35,778,211 documents comprising 154,558,196,961
51
+ GPT-2 tokens.
52
+
53
+ ### What data does each instance consist of?
54
+
55
+ Each instance includes: a unique identifier for source
56
+ cross-referencing, source dataset name, quality-filtered and
57
+ paragraph-deduplicated raw text, canonical SPDX license URL, thematic
58
+ domain key, GPT-2 token count, a perplexity score calculated using a
59
+ KenLM model trained on German Wikipedia text, and an OCR quality score
60
+ calculated using [OCRoscope](https://github.com/Pleias/OCRoscope).
61
+
62
+ ### Is there a label or target associated with each instance?
63
+
64
+ No supervised labels exist. However, each instance contains metadata
65
+ labels for thematic domain classification, licensing information, and
66
+ document length statistics.
67
+
68
+ ### Is any information missing from individual instances?
69
+
70
+ Paragraph-level deduplication may alter texts from their original form.
71
+ Personally identifiable information has been systematically removed.
72
+
73
+ ### Does the dataset contain all possible instances or is it a sample (not necessarily random) of instances from a larger set?
74
+
75
+ The dataset represents a filtered subset of source collections.
76
+ Filtering removes OCR errors, extraction artifacts, and low-quality or
77
+ duplicated content, creating a curated selection.
78
+
79
+ ### Are there recommended data splits?
80
+
81
+ No predefined splits are provided. All data is intended for pretraining.
82
+
83
+ ### Are there any errors, sources of noise, or redundancies in the dataset?
84
+
85
+ Despite quality filtering and deduplication, residual issues may remain:
86
+ cross-corpus text duplicates from overlapping sources, and extraction
87
+ artifacts from OCR and PDF-to-text processing.
88
+
89
+ ### Is the dataset self-contained, or does it link to or otherwise rely on external resources?
90
+
91
+ The dataset is self-contained and centrally downloadable. The source
92
+ dataset references provided enable reproducible reconstruction.
93
+
94
+ ## Collection Process
95
+
96
+ ### What mechanisms or procedures were used to collect the data?
97
+
98
+ Data collection employed multiple automated procedures: direct download
99
+ from institutional repositories and open platforms, programmatic
100
+ crawling via APIs where available, and automated text extraction from
101
+ PDF and other document formats using specialized libraries. Then,
102
+ open-source processing pipelines were applied for quality filtering and
103
+ deduplication of all sources. Validation occurred through manual inspection
104
+ of sample outputs, cross-verification against source repositories, and
105
+ automated consistency checks.
106
+
107
+ ### How was the data associated with each instance acquired?
108
+
109
+ All text data represents directly observable content from original
110
+ sources; no inference or derivation occurred. Metadata (licensing,
111
+ thematic classification, source attribution) was extracted directly from
112
+ source repository information or explicitly provided by institutional
113
+ datasets. Where PDF extraction was required, raw text underwent
114
+ validation against source documents to verify accuracy.
115
+
116
+ ### If the dataset is a sample from a larger set, what was the sampling strategy?
117
+
118
+ Sampling was deterministic based on explicit criteria: German language
119
+ content as per automated classification, explicit open licensing, quality
120
+ thresholds, and institutional source verification. No probabilistic
121
+ sampling occurred; all content meeting inclusion criteria was retained
122
+ after deduplication.
123
+
124
+ ### Who was involved in the data collection process and how were they compensated?
125
+
126
+ Data collection was conducted by the author team using automated
127
+ systems. No crowdworkers, contractors, or external annotators were
128
+ employed. All processing occurred through programmatic methods without
129
+ manual content creation or labeling requiring compensation.
130
+
131
+ ### Over what timeframe was the data collected? Does this timeframe match the creation timeframe of the data associated with the instances?
132
+
133
+ Collection occurred between January and August 2025, using source
134
+ dataset versions available through August 31st, 2025. The underlying
135
+ content creation spans multiple centuries, representing a temporal range
136
+ that significantly predates and extends beyond the collection timeframe.
137
+
138
+ ## Data Preprocessing
139
+
140
+ ### Was any preprocessing/cleaning/labeling of the data done?
141
+
142
+ Comprehensive preprocessing included: text extraction from PDFs and OCR
143
+ sources with encoding normalization, language detection and filtering
144
+ for German content, quality filtering targeting digitization
145
+ artifacts and extraction errors, paragraph-level deduplication using
146
+ content hashing, systematic PII removal, and format standardization across
147
+ all source types. Thematic domain classification was applied based on
148
+ source dataset.
149
+
150
+ ### Was the raw data saved in addition to the preprocessed/cleaned/labeled data?
151
+
152
+ Raw data is not provided since all constituent source datasets remain
153
+ publicly accessible through their original repositories.
154
+
155
+ ### Is the software used to preprocess/clean/label the instances available?
156
+
157
+ All preprocessing software is open source and available at
158
+ <https://github.com/coral-nlp/llmdata>, ensuring complete
159
+ reproducibility of the dataset.
160
+
161
+ ### Does this dataset collection/processing procedure achieve the motivation for creating the dataset stated in the first section of this datasheet?
162
+
163
+ Yes. The procedure successfully addresses the identified gap by:
164
+ providing the largest collection to-date of openly licensed German text,
165
+ enabling open German language model development without licensing
166
+ uncertainties, and establishing reproducible methodology for future
167
+ dataset construction. This directly fulfills the stated motivation of
168
+ creating license-compliant, large-scale German training data.
169
+
170
+ ### How will the dataset be distributed?
171
+
172
+ The dataset is distributed as Parquet files through multiple public
173
+ repositories for redundancy. Primary distribution occurs via Hugging
174
+ Face Hub at <https://huggingface.co/datasets/coral-nlp/german-commons>.
175
+
176
+ ### When will the dataset be released/first distributed? What license (if any) is it distributed under?
177
+
178
+ Public release occurred on 2025/10/14. Dataset metadata and compilation
179
+ are licensed under ODC-BY 1.0 (<https://opendatacommons.org/licenses/by/1-0/>). Individual document texts retain
180
+ their original licenses as specified in each instance's SPDX URL field,
181
+ creating a heterogeneous but fully documented licensing structure.
182
+
183
+ ### Are there any copyrights on the data?
184
+
185
+ Yes. Each document retains copyright under its original creator or
186
+ institutional provider, governed by the specific license indicated in
187
+ the instance metadata. The compilation itself does not claim additional
188
+ copyright over constituent texts.
189
+
190
+ ### Are there any fees or access/export restrictions?
191
+
192
+ The dataset is freely accessible without fees or registration
193
+ requirements. However, users must comply with individual document
194
+ licenses, which may include attribution requirements or share-alike
195
+ provisions. Commercial use is permitted by all constituent licenses.
196
+
197
+ ## Dataset Maintenance
198
+
199
+ ### Who is supporting/hosting/maintaining the dataset?
200
+
201
+ The dataset is maintained by the authors of this report.
202
+
203
+ ### Will the dataset be updated? If so, how often and by whom?
204
+
205
+ Updates may occur when significant new German open-source collections
206
+ become available. The original authors will coordinate updates, with
207
+ community contributions welcomed through the open-source pipeline.
208
+
209
+ ### How will updates be communicated?
210
+
211
+ Updates will be announced through: versioned releases on hosting
212
+ platforms with detailed changelogs, academic publication updates when
213
+ substantial changes occur.
214
+
215
+ ### If the dataset becomes obsolete how will this be communicated?
216
+
217
+ Obsolescence will be communicated through deprecation notices on all
218
+ hosting platforms.
219
+
220
+ ### Is there a repository to link to any/all papers/systems that use this dataset?
221
+
222
+ No centralized usage repository will be maintained. Usage tracking
223
+ occurs through standard academic citation of the dataset paper. Users
224
+ are encouraged to cite the dataset publication when reporting results or
225
+ building derivative works.
226
+
227
+ ### If others want to extend/augment/build on this dataset, is there a mechanism for them to do so?
228
+
229
+ The open-source `llmdata` pipeline enables community extensions through
230
+ standardized data ingestion protocols for new sources and automated
231
+ quality assessment and deduplication using established filtering
232
+ criteria. Community contributions undergo review by the maintenance
233
+ team.
234
+
235
+ ## Ethical Considerations
236
+
237
+ ### Were any ethical review processes conducted?
238
+
239
+ No formal institutional review board process was conducted. The dataset
240
+ relies exclusively on pre-existing, publicly available, and explicitly
241
+ licensed materials from established institutional sources. Data
242
+ processing incorporated ethical considerations including systematic PII
243
+ removal and exclusion of sources lacking clear licensing frameworks.
244
+
245
+ ### Does the dataset contain data that might be considered confidential?
246
+
247
+ No. All included content derives from explicitly open-licensed
248
+ institutional sources.
249
+
250
+ ### Does the dataset contain data that, if viewed directly, might be offensive, insulting, threatening, or might otherwise cause anxiety?
251
+
252
+ Potentially yes. The dataset spans centuries of German text documents,
253
+ which may include historical perspectives, political viewpoints, or
254
+ language that could be considered offensive by contemporary standards.
255
+ The scale and temporal range make comprehensive content moderation
256
+ infeasible. Users should exercise appropriate caution.
257
+
258
+ ### Does the dataset relate to people?
259
+
260
+ The dataset may contain publicly available information relating to
261
+ individuals in various contexts including historical documents,
262
+ biographical information, academic citations, and government records.
README.md ADDED
@@ -0,0 +1,479 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language_creators:
5
+ - found
6
+ language:
7
+ - de
8
+ license:
9
+ - odc-by
10
+ multilinguality:
11
+ - monolingual
12
+ size_categories:
13
+ - 100B<n
14
+ source_datasets: []
15
+ task_categories:
16
+ - text-generation
17
+ task_ids:
18
+ - language-modeling
19
+ pretty_name: German Commons
20
+ tags:
21
+ - german
22
+ - commons
23
+ - legal
24
+ - scientific
25
+ - cultural
26
+ - political
27
+ - web
28
+ - news
29
+ - economic
30
+ configs:
31
+ - config_name: default
32
+ data_files: subset=*/source=*/*.parquet
33
+ default: true
34
+ features:
35
+ - name: id
36
+ dtype: string
37
+ - name: source
38
+ dtype: string
39
+ - name: subset
40
+ dtype:
41
+ class_label:
42
+ names:
43
+ - Cultural
44
+ - Economic
45
+ - Legal
46
+ - News
47
+ - Political
48
+ - Scientific
49
+ - Web
50
+ - name: text
51
+ dtype: string
52
+ - name: license
53
+ dtype:
54
+ sequence: string
55
+ - name: num_tokens
56
+ dtype: int64
57
+ - name: perplexity
58
+ dtype: float64
59
+ - name: ocr_score
60
+ dtype: int64
61
+ splits:
62
+ - name: train
63
+ num_examples: 36773579
64
+ - config_name: cultural
65
+ data_files:
66
+ - split: blbooks
67
+ path: subset=Cultural/source=BLBooks/*.parquet
68
+ - split: dibilit
69
+ path: subset=Cultural/source=DiBiLit/*.parquet
70
+ - split: dibiphil
71
+ path: subset=Cultural/source=DiBiPhil/*.parquet
72
+ - split: germanpd
73
+ path: subset=Cultural/source=GermanPD/*.parquet
74
+ - split: mosel
75
+ path: subset=Cultural/source=MOSEL/*.parquet
76
+ - split: sbbfulltexts
77
+ path: subset=Cultural/source=SBB Fulltexts/*.parquet
78
+ - split: wikisource
79
+ path: subset=Cultural/source=Wikisource/*.parquet
80
+ - split: wikivoyage
81
+ path: subset=Cultural/source=Wikivoyage/*.parquet
82
+ - split: wikiquote
83
+ path: subset=Cultural/source=Wikiquote/*.parquet
84
+ features:
85
+ - name: id
86
+ dtype: string
87
+ - name: source
88
+ dtype: string
89
+ - name: subset
90
+ dtype:
91
+ class_label:
92
+ names:
93
+ - Cultural
94
+ - Economic
95
+ - Legal
96
+ - News
97
+ - Political
98
+ - Scientific
99
+ - Web
100
+ - name: text
101
+ dtype: string
102
+ - name: license
103
+ dtype:
104
+ sequence: string
105
+ - name: num_tokens
106
+ dtype: int64
107
+ - name: perplexity
108
+ dtype: float64
109
+ - name: ocr_score
110
+ dtype: int64
111
+ - config_name: economic
112
+ data_files:
113
+ - split: tedeutenders
114
+ path: subset=Economic/source=TEDEUTenders/*.parquet
115
+ features:
116
+ - name: id
117
+ dtype: string
118
+ - name: source
119
+ dtype: string
120
+ - name: subset
121
+ dtype:
122
+ class_label:
123
+ names:
124
+ - Cultural
125
+ - Economic
126
+ - Legal
127
+ - News
128
+ - Political
129
+ - Scientific
130
+ - Web
131
+ - name: text
132
+ dtype: string
133
+ - name: license
134
+ dtype:
135
+ sequence: string
136
+ - name: num_tokens
137
+ dtype: int64
138
+ - name: perplexity
139
+ dtype: float64
140
+ - name: ocr_score
141
+ dtype: int64
142
+ - config_name: legal
143
+ data_files:
144
+ - split: bverfgaes
145
+ path: subset=Legal/source=Amtliche Entscheidungssammlung des Bundesverfassungsgerichts/*.parquet
146
+ - split: bundesrecht
147
+ path: subset=Legal/source=Deutsches Bundesrecht/*.parquet
148
+ - split: bag
149
+ path: subset=Legal/source=Entscheidungen des Bundesarbeitsgerichts/*.parquet
150
+ - split: bfh
151
+ path: subset=Legal/source=Entscheidungen des Bundesfinanzhofs/*.parquet
152
+ - split: bgh
153
+ path: subset=Legal/source=Entscheidungen des Bundesgerichtshofs/*.parquet
154
+ - split: bgh20
155
+ path: subset=Legal/source=Entscheidungen des Bundesgerichtshofs in Strafsachen
156
+ aus dem 20. Jahrhundert/*.parquet
157
+ - split: bpatg
158
+ path: subset=Legal/source=Entscheidungen des Bundespatentgerichts/*.parquet
159
+ - split: bverfg
160
+ path: subset=Legal/source=Entscheidungen des Bundesverfassungsgerichts/*.parquet
161
+ - split: bverwg
162
+ path: subset=Legal/source=Entscheidungen des Bundesverwaltungsgerichts/*.parquet
163
+ - split: eurlex
164
+ path: subset=Legal/source=EurLEX/*.parquet
165
+ - split: openlegaldata
166
+ path: subset=Legal/source=Open Legal Data/*.parquet
167
+ features:
168
+ - name: id
169
+ dtype: string
170
+ - name: source
171
+ dtype: string
172
+ - name: subset
173
+ dtype:
174
+ class_label:
175
+ names:
176
+ - Cultural
177
+ - Economic
178
+ - Legal
179
+ - News
180
+ - Political
181
+ - Scientific
182
+ - Web
183
+ - name: text
184
+ dtype: string
185
+ - name: license
186
+ dtype:
187
+ sequence: string
188
+ - name: num_tokens
189
+ dtype: int64
190
+ - name: perplexity
191
+ dtype: float64
192
+ - name: ocr_score
193
+ dtype: int64
194
+ - config_name: news
195
+ data_files:
196
+ - split: anno
197
+ path: news_anno_*.parquet
198
+ - split: zeitungsportal
199
+ path: news_deutsches_zeitungsportal_*.parquet
200
+ - split: europeana
201
+ path: news_europeana_newspapers_*.parquet
202
+ - split: wikinews
203
+ path: news_wikinews_*.parquet
204
+ features:
205
+ - name: id
206
+ dtype: string
207
+ - name: source
208
+ dtype: string
209
+ - name: subset
210
+ dtype:
211
+ class_label:
212
+ names:
213
+ - Cultural
214
+ - Economic
215
+ - Legal
216
+ - News
217
+ - Political
218
+ - Scientific
219
+ - Web
220
+ - name: text
221
+ dtype: string
222
+ - name: license
223
+ dtype:
224
+ sequence: string
225
+ - name: num_tokens
226
+ dtype: int64
227
+ - name: perplexity
228
+ dtype: float64
229
+ - name: ocr_score
230
+ dtype: int64
231
+ - config_name: political
232
+ data_files:
233
+ - split: btdrucksachen
234
+ path: subset=Political/source=Drucksachen des Bundestages/*.parquet
235
+ - split: eurovoc
236
+ path: subset=Political/source=EuroVoc/*.parquet
237
+ - split: germanpoliticalspeeches
238
+ path: subset=Political/source=German Political Speeches/*.parquet
239
+ - split: btplenarprotokolle
240
+ path: subset=Political/source=Plenarprotokolle des Bundestages/*.parquet
241
+ - split: reichtagsprotokolle
242
+ path: subset=Political/source=Reichtagsprotokolle/*.parquet
243
+ features:
244
+ - name: id
245
+ dtype: string
246
+ - name: source
247
+ dtype: string
248
+ - name: subset
249
+ dtype:
250
+ class_label:
251
+ names:
252
+ - Cultural
253
+ - Economic
254
+ - Legal
255
+ - News
256
+ - Political
257
+ - Scientific
258
+ - Web
259
+ - name: text
260
+ dtype: string
261
+ - name: license
262
+ dtype:
263
+ sequence: string
264
+ - name: num_tokens
265
+ dtype: int64
266
+ - name: perplexity
267
+ dtype: float64
268
+ - name: ocr_score
269
+ dtype: int64
270
+ - config_name: scientific
271
+ data_files:
272
+ - split: arxiv
273
+ path: subset=Scientific/source=arXiv/*.parquet
274
+ - split: doab
275
+ path: subset=Scientific/source=Directory of Open Access Books/*.parquet
276
+ - split: polyjournal
277
+ path: subset=Scientific/source=Polytechnisches Journal/*.parquet
278
+ - split: wikibooks
279
+ path: subset=Scientific/source=Wikibooks/*.parquet
280
+ - split: openalex
281
+ path: subset=Scientific/source=OpenAlex/*.parquet
282
+ - split: wikiversity
283
+ path: subset=Scientific/source=Wikiversity/*.parquet
284
+ features:
285
+ - name: id
286
+ dtype: string
287
+ - name: source
288
+ dtype: string
289
+ - name: subset
290
+ dtype:
291
+ class_label:
292
+ names:
293
+ - Cultural
294
+ - Economic
295
+ - Legal
296
+ - News
297
+ - Political
298
+ - Scientific
299
+ - Web
300
+ - name: text
301
+ dtype: string
302
+ - name: license
303
+ dtype:
304
+ sequence: string
305
+ - name: num_tokens
306
+ dtype: int64
307
+ - name: perplexity
308
+ dtype: float64
309
+ - name: ocr_score
310
+ dtype: int64
311
+ - config_name: web
312
+ data_files:
313
+ - split: onemillionposts
314
+ path: subset=Web/source=One Million Posts/*.parquet
315
+ - split: thestack
316
+ path: subset=Web/source=The Stack/*.parquet
317
+ - split: wikidiscussions
318
+ path: subset=Web/source=Wiki Discussions/*.parquet
319
+ - split: wikipedia
320
+ path: subset=Web/source=Wikipedia/*.parquet
321
+ - split: youtubecommons
322
+ path: subset=Web/source=Youtube Commons/*.parquet
323
+ features:
324
+ - name: id
325
+ dtype: string
326
+ - name: source
327
+ dtype: string
328
+ - name: subset
329
+ dtype:
330
+ class_label:
331
+ names:
332
+ - Cultural
333
+ - Economic
334
+ - Legal
335
+ - News
336
+ - Political
337
+ - Scientific
338
+ - Web
339
+ - name: text
340
+ dtype: string
341
+ - name: license
342
+ dtype:
343
+ sequence: string
344
+ - name: num_tokens
345
+ dtype: int64
346
+ - name: perplexity
347
+ dtype: float64
348
+ - name: ocr_score
349
+ dtype: int64
350
+ ---
351
+
352
+ # German Commons - 154 Billion Tokens of Openly Licensed Text for German Language Models
353
+
354
+ A comprehensive collection of German-language text data under open licenses for training German language models.
355
+
356
+ - **Datasheet**: [DATASHEET.md](DATASHEET.md).
357
+ - **Paper**: [arxiv.org/abs/2510.13996](https://arxiv.org/abs/2510.13996)
358
+ - **Code**: [github.com/coral-nlp/llmdata](https://github.com/coral-nlp/llmdata)
359
+ - **Bloom Filter** (DOLMA-compatible): [bloom_filter.bin](bloom_filter.bin)
360
+
361
+ ## Dataset Description
362
+
363
+ This dataset is aggregated from **41 diverse sources** and contains **154.56 billion tokens** of German text data with **35.78 million documents** spanning **7 thematic domains**:
364
+
365
+ - 🌐 **Web Commons**: 19.89B tokens sourced from Wiki projects, online discussions, code repositories, social media posts, YouTube transcripts
366
+ - 💬 **Political Commons**: 3.57B tokens sourced from parliamentary documents, speeches, protocols, political vocabulary
367
+ - ⚖️ **Legal Commons**: 2.99B tokens sourced from court decisions, federal law, legal databases, EU legal documents
368
+ - 📰 **News Commons**: 72.67B tokens sourced from historical and current newspaper archives
369
+ - 🏦 **Economics Commons**: 0.11B tokens sourced from EU public tenders
370
+ - 📚 **Cultural Commons**: 54.49B tokens sourced from cultural heritage collections
371
+ - 🔬 **Scientific Commons**: 0.84B tokens sourced from scholarly papers, books, and technical journals
372
+
373
+ ## Dataset Features
374
+
375
+ Each record contains the following fields:
376
+
377
+ - **id**: Unique identifier string, as per each document's source dataset
378
+ - **source**: Source dataset name
379
+ - **subset**: Thematic subset (Cultural, Legal, Political, Scientific, News, Web, Economic)
380
+ - **text**: Main text content; deduplicated, quality filtered, with consistent formatting and encoding. Can be split at newlines to obtain paragraph text.
381
+ - **license**: List of applicable licenses for each document, given as canonical SPDX license URL.
382
+ - **num_tokens**: GPT-2 token count
383
+ - **perplexity**: Text perplexity measured with a KenLM model trained on German Wikipedia text
384
+ - **ocr_score**: OCR quality score measured using [OCRoscope](https://github.com/Pleias/OCRoscope)
385
+
386
+ ## Dataset Usage
387
+
388
+ - Load the entire dataset
389
+
390
+ ```python
391
+ from datasets import load_dataset
392
+
393
+ ds = load_dataset("coral-nlp/german-commons")
394
+ ```
395
+
396
+ - Load a thematic subset
397
+
398
+ ```python
399
+ ds = load_dataset("coral-nlp/german-commons", "cultural")
400
+ ```
401
+
402
+ - Load individual source datasets
403
+
404
+ ```python
405
+ wikipedia = load_dataset("coral-nlp/german-commons", "web", split="wikipedia")
406
+ ```
407
+
408
+ Supported splits and constituent datasets are:
409
+
410
+ | Subset | Split Key | Dataset Name | Docs | Tokens | License | Text Type | Source |
411
+ |:-------------|:----------------------------|:------------------------------------------------------|----------:|---------------:|:-------------|:---------------------------|:----------------------------------------------------------------- |
412
+ | `web` | `wikipedia` | Wikipedia | 2,930,224 | 2,948,751,608 | CC-BY-SA-4.0 | Various | [🔗](https://zenodo.org/records/14748605) |
413
+ | `web` | `wikidiscussions` | Wikipedia Discussions | 8,349,076 | 1,218,210,917 | CC-BY-SA-4.0 | Online Discussions | [🔗](https://corpora.ids-mannheim.de/pub/wikipedia-deutsch/2024/) |
414
+ | `web` | `youtubecommons` | YouTube Commons | 2,809,714 | 14,478,850,964 | Various | Video Subtitles | [🔗](https://huggingface.co/datasets/PleIAs/YouTube-Commons) |
415
+ | `web` | `onemillionposts` | One Million Posts Corpus | 946,082 | 94,872,633 | CC-BY-4.0 | Online Discussions | [🔗](https://ofai.github.io/million-post-corpus/) |
416
+ | `web` | `thestack` | The Stack (Markdown and TXT Subsets) | 421,466 | 1,105,173,228 | Various | Various | [🔗](https://huggingface.co/datasets/bigcode/the-stack-dedup) |
417
+ | `political` | `reichtagsprotokolle` | Reichtagsprotokolle | 522 | 703,495,637 | CC-BY-SA-4.0 | Parliamentary Protocols | [🔗](https://zenodo.org/records/10225467) |
418
+ | `political` | `germanpoliticalspeeches` | German Political Speeches | 6,678 | 29,409,655 | CC-BY-4.0 | Speech Transcripts | [🔗](https://zenodo.org/records/3611246) |
419
+ | `political` | `btdrucksachen` | Corpus der Drucksachen des Deutschen Bundestages | 3,017 | 528,769,669 | CC0-1.0 | Parliamentary Publications | [🔗](https://zenodo.org/records/4643066) |
420
+ | `political` | `btplenarprotokolle` | Corpus der Plenarprotokolle des Deutschen Bundestages | 1,833 | 316,034,708 | CC0-1.0 | Parliamentary Protocols | [🔗](https://zenodo.org/records/4542662) |
421
+ | `political` | `eurovoc` | EuroVoc | 245,838 | 1,988,111,462 | EUPL | Parliamentary Publications | [🔗](https://huggingface.co/datasets/EuropeanParliament/Eurovoc) |
422
+ | `legal` | `bundesrecht` | Corpus des Deutschen Bundesrechts | 3,217 | 1,004,294 | CC0-1.0 | German Federal Laws | [🔗](https://zenodo.org/records/14592346) |
423
+ | `legal` | `openlegaldata` | OpenLegalData | 249,909 | 1,915,956,613 | CC0-1.0 | Court Decisions | [🔗](https://huggingface.co/datasets/schneiderai/openlegaldata) |
424
+ | `legal` | `bfh` | Corpus der Entscheidungen des BFH | 10,885 | 67,791,931 | CC0-1.0 | Court Decisions | [🔗](https://zenodo.org/records/14622341) |
425
+ | `legal` | `bgh20` | Entscheidungen des BGH in Strafsachen des 20. Jhd. | 36,062 | 92,873,390 | CC0-1.0 | Court Decisions | [🔗](https://zenodo.org/records/4540377) |
426
+ | `legal` | `bgh` | Corpus der Entscheidungen des BGH | 77,258 | 292,832,709 | CC0-1.0 | Court Decisions | [🔗](https://zenodo.org/records/12814022) |
427
+ | `legal` | `bverfg` | Corpus der Entscheidungen des BVerfG | 8,028 | 39,503,223 | CC0-1.0 | Court Decisions | [🔗](https://zenodo.org/records/12705674) |
428
+ | `legal` | `bpatg` | Corpus der Entscheidungen des BpatG | 30,705 | 185,099,188 | CC0-1.0 | Court Decisions | [🔗](https://zenodo.org/records/10849977) |
429
+ | `legal` | `bverwg` | Corpus der Entscheidungen des BVerwG | 27,185 | 123,487,739 | CC0-1.0 | Court Decisions | [🔗](https://zenodo.org/records/10809039) |
430
+ | `legal` | `bverfgaes` | Corpus der amtl. Entscheidungssammlung des BVerfG | 919 | 24,427,294 | CC0-1.0 | Court Decisions | [🔗](https://zenodo.org/records/10783177) |
431
+ | `legal` | `bag` | Corpus der Entscheidungen des BAG | 5,624 | 48,248,111 | CC0-1.0 | Court Decisions | [🔗](https://zenodo.org/records/4006645) |
432
+ | `legal` | `eurlex` | EurLEX | 64,934 | 201,263,562 | CC-BY-4.0 | European Union Laws | [🔗](https://zenodo.org/record/5363165/) |
433
+ | `news` | `zeitungsportal` | Deutsches Zeitungsportal | 8,076,164 | 43,871,094,547 | CC0-1.0 | News Articles | [🔗](https://www.deutsche-digitale-bibliothek.de/newspaper) |
434
+ | `news` | `europeana` | Europeana Newspapers | 3,256,341 | 20,684,418,365 | CC0-1.0 | News Articles | [🔗](https://huggingface.co/datasets/biglam/europeana_newspapers) |
435
+ | `news` | `anno` | ANNO | 1,910,281 | 8,103,825,248 | CC0-1.0 | News Articles | [🔗](https://labs.onb.ac.at/en/datasets/anno/) |
436
+ | `news` | `wikinews` | WikiNews | 23,266 | 14,222,520 | CC-BY-4.0 | News Articles | [🔗](https://de.wikinews.org) |
437
+ | `economic` | `tedeutenders` | TEDEUTenders | 57,214 | 110,611,112 | CC0-1.0 | Procurement Notices | [🔗](https://huggingface.co/datasets/PleIAs/TEDEUTenders) |
438
+ | `cultural` | `dibilit` | DiBiLit-Korpus | 2,062 | 216,391,448 | CC-BY-SA-4.0 | Literature | [🔗](https://zenodo.org/records/5786725) |
439
+ | `cultural` | `dibiphil` | DiBiPhil-Korpus | 269 | 32,151,997 | CC-BY-SA-4.0 | Literature | [🔗](https://github.com/deutschestextarchiv/DiBiPhil) |
440
+ | `cultural` | `wikisource` | Wikisource | 240,689 | 347,770,430 | CC-BY-SA-4.0 | Various | [🔗](https://dumps.wikimedia.org/dewikisource/20250801/) |
441
+ | `cultural` | `wikivoyage` | Wikivoyage | 20,370 | 42,025,478 | CC-BY-SA-4.0 | Travel | [🔗](https://zenodo.org/records/14748553) |
442
+ | `cultural` | `germanpd` | German-PD | 123,592 | 49,333,198,231 | CC0-1.0 | Literature | [🔗](https://huggingface.co/datasets/PleIAs/German-PD) |
443
+ | `cultural` | `blbooks` | BLBooks | 3,714 | 1,012,047,216 | CC0-1.0 | Literature | [🔗](https://huggingface.co/datasets/biglam/blbooks-parquet) |
444
+ | `cultural` | `mosel` | MOSEL | 3,127,203 | 3,181,917,752 | CC-BY-4.0 | Speech Transcripts | [🔗](https://huggingface.co/datasets/FBK-MT/mosel) |
445
+ | `cultural` | `sbbfulltexts` | SBB Fulltexts | 2,605,569 | 358,514,283 | CC-BY-4.0 | Literature | [🔗](https://zenodo.org/records/7716098) |
446
+ | `cultural` | `wikiquote` | Wikiquote | 8,612 | 6,688,458 | CC-BY-4.0 | Quotes & Proverbs | [🔗](https://de.wikiquote.org) |
447
+ | `scientific` | `wikibooks` | Wikibooks | 346 | 180,257,799 | CC-BY-SA-4.0 | Educational Books | [🔗](https://zenodo.org/records/14748586) |
448
+ | `scientific` | `polyjournal` | Digitalisierung des Polytechnischen Journals | 27,292 | 50,434,996 | CC-BY-SA-4.0 | Scholarly Papers | [🔗](https://github.com/deutschestextarchiv/dingler) |
449
+ | `scientific` | `doab` | Directory of Open Access Books | 1,939 | 166,920,321 | Various | Scholarly Books | [🔗](https://www.doabooks.org) |
450
+ | `scientific` | `arxiv` | arXiv | 8 | 103,478 | Various | Scholarly Papers | [🔗](https://www.kaggle.com/datasets/Cornell-University/arxiv) |
451
+ | `scientific` | `openalex`                  | OpenAlex                                               | 47,733    | 413,632,648    | Various      | Scholarly Papers           | [🔗](https://openalex.org)                                         |
452
+ | `scientific` | `wikiversity`               | Wikiversity                                            | 16,371    | 27,802,099     | CC-BY-SA-4.0 | Educational Content        | [🔗](https://de.wikiversity.org)                                   |
453
+
454
+ ## Citation
455
+
456
+ If you use this dataset, please cite the corresponding paper:
457
+
458
+ ```bibtex
459
+ @article{gienapp:2025d,
460
+ title = {{The German Commons -- 154 Billion Tokens of Openly Licensed Text for German Language Models}},
461
+ author = {Lukas Gienapp and
462
+ Christopher Schr\"oder and
463
+ Stefan Schweter and
464
+ Christopher Akiki and
465
+ Ferdinand Schlatt and
466
+ Arden Zimmermann and
467
+ Phillipe Gen\^et and
468
+ Martin Potthast},
469
+ year = 2025,
470
+ month = oct,
471
+ journal = {CoRR},
472
+ volume = {abs/2510.13996},
473
+ url = {https://arxiv.org/abs/2510.13996}
474
+ }
475
+ ```
476
+
477
+ ## License
478
+
479
+ This dataset aggregation and metadata is released under ODC-BY license. Individual documents have their own specific licenses - please check the `license` field for each record.
bloom_filter.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:57c1b680ffef03de26ec1c2727bad141e24ad5d2904795cb1beae330831537cb
3
+ size 4294968084
subset=Cultural/source=BLBooks/part-0001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:23031935983f04f48bd66e6537357311141f99bf1ca711ed9025f8da1acd2bb6
3
+ size 1354078008
subset=Cultural/source=BLBooks/part-0002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b427832d12504843698333889d4851b6cb5293e4addaf1c08f92757884187cd1
3
+ size 308444014
subset=Cultural/source=DiBiLit/part-0001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:85ba3cb6d2b526c1e1c5d6b59a6e7c9a75be3039ab26e65b1c3d224fb9f6c32f
3
+ size 342802369
subset=Cultural/source=DiBiPhil/part-0001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c32e2a51ec0721b2aac00f869101dae43666d32af268594fb03139ba71413033
3
+ size 47453198
subset=Cultural/source=GermanPD/part-0001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:193e7695fb8076af9ec29778f4a4c6d662abe9aaa5476608571d425b4b783f70
3
+ size 676504492
subset=Cultural/source=GermanPD/part-0002.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d1d01d126c2d3910761f8d49a0e3a8a4c6b89f186e78b5d887b4eba536e8e97
3
+ size 1443734821
subset=Cultural/source=GermanPD/part-0003.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7359a6e07e11cb0eb0e5c054143dc70fa23f3ce4d05e25e309045c2c4e37134b
3
+ size 1439412386
subset=Cultural/source=GermanPD/part-0004.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a10407853185fe74f0a1e26717d05c1f66b9542c37e1aea40adf7b920819f71e
3
+ size 1443477926
subset=Cultural/source=GermanPD/part-0005.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1c12f66f221e01b9582e6aee32e8a54d9976ee4013c1ff6ba907ef29b52b385
3
+ size 1438692783
subset=Cultural/source=GermanPD/part-0006.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d4bb67e8d04da35a266940612c4050bf76f2847ea099325fbd6beccda9dd49c6
3
+ size 1431459189
subset=Cultural/source=GermanPD/part-0007.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1270af8a68f44c5f8c61f618b7bf52cb2df86c44a47d2b30ad18978aed4adf9
3
+ size 1434871205
subset=Cultural/source=GermanPD/part-0008.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87cdc2d5a4c9bdbde704040c1530462490fd67cebeb24659767c283e9808c152
3
+ size 1418445890
subset=Cultural/source=GermanPD/part-0009.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69c927dd9e166280c86e16aa5b56289743a84795360c22afdd48800079b7cf82
3
+ size 1436818312
subset=Cultural/source=GermanPD/part-0010.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:41cd63f6c21bd79fdf569e515d708ce02e5b30c6891512346ea03b0e4859b835
3
+ size 1455947561
subset=Cultural/source=GermanPD/part-0011.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1ce8552504260a393fba2e95ec11af17e694dc7ddcdd4cbf10bc45723bae0ff
3
+ size 1454247208
subset=Cultural/source=GermanPD/part-0012.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a855d7b938afc0595422bcd78c003a27831ffab1ed4cad4ed5c7912dc6cf73dc
3
+ size 1438373053
subset=Cultural/source=GermanPD/part-0013.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d9614923eb917dfec8108fea2cf8d3600c2c4418b377aa977f056d21b354690d
3
+ size 1439958630
subset=Cultural/source=GermanPD/part-0014.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8257a9eb49cd640b3a8980c83caa79c43a3ccaf835a41d1be182b4ac6a573878
3
+ size 1452069614
subset=Cultural/source=GermanPD/part-0015.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:043585507fd18e08c07f8759d133d5ee19c541dbe7eff4de1fffc0dce6264e42
3
+ size 1446016474
subset=Cultural/source=GermanPD/part-0016.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2438ac580fcfe5e60fe4bf9a8e0a4e30fec9b1b485a4684504196361716b02e7
3
+ size 1432128126
subset=Cultural/source=GermanPD/part-0017.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d5acd8226e5a1c637d6d78bd38636683b135fe45b0996326fa22c410b67f780
3
+ size 1426189384
subset=Cultural/source=GermanPD/part-0018.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:112709779b82c2ec0342019f2edbb642fccfb1ff24e9c2db8c059e88b0e5e785
3
+ size 1440983089
subset=Cultural/source=GermanPD/part-0019.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af7f77e2399d49692bfeee2a776b7884be9ee867dca15b8f8131b2f5d7024ecb
3
+ size 933083353
subset=Cultural/source=GermanPD/part-0020.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:686240f2053cf799a5659d19037ed65f8dc96ec2805445d7769d2e5ba60085c1
3
+ size 1438203248
subset=Cultural/source=GermanPD/part-0021.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7216e6b659100be253e376714683acdd4ef17a49e354e802ea3f2d9264c10926
3
+ size 1455350364
subset=Cultural/source=GermanPD/part-0022.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aefb01e9deb5df7c902a81146c9afe79619a493aa41910dddd5006dc07472b97
3
+ size 1458100415
subset=Cultural/source=GermanPD/part-0023.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c33100130b1eb952d2424bbab511a97729f54da80e49329931a1345112cbe31c
3
+ size 1446591112
subset=Cultural/source=GermanPD/part-0024.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8f8d056cce322f53aa475317dd97edac784e506eba6b9fcab0d403d54ecb193
3
+ size 1429167089
subset=Cultural/source=GermanPD/part-0025.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ae24efdb65c05e38302852ecf247881a995caa5409de0dec1a677497a3ed9467
3
+ size 1428744082
subset=Cultural/source=GermanPD/part-0026.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:00c32c1113b3164301112e776b75c08f84f54dc6d2185ce00ae0bfd5a2499086
3
+ size 1447087708
subset=Cultural/source=GermanPD/part-0027.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:674fcfd67947ed8c812e80b5f20f65fa541fb8b2952190d7b2505796e0fb182c
3
+ size 1433796706
subset=Cultural/source=GermanPD/part-0028.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16846c5948689bc5bfc091e33aec1c7582b88dc8a88af400adc2143f43195787
3
+ size 1443763802
subset=Cultural/source=GermanPD/part-0029.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:053bcb1f87f727651aaf3f1c4cde8632270585ad722cdace0b73bd6858153847
3
+ size 1442798042
subset=Cultural/source=GermanPD/part-0030.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d60b49e6cc47d708425f0017ae59a648588c5e531b3524dda41ad246f1fb2699
3
+ size 1430323209
subset=Cultural/source=GermanPD/part-0031.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:814690adf5f2f6d15707c72330dd1cd9e9738802e23f7e5a97d844488859f767
3
+ size 1450730905
subset=Cultural/source=GermanPD/part-0032.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b967ac7820aed56c01ecdddbd63a7dce36091ab20e51dbc144dc8ed0881db50
3
+ size 1456149536
subset=Cultural/source=GermanPD/part-0033.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3cb7d35a4f6e67cfe54f3a5764742d99e5a10d4ac1c377cd54490bbb8643b0d
3
+ size 1437512772
subset=Cultural/source=GermanPD/part-0034.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05cec5af5e7745943425e924d3731b45ecfe00f97e38c32a3acdbf657bea4b4b
3
+ size 1441439359
subset=Cultural/source=GermanPD/part-0035.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d3bed83330cc1a0eef822a5f647f9da556bae58f66509bc35f1737a9d69b0417
3
+ size 1449255467
subset=Cultural/source=GermanPD/part-0036.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:34748d66f09435ce0554d7f97ebbe533a96bb9bb2e3617f39b48a5d82eedfc25
3
+ size 1448007954
subset=Cultural/source=GermanPD/part-0037.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22d8dd522ce49049fc33c19052ca79309b8aaad229b863fc68cc48c78c7a75b8
3
+ size 1439027562
subset=Cultural/source=GermanPD/part-0038.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9beda007237a1b46b38cc46e9ed47fb0d3afe6dda89c6bed9202aa4dc341fc44
3
+ size 1444127810
subset=Cultural/source=GermanPD/part-0039.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:39283c70e69b4d169549fbee97cb261493497579c0bd70644384dad3f65c350b
3
+ size 1440732996
subset=Cultural/source=GermanPD/part-0040.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea4dc06ed7dc66db95f8208f7fe4b7d902a9f2fdce684afaa481a3dde7bed4d2
3
+ size 1445673466
subset=Cultural/source=GermanPD/part-0041.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2726201365021930c57074993b06bd17e9d19f805ab1f66faee1745b70bb76e1
3
+ size 1438043290
subset=Cultural/source=GermanPD/part-0042.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:982a26f03b3391343bf579b5ac734dd78d5348940ef1398875e905c971c01bc1
3
+ size 1450981707