Range reference handling, filling the range

pmc_open_access_xml.py (+73 -33)
@@ -84,10 +84,10 @@ def clean_raw(xml_text):
     """
     #Some XML can't be parsed because they are not starting with the DOCTYPE declaration
     # Could be disabled if we handle the parsing error (TBD, how many files would be trashed)
-
+
     begin_doc = begin_doc_rgx.search(xml_text)
    xml_text = xml_text[begin_doc.start():]
-
+
     #Some XML are poisoned with consecutive tabs and new lines
     # xml_text = re.sub('\s+',' ',xml_text) # Commented because <code> requires those spacing
     return xml_text
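Both changed lines in this hunk are whitespace-only; the context code is the interesting part. For orientation, the sketch below shows what the trimming step does. begin_doc_rgx is defined elsewhere in the script, so the pattern here is an assumption for illustration, not the script's actual regex.

import re

# Assumed stand-in for the script's begin_doc_rgx (illustrative only)
begin_doc_rgx = re.compile(r"<\?xml|<!DOCTYPE")

def clean_raw(xml_text):
    # Drop any leading junk before the XML declaration / DOCTYPE
    begin_doc = begin_doc_rgx.search(xml_text)
    return xml_text[begin_doc.start():]

print(clean_raw("garbage bytes ...<?xml version='1.0'?><article/>"))
# <?xml version='1.0'?><article/>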
@@ -116,7 +116,7 @@ def get_ref_indexes(ref_el_l, refs_pmid, refs_nonpmid_keys):
         reference_d[k] = (v, " ##REF## ", "pmid_ref")
     for i, k in enumerate(refs_nonpmid_keys):
         reference_d[k] = (i, " ##UREF## ", "unknown_pub_ref")
-
+
     refs_key_l = []
     for el in ref_el_l:
         keyword, ref_name = TAG_DIC[el.tag]
@@ -137,20 +137,64 @@ def parseout_el_refs(el, rids):
     Extract from the text all the references found in the rids dictionary,
     and replace them by keywords of the corresponding family (e.g. " ##FIG## " for a figure,
     " ##TAB## " for a table, or " ##MATHS## " for mathematical formulas)
-
+
+    Range references (e.g. 1-3 or 15-17) are replaced by the filled-in range (1,2,3 or 15,16,17)
+
     Returns the parsed text, the identifiers for the references and the references text that
     were replaced by the keywords. (e.g. "Figure 2" was a hypertext reference and got replaced by " ##FIG## ")
     """
+    for xref in el.xpath(".//xref"):
+        inner_text = "".join(xref.itertext())
+        if inner_text == "": # Removing "empty" references
+            tail = xref.tail if xref.tail else ""
+            prev_el = xref.getprevious()
+            parent = xref.getparent()
+            if prev_el is None:
+                parent.text = "".join([(parent.text if parent.text else ""), tail])
+            else:
+                prev_el.tail = "".join([(prev_el.tail if prev_el.tail else ""), tail])
+            parent.remove(xref)
+
     res_rid = defaultdict(list)
     res_reftext = defaultdict(list)
+    ref_rstart, ref_rstop = None, None
+    has_ref_range = None
     for xref in el.xpath(".//xref[not(ancestor::xref)]"): #Ignore innermost of nested references
+        inner_text = "".join(xref.itertext())
+        parent = xref.getparent()
         rid = xref.get("rid")
         if rid in rids.keys():
             ref_idx, ref_kword, ref_class = rids[rid]
             res_rid[ref_class].append(ref_idx)
-            res_reftext[ref_class].append(
-                "".join(xref.itertext()))
+            res_reftext[ref_class].append(inner_text)
+
             tail = xref.tail if xref.tail else ""
+            #### START HANDLING REF RANGE ########
+            try:
+                if has_ref_range is None:
+                    if ref_kword in [" ##UREF## ", " ##REF## "]: # Otherwise it's a year
+                        has_ref_range = res_reftext[ref_class][-1].isnumeric() and int(res_reftext[ref_class][-1]) < 500
+
+                if has_ref_range and ref_kword in [" ##UREF## ", " ##REF## "]:
+                    if tail == "-":
+                        ref_rstart = int(res_reftext[ref_class][-1])
+                        tail = ", "
+                    elif ref_rstart is not None:
+                        ref_rstop = int(res_reftext[ref_class][-1])
+                        ref_kword = [ref_kword]
+                        for i in range(ref_rstart+1, ref_rstop):
+                            new_rid = re.sub(str(ref_rstop), str(i), rid, count=1)
+                            ref_idx_, ref_kword_, ref_class_ = rids[new_rid]
+                            res_rid[ref_class_].insert(-1, ref_idx_)
+                            res_reftext[ref_class_].insert(-1, str(i))
+                            ref_kword.insert(-1, ref_kword_)
+                        ref_kword = ", ".join(ref_kword)
+                    ref_rstart = None
+            except (KeyError, ValueError):
+                ref_rstart = None
+                continue # The substitution failed, happens when the text doesn't match the rid
+            #### END HANDLING REF RANGE ########
+
             prev_el = xref.getprevious()
             if prev_el is None:
                 parent.text = "".join([(parent.text if parent.text else ""), ref_kword, tail])
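The new block above turns a hyphenated citation span such as "1-3" into the explicit list "1, 2, 3": an xref whose tail is "-" opens a range, the next xref closes it, and the rids of the intermediate references are derived by substituting the closing number inside the closing rid (B3 becomes B2). The has_ref_range probe (numeric text below 500) avoids treating years in author-year citations as citation numbers. Below is a minimal, runnable sketch of just the range-filling core; the rids mapping and the <p> snippet are invented for illustration, and the keyword substitution done by the real function is left out.

import re
from collections import defaultdict
from lxml import etree

# Hypothetical rids mapping, shaped like get_ref_indexes() output:
# rid -> (reference index, keyword, reference class)
rids = {
    "B1": (0, " ##REF## ", "pmid_ref"),
    "B2": (1, " ##REF## ", "pmid_ref"),
    "B3": (2, " ##REF## ", "pmid_ref"),
}

# Models the JATS markup behind "1-3": the first xref's tail is the "-" separator
el = etree.fromstring('<p>As reported<xref rid="B1">1</xref>-<xref rid="B3">3</xref>.</p>')

res_rid = defaultdict(list)
res_reftext = defaultdict(list)
ref_rstart = None

for xref in el.xpath(".//xref"):
    rid = xref.get("rid")
    ref_idx, ref_kword, ref_class = rids[rid]
    res_rid[ref_class].append(ref_idx)
    res_reftext[ref_class].append("".join(xref.itertext()))
    tail = xref.tail if xref.tail else ""
    if tail == "-":                      # "1-" opens a range
        ref_rstart = int(res_reftext[ref_class][-1])
    elif ref_rstart is not None:         # "3" closes it: fill in "2"
        ref_rstop = int(res_reftext[ref_class][-1])
        for i in range(ref_rstart + 1, ref_rstop):
            # Derive the intermediate rid ("B2") from the closing one ("B3")
            new_rid = re.sub(str(ref_rstop), str(i), rid, count=1)
            ref_idx_, _, ref_class_ = rids[new_rid]
            res_rid[ref_class_].insert(-1, ref_idx_)     # keep citation order
            res_reftext[ref_class_].insert(-1, str(i))
        ref_rstart = None

print(res_rid["pmid_ref"])      # [0, 1, 2]
print(res_reftext["pmid_ref"])  # ['1', '2', '3']

Note the committed code degrades gracefully: if a derived rid does not exist because the xref text does not match the rid numbering, the KeyError is caught and the range is simply left unfilled.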
@@ -162,12 +206,12 @@ def parseout_el_refs(el, rids):
     #Removing the xml namespaces (otherwise they would be everywhere)
     tag_start = text.find(">")+1
     tag_txt = text[:tag_start]
-
+
     for k, v in el.nsmap.items():
         tag_txt = tag_txt.replace(f' xmlns:{k}="{v}"', "", 1)
 
     text = "".join([tag_txt, text[tag_start:]])
-
+
     return text, res_rid, res_reftext
 
 
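A toy reproduction of the namespace scrub shown in this hunk's context lines; the input markup is invented for illustration, the scrub itself mirrors the code above.

from lxml import etree

el = etree.fromstring('<p xmlns:x="http://example.org">text</p>')
text = etree.tostring(el, encoding="unicode")  # '<p xmlns:x="http://example.org">text</p>'

tag_start = text.find(">") + 1
tag_txt = text[:tag_start]
for k, v in el.nsmap.items():
    # Strip each declared namespace from the serialized opening tag
    tag_txt = tag_txt.replace(f' xmlns:{k}="{v}"', "", 1)

print("".join([tag_txt, text[tag_start:]]))  # '<p>text</p>'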
@@ -207,7 +251,6 @@ def get_references(article_tree):
         citation_d[el.tag].append(el.text)
         references_nonpmid.append(dict(citation_d))
         references_nonpmid_keys.append(ref_key)
-
     return references_pmid, references_nonpmid, references_nonpmid_keys
 
 def construct_datadict(article_tree):
@@ -220,26 +263,25 @@ def construct_datadict(article_tree):
     - Titles are used to identify ["introduction", "methods", "results" and "discussion"]
     - The paths are then used to group paragraphs and titles into the corresponding content.
     - Remaining p and title elements are put in three other sections: front, body, back
-
+
     Returns:
     - content_d: Dictionary with the content result
     - reference_d: The references of each kind (figure, table, ...) for each content type (intro, figure caption, ...)
     - reference_text_d: The text replaced by the reference keywords, with keys matching reference_d.
     - reference_count: The count of unique external-document references.
-
+
     Useful information about the tags can be found here: https://jats.nlm.nih.gov/archiving/tag-library/1.3/
     """
-
     res_content_d, res_reference_d, res_reference_text_d = {}, defaultdict(dict), defaultdict(dict)
-
+
     refs_pmid, refs_nonpmid, refs_nonpmid_keys = get_references(article_tree)
     reference_count = len(refs_pmid)+len(refs_nonpmid)
-
+
     res_content_d["unknown_pub"] = json.dumps(refs_nonpmid)
     refs_el = article_tree.find(".//ref-list")
     if refs_el is not None:
         refs_el.getparent().remove(refs_el)
-
+
     # Extracts the glossary if it exists, and removes it from the tree
     glossary = {}
     def search_def(el):
@@ -251,7 +293,7 @@ def construct_datadict(article_tree):
             definition = item.find(".//def")
             definition = "".join(definition.itertext()) if definition is not None else ""
             glossary[k] = definition
-
+
     for el in article_tree.findall(".//glossary"):
         search_def(el)
         el.getparent().remove(el)
@@ -259,7 +301,7 @@ def construct_datadict(article_tree):
         search_def(el) #There may still be more def-lists outside of a glossary
         el.getparent().remove(el)
     res_content_d["glossary"] = glossary
-
+
     # After testing, no questions were found in the dataset, so I commented out that part
     # question_l = []
     # for el in article_tree.xpath(".//question-preamble|.//question|.//answer|.//explanation"):
@@ -268,7 +310,7 @@ def construct_datadict(article_tree):
     # res_content_d["question"] = "\n".join(question_l)
     # for el in article_tree.xpath(".//question-wrap-group|.//question-wrap|.//answer-set|.//explanation"):
     #     el.getparent().remove(el)
-
+
     # One big query is faster than multiple small ones
     ref_el_l = article_tree.xpath(".//fig|.//table-wrap|.//array|.//supplementary-material\
                                   |.//inline-supplementary-material|.//disp-formula\
@@ -292,7 +334,7 @@ def construct_datadict(article_tree):
         repl_xref.tail = el.tail
         el.addprevious(repl_xref)
         el.getparent().remove(el)
-
+
     # Finally, the discovered references and text are added to the result
     for ref_k in REFS_KEYS[2:]: #Slicing from 2, to not add pmid and unknown ref here
         res_content_d[ref_k[:-4]] = text_l_d[ref_k] #"\n".join(text_l_d[ref_k])
@@ -312,7 +354,7 @@ def construct_datadict(article_tree):
             res_reference_d[part][ref_k] = list(chain(*tmp_l))
             tmp_l = [refs_d[ref_k] for refs_d in ref_texts_l]
             res_reference_text_d[part][ref_k] = list(chain(*tmp_l))
-
+
     path_l, text_l, refs_l, refs_text_l = [], [], [], []
     t_paths, t_texts_lowcase = [], []
     for part in ["front", "body", "back"]: #Iterate parts and insert first front and back
@@ -374,7 +416,7 @@ def construct_datadict(article_tree):
 
     res_reference_d = dict(res_reference_d)
     res_reference_text_d = dict(res_reference_text_d)
-
+
     return (res_content_d, res_reference_d, res_reference_text_d, reference_count)
 
 class OpenAccessXMLConfig(datasets.BuilderConfig):
@@ -408,7 +450,7 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
                 {
                     "accession_id": datasets.Value("string"),
                     "pmid": datasets.Value("string"),
-
+
                     "introduction": datasets.features.Sequence(datasets.Value("string")),
                     "methods": datasets.features.Sequence(datasets.Value("string")),
                     "results": datasets.features.Sequence(datasets.Value("string")),
@@ -430,7 +472,7 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
                     "footnote": datasets.features.Sequence(datasets.Value("string")),
                     "graphic": datasets.features.Sequence(datasets.Value("string")),
                     "media": datasets.features.Sequence(datasets.Value("string")),
-
+
                     "unknown_pub": datasets.Value("string"),
                     # "question": datasets.Value("string"),
                     "glossary": datasets.features.Sequence(
@@ -475,9 +517,9 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
             "incremental_file_lists": [],
             "incremental_archives": []
         }
-
+
         baseline_package_list = dl_manager.download(f"{_URL_ROOT}oa_file_list.csv")
-
+
         baseline_file_lists = []
         baseline_archives = []
         for subset in self.config.subsets:
@@ -494,12 +536,12 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
                 baseline_archive = dl_manager.download(baseline_archive_url)
             except FileNotFoundError: # non-commercial PMC000xxxxxx baseline does not exist
                 continue
-
+
             baseline_file_lists.append(baseline_file_list)
             baseline_archives.append(baseline_archive)
 
             baseline_file_list_url = f"{url}{basename}{baseline}.filelist.csv"
-
+
             # Incremental commented because some articles are already in the main parts (updates?)
             # Need to find a way to add them to the dataset without duplicating the articles.
             # Also adding them would mean that each new day the dataset is loaded, the whole dataset is recreated.
@@ -547,7 +589,7 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
             incrementals = incrementals.join(oa_package_list).reset_index().set_index("Article File")
             incrementals.File = incrementals.File.fillna('')
             incrementals = incrementals.to_dict(orient="index")
-
+
             for path, file in incremental_archive:
                 data = incrementals.pop(path)
                 pmcid = data["AccessionID"]
@@ -563,7 +605,7 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
                     article_tree = etree.ElementTree(etree.fromstring(text))
                 except etree.XMLSyntaxError: #In some files, xml is broken
                     continue
-
+
                 content_d, reference_d, reference_text_d, n_ref = construct_datadict(article_tree)
                 glossary = np.array([[k,v] for k,v in content_d["glossary"].items()])
                 data = {
@@ -611,7 +653,7 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
             baselines = baselines.join(oa_package_list).reset_index().set_index("Article File")
             baselines.File = baselines.File.fillna('')
             baselines = baselines.to_dict(orient="index")
-
+
             for path, file in baseline_archive:
                 data = baselines.pop(path)
                 pmcid = data["AccessionID"]
@@ -627,7 +669,7 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
                     article_tree = etree.ElementTree(etree.fromstring(text))
                 except etree.XMLSyntaxError: #In some files, xml is broken
                     continue
-
+
                 content_d, reference_d, reference_text_d, n_ref = construct_datadict(article_tree)
                 glossary = np.array([[k,v] for k,v in content_d["glossary"].items()])
                 data = {
@@ -669,5 +711,3 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
 
         #except FileNotFoundError: # non-commercial PMC000xxxxxx baseline does not exist
         #    continue
-
-