Big change, reference idx and text inside text
Browse files
pmc_open_access_xml.py  CHANGED  (+48 −104)
@@ -71,15 +71,15 @@ _SUBSETS = {
 }
 _BASELINE_DATE = "2022-09-03"
 
-REFS_KEYS = ["pmid_ref", "unknown_pub_ref", "figure_ref", "table_ref", "formula_ref", "box_ref", "code_ref",
+REFS_KEYS = ["pmid_ref", "unknown_pub_ref", "figure_ref", "table_ref", "formula_ref", "box_ref", "code_ref",
              "quote_ref", "chem_ref", "supplementary_ref", "footnote_ref", "graphic_ref", "media_ref"]
 CONTENT_KEYS = ["introduction", "methods", "results", "discussion", "conclusion",
-                "front", "body", "back", "figure", "table", "formula", "box",
+                "front", "body", "back", "figure", "table", "formula", "box",
                 "code", "quote", "chem", "supplementary", "footnote"]
 begin_doc_rgx = re.compile("""<!DOCTYPE.*""")
 def clean_raw(xml_text):
     """
-    Fixes the formatting of the XML files and returns it.
+    Fixes the formatting of the XML files and returns it.
     Some files have bad formatting, but they can be fixed/improved
     """
     #Some XML can't be parsed because they are not starting with the DOCTYPE declaration
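For context, here is a minimal sketch of the kind of repair `begin_doc_rgx` supports, trimming leading junk so the parser starts at the DOCTYPE declaration. This is my illustration, not the script's actual `clean_raw` body, which this diff doesn't show:

```python
import re

begin_doc_rgx = re.compile("""<!DOCTYPE.*""")

def trim_to_doctype(xml_text):
    # Drop anything before the DOCTYPE declaration so lxml can parse the file.
    # Illustrative only; the real clean_raw may do more than this.
    m = begin_doc_rgx.search(xml_text)
    if m is not None and m.start() > 0:
        xml_text = xml_text[m.start():]
    return xml_text
```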
@@ -93,16 +93,16 @@ def clean_raw(xml_text):
     return xml_text
 
 # Tag name to "reference type" linking
-TAG_DIC = {"fig":("
-           "array":("
-           "graphic":("
-           "media":("
-           "disp-formula":("
-           "table-wrap-foot":("
-           "code":("
-           "disp-quote":("
-           "supplementary-material":("
-           "inline-supplementary-material":("
+TAG_DIC = {"fig":("FIG","figure_ref"), "table-wrap":("TAB","table_ref"),
+           "array":("TAB","table_ref"), "boxed-text":("BOX","box_ref"),
+           "graphic":("GRAPH","graphic_ref"), "inline-graphic":("GRAPH","graphic_ref"),
+           "media":("MEDIA","media_ref"), "inline-media":("MEDIA","media_ref"),
+           "disp-formula":("FORMU","formula_ref"), "inline-formula":("FORMU","formula_ref"),
+           "table-wrap-foot":("FOOTN","footnote_ref"), "fn-group":("FOOTN","footnote_ref"),
+           "code":("CODE","code_ref"), "chem-struct-wrap":("CHEM","chem_ref"),
+           "disp-quote":("QUOTE","quote_ref"), "speech":("QUOTE","quote_ref"),
+           "supplementary-material":("SUPPL","supplementary_ref"),
+           "inline-supplementary-material":("SUPPL","supplementary_ref")}
 
 def get_ref_indexes(ref_el_l, refs_pmid, refs_nonpmid_keys):
     """
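To make the new keyword scheme concrete, a toy example (values invented; only the marker grammar comes from this commit) of how a `TAG_DIC` entry and the in-text `##...##` markers fit together:

```python
# Toy subset of the mapping above: JATS tag -> (keyword, reference class)
TAG_DIC = {"fig": ("FIG", "figure_ref"), "table-wrap": ("TAB", "table_ref")}

kword, ref_class = TAG_DIC["fig"]      # ("FIG", "figure_ref")
ref_idx, inner_text = 4, "Doe 2022"    # position in the figure list, visible link text
marker = f"##{kword}##{ref_idx}##{inner_text}##"
print(marker)                          # ##FIG##4##Doe 2022##
```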
@@ -113,9 +113,9 @@ def get_ref_indexes(ref_el_l, refs_pmid, refs_nonpmid_keys):
     count_ref_d = defaultdict(lambda:0)
     reference_d = {}
     for k, v in refs_pmid.items():
-        reference_d[k] = (v, "
+        reference_d[k] = (v, "REF", "pmid_ref")
     for i, k in enumerate(refs_nonpmid_keys):
-        reference_d[k] = (i, "
+        reference_d[k] = (i, "UREF", "unknown_pub_ref")
 
     refs_key_l = []
     for el in ref_el_l:
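The resulting mapping is what the rest of the script consumes as `rids`. An illustrative shape (keys and values are hypothetical, the triple layout follows the assignments above):

```python
# Hypothetical entries: rid attribute -> (ref_idx, ref_kword, ref_class)
rids = {
    "B1":   ("12345", "REF",  "pmid_ref"),         # bibliography entry with a PMID
    "B2":   (0,       "UREF", "unknown_pub_ref"),  # non-PMID entry, positional index
    "fig1": (0,       "FIG",  "figure_ref"),       # in-document object, class from TAG_DIC
}
ref_idx, ref_kword, ref_class = rids["fig1"]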
@@ -133,15 +133,14 @@ def parseout_el_refs(el, rids):
     Removes xml namespaces from the text for storage savings, such as:
      - xmlns:xlink="http://www.w3.org/1999/xlink"
      - xmlns:mml="http://www.w3.org/1998/Math/MathML"
-
+
     Then extracts from the text all the references found in the rids dictionary,
-    and replace them by keywords of the corresponding family (eg "
-    "
+    and replaces them by keywords of the corresponding family (eg "##FIG##4##Doe 2022##" for a figure,
+    "##TAB##0##Table 1##" for a table, or "##MATHS##1##(2)##" for mathematical formulas)
 
     Range references (e.g. 1-3 or 15-17) are replaced by their expansion (1,2,3 or 15,16,17)
 
-    Returns the parsed text
-    were replaced by the keywords. (eg, "Figure 2" was a hypertext reference and got replaced by " ##FIG## ")
+    Returns the parsed text
     """
     for xref in el.xpath(".//xref"):
         inner_text = "".join(xref.itertext())
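Going the other way is straightforward for consumers of the dataset. A sketch of mine (the regex and sample text are assumptions; only the marker grammar comes from the docstring above):

```python
import re

# Recover (keyword, index, link text) triples from a stored paragraph.
marker_rgx = re.compile(r"##([A-Z]+)##(\d+)##(.*?)##")

paragraph = "As shown in ##FIG##4##Doe 2022##, accuracy improves (##TAB##0##Table 1##)."
for kword, idx, text in marker_rgx.findall(paragraph):
    print(kword, int(idx), repr(text))
# FIG 4 'Doe 2022'
# TAB 0 'Table 1'
```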
@@ -172,22 +171,22 @@ def parseout_el_refs(el, rids):
             #### START HANDLING REF RANGE ########
             try:
                 if has_ref_range is None:
-                    if ref_kword in ["
+                    if ref_kword in ["UREF", "REF"]: # Otherwise it's a year
                         has_ref_range = res_reftext[ref_class][-1].isnumeric() and int(res_reftext[ref_class][-1]) < 500
 
-                if has_ref_range and ref_kword in ["
+                if has_ref_range and ref_kword in ["UREF", "REF"]:
                     if tail=="-":
                         ref_rstart = int(res_reftext[ref_class][-1])
                         tail = ", "
                     elif ref_rstart is not None:
                         ref_rstop = int(res_reftext[ref_class][-1])
-                        new_ref_kwords = [ref_kword]
+                        new_ref_kwords = [f"##{ref_kword}##{ref_idx}##{inner_text}##"]
                         for i in range(ref_rstart+1, ref_rstop):
                             new_rid = re.sub(str(ref_rstop), str(i), rid, count=1)
                             ref_idx_, ref_kword_, ref_class_ = rids[new_rid]
                             res_rid[ref_class_].insert(-1, ref_idx_)
                             res_reftext[ref_class_].insert(-1, str(i))
-                            new_ref_kwords.insert(-1, ref_kword_)
+                            new_ref_kwords.insert(-1, f"##{ref_kword_}##{ref_idx_}##{str(i)}##")
                         ref_kword = ", ".join(new_ref_kwords)
                         ref_rstart = None
             except (KeyError, ValueError):
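Two details of this hunk are worth restating in isolation: the `< 500` guard distinguishes citation labels from years, and a dash between two such labels is expanded into the full run. A standalone simplification of mine, not the code path above:

```python
def looks_like_citation_range(a, b):
    # Mirrors the guard above: only numeric labels below 500 are treated as
    # expandable references; larger numbers such as "1998" are assumed years.
    return a.isnumeric() and b.isnumeric() and int(a) < 500 and int(b) < 500

def expand_range(start, stop):
    return list(range(start, stop + 1))

print(looks_like_citation_range("15", "17"))      # True
print(looks_like_citation_range("1998", "2005"))  # False
print(expand_range(15, 17))                       # [15, 16, 17]
```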
@@ -197,9 +196,9 @@ def parseout_el_refs(el, rids):
 
         prev_el = xref.getprevious()
         if prev_el is None:
-            parent.text = "".join([(parent.text if parent.text else ""), ref_kword, tail])
+            parent.text = "".join([(parent.text if parent.text else ""), f"##{ref_kword}##{ref_idx}##{inner_text}##", tail])
         else:
-            prev_el.tail = "".join([(prev_el.tail if prev_el.tail else ""), ref_kword, tail])
+            prev_el.tail = "".join([(prev_el.tail if prev_el.tail else ""), f"##{ref_kword}##{ref_idx}##{inner_text}##", tail])
         parent.remove(xref)
 
         text = etree.tostring(el, with_tail=False, encoding='unicode', method='xml')
@@ -212,7 +211,7 @@ def parseout_el_refs(el, rids):
 
     text = "".join([tag_txt, text[tag_start:]])
 
-    return text
+    return text
 
 
 def get_references(article_tree):
@@ -226,7 +225,7 @@ def get_references(article_tree):
     references_nonpmid = []
     references_nonpmid_keys = []
     refs = article_tree.find(".//ref-list")
-    if refs is None: #Some don't have any references
+    if refs is None: #Some don't have any references
         return {}, [], []
     refs = refs.findall("ref")
     for i, ref in enumerate(refs):
@@ -236,10 +235,10 @@ def get_references(article_tree):
                 pmid = int(pubid.text)
                 break
         if pmid is not None and pmid<100000000:
-            #In an article (oa_comm:PMC2679651), broken PMIDs were found (>10e9).
+            #In an article (oa_comm:PMC2679651), broken PMIDs were found (>10e9).
             #There may be several of those. Not sure what to do with them, nor what threshold to use;
             #keeping them would result in losing info about the reference (article title, authors, ...)
-
+
             #Only the PMID is kept, as it links to the documents in the pubmed abstracts dataset.
             references_pmid[ref.attrib["id"]] = str(pmid)
         else:
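A plain-Python restatement of that guard (the threshold is copied from the condition above; the sample values are invented):

```python
def keep_pmid(pmid):
    # Keep only plausible PMIDs; anything >= 1e8 is treated as broken.
    return pmid is not None and pmid < 100000000

print(keep_pmid(2679651))      # True
print(keep_pmid(12345678901))  # False -> falls through to the non-PMID branch
```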
@@ -272,7 +271,7 @@ def construct_datadict(article_tree):
 
     Useful information about the tags can be found here: https://jats.nlm.nih.gov/archiving/tag-library/1.3/
     """
-    res_content_d
+    res_content_d = {}
 
     refs_pmid, refs_nonpmid, refs_nonpmid_keys = get_references(article_tree)
     reference_count = len(refs_pmid)+len(refs_nonpmid)
@@ -305,7 +304,7 @@ def construct_datadict(article_tree):
     # After testing, no questions were found in the dataset, so I commented that part out
     # question_l = []
     # for el in article_tree.xpath(".//question-preamble|.//question|.//answer|.//explanation"):
-    #     text
+    #     text = parseout_el_refs(el, {})
     #     question_l.append(text)
     # res_content_d["question"] = "\n".join(question_l)
     # for el in article_tree.xpath(".//question-wrap-group|.//question-wrap|.//answer-set|.//explanation"):
@@ -319,16 +318,14 @@ def construct_datadict(article_tree):
                                 |.//table-wrap-foot|.//fn-group|.//chem-struct-wrap\
                                 |.//code|.//disp-quote|.//speech")
     rids, key_l = get_ref_indexes(ref_el_l, refs_pmid, refs_nonpmid_keys)
-    text_l_d
+    text_l_d = defaultdict(list)
     for el, key in zip(ref_el_l[::-1], key_l[::-1]):
         #The iteration is done backward to always process the innermost references first;
         # this makes the processing agnostic to structural differences between articles
-        new_text
+        new_text = parseout_el_refs(el, rids)
 
         ref_class = rids[key][2]
         text_l_d[ref_class].insert(0, new_text)
-        refs_l_d[ref_class].insert(0, new_xref_id)
-        refs_text_l_d[ref_class].insert(0, new_xref_text)
 
         repl_xref = etree.Element("xref", attrib={"rid":key})
         repl_xref.tail = el.tail
@@ -338,52 +335,30 @@ def construct_datadict(article_tree):
     # Finally, the discovered references and text are added to the result
     for ref_k in REFS_KEYS[2:]: #Slicing from 2, to not add pmid and unknown refs here
         res_content_d[ref_k[:-4]] = text_l_d[ref_k]#"\n".join(text_l_d[ref_k])
-
-
-        for ref_k2 in REFS_KEYS:
-            tmp_l = [refs_d[ref_k2] for refs_d in refs_l_d[ref_k]]
-            res_reference_d[ref_k[:-4]][ref_k2] = list(chain(*tmp_l)) # [:-4] slicing to remove the "_ref" part
-            tmp_l = [refs_d[ref_k2] for refs_d in refs_text_l_d[ref_k]]
-            res_reference_text_d[ref_k[:-4]][ref_k2] = list(chain(*tmp_l))
-
-    def add_part_to_dics(part, text_l, refs_l, ref_texts_l):
-        """Repeated code to add various parts to the document"""
-        res_content_d[part] = text_l #"\n".join(text_l)
-        for ref_k in REFS_KEYS:
-            tmp_l = [refs_d[ref_k] for refs_d in refs_l]
-            res_reference_d[part][ref_k] = list(chain(*tmp_l))
-            tmp_l = [refs_d[ref_k] for refs_d in ref_texts_l]
-            res_reference_text_d[part][ref_k] = list(chain(*tmp_l))
-
-    path_l, text_l, refs_l, refs_text_l = [], [], [], []
+
+    path_l, text_l = [], []
     t_paths, t_texts_lowcase = [], []
     for part in ["front", "body", "back"]: #Iterate over the parts and insert front and back first
-        tmp_path_l, tmp_text_l
+        tmp_path_l, tmp_text_l = [], []
         tmp_t_paths, tmp_t_texts_lowcase = [], []
        part_el = article_tree.find(".//"+part)
         if part_el is None:
-            res_content_d[part] = []
-            for target_key in REFS_KEYS:
-                res_reference_d[part][target_key] = []
-                res_reference_text_d[part][target_key] = []
+            res_content_d[part] = []
             continue
         #Only the outermost p are kept, to prevent duplication.
         #Also seen titles with a p inside; not(ancestor::title) prevents duplication of that p
         for el in part_el.xpath(".//p[not(ancestor::p) and not(ancestor::title)]| .//title[not(ancestor::p) and not(ancestor::title)]"):
-            new_text
+            new_text = parseout_el_refs(el, rids)
             tmp_path_l.append(article_tree.getelementpath(el))
             tmp_text_l.append(new_text)
-            tmp_refs_l.append(new_xref_id)
-            tmp_refs_text_l.append(new_xref_text)
             if el.tag=="title":
                 tmp_t_paths.append(tmp_path_l[-1])
-                tmp_t_texts_lowcase.append(new_text.lower())
-        if part=="body": #We keep the body for the processing right below.
+                tmp_t_texts_lowcase.append(new_text.lower())
+        if part=="body": #We keep the body for the processing right below.
             path_l, text_l = tmp_path_l, tmp_text_l
-            refs_l, refs_text_l = tmp_refs_l, tmp_refs_text_l
             t_paths, t_texts_lowcase = tmp_t_paths, tmp_t_texts_lowcase
         else:
-
+            res_content_d[part] = tmp_text_l
 
     # Figuring out from the titles which are the different categories
     mask_intro = np.array(["introduction" in t_text or "background" in t_text for t_text in t_texts_lowcase]).astype(bool)
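The section bucketing rests on boolean masks over the lowercased titles. A toy illustration (the titles are invented; the mask expression mirrors the line above):

```python
import numpy as np
from itertools import compress

titles = ["1. introduction", "2. methods", "2.1 data", "3. results"]
mask_intro = np.array(["introduction" in t or "background" in t for t in titles]).astype(bool)
print(list(compress(titles, mask_intro)))  # ['1. introduction']
```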
@@ -395,10 +370,7 @@ def construct_datadict(article_tree):
     for mask, name_section in zip([mask_intro, mask_metho, mask_resul, mask_discu, mask_concl],
                                   ["introduction", "methods", "results", "discussion", "conclusion"]):
         if not np.any(mask):
-            res_content_d[name_section] = []
-            for target_key in REFS_KEYS:
-                res_reference_d[name_section][target_key] = []
-                res_reference_text_d[name_section][target_key] = []
+            res_content_d[name_section] = []
             continue
 
         filtered_path_l = list(compress(t_paths, mask))
@@ -407,17 +379,12 @@ def construct_datadict(article_tree):
         root_path = root_path[:root_path.rindex("/")]
         mask_contents = np.array([path.startswith(root_path) for path in path_l]).astype(bool)
         processed_mask |= mask_contents
-
-                          list(compress(refs_l, mask_contents)), list(compress(refs_text_l, mask_contents)))
+        res_content_d[name_section] = list(compress(text_l, mask_contents))
 
     processed_mask = ~processed_mask #Finally, add the body part as everything that doesn't belong to the previous categories
-
-                      list(compress(refs_l, processed_mask)), list(compress(refs_text_l, processed_mask)))
-
-    res_reference_d = dict(res_reference_d)
-    res_reference_text_d = dict(res_reference_text_d)
+    res_content_d["body"] = list(compress(text_l, processed_mask))
 
-    return (res_content_d,
+    return (res_content_d, reference_count)
 
 class OpenAccessXMLConfig(datasets.BuilderConfig):
     """BuilderConfig for the PMC Open Access Subset."""
@@ -456,7 +423,7 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
                     "results": datasets.features.Sequence(datasets.Value("string")),
                     "discussion": datasets.features.Sequence(datasets.Value("string")),
                     "conclusion": datasets.features.Sequence(datasets.Value("string")),
-
+
                     "front": datasets.features.Sequence(datasets.Value("string")),
                     "body": datasets.features.Sequence(datasets.Value("string")),
                     "back": datasets.features.Sequence(datasets.Value("string")),
@@ -478,25 +445,6 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
                     "glossary": datasets.features.Sequence(
                         {"acronym": datasets.Value("string"), "definition": datasets.Value("string")}
                     ),
-
-                    "references": {k_cont:{k_ref:datasets.features.Sequence(datasets.Value("string" if k_ref=="pmid_ref" else "int32")) for k_ref in REFS_KEYS} for k_cont in CONTENT_KEYS},
-                    "references_text": {k_cont:{k_ref:datasets.features.Sequence(datasets.Value("string")) for k_ref in REFS_KEYS} for k_cont in CONTENT_KEYS},
-                    # -> With the 2 level dict, each item looks like this:
-                    # "introduction":{"pmid_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "unknown_pub_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "figure_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "table_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "formula_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "box_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "code_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "quote_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "chem_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "supplementary_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "footnote_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "graphic_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 "media_ref": datasets.features.Sequence(datasets.Value("string")),
-                    #                 },
-
                     "n_references": datasets.Value("int32"),
                     "license": datasets.Value("string"),
                     "retracted": datasets.Value("string"),
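With the per-section reference dicts removed, the references now live inside the text markers, and the features above are the whole schema. A hypothetical usage sketch (the local script path and the `oa_comm` subset name are assumptions; `oa_comm` only appears in a comment in this file):

```python
import datasets

# Load the builder script locally; this downloads the whole subset.
ds = datasets.load_dataset("./pmc_open_access_xml.py", "oa_comm", split="train")
example = ds[0]
print(example["introduction"][:1])  # paragraph strings with ##..## markers inline
print(example["n_references"])      # int32 count of the article's references
```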
@@ -606,7 +554,7 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
             except etree.XMLSyntaxError: #In some files, xml is broken
                 continue
 
-            content_d,
+            content_d, n_ref = construct_datadict(article_tree)
             glossary = np.array([[k,v] for k,v in content_d["glossary"].items()])
             data = {
                 "introduction": content_d["introduction"],
@@ -630,8 +578,6 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
                 "media": content_d["media"],
                 # "question": content_d["question"],
                 "unknown_pub": content_d["unknown_pub"],
-                "references": reference_d,
-                "references_text": reference_text_d,
                 "glossary": {"acronym":glossary[:,0], "definition":glossary[:,1]} if len(glossary)>0 else {"acronym":[], "definition":[]},
                 "n_references": n_ref,
                 "pmid": data["PMID"],
@@ -670,7 +616,7 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
             except etree.XMLSyntaxError: #In some files, xml is broken
                 continue
 
-            content_d,
+            content_d, n_ref = construct_datadict(article_tree)
             glossary = np.array([[k,v] for k,v in content_d["glossary"].items()])
             data = {
                 "introduction": content_d["introduction"],
@@ -694,8 +640,6 @@ class OpenAccessXML(datasets.GeneratorBasedBuilder):
                 "media": content_d["media"],
                 # "question": content_d["question"],
                 "unknown_pub": content_d["unknown_pub"],
-                "references": reference_d,
-                "references_text": reference_text_d,
                 "glossary": {"acronym":glossary[:,0], "definition":glossary[:,1]} if len(glossary)>0 else {"acronym":[], "definition":[]},
                 "n_references": n_ref,
                 "pmid": data["PMID"],