Example #1
    def parse_record(self, row: Dict[str, Any]) -> Optional[ContainerEntity]:
        """
        row is a python dict (parsed from JSON).

        returns a ContainerEntity (or None if invalid or couldn't parse)
        """

        name = clean_str(row.get("name"))
        if name and name.endswith("."):
            name = name[:-1]
        if not name:
            # Name is required (by schema)
            return None

        extra = dict()
        for k in (
                "urls",
                "webarchive_urls",
                "country",
                "sherpa_romeo",
                "ezb",
                "szczepanski",
                "doaj",
                "languages",
                "ia",
                "scielo",
                "kbart",
                "publisher_type",
                "platform",
        ):
            if row["extra"].get(k):
                extra[k] = row["extra"][k]

        container_type = None
        if "proceedings" in name.lower():
            container_type = "proceedings"
        elif "journal " in name.lower():
            container_type = "journal"

        if row["extra"].get("issnp"):
            row["extra"]["issnp"] = row["extra"]["issnp"].upper()
        if row["extra"].get("issne"):
            row["extra"]["issne"] = row["extra"]["issne"].upper()
        ce = ContainerEntity(
            issnl=row["issnl"],
            issnp=row["extra"].get("issnp"),
            issne=row["extra"].get("issne"),
            ident=row["ident"],
            name=name,
            container_type=container_type,
            publisher=clean_str(row.get("publisher")),
            wikidata_qid=row.get("wikidata_qid"),
            extra=extra,
        )
        return ce
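
Note: clean_str() is used throughout these examples but not shown. A minimal sketch of the assumed contract (None-safe, whitespace-collapsing, empty maps to None); the force_xml entity-unescaping here is an assumption, and the real helper may do more normalization:

import html
from typing import Optional

def clean_str(s: Optional[str], force_xml: bool = False) -> Optional[str]:
    # Sketch only: collapse whitespace, map empty/missing values to None.
    if not s:
        return None
    if force_xml:
        # assumed behavior: unescape '&amp;'-style entities from XML sources
        s = html.unescape(s)
    s = " ".join(s.split())
    return s or None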
Example #2
 def doaj_contribs(
         self,
         authors: List[dict]) -> List[fatcat_openapi_client.ReleaseContrib]:
     """
     bibjson.author {
         affiliation (string, optional),
         name (string),
         orcid_id (string, optional)
     }
     """
     contribs = []
     index = 0
     for author in authors:
         if not author.get("name"):
             continue
         creator_id = None
         orcid = clean_orcid(author.get("orcid_id"))
         if orcid:
             creator_id = self.lookup_orcid(orcid)
         contribs.append(
             fatcat_openapi_client.ReleaseContrib(
                 raw_name=author.get("name"),
                 role="author",
                 index=index,
                 creator_id=creator_id,
                 raw_affiliation=clean_str(author.get("affiliation")),
             ))
         index += 1
     return contribs
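
Note: clean_orcid() above is assumed to normalize and validate ORCID iDs before lookup; a plausible sketch (the regex and URL-prefix handling are assumptions, not the project's actual rules):

import re
from typing import Optional

ORCID_RE = re.compile(r"^\d{4}-\d{4}-\d{4}-\d{3}[\dX]$")

def clean_orcid(raw: Optional[str]) -> Optional[str]:
    # Sketch: return the bare dash-separated ORCID iD, or None if malformed.
    if not raw:
        return None
    raw = raw.strip().replace("https://orcid.org/", "")
    return raw if ORCID_RE.match(raw) else None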
Example #3
    def dblp_contrib_single(self, elem: Any) -> fatcat_openapi_client.ReleaseContrib:
        """
        In the future, might try to implement creator key-ification and lookup here.

        Example rows:

            <author>Michael H. B&ouml;hlen</author>
            <author orcid="0000-0002-4354-9138">Nicolas Heist</author>
            <author orcid="0000-0001-9108-4278">Jens Lehmann 0001</author>
        """

        creator_id = None
        extra = None
        raw_name = clean_str(elem.text)

        # remove trailing disambiguation number in author name, if present
        # (dblp marks homonyms with "0001"-style suffixes)
        if raw_name and raw_name.split()[-1].isdigit():
            raw_name = ' '.join(raw_name.split()[:-1])

        if elem.get('orcid'):
            orcid = clean_orcid(elem['orcid'])
            if orcid:
                creator_id = self.lookup_orcid(orcid)
                if not creator_id:
                    extra = dict(orcid=orcid)
        return fatcat_openapi_client.ReleaseContrib(
            raw_name=raw_name,
            creator_id=creator_id,
            extra=extra,
        )
Example #4
    def parse_record(self, obj: Dict[str, Any]) -> Optional[CreatorEntity]:
        """
        obj is a python dict (parsed from JSON).
        returns a CreatorEntity (or None if invalid or couldn't parse)
        """

        if "person" not in obj:
            return None

        name = obj["person"]["name"]
        if not name:
            return None
        extra = None
        given = value_or_none(name.get("given-names"))
        sur = value_or_none(name.get("family-name"))
        display = value_or_none(name.get("credit-name"))
        if display is None:
            # TODO: sorry human beings
            if given and sur:
                display = "{} {}".format(given, sur)
            elif sur:
                display = sur
            elif given:
                display = given
        orcid = obj["orcid-identifier"]["path"]
        if not self.is_orcid(orcid):
            sys.stderr.write("Bad ORCID: {}\n".format(orcid))
            return None
        display = clean_str(display)
        if not display:
            # must have *some* name
            return None
        ce = CreatorEntity(
            orcid=orcid,
            given_name=clean_str(given),
            surname=clean_str(sur),
            display_name=display,
            extra=extra,
        )
        return ce
Example #5
    def parse_record(self, row: Dict[str, Any]) -> Optional[ContainerEntity]:
        """
        row is a python dict (parsed from JSON).

        returns a ContainerEntity (or None if invalid or couldn't parse)
        """

        dblp_prefix = row.get("key") or row.get("dblp_prefix")
        assert dblp_prefix
        assert row["title"]

        container_type = None
        if dblp_prefix.startswith("conf/"):
            container_type = "conference-series"
        elif dblp_prefix.startswith("journals/"):
            container_type = "journal"
        elif dblp_prefix.startswith("series/"):
            container_type = "book-series"

        issnl = None
        for issn in row.get("issns", []):
            issnl = self.issn2issnl(issn)
            if issnl:
                break

        extra: Dict[str, Any] = {
            "dblp": {
                "prefix": dblp_prefix,
            },
        }

        if row.get("homepage_url"):
            extra["urls"] = [row["homepage_url"]]

        if row.get("acronym"):
            extra["acronym"] = row["acronym"]

        ce = fatcat_openapi_client.ContainerEntity(
            name=clean_str(row["title"]),
            container_type=container_type,
            issnl=issnl,
            wikidata_qid=row.get("wikidata_qid"),
            extra=extra,
        )
        return ce
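
Note: issn2issnl() is assumed to map any print/electronic ISSN to its ISSN-L via a preloaded table (as distributed by issn.org); a minimal sketch of that lookup:

from typing import Dict, Optional

class IssnLinker:
    # Sketch: wraps a preloaded ISSN -> ISSN-L mapping.
    def __init__(self, issn_issnl_map: Dict[str, str]):
        self._map = issn_issnl_map

    def issn2issnl(self, issn: Optional[str]) -> Optional[str]:
        if not issn:
            return None
        return self._map.get(issn.strip().upper())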
Example #6
    def parse_record(self, row):
        """
        row is a python dict (parsed from JSON).

        returns a ContainerEntity (or None if invalid or couldn't parse)
        """

        dblp_prefix = row.get('key') or row.get('dblp_prefix')
        assert dblp_prefix
        assert row['title']

        container_type = None
        if dblp_prefix.startswith('conf/'):
            container_type = "conference-series"
        elif dblp_prefix.startswith('journals/'):
            container_type = "journal"
        elif dblp_prefix.startswith('series/'):
            container_type = "book-series"

        issnl = None
        for issn in row.get('issns', []):
            issnl = self.issn2issnl(issn)
            if issnl:
                break

        extra = {
            'dblp': {
                'prefix': dblp_prefix,
            },
        }

        if row.get('homepage_url'):
            extra['urls'] = [row['homepage_url']]

        if row.get('acronym'):
            extra['acronym'] = row['acronym']

        ce = fatcat_openapi_client.ContainerEntity(
            name=clean_str(row['title']),
            container_type=container_type,
            issnl=issnl,
            wikidata_qid=row.get('wikidata_qid'),
            extra=extra,
        )
        return ce
Example #7
 def do_contribs(obj_list: List[Dict[str, Any]],
                 ctype: str) -> List[ReleaseContrib]:
     contribs = []
     for i, am in enumerate(obj_list):
         creator_id = None
         if "ORCID" in am.keys():
             creator_id = self.lookup_orcid(am["ORCID"].split("/")[-1])
         # Sorry humans :(
         if am.get("given") and am.get("family"):
             raw_name: Optional[str] = "{} {}".format(
                 am["given"], am["family"])
         elif am.get("family"):
             raw_name = am["family"]
         else:
             # TODO: can end up empty
             raw_name = am.get("name") or am.get("given")
         extra: Dict[str, Any] = dict()
         if ctype == "author":
             index: Optional[int] = i
         else:
             index = None
         raw_affiliation = None
         affiliation_list = am.get("affiliation") or []
         # TODO: currently requiring a "name" in all affiliations. Could
         # add ROR support (via identifier) in the near future
         affiliation_list = [a for a in affiliation_list if "name" in a]
          if affiliation_list:
             raw_affiliation = affiliation_list[0]["name"]
             if len(affiliation_list) > 1:
                 # note: affiliation => more_affiliations
                 extra["more_affiliations"] = [
                     clean_str(a["name"]) for a in affiliation_list[1:]
                 ]
         if am.get("sequence") and am.get("sequence") != "additional":
             extra["seq"] = clean_str(am.get("sequence"))
         assert ctype in ("author", "editor", "translator")
         raw_name = clean_str(raw_name)
         # TODO: what if 'raw_name' is None?
         contribs.append(
             ReleaseContrib(
                 creator_id=creator_id,
                 index=index,
                 raw_name=raw_name,
                 given_name=clean_str(am.get("given")),
                 surname=clean_str(am.get("family")),
                 raw_affiliation=clean_str(raw_affiliation),
                 role=ctype,
                 extra=extra or None,
             ))
     return contribs
Example #8
    def doaj_abstracts(
            self,
            bibjson: dict) -> List[fatcat_openapi_client.ReleaseAbstract]:
        text = clean_str(bibjson.get("abstract"))
        if not text or len(text) < 10:
            return []
        if len(text) > MAX_ABSTRACT_LENGTH:
            text = text[:MAX_ABSTRACT_LENGTH] + " [...]"

        lang = detect_text_lang(text)

        abstract = fatcat_openapi_client.ReleaseAbstract(
            mimetype="text/plain",
            content=text,
            lang=lang,
        )

        return [
            abstract,
        ]
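
Note: detect_text_lang() is not shown; one plausible implementation on top of the third-party langdetect package (the real helper may use a different library or confidence thresholds):

from typing import Optional

def detect_text_lang(text: str) -> Optional[str]:
    # Sketch: best-effort ISO 639-1 code for a text, or None on failure.
    try:
        from langdetect import detect  # assumed dependency
        return detect(text)
    except Exception:
        # detection can fail on short or mixed-language abstracts
        return None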
Example #9
    def parse_record(self, row: Dict[str, Any]) -> Optional[ContainerEntity]:
        """
        row is a python dict (parsed from JSON).

        returns a ContainerEntity (or None if invalid or couldn't parse)
        """

        if not row.get("name"):
            # Name is required (by schema)
            return None

        extra = dict()
        for key in (
                "issne",
                "issnp",
                "languages",
                "country",
                "urls",
                "abbrev",
                "coden",
                "aliases",
                "original_name",
                "first_year",
                "last_year",
                "platform",
                "default_license",
                "road",
                "mimetypes",
                "sherpa_romeo",
                "kbart",
        ):
            if row.get(key):
                extra[key] = row[key]
        # TODO: not including for now: norwegian, dois/crossref, ia

        extra_doaj = dict()
        if row.get("doaj"):
            if row["doaj"].get("as_of"):
                extra_doaj["as_of"] = row["doaj"]["as_of"]
            if row["doaj"].get("works"):
                extra_doaj["works"] = row["doaj"]["works"]
        if extra_doaj:
            extra["doaj"] = extra_doaj

        extra_ia = dict()
        # TODO: would like an ia.longtail_ia flag
        if row.get("sim"):
            # NB: None case of the .get() here is blech, but otherwise
            # extra['ia'].get('sim') would be false-y, breaking 'any_ia_sim' later on
            extra_ia["sim"] = {
                "year_spans": row["sim"].get("year_spans"),
            }
        if extra_ia:
            extra["ia"] = extra_ia

        name = clean_str(row.get("name"))
        if not name:
            return None

        ce = ContainerEntity(
            issnl=row["issnl"],
            issne=row.get("issne"),
            issnp=row.get("issnp"),
            container_type=None,  # TODO
            name=name,
            publisher=clean_str(row.get("publisher")),
            wikidata_qid=None,  # TODO
            extra=extra,
        )
        return ce
Example #10
    def parse_record(self, obj: Dict[str, Any]) -> Optional[ReleaseEntity]:
        """
        obj is a python dict (parsed from json).
        returns a ReleaseEntity
        """

        # Ways to be out of scope (provisionally)
        # journal-issue and journal-volume map to None, but allowed for now
        if obj.get("type") in (
                None,
                "journal",
                "proceedings",
                "standard-series",
                "report-series",
                "book-series",
                "book-set",
                "book-track",
                "proceedings-series",
        ):
            self.counts["skip-release-type"] += 1
            return None

        # Do require the 'title' keys to exist, as release entities do
        if ("title" not in obj) or (not obj["title"]):
            self.counts["skip-blank-title"] += 1
            return None

        release_type = self.map_release_type(obj["type"])

        # contribs
        def do_contribs(obj_list: List[Dict[str, Any]],
                        ctype: str) -> List[ReleaseContrib]:
            contribs = []
            for i, am in enumerate(obj_list):
                creator_id = None
                if "ORCID" in am.keys():
                    creator_id = self.lookup_orcid(am["ORCID"].split("/")[-1])
                # Sorry humans :(
                if am.get("given") and am.get("family"):
                    raw_name: Optional[str] = "{} {}".format(
                        am["given"], am["family"])
                elif am.get("family"):
                    raw_name = am["family"]
                else:
                    # TODO: can end up empty
                    raw_name = am.get("name") or am.get("given")
                extra: Dict[str, Any] = dict()
                if ctype == "author":
                    index: Optional[int] = i
                else:
                    index = None
                raw_affiliation = None
                affiliation_list = am.get("affiliation") or []
                # TODO: currently requiring a "name" in all affiliations. Could
                # add ROR support (via identifier) in the near future
                affiliation_list = [a for a in affiliation_list if "name" in a]
                if affiliation_list:
                    raw_affiliation = affiliation_list[0]["name"]
                    if len(affiliation_list) > 1:
                        # note: affiliation => more_affiliations
                        extra["more_affiliations"] = [
                            clean_str(a["name"]) for a in affiliation_list[1:]
                        ]
                if am.get("sequence") and am.get("sequence") != "additional":
                    extra["seq"] = clean_str(am.get("sequence"))
                assert ctype in ("author", "editor", "translator")
                raw_name = clean_str(raw_name)
                # TODO: what if 'raw_name' is None?
                contribs.append(
                    ReleaseContrib(
                        creator_id=creator_id,
                        index=index,
                        raw_name=raw_name,
                        given_name=clean_str(am.get("given")),
                        surname=clean_str(am.get("family")),
                        raw_affiliation=clean_str(raw_affiliation),
                        role=ctype,
                        extra=extra or None,
                    ))
            return contribs

        contribs = do_contribs(obj.get("author", []), "author")
        contribs.extend(do_contribs(obj.get("editor", []), "editor"))
        contribs.extend(do_contribs(obj.get("translator", []), "translator"))

        # container
        issn = obj.get("ISSN", [None])[0]
        issnl = self.issn2issnl(issn)
        container_id = None
        if issnl:
            container_id = self.lookup_issnl(issnl)
        publisher = clean_str(obj.get("publisher"))

        container_name = obj.get("container-title")
        if container_name:
            container_name = clean_str(container_name[0], force_xml=True)
        if not container_name:
            container_name = None
        if (container_id is None and self.create_containers
                and (issnl is not None) and container_name):
            ce = fatcat_openapi_client.ContainerEntity(
                issnl=issnl,
                publisher=publisher,
                container_type=self.map_container_type(release_type),
                name=container_name,
            )
            ce_edit = self.create_container(ce)
            container_id = ce_edit.ident
            self._issnl_id_map[issnl] = container_id

        # license slug
        license_slug = None
        license_extra = []
        for lic in obj.get("license", []):
            if lic["content-version"] not in ("vor", "unspecified"):
                continue
            slug = lookup_license_slug(lic["URL"])
            if slug:
                license_slug = slug
            if "start" in lic:
                lic["start"] = lic["start"]["date-time"]
            license_extra.append(lic)

        # references
        refs = []
        for i, rm in enumerate(obj.get("reference", [])):
            try:
                year: Optional[int] = int(rm.get("year"))
                # TODO: will need to update/config in the future!
                # NOTE: are there crossref works with year < 100?
                if year is not None:
                    if year > 2025 or year < 100:
                        year = None
            except (TypeError, ValueError):
                year = None
            ref_extra: Dict[str, Any] = dict()
            key = rm.get("key")
            if key and key.startswith(obj["DOI"].upper()):
                key = key.replace(obj["DOI"].upper() + "-", "")
                key = key.replace(obj["DOI"].upper(), "")
            ref_container_name = rm.get("volume-title")
            if not ref_container_name:
                ref_container_name = rm.get("journal-title")
            elif rm.get("journal-title"):
                ref_extra["journal-title"] = rm["journal-title"]
            if rm.get("DOI"):
                ref_extra["doi"] = rm.get("DOI").lower()
            author = clean_str(rm.get("author"))
            if author:
                ref_extra["authors"] = [author]
            for k in (
                    "editor",
                    "edition",
                    "authority",
                    "version",
                    "genre",
                    "url",
                    "event",
                    "issue",
                    "volume",
                    "date",
                    "accessed_date",
                    "issued",
                    "page",
                    "medium",
                    "collection_title",
                    "chapter_number",
                    "unstructured",
                    "series-title",
                    "volume-title",
            ):
                if clean_str(rm.get(k)):
                    ref_extra[k] = clean_str(rm[k])
            refs.append(
                fatcat_openapi_client.ReleaseRef(
                    index=i,
                    # doing lookups would be a second import pass
                    target_release_id=None,
                    key=key,
                    year=year,
                    container_name=clean_str(ref_container_name),
                    title=clean_str(rm.get("article-title")),
                    locator=clean_str(rm.get("first-page")),
                    # TODO: just dump JSON somewhere here?
                    extra=ref_extra or None,
                ))

        # abstracts
        abstracts = []
        abstract = clean_str(obj.get("abstract"))
        if abstract and len(abstract) > 10:
            abstracts.append(
                fatcat_openapi_client.ReleaseAbstract(
                    mimetype="application/xml+jats", content=abstract))

        # extra fields
        extra: Dict[str, Any] = dict()
        extra_crossref: Dict[str, Any] = dict()
        # top-level extra keys
        if not container_id:
            if obj.get("container-title"):
                extra["container_name"] = container_name
        for key in "group-title":
            val = obj.get(key)
            if val:
                if type(val) == list:
                    val = val[0]
                if type(val) == str:
                    val = clean_str(val)
                    if val:
                        extra[key] = clean_str(val)
                else:
                    extra[key] = val
        # crossref-nested extra keys
        for key in ("subject", "type", "alternative-id", "archive", "funder"):
            val = obj.get(key)
            if val:
                if type(val) == str:
                    extra_crossref[key] = clean_str(val)
                else:
                    extra_crossref[key] = val
        if license_extra:
            extra_crossref["license"] = license_extra

        if len(obj["title"]) > 1:
            aliases = [clean_str(t) for t in obj["title"][1:]]
            aliases = [t for t in aliases if t]
            if aliases:
                extra["aliases"] = aliases

        # ISBN
        isbn13 = None
        for raw in obj.get("ISBN", []):
            # TODO: convert if not ISBN-13 format
            if len(raw) == 17:
                isbn13 = raw
                break

        # release status
        if obj["type"] in (
                "journal-article",
                "conference-proceeding",
                "book",
                "dissertation",
                "book-chapter",
        ):
            release_stage: Optional[str] = "published"
        else:
            # unknown
            release_stage = None

        # filter out unreasonably huge releases
        if len(abstracts) > 100:
            self.counts["skip-huge-abstracts"] += 1
            return None
        if len(contribs) > 2000:
            self.counts["skip-huge-contribs"] += 1
            return None
        if len(refs) > 5000:
            self.counts["skip-huge-refs"] += 1
            return None

        # release date parsing is amazingly complex
        raw_date = obj["issued"]["date-parts"][0]
        if not raw_date or not raw_date[0]:
            # got some NoneType, even though at least year is supposed to be set
            release_year = None
            release_date = None
        elif len(raw_date) == 3:
            release_year = raw_date[0]
            release_date = datetime.date(year=raw_date[0],
                                         month=raw_date[1],
                                         day=raw_date[2])
        else:
            # sometimes only the year is included, not the full date
            release_year = raw_date[0]
            release_date = None

        original_title: Optional[str] = None
        if obj.get("original-title"):
            ot = obj.get("original-title")
            if ot is not None:
                original_title = clean_str(ot[0], force_xml=True)

        title: Optional[str] = None
        if obj.get("title"):
            title = clean_str(obj["title"][0], force_xml=True)
            if not title or len(title) <= 1:
                # title can't be just a single character
                self.counts["skip-blank-title"] += 1
                return None

        doi = clean_doi(obj["DOI"].lower())
        if not doi:
            self.counts["skip-bad-doi"] += 1
            return None

        subtitle = None
        if obj.get("subtitle"):
            subtitle = clean_str(obj["subtitle"][0], force_xml=True)
            if not subtitle or len(subtitle) <= 1:
                # subtitle can't be just a single character
                subtitle = None

        if extra_crossref:
            extra["crossref"] = extra_crossref

        re = ReleaseEntity(
            work_id=None,
            container_id=container_id,
            title=title,
            subtitle=subtitle,
            original_title=original_title,
            release_type=release_type,
            release_stage=release_stage,
            release_date=release_date,
            release_year=release_year,
            publisher=publisher,
            ext_ids=fatcat_openapi_client.ReleaseExtIds(
                doi=doi,
                isbn13=isbn13,
            ),
            volume=clean_str(obj.get("volume")),
            issue=clean_str(obj.get("issue")),
            pages=clean_str(obj.get("page")),
            language=clean_str(obj.get("language")),
            license_slug=license_slug,
            extra=extra or None,
            abstracts=abstracts or None,
            contribs=contribs or None,
            refs=refs or None,
        )
        return re
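
Note: the "release date parsing is amazingly complex" block above boils down to one pattern: Crossref 'issued' date-parts are prefixes of [year, month, day]. A standalone sketch of that logic (helper name is hypothetical):

import datetime
from typing import Any, List, Optional, Tuple

def parse_crossref_date(
        date_parts: List[Any]) -> Tuple[Optional[int], Optional[datetime.date]]:
    # Sketch: [2002, 7, 11] -> (2002, date(2002, 7, 11)); [2002] -> (2002, None).
    if not date_parts or not date_parts[0]:
        return None, None
    if len(date_parts) == 3:
        return date_parts[0], datetime.date(
            year=date_parts[0], month=date_parts[1], day=date_parts[2])
    return date_parts[0], None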
Example #11
def parse_jalc_persons(raw_persons: List[Any]) -> List[ReleaseContrib]:
    """
    For the most part, JALC DC names are in either japanese or english. The
    two common patterns are a list alternating between the two (in which case
    the names are translations), or all in one language or the other.

    Because dublin core is a projection tossing away a bunch of context, the
    other cases are hard to disambiguate. There are also some cases with Korean
    and other languages mixed in. This crude method doesn't handle everything
    right; it tries to just get the two common patterns correct. Sorry humans!

    Edge cases for this function:
    - 10.11316/jpsgaiyo.56.1.4.0_757_3 <= all english, some japanese, works
    - 10.14988/pa.2017.0000013531 <= complex, not japanese/english, mixed
    - 10.15036/arerugi.62.1407_1 <= one japanese, two english; fails
    - 10.14988/pa.2017.0000007327 <= ambiguous; translator in jpn/eng
    """

    persons = []

    # first parse out into language-agnostic dicts
    for raw in raw_persons:
        name = raw.find("name") or None
        if name:
            name = clean_str(name.get_text().replace("\n", " "))
        surname = raw.find("familyName") or None
        if surname:
            surname = clean_str(surname.get_text().replace("\n", " "))
        given_name = raw.find("givenName") or None
        if given_name:
            given_name = clean_str(given_name.get_text().replace("\n", " "))
        lang = "en"
        if is_cjk(name):
            lang = "ja"
        if lang == "en" and surname and given_name:
            # english names order is flipped
            name = "{} {}".format(given_name, surname)
        rc = ReleaseContrib(raw_name=name,
                            surname=surname,
                            given_name=given_name,
                            role="author")
        # add an extra hint field; won't end up in serialized object
        rc._lang = lang
        persons.append(rc)

    if not persons:
        return []

    if all([p._lang == "en"
            for p in persons]) or all([p._lang == "ja" for p in persons]):
        # all english names, or all japanese names
        return persons

    # for debugging
    # if len([1 for p in persons if p._lang == 'en']) != len([1 for p in persons if p._lang == 'ja']):
    #    print("INTERESTING: {}".format(persons[0]))

    start_lang = persons[0]._lang
    contribs = []
    for p in persons:
        if p._lang == start_lang:
            contribs.append(p)
        else:
            if p._lang == "en" and contribs[-1]._lang == "ja":
                eng = p
                jpn = contribs[-1]
            elif p._lang == "ja" and contribs[-1]._lang == "en":
                eng = contribs[-1]
                jpn = p
            else:
                # give up and just add as another author
                contribs.append(p)
                continue
            eng.extra = {
                "original_name": {
                    "lang": jpn._lang,
                    "raw_name": jpn.raw_name,
                    "given_name": jpn.given_name,
                    "surname": jpn.surname,
                },
            }
            contribs[-1] = eng
    return contribs
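
A hypothetical trace of the pairing logic above (langs only), for intuition:

# input:  [ja1, en1, ja2, en2]   (alternating; start_lang = "ja")
# step 1: ja1 matches start_lang      -> contribs = [ja1]
# step 2: en1 pairs with trailing ja1 -> contribs = [en1{original_name: ja1}]
# step 3: ja2 matches start_lang      -> contribs = [en1{...}, ja2]
# step 4: en2 pairs with trailing ja2 -> contribs = [en1{...}, en2{original_name: ja2}]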
Example #12
    def parse_record(self, record: Any) -> Optional[ReleaseEntity]:
        """
        record is a beautiful soup object
        returns a ReleaseEntity, or None

        In JALC metadata, both English and Japanese records are given for most
        fields.
        """

        extra: Dict[str, Any] = dict()
        extra_jalc: Dict[str, Any] = dict()

        titles = record.find_all("title")
        if not titles:
            return None
        title = titles[0].get_text().replace("\n", " ").strip()
        original_title = None
        if title.endswith("."):
            title = title[:-1]
        if len(titles) > 1:
            original_title = titles[1].get_text().replace("\n", " ").strip()
            if original_title.endswith("."):
                original_title = original_title[:-1]

        doi = None
        if record.doi:
            doi = clean_doi(record.doi.string.strip().lower())
            # TODO: following code is redundant with clean_doi()
            if not doi:
                return None
            if doi.startswith("http://dx.doi.org/"):
                doi = doi.replace("http://dx.doi.org/", "")
            elif doi.startswith("https://dx.doi.org/"):
                doi = doi.replace("https://dx.doi.org/", "")
            elif doi.startswith("http://doi.org/"):
                doi = doi.replace("http://doi.org/", "")
            elif doi.startswith("https://doi.org/"):
                doi = doi.replace("https://doi.org/", "")
            if not (doi.startswith("10.") and "/" in doi):
                sys.stderr.write("bogus JALC DOI: {}\n".format(doi))
                doi = None
        if not doi:
            return None

        people = record.find_all("Person")
        contribs = parse_jalc_persons(people)

        for i, contrib in enumerate(contribs):
            if contrib.raw_name != "et al.":
                contrib.index = i

        release_year = None
        release_date = None
        date = record.date or None
        if date:
            date = date.string
            if len(date) == 10:
                release_date_date = datetime.datetime.strptime(
                    date, DATE_FMT).date()
                release_year = release_date_date.year
                release_date = release_date_date.isoformat()
            elif len(date) == 4 and date.isdigit():
                release_year = int(date)

        pages = None
        if record.startingPage and record.startingPage.string.strip():
            pages = record.startingPage.string.strip()
            if record.endingPage and record.endingPage.string.strip():
                pages = "{}-{}".format(pages, record.endingPage.string.strip())
        # double check to prevent "-" as pages
        if pages and pages.strip() == "-":
            pages = None

        volume = None
        if record.volume:
            volume = record.volume.string
        issue = None
        if record.number:
            # note: number/issue transform
            issue = record.number.string

        # container
        issn = None
        issn_list = record.find_all("issn")
        if issn_list:
            # if we wanted the other ISSNs, would also need to uniq the list.
            # But we only need one to lookup ISSN-L/container
            issn = issn_list[0].string
        if issn:
            issnl = self.issn2issnl(issn)
        else:
            issnl = None
        container_id = None
        if issnl:
            container_id = self.lookup_issnl(issnl)

        publisher = None
        container_name = None
        container_extra: Dict[str, Any] = dict()

        if record.publicationName:
            pubs = [
                p.get_text().replace("\n", " ").strip()
                for p in record.find_all("publicationName") if p.get_text()
            ]
            pubs = [clean_str(p) for p in pubs if p]
            assert pubs
            if len(pubs) > 1 and pubs[0] == pubs[1]:
                pubs = [pubs[0]]
            if len(pubs) > 1 and is_cjk(pubs[0]):
                # eng/jpn ordering is not reliable
                pubs = [pubs[1], pubs[0]]
            container_name = clean_str(pubs[0])
            if len(pubs) > 1:
                container_extra["original_name"] = clean_str(pubs[1])

        if record.publisher:
            pubs = [
                p.get_text().replace("\n", " ").strip()
                for p in record.find_all("publisher") if p.get_text()
            ]
            pubs = [p for p in pubs if p]
            if len(pubs) > 1 and pubs[0] == pubs[1]:
                pubs = [pubs[0]]
            if len(pubs) > 1 and is_cjk(pubs[0]):
                # ordering is not reliable
                pubs = [pubs[1], pubs[0]]
            if pubs:
                publisher = clean_str(pubs[0])
                if len(pubs) > 1:
                    container_extra["publisher_aliases"] = pubs[1:]

        if (container_id is None and self.create_containers
                and (issnl is not None) and container_name):
            # name, type, publisher, issnl
            # extra: issnp, issne, original_name, languages, country
            container_extra["country"] = "jp"
            container_extra["languages"] = ["ja"]
            ce = fatcat_openapi_client.ContainerEntity(
                name=container_name,
                container_type="journal",
                publisher=publisher,
                issnl=issnl,
                extra=(container_extra or None),
            )
            ce_edit = self.create_container(ce)
            container_id = ce_edit.ident
            # short-cut future imports in same batch
            self._issnl_id_map[issnl] = container_id

        # the vast majority of works are in japanese
        # TODO: any indication when *not* in japanese?
        lang = "ja"

        # reasonable default for this collection
        release_type = "article-journal"

        # extra:
        #   translation_of
        #   aliases
        #   container_name
        #   group-title
        # always put at least an empty dict here to indicate the DOI registrar
        # (informally)
        extra["jalc"] = extra_jalc

        title = clean_str(title)
        if not title:
            return None

        re = ReleaseEntity(
            work_id=None,
            title=title,
            original_title=clean_str(original_title),
            release_type=release_type,
            release_stage="published",
            release_date=release_date,
            release_year=release_year,
            ext_ids=fatcat_openapi_client.ReleaseExtIds(doi=doi, ),
            volume=volume,
            issue=issue,
            pages=pages,
            publisher=publisher,
            language=lang,
            # license_slug
            container_id=container_id,
            contribs=contribs,
            extra=extra,
        )
        return re
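
Note: the startswith/replace chain above (already flagged as redundant with clean_doi()) compresses to a single regex; a hedged sketch:

import re
from typing import Optional

DOI_URL_PREFIX_RE = re.compile(r"^https?://(dx\.)?doi\.org/")

def strip_doi_url_prefix(raw: str) -> Optional[str]:
    # Sketch: 'https://dx.doi.org/10.123/ABC' -> '10.123/abc'; None if bogus.
    doi = DOI_URL_PREFIX_RE.sub("", raw.strip().lower())
    return doi if doi.startswith("10.") and "/" in doi else None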
Example #13
    def parse_datacite_creators(
        self,
        creators: List[Dict[str, Any]],
        role: str = "author",
        set_index: bool = True,
        doi: Optional[str] = None,
    ) -> List[ReleaseContrib]:
        """
        Parses a list of creators into a list of ReleaseContrib objects. Set
        set_index to False, if the index contrib field should be left blank.
        The doi parameter is only used for debugging.
        """
        # Contributors. Many nameIdentifierSchemes, we do not use (yet):
        # "attributes.creators[].nameIdentifiers[].nameIdentifierScheme":
        # ["LCNA", "GND", "email", "NAF", "OSF", "RRID", "ORCID",
        # "SCOPUS", "NRCPID", "schema.org", "GRID", "MGDS", "VIAF", "JACoW-ID"].
        contribs: List[ReleaseContrib] = []

        # Names that should be ignored outright.
        name_blocklist = set(("Occdownload Gbif.Org", ))

        i: Optional[int] = 0
        for c in creators:
            if not set_index:
                i = None
            nameType = c.get("nameType", "") or ""
            if nameType in ("", "Personal"):
                creator_id = None
                for nid in c.get("nameIdentifiers", []) or []:
                    if not isinstance(nid, dict):
                        # see: fatcat-workers/issues/44035/
                        print(
                            "unexpected nameIdentifiers, expected list of dicts, got: {}"
                            .format(nid),
                            file=sys.stderr,
                        )
                        continue
                    name_scheme = nid.get("nameIdentifierScheme", "") or ""
                    if not name_scheme.lower() == "orcid":
                        continue
                    orcid = nid.get("nameIdentifier") or ""
                    orcid = orcid.replace("https://orcid.org/", "")
                    if not orcid:
                        continue
                    creator_id = self.lookup_orcid(orcid)
                    # TODO(martin): If creator_id is None, should we create creators?

                # If there are multiple affiliation strings, use the first one.
                affiliations = c.get("affiliation", []) or []
                raw_affiliation = None
                if affiliations:
                    raw_affiliation = clean_str(affiliations[0])

                name = c.get("name")
                given_name = c.get("givenName")
                surname = c.get("familyName")

                if name:
                    name = clean_str(name)
                if not any((name, given_name, surname)):
                    continue
                if not name:
                    name = "{} {}".format(given_name or "", surname
                                          or "").strip()
                if name in name_blocklist:
                    continue
                if name.lower() in UNKNOWN_MARKERS_LOWER:
                    continue
                # Unpack name, if we have an index form (e.g. 'Razis, Panos A') into 'Panos A Razis'.
                if name:
                    name = index_form_to_display_name(name)

                if given_name:
                    given_name = clean_str(given_name)
                surname = clean_str(surname)

                # Perform a final assertion that name does not reduce to zero
                # (e.g. whitespace only name).
                if name:
                    name = name.strip()
                if not name:
                    continue

                if raw_affiliation == "":
                    continue

                extra = None

                # "DataManager", "DataCurator", "ContactPerson", "Distributor",
                # "RegistrationAgency", "Sponsor", "Researcher",
                # "RelatedPerson", "ProjectLeader", "Editor", "Other",
                # "ProjectMember", "Funder", "RightsHolder", "DataCollector",
                # "Supervisor", "Producer", "HostingInstitution", "ResearchGroup"
                contributorType = c.get("contributorType", "") or ""

                if contributorType:
                    extra = {"type": contributorType}

                rc = fatcat_openapi_client.ReleaseContrib(
                    creator_id=creator_id,
                    index=i,
                    raw_name=name,
                    given_name=given_name,
                    surname=surname,
                    role=role,
                    raw_affiliation=raw_affiliation,
                    extra=extra,
                )
                # Filter out duplicates early.
                if not contributor_list_contains_contributor(contribs, rc):
                    contribs.append(rc)
                    if i is not None:
                        i += 1
            elif nameType == "Organizational":
                name = c.get("name", "") or ""
                if name in UNKNOWN_MARKERS:
                    continue
                if len(name) < 3:
                    continue
                extra = {"organization": name}
                contribs.append(
                    fatcat_openapi_client.ReleaseContrib(index=i, extra=extra))
                if i is not None:
                    i += 1
            else:
                print("[{}] unknown name type: {}".format(doi, nameType),
                      file=sys.stderr)

        return contribs
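
Note: index_form_to_display_name() is referenced but not shown; a minimal sketch of the assumed behavior ('Razis, Panos A' -> 'Panos A Razis'), ignoring corporate names and multi-comma edge cases:

def index_form_to_display_name(name: str) -> str:
    # Sketch: flip a single 'Family, Given' index form into display order.
    if name.count(",") == 1:
        family, _, given = name.partition(",")
        return "{} {}".format(given.strip(), family.strip())
    return name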
Example #14
    def parse_record(self, article: Any) -> Optional[ReleaseEntity]:

        journal_meta = article.front.find("journal-meta")
        article_meta = article.front.find("article-meta")

        extra: Dict[str, Any] = dict()
        extra_jstor: Dict[str, Any] = dict()

        release_type = JSTOR_TYPE_MAP.get(article["article-type"])
        title = article_meta.find("article-title")
        if title and title.get_text():
            title = title.get_text().replace("\n", " ").strip()
        elif title and not title.get_text():
            title = None

        if (not title and release_type and release_type.startswith("review")
                and article_meta.product.source):
            title = "Review: {}".format(
                article_meta.product.source.replace("\n", " ").get_text())

        if not title:
            return None

        if title.endswith("."):
            title = title[:-1]

        if "[Abstract]" in title:
            # TODO: strip the "[Abstract]" bit?
            release_type = "abstract"
        elif "[Editorial" in title:
            release_type = "editorial"
        elif "[Letter" in title:
            release_type = "letter"
        elif "[Poem" in title or "[Photograph" in title:
            release_type = None

        if title.startswith("[") and title.endswith("]"):
            # strip brackets if that is all that is there (eg, translation or non-english)
            title = title[1:-1]

        # JSTOR journal-id
        journal_ids = [j.string for j in journal_meta.find_all("journal-id")]
        if journal_ids:
            extra_jstor["journal_ids"] = journal_ids

        journal_title = journal_meta.find("journal-title").get_text().replace(
            "\n", " ")
        publisher = journal_meta.find("publisher-name").get_text().replace(
            "\n", " ")
        issn = journal_meta.find("issn")
        if issn:
            issn = issn.string
            if len(issn) == 8:
                issn = "{}-{}".format(issn[0:4], issn[4:8])
            else:
                assert len(issn) == 9

        issnl = self.issn2issnl(issn)
        container_id = None
        if issnl:
            container_id = self.lookup_issnl(issnl)

        # create container if it doesn't exist
        if (container_id is None and self.create_containers
                and (issnl is not None) and journal_title):
            ce = fatcat_openapi_client.ContainerEntity(
                issnl=issnl,
                publisher=publisher,
                container_type=self.map_container_type(release_type),
                name=clean_str(journal_title, force_xml=True),
            )
            ce_edit = self.create_container(ce)
            container_id = ce_edit.ident
            self._issnl_id_map[issnl] = container_id

        doi = article_meta.find("article-id", {"pub-id-type": "doi"})
        if doi:
            doi = clean_doi(doi.string.lower())
        else:
            doi = None

        jstor_id = article_meta.find("article-id", {"pub-id-type": "jstor"})
        if jstor_id:
            jstor_id = jstor_id.string.strip()
        if not jstor_id and doi:
            assert doi.startswith("10.2307/")
            jstor_id = doi.replace("10.2307/", "")
        assert jstor_id and int(jstor_id)

        contribs = []
        cgroup = article_meta.find("contrib-group")
        if cgroup:
            for c in cgroup.find_all("contrib"):
                given = c.find("given-names")
                if given:
                    given = clean_str(given.get_text().replace("\n", " "))
                surname = c.find("surname")
                if surname:
                    surname = clean_str(surname.get_text().replace("\n", " "))
                raw_name = c.find("string-name")
                if raw_name:
                    raw_name = clean_str(raw_name.get_text().replace(
                        "\n", " "))

                if not raw_name:
                    if given and surname:
                        raw_name = "{} {}".format(given, surname)
                    elif surname:
                        raw_name = surname

                role = JSTOR_CONTRIB_MAP.get(c.get("contrib-type", "author"))
                if not role and c.get("contrib-type"):
                    sys.stderr.write("NOT IN JSTOR_CONTRIB_MAP: {}\n".format(
                        c["contrib-type"]))
                contribs.append(
                    fatcat_openapi_client.ReleaseContrib(
                        role=role,
                        raw_name=raw_name,
                        given_name=given,
                        surname=surname,
                    ))

        for i, contrib in enumerate(contribs):
            if contrib.raw_name != "et al.":
                contrib.index = i

        release_year = None
        release_date = None
        pub_date = article_meta.find("pub-date")
        if pub_date and pub_date.year:
            release_year = int(pub_date.year.string)
            if pub_date.month and pub_date.day:
                release_date = datetime.date(release_year,
                                             int(pub_date.month.string),
                                             int(pub_date.day.string))
                if release_date.day == 1 and release_date.month == 1:
                    # suspect jan 1st dates get set by JSTOR when actual
                    # date not known (citation needed), so drop them
                    release_date = None

        volume = None
        if article_meta.volume:
            volume = article_meta.volume.string or None

        issue = None
        if article_meta.issue:
            issue = article_meta.issue.string or None

        pages = None
        if article_meta.find("page-range"):
            pages = article_meta.find("page-range").string
        elif article_meta.fpage:
            pages = article_meta.fpage.string

        language = None
        cm = article_meta.find("custom-meta")
        if cm and cm.find("meta-name") and cm.find("meta-name").string == "lang":
            language = cm.find("meta-value").string.split()[0]
            language = LANG_MAP_MARC.get(language)
            if not language:
                warnings.warn("MISSING MARC LANG: {}".format(
                    cm.find("meta-value").string))

        # JSTOR issue-id
        if article_meta.find("issue-id"):
            issue_id = clean_str(article_meta.find("issue-id").string)
            if issue_id:
                extra_jstor["issue_id"] = issue_id

        # everything in JSTOR is published
        release_stage = "published"

        # extra:
        #   withdrawn_date
        #   translation_of
        #   subtitle
        #   aliases
        #   container_name
        #   group-title
        #   pubmed: retraction refs
        if extra_jstor:
            extra["jstor"] = extra_jstor

        re = fatcat_openapi_client.ReleaseEntity(
            # work_id
            title=title,
            # original_title
            release_type=release_type,
            release_stage=release_stage,
            release_date=release_date,
            release_year=release_year,
            ext_ids=fatcat_openapi_client.ReleaseExtIds(
                doi=doi,
                jstor=jstor_id,
            ),
            volume=volume,
            issue=issue,
            pages=pages,
            publisher=publisher,
            language=language,
            # license_slug
            # content, mimetype, lang
            # abstracts=abstracts,
            contribs=contribs,
            # key, year, container_name, title, locator
            # extra: volume, authors, issue, publisher, identifiers
            # refs=refs,
            #   name, type, publisher, issnl
            #   extra: issnp, issne, original_name, languages, country
            container_id=container_id,
            extra=extra or None,
        )
        return re
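
Note: LANG_MAP_MARC is assumed to map MARC bibliographic language codes to ISO 639-1; an abbreviated sketch (the real table covers the full MARC code list):

LANG_MAP_MARC = {
    "eng": "en",
    "fre": "fr",
    "ger": "de",
    "jpn": "ja",
    "spa": "es",
    # ... remainder of the MARC language code list
}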
Example #15
    def parse_record(self, obj: Dict[str, Any]) -> Optional[ReleaseEntity]:
        """
        bibjson {
            abstract (string, optional),
            author (Array[bibjson.author], optional),
            identifier (Array[bibjson.identifier]),
            journal (bibjson.journal, optional),
            keywords (Array[string], optional),
            link (Array[bibjson.link], optional),
            month (string, optional),
            subject (Array[bibjson.subject], optional),
            title (string),
            year (string, optional)
        }
        bibjson.journal {
            country (string, optional),
            end_page (string, optional),
            language (Array[string], optional),
            license (Array[bibjson.journal.license], optional),
            number (string, optional),
            publisher (string, optional),
            start_page (string, optional),
            title (string, optional),
            volume (string, optional)
        }
        """

        if not obj or not isinstance(obj, dict) or "bibjson" not in obj:
            self.counts["skip-empty"] += 1
            return None

        bibjson = obj["bibjson"]

        title = clean_str(bibjson.get("title"), force_xml=True)
        if not title:
            self.counts["skip-title"] += 1
            return None

        container_name = clean_str(bibjson["journal"]["title"])
        container_id = None
        # NOTE: 'issns' not documented in API schema
        for issn in bibjson["journal"]["issns"]:
            issnl = self.issn2issnl(issn)
            if issnl:
                container_id = self.lookup_issnl(issnl)
            if container_id:
                # don't store container_name when we have an exact match
                container_name = None
                break

        volume = clean_str(bibjson["journal"].get("volume"))
        # NOTE: this schema seems to use "number" as "issue number"
        issue = clean_str(bibjson["journal"].get("number"))
        publisher = clean_str(bibjson["journal"].get("publisher"))

        try:
            release_year: Optional[int] = int(bibjson.get("year"))
        except (TypeError, ValueError):
            release_year = None
        release_month = parse_month(clean_str(bibjson.get("month")))

        # block bogus far-future years/dates
        if release_year is not None and (release_year > (self.this_year + 5)
                                         or release_year < 1000):
            release_month = None
            release_year = None

        license_slug = self.doaj_license_slug(
            bibjson["journal"].get("license"))
        country = parse_country_name(bibjson["journal"].get("country"))
        language = None
        for raw in bibjson["journal"].get("language") or []:
            language = parse_lang_name(raw)
            if language:
                break

        # pages
        # NOTE: error in API docs? seems like start_page not under 'journal' object
        start_page = clean_str(
            bibjson["journal"].get("start_page")) or clean_str(
                bibjson.get("start_page"))
        end_page = clean_str(bibjson["journal"].get("end_page")) or clean_str(
            bibjson.get("end_page"))
        pages: Optional[str] = None
        if start_page and end_page:
            pages = f"{start_page}-{end_page}"
        elif start_page:
            pages = start_page

        doaj_article_id = obj["id"].lower()
        ext_ids = self.doaj_ext_ids(bibjson["identifier"], doaj_article_id)
        abstracts = self.doaj_abstracts(bibjson) or []
        contribs = self.doaj_contribs(bibjson.get("author") or []) or []

        # DOAJ-specific extra
        doaj_extra: Dict[str, Any] = dict()
        if bibjson.get("subject"):
            doaj_extra["subject"] = bibjson.get("subject")
        if bibjson.get("keywords"):
            doaj_extra["keywords"] = [
                k for k in [clean_str(s) for s in bibjson.get("keywords")] if k
            ]

        # generic extra
        extra: Dict[str, Any] = dict()
        if country:
            extra["country"] = country
        if not container_id and container_name:
            extra["container_name"] = container_name
        if release_year and release_month:
            # TODO: schema migration
            extra["release_month"] = release_month

        if doaj_extra:
            extra["doaj"] = doaj_extra

        re = fatcat_openapi_client.ReleaseEntity(
            work_id=None,
            container_id=container_id,
            release_type="article-journal",
            release_stage="published",
            title=title,
            release_year=release_year,
            # release_date,
            publisher=publisher,
            ext_ids=ext_ids,
            contribs=contribs or None,
            volume=volume,
            issue=issue,
            pages=pages,
            language=language,
            abstracts=abstracts or None,
            extra=extra or None,
            license_slug=license_slug,
        )
        re = self.biblio_hacks(re)

        # TODO: filter out some of these by publishers which are known to
        # register DOIs. eg, PLOS, maybe others

        return re
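
Note: parse_month() is assumed to normalize free-form month strings to 1..12; a plausible sketch (English month-name handling is an assumption):

from typing import Optional

MONTH_ABBR = ["jan", "feb", "mar", "apr", "may", "jun",
              "jul", "aug", "sep", "oct", "nov", "dec"]

def parse_month(raw: Optional[str]) -> Optional[int]:
    # Sketch: '3', '03', 'March', 'mar' -> 3; anything else -> None.
    if not raw:
        return None
    raw = raw.strip().lower()
    if raw.isdigit() and 1 <= int(raw) <= 12:
        return int(raw)
    for i, abbr in enumerate(MONTH_ABBR, start=1):
        if raw.startswith(abbr):
            return i
    return None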
Example #16
    def parse_record(self, xml_elem):
        """
        - title
            => may contain <i>, <sub>, <sup>, <tt>
        - journal (abbrev?)
        - volume, pages, number (number -> issue)
        - publisher
        - year
            => for conferences, year of conference not of publication
        - month
        - crossref (from inproceedings to specific proceedings volume)
        - booktitle
            => for inproceedings, this is the name of conference or workshop. acronym.
        - isbn
        """

        dblp_key = xml_elem.get('key')
        if not dblp_key:
            self.counts['skip-empty-key'] += 1
            return False
        dblp_key_type = dblp_key.split('/')[0]

        # dblp_prefix may be used for container lookup
        dblp_prefix = None
        if dblp_key_type in ('journals', 'conf'):
            dblp_prefix = '/'.join(dblp_key.split('/')[:2])
        elif dblp_key_type in ('series', 'reference', 'tr', 'books'):
            dblp_prefix = '/'.join(dblp_key.split('/')[:-1])

        publtype = xml_elem.get('publtype') or None

        dblp_type = xml_elem.name
        if dblp_type not in self.ELEMENT_TYPES:
            self.counts[f'skip-dblp-type:{dblp_type}'] += 1
            return False

        if dblp_key_type in ('homepages', 'persons', 'dblpnote'):
            self.counts['skip-key-type'] += 1
            return False

        if dblp_key.startswith('journals/corr/'):
            self.counts['skip-arxiv-corr'] += 1
            return False

        title = clean_str(" ".join(xml_elem.title.stripped_strings), force_xml=True)
        if not title:
            self.counts['skip-title'] += 1
            return False
        if title.endswith('.'):
            title = title[:-1]

        release_type = None
        release_stage = 'published'
        withdrawn_status = None

        # primary release_type detection: type of XML element, then prefix of key for granularity
        if dblp_type == 'article':
            release_type = 'article'
            if dblp_key_type == 'journals' and publtype != 'informal':
                release_type = 'article-journal'
            elif dblp_key_type == 'tr':
                release_type = 'report'
            elif title.startswith("Review:"):
                release_type = 'review'
        elif dblp_type == 'inproceedings':
            release_type = 'paper-conference'
        elif dblp_type == 'book':
            release_type = 'book'
        elif dblp_type == 'incollection':
            # XXX: part vs. chapter?
            release_type = 'chapter'
        elif dblp_type == 'data':
            release_type = 'dataset'
        elif dblp_type in ('mastersthesis', 'phdthesis'):
            release_type = 'thesis'

        # overrides/extensions of the above
        if publtype == 'informal':
            # for conferences, seems to indicate peer-review status
            # for journals, seems to indicate things like book reviews; split out above
            pass
        elif publtype == 'encyclopedia':
            release_type = 'entry-encyclopedia'
        elif publtype == 'edited':
            # XXX: article?
            release_type = 'editorial'
        elif publtype == 'data':
            release_type = 'dataset'
        elif publtype == 'software':
            release_type = 'software'
        elif publtype == 'withdrawn':
            withdrawn_status = 'withdrawn'
        elif publtype == 'survey':
            # XXX: flag as a review/survey article?
            pass

        #print((release_type, dblp_type, dblp_key_type, publtype), file=sys.stderr)

        container_name = None
        booktitle = clean_str(xml_elem.booktitle and xml_elem.booktitle.text)
        series = clean_str(xml_elem.series and xml_elem.series.text)

        if xml_elem.journal:
            container_name = clean_str(xml_elem.journal.text)

        container_id = None
        if dblp_prefix:
            container_id = self.lookup_dblp_prefix(dblp_prefix)
            # note: we will skip later if couldn't find prefix

        publisher = clean_str(xml_elem.publisher and xml_elem.publisher.text)
        volume = clean_str(xml_elem.volume and xml_elem.volume.text)
        issue = clean_str(xml_elem.number and xml_elem.number.text)
        pages = clean_str(xml_elem.pages and xml_elem.pages.text)
        release_year = clean_str(xml_elem.year and xml_elem.year.text)
        if release_year and release_year.isdigit():
            release_year = int(release_year)
        else:
            release_year = None
        release_month = parse_month(clean_str(xml_elem.month and xml_elem.month.text))
        isbn = clean_isbn13(xml_elem.isbn and xml_elem.isbn.text)
        part_of_key = clean_str(xml_elem.crossref and xml_elem.crossref.text)

        # block bogus far-future years/dates
        if release_year is not None and (release_year > (self.this_year + 5) or release_year < 1000):
            release_month = None
            release_year = None

        contribs = self.dblp_contribs(xml_elem)
        ext_ids = self.dblp_ext_ids(xml_elem, dblp_key)
        if isbn:
            ext_ids.isbn13 = isbn
        if ext_ids.doi:
            self.counts['has-doi'] += 1

        # dblp-specific extra
        dblp_extra = dict(type=dblp_type)
        note = clean_str(xml_elem.note and xml_elem.note.text)
        if note and 'base-search.net' not in note:
            dblp_extra['note'] = note
        if part_of_key:
            dblp_extra['part_of_key'] = part_of_key

        # generic extra
        extra = dict()
        if not container_id and container_name:
            extra['container_name'] = container_name

        if series and (dblp_key_type == 'series' or dblp_type == 'book'):
            extra['series-title'] = series
        elif series:
            dblp_extra['series'] = series

        if booktitle and dblp_key_type == 'series':
            extra['container-title'] = booktitle
        elif booktitle and dblp_key_type == 'conf':
            extra['event'] = booktitle
        elif booktitle:
            dblp_extra['booktitle'] = booktitle

        if release_year and release_month:
            # TODO: release_month schema migration
            extra['release_month'] = release_month

        if dblp_extra:
            extra['dblp'] = dblp_extra
        if not extra:
            extra = None

        re = fatcat_openapi_client.ReleaseEntity(
            work_id=None,
            container_id=container_id,
            release_type=release_type,
            release_stage=release_stage,
            withdrawn_status=withdrawn_status,
            title=title,
            release_year=release_year,
            #release_date,
            publisher=publisher,
            ext_ids=ext_ids,
            contribs=contribs,
            volume=volume,
            issue=issue,
            pages=pages,
            extra=extra,
        )
        re = self.biblio_hacks(re)

        if self.dump_json_mode:
            re_dict = entity_to_dict(re, api_client=self.api.api_client)
            re_dict['_dblp_ee_urls'] = self.dblp_ext_urls(xml_elem)
            re_dict['_dblp_prefix'] = dblp_prefix
            print(json.dumps(re_dict, sort_keys=True))
            return False

        if not re.container_id:
            self.counts["skip-dblp-container-missing"] += 1
            return False
        return re
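As a usage note for the key handling above, the prefix derivation can stand alone as a small helper; a sketch (function name hypothetical, logic copied from parse_record):

from typing import Optional

def dblp_prefix_for_key(dblp_key: str) -> Optional[str]:
    # The first path segment selects the record family; journal and
    # conference keys keep two segments, other container-like families
    # drop only the trailing record id.
    key_type = dblp_key.split("/")[0]
    if key_type in ("journals", "conf"):
        return "/".join(dblp_key.split("/")[:2])
    if key_type in ("series", "reference", "tr", "books"):
        return "/".join(dblp_key.split("/")[:-1])
    return None

# dblp_prefix_for_key("journals/cacm/Knuth74") == "journals/cacm"
# dblp_prefix_for_key("homepages/k/Knuth") is None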
Example #17
    def parse_record(self, obj):
        """
        bibjson {
            abstract (string, optional),
            author (Array[bibjson.author], optional),
            identifier (Array[bibjson.identifier]),
            journal (bibjson.journal, optional),
            keywords (Array[string], optional),
            link (Array[bibjson.link], optional),
            month (string, optional),
            subject (Array[bibjson.subject], optional),
            title (string),
            year (string, optional)
        }
        bibjson.journal {
            country (string, optional),
            end_page (string, optional),
            language (Array[string], optional),
            license (Array[bibjson.journal.license], optional),
            number (string, optional),
            publisher (string, optional),
            start_page (string, optional),
            title (string, optional),
            volume (string, optional)
        }
        """

        if not obj or not isinstance(obj, dict) or 'bibjson' not in obj:
            self.counts['skip-empty'] += 1
            return None

        bibjson = obj['bibjson']

        title = clean_str(bibjson.get('title'), force_xml=True)
        if not title:
            self.counts['skip-title'] += 1
            return False

        container_name = clean_str(bibjson['journal']['title'])
        container_id = None
        # NOTE: 'issns' not documented in API schema
        for issn in bibjson['journal']['issns']:
            issnl = self.issn2issnl(issn)
            if issnl:
                container_id = self.lookup_issnl(issnl)
            if container_id:
                # don't store container_name when we have an exact match
                container_name = None
                break

        volume = clean_str(bibjson['journal'].get('volume'))
        # NOTE: this schema seems to use "number" as "issue number"
        issue = clean_str(bibjson['journal'].get('number'))
        publisher = clean_str(bibjson['journal'].get('publisher'))

        try:
            release_year = int(bibjson.get('year'))
        except (TypeError, ValueError):
            release_year = None
        release_month = parse_month(clean_str(bibjson.get('month')))

        # block bogus far-future years/dates
        if release_year is not None and (release_year > (self.this_year + 5)
                                         or release_year < 1000):
            release_month = None
            release_year = None

        license_slug = self.doaj_license_slug(
            bibjson['journal'].get('license'))
        country = parse_country_name(bibjson['journal'].get('country'))
        language = None
        for raw in bibjson['journal'].get('language') or []:
            language = parse_lang_name(raw)
            if language:
                break

        # pages
        # NOTE: error in API docs? seems like start_page not under 'journal' object
        start_page = clean_str(
            bibjson['journal'].get('start_page')) or clean_str(
                bibjson.get('start_page'))
        end_page = clean_str(bibjson['journal'].get('end_page')) or clean_str(
            bibjson.get('end_page'))
        pages: Optional[str] = None
        if start_page and end_page:
            pages = f"{start_page}-{end_page}"
        elif start_page:
            pages = start_page

        doaj_article_id = obj['id'].lower()
        ext_ids = self.doaj_ext_ids(bibjson['identifier'], doaj_article_id)
        abstracts = self.doaj_abstracts(bibjson)
        contribs = self.doaj_contribs(bibjson.get('author') or [])

        # DOAJ-specific extra
        doaj_extra = dict()
        if bibjson.get('subject'):
            doaj_extra['subject'] = bibjson.get('subject')
        if bibjson.get('keywords'):
            doaj_extra['keywords'] = [
                k for k in [clean_str(s) for s in bibjson.get('keywords')] if k
            ]

        # generic extra
        extra = dict()
        if country:
            extra['country'] = country
        if not container_id and container_name:
            extra['container_name'] = container_name
        if release_year and release_month:
            # TODO: schema migration
            extra['release_month'] = release_month

        if doaj_extra:
            extra['doaj'] = doaj_extra
        if not extra:
            extra = None

        re = fatcat_openapi_client.ReleaseEntity(
            work_id=None,
            container_id=container_id,
            release_type='article-journal',
            release_stage='published',
            title=title,
            release_year=release_year,
            #release_date,
            publisher=publisher,
            ext_ids=ext_ids,
            contribs=contribs,
            volume=volume,
            issue=issue,
            pages=pages,
            language=language,
            abstracts=abstracts,
            extra=extra,
            license_slug=license_slug,
        )
        re = self.biblio_hacks(re)
        return re
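The start_page/end_page handling above reduces to a small helper; a minimal sketch (name hypothetical):

from typing import Optional

def build_pages(start_page: Optional[str], end_page: Optional[str]) -> Optional[str]:
    # Prefer a full "start-end" range; fall back to the start page alone.
    # An end page without a start page is ignored, matching the logic above.
    if start_page and end_page:
        return f"{start_page}-{end_page}"
    return start_page or None

# build_pages("12", "19") == "12-19"; build_pages("12", None) == "12"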
Example #18
    def parse_record(self, a: Any) -> ReleaseEntity:

        medline = a.MedlineCitation
        # PubmedData isn't required by DTD, but seems to always be present
        pubmed = a.PubmedData
        extra = dict()
        extra_pubmed = dict()

        identifiers = pubmed.ArticleIdList
        pmid = medline.PMID.string.strip()
        doi = identifiers.find("ArticleId", IdType="doi")
        if doi and doi.string:
            doi = clean_doi(doi.string)
        else:
            doi = None

        pmcid = identifiers.find("ArticleId", IdType="pmc")
        if pmcid:
            pmcid = clean_pmcid(pmcid.string.strip().upper())

        release_type = None
        pub_types = []
        for pub_type in medline.Article.PublicationTypeList.find_all("PublicationType"):
            pub_types.append(pub_type.string)
            if pub_type.string in PUBMED_RELEASE_TYPE_MAP:
                release_type = PUBMED_RELEASE_TYPE_MAP[pub_type.string]
                break
        if pub_types:
            extra_pubmed["pub_types"] = pub_types
        if medline.Article.PublicationTypeList.find(string="Retraction of Publication"):
            release_type = "retraction"
            retraction_of = medline.find("CommentsCorrections", RefType="RetractionOf")
            if retraction_of:
                if retraction_of.RefSource:
                    extra_pubmed["retraction_of_raw"] = retraction_of.RefSource.string
                if retraction_of.PMID:
                    extra_pubmed["retraction_of_pmid"] = retraction_of.PMID.string

        # everything in medline is published
        release_stage = "published"
        if medline.Article.PublicationTypeList.find(string="Corrected and Republished Article"):
            release_stage = "updated"
        if medline.Article.PublicationTypeList.find(string="Retraction of Publication"):
            release_stage = "retraction"

        withdrawn_status = None
        if medline.Article.PublicationTypeList.find(string="Retracted Publication"):
            withdrawn_status = "retracted"
        elif medline.find("CommentsCorrections", RefType="ExpressionOfConcernIn"):
            withdrawn_status = "concern"

        pages = medline.find("MedlinePgn")
        if pages:
            pages = pages.string

        title = medline.Article.ArticleTitle.get_text()  # always present
        if title:
            title = title.replace("\n", " ")
            if title.endswith("."):
                title = title[:-1]
            # this hides some "special" titles, but the vast majority are
            # translations; translations don't always include the original_title
            if title.startswith("[") and title.endswith("]"):
                title = title[1:-1]
        else:
            # will filter out later
            title = None

        original_title = medline.Article.find("VernacularTitle", recurse=False)
        if original_title:
            original_title = original_title.get_text() or None
            if original_title:
                original_title = original_title.replace("\n", " ")
                if original_title.endswith("."):
                    original_title = original_title[:-1]

        if original_title and not title:
            # if we only have an "original" title, but not translated/english
            # title, sub in the original title so the entity can be created
            title = original_title
            original_title = None

        # TODO: happening in alpha order, not handling multi-language well.
        language = medline.Article.Language
        if language:
            language = language.get_text()
            if language in ("und", "un"):
                # "undetermined"
                language = None
            else:
                language = LANG_MAP_MARC.get(language)
                if not language and medline.Article.Language.get_text() not in LANG_MAP_MARC:
                    warnings.warn(
                        "MISSING MARC LANG: {}".format(medline.Article.Language.string)
                    )

        ### Journal/Issue Metadata
        # MedlineJournalInfo is always present
        issnl = None
        container_id = None
        container_name = None
        container_extra = dict()
        mji = medline.MedlineJournalInfo
        if mji.find("Country"):
            country_name = mji.Country.string.strip()
            country_code = COUNTRY_NAME_MAP.get(country_name)
            if country_code:
                container_extra["country"] = country_code
            elif country_name:
                container_extra["country_name"] = country_name
        if mji.find("ISSNLinking"):
            issnl = mji.ISSNLinking.string

        journal = medline.Article.Journal
        issnp = journal.find("ISSN", IssnType="Print")
        if issnp:
            issnp = clean_issn(issnp.string)
        else:
            issnp = None

        # prefer the ISSNLinking value; fall back to mapping the print ISSN
        if not issnl and issnp:
            issnl = self.issn2issnl(issnp)

        if issnl:
            container_id = self.lookup_issnl(issnl)

        pub_date = medline.Article.find("ArticleDate")
        if not pub_date:
            pub_date = journal.PubDate
        if not pub_date:
            pub_date = journal.JournalIssue.PubDate
        release_date: Optional[str] = None
        release_year: Optional[int] = None
        if pub_date.Year:
            release_year = int(pub_date.Year.string)
            if pub_date.find("Day") and pub_date.find("Month"):
                try:
                    release_date_date = datetime.date(
                        release_year,
                        MONTH_ABBR_MAP[pub_date.Month.string],
                        int(pub_date.Day.string),
                    )
                    release_date = release_date_date.isoformat()
                except ValueError as ve:
                    print("bad date, skipping: {}".format(ve), file=sys.stderr)
                    release_date = None
        elif pub_date.MedlineDate:
            medline_date = pub_date.MedlineDate.string.strip()
            if len(medline_date) >= 4 and medline_date[:4].isdigit():
                release_year = int(medline_date[:4])
                if release_year < 1300 or release_year > 2040:
                    print(
                        "bad medline year, skipping: {}".format(release_year), file=sys.stderr
                    )
                    release_year = None
            else:
                print(
                    "unparsable medline date, skipping: {}".format(medline_date),
                    file=sys.stderr,
                )

        if journal.find("Title"):
            container_name = journal.Title.get_text()

        if (
            container_id is None
            and self.create_containers
            and (issnl is not None)
            and container_name
        ):
            # name, type, publisher, issnl
            # extra: original_name, languages, country
            ce = fatcat_openapi_client.ContainerEntity(
                name=container_name,
                container_type="journal",
                # NOTE: publisher not included
                issnl=issnl,
                issnp=issnp,
                extra=(container_extra or None),
            )
            ce_edit = self.create_container(ce)
            container_id = ce_edit.ident
            self._issnl_id_map[issnl] = container_id

        ji = journal.JournalIssue
        volume = None
        if ji.find("Volume"):
            volume = ji.Volume.string
        issue = None
        if ji.find("Issue"):
            issue = ji.Issue.string

        ### Abstracts
        # "All abstracts are in English"
        abstracts = []
        primary_abstract = medline.find("Abstract")
        if primary_abstract and primary_abstract.AbstractText.get("NlmCategory"):
            joined = "\n".join(
                [m.get_text() for m in primary_abstract.find_all("AbstractText")]
            )
            abst = fatcat_openapi_client.ReleaseAbstract(
                content=joined,
                mimetype="text/plain",
                lang="en",
            )
            if abst.content:
                abstracts.append(abst)
        elif primary_abstract:
            for abstract in primary_abstract.find_all("AbstractText"):
                abst = fatcat_openapi_client.ReleaseAbstract(
                    content=abstract.get_text().strip(),
                    mimetype="text/plain",
                    lang="en",
                )
                if abst.content:
                    abstracts.append(abst)
                if abstract.find("math"):
                    abst = fatcat_openapi_client.ReleaseAbstract(
                        # strip the <AbstractText> tags
                        content=str(abstract)[14:-15],
                        mimetype="application/mathml+xml",
                        lang="en",
                    )
                    if abst.content:
                        abstracts.append(abst)
        other_abstracts = medline.find_all("OtherAbstract")
        for other in other_abstracts:
            lang: Optional[str] = "en"
            if other.get("Language"):
                lang = LANG_MAP_MARC.get(other["Language"])
            abst = fatcat_openapi_client.ReleaseAbstract(
                content=other.AbstractText.get_text().strip(),
                mimetype="text/plain",
                lang=lang,
            )
            if abst.content:
                abstracts.append(abst)

        ### Contribs
        contribs = []
        if medline.AuthorList:
            for author in medline.AuthorList.find_all("Author"):
                creator_id = None
                given_name = None
                surname = None
                raw_name = None
                if author.ForeName:
                    given_name = author.ForeName.get_text().replace("\n", " ")
                if author.LastName:
                    surname = author.LastName.get_text().replace("\n", " ")
                if given_name and surname:
                    raw_name = "{} {}".format(given_name, surname)
                elif surname:
                    raw_name = surname
                if not raw_name and author.CollectiveName and author.CollectiveName.get_text():
                    raw_name = author.CollectiveName.get_text().replace("\n", " ")
                contrib_extra = dict()
                orcid = author.find("Identifier", Source="ORCID")
                if orcid:
                    # needs re-formatting from, eg, "0000000179841889"
                    orcid = orcid.string
                    if orcid.startswith("http://orcid.org/"):
                        orcid = orcid.replace("http://orcid.org/", "")
                    elif orcid.startswith("https://orcid.org/"):
                        orcid = orcid.replace("https://orcid.org/", "")
                    elif "-" not in orcid:
                        orcid = "{}-{}-{}-{}".format(
                            orcid[0:4],
                            orcid[4:8],
                            orcid[8:12],
                            orcid[12:16],
                        )
                    creator_id = self.lookup_orcid(orcid)
                    contrib_extra["orcid"] = orcid
                affiliations = author.find_all("Affiliation")
                raw_affiliation = None
                if affiliations:
                    raw_affiliation = affiliations[0].get_text().replace("\n", " ")
                    if len(affiliations) > 1:
                        contrib_extra["more_affiliations"] = [
                            ra.get_text().replace("\n", " ") for ra in affiliations[1:]
                        ]
                if author.find("EqualContrib"):
                    # TODO: schema for this?
                    contrib_extra["equal"] = True
                contribs.append(
                    fatcat_openapi_client.ReleaseContrib(
                        raw_name=raw_name,
                        given_name=given_name,
                        surname=surname,
                        role="author",
                        raw_affiliation=raw_affiliation,
                        creator_id=creator_id,
                        extra=contrib_extra,
                    )
                )

            if medline.AuthorList["CompleteYN"] == "N":
                contribs.append(fatcat_openapi_client.ReleaseContrib(raw_name="et al."))

        for i, contrib in enumerate(contribs):
            if contrib.raw_name != "et al.":
                contrib.index = i

        ### References
        refs = []
        if pubmed.ReferenceList:
            # note that Reference always exists within a ReferenceList, but
            # that there may be multiple ReferenceList (eg, sometimes one per
            # Reference)
            for ref in pubmed.find_all("Reference"):
                ref_extra: Dict[str, Any] = dict()
                ref_doi = ref.find("ArticleId", IdType="doi")
                if ref_doi:
                    ref_doi = clean_doi(ref_doi.string)
                ref_pmid = ref.find("ArticleId", IdType="pubmed")
                if ref_pmid:
                    ref_pmid = clean_pmid(ref_pmid.string)
                ref_release_id = None
                if ref_doi:
                    ref_extra["doi"] = ref_doi
                    if self.lookup_refs:
                        ref_release_id = self.lookup_doi(ref_doi)
                if ref_pmid:
                    ref_extra["pmid"] = ref_pmid
                    if self.lookup_refs:
                        ref_release_id = self.lookup_pmid(ref_pmid)
                ref_raw = ref.Citation
                if ref_raw:
                    ref_extra["unstructured"] = ref_raw.get_text()
                refs.append(
                    fatcat_openapi_client.ReleaseRef(
                        target_release_id=ref_release_id,
                        extra=ref_extra or None,
                    )
                )

        # extra:
        #   translation_of
        #   aliases
        #   container_name
        #   group-title
        #   pubmed: retraction refs
        if extra_pubmed:
            extra["pubmed"] = extra_pubmed

        title = clean_str(title)
        if not title:
            return None

        re = fatcat_openapi_client.ReleaseEntity(
            work_id=None,
            title=title,
            original_title=clean_str(original_title),
            release_type=release_type,
            release_stage=release_stage,
            release_date=release_date,
            release_year=release_year,
            withdrawn_status=withdrawn_status,
            ext_ids=fatcat_openapi_client.ReleaseExtIds(
                doi=doi,
                pmid=pmid,
                pmcid=pmcid,
                # isbn13     # never in Article
            ),
            volume=volume,
            issue=issue,
            pages=pages,
            # publisher  # not included?
            language=language,
            # license_slug   # not in MEDLINE
            abstracts=abstracts or None,
            contribs=contribs or None,
            refs=refs or None,
            container_id=container_id,
            extra=extra or None,
        )
        return re
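The ORCID re-formatting above can be read as one normalization step; a sketch (helper name hypothetical, transformations as in the code):

def normalize_orcid(raw: str) -> str:
    # Strip URL prefixes, then hyphenate bare 16-character identifiers,
    # e.g. "0000000179841889" -> "0000-0001-7984-1889".
    for prefix in ("http://orcid.org/", "https://orcid.org/"):
        if raw.startswith(prefix):
            raw = raw[len(prefix):]
    if "-" not in raw:
        raw = "-".join([raw[0:4], raw[4:8], raw[8:12], raw[12:16]])
    return raw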
Example #19
    def parse_record(self, xml_elem: Any) -> Optional[ReleaseEntity]:
        """
        - title
            => may contain <i>, <sub>, <sup>, <tt>
        - journal (abbrev?)
        - volume, pages, number (number -> issue)
        - publisher
        - year
            => for conferences, year of conference not of publication
        - month
        - crossref (from inproceedings to specific proceedings volume)
        - booktitle
            => for inproceedings, this is the name of conference or workshop. acronym.
        - isbn
        """

        dblp_key = xml_elem.get("key")
        if not dblp_key:
            self.counts["skip-empty-key"] += 1
            return False
        dblp_key_type = dblp_key.split("/")[0]

        # dblp_prefix may be used for container lookup
        dblp_prefix = None
        if dblp_key_type in ("journals", "conf"):
            dblp_prefix = "/".join(dblp_key.split("/")[:2])
        elif dblp_key_type in ("series", "reference", "tr", "books"):
            dblp_prefix = "/".join(dblp_key.split("/")[:-1])

        publtype = xml_elem.get("publtype") or None

        dblp_type = xml_elem.name
        if dblp_type not in self.ELEMENT_TYPES:
            self.counts[f"skip-dblp-type:{dblp_type}"] += 1
            return False

        if dblp_key_type in ("homepages", "persons", "dblpnote"):
            self.counts["skip-key-type"] += 1
            return False

        if dblp_key.startswith("journals/corr/"):
            self.counts["skip-arxiv-corr"] += 1
            return False

        title = clean_str(" ".join(xml_elem.title.stripped_strings),
                          force_xml=True)
        if not title:
            self.counts["skip-title"] += 1
            return False
        if title.endswith("."):
            title = title[:-1]

        release_type = None
        release_stage = "published"
        withdrawn_status = None

        # primary release_type detection: type of XML element, then prefix of key for granularity
        if dblp_type == "article":
            release_type = "article"
            if dblp_key_type == "journals" and publtype != "informal":
                release_type = "article-journal"
            elif dblp_key_type == "tr":
                release_type = "report"
            elif title.startswith("Review:"):
                release_type = "review"
        elif dblp_type == "inproceedings":
            release_type = "paper-conference"
        elif dblp_type == "book":
            release_type = "book"
        elif dblp_type == "incollection":
            # XXX: part vs. chapter?
            release_type = "chapter"
        elif dblp_type == "data":
            release_type = "dataset"
        elif dblp_type in ("mastersthesis", "phdthesis"):
            release_type = "thesis"

        # overrides/extensions of the above
        if publtype == "informal":
            # for conferences, seems to indicate peer-review status
            # for journals, seems to indicate things like book reviews; split out above
            pass
        elif publtype == "encyclopedia":
            release_type = "entry-encyclopedia"
        elif publtype == "edited":
            # XXX: article?
            release_type = "editorial"
        elif publtype == "data":
            release_type = "dataset"
        elif publtype == "data":
            release_type = "dataset"
        elif publtype == "software":
            release_type = "software"
        elif publtype == "widthdrawn":
            withdrawn_status = "widthdrawn"
        elif publtype == "survey":
            # XXX: flag as a review/survey article?
            pass

        # print((release_type, dblp_type, dblp_key_type, publtype), file=sys.stderr)

        container_name = None
        booktitle = clean_str(xml_elem.booktitle and xml_elem.booktitle.text)
        series = clean_str(xml_elem.series and xml_elem.series.text)

        if xml_elem.journal:
            container_name = clean_str(xml_elem.journal.text)

        container_id = None
        if dblp_prefix:
            container_id = self.lookup_dblp_prefix(dblp_prefix)
            # note: we will skip later if couldn't find prefix

        publisher = clean_str(xml_elem.publisher and xml_elem.publisher.text)
        volume = clean_str(xml_elem.volume and xml_elem.volume.text)
        issue = clean_str(xml_elem.number and xml_elem.number.text)
        pages = clean_str(xml_elem.pages and xml_elem.pages.text)
        release_year_str = clean_str(xml_elem.year and xml_elem.year.text)
        if release_year_str and release_year_str.isdigit():
            release_year: Optional[int] = int(release_year_str)
        else:
            release_year = None
        release_month = parse_month(
            clean_str(xml_elem.month and xml_elem.month.text))
        isbn = clean_isbn13(xml_elem.isbn and xml_elem.isbn.text)
        part_of_key = clean_str(xml_elem.crossref and xml_elem.crossref.text)

        # block bogus far-future years/dates
        if release_year is not None and (release_year > (self.this_year + 5)
                                         or release_year < 1000):
            release_month = None
            release_year = None

        contribs = self.dblp_contribs(xml_elem)
        ext_ids = self.dblp_ext_ids(xml_elem, dblp_key)
        if isbn:
            ext_ids.isbn13 = isbn
        if ext_ids.doi:
            self.counts["has-doi"] += 1

        # dblp-specific extra
        dblp_extra = dict(type=dblp_type)
        note = clean_str(xml_elem.note and xml_elem.note.text)
        if note and "base-search.net" not in note:
            dblp_extra["note"] = note
        if part_of_key:
            dblp_extra["part_of_key"] = part_of_key

        # generic extra
        extra: Dict[str, Any] = dict()
        if not container_id and container_name:
            extra["container_name"] = container_name

        if series and (dblp_key_type == "series" or dblp_type == "book"):
            extra["series-title"] = series
        elif series:
            dblp_extra["series"] = series

        if booktitle and dblp_key_type == "series":
            extra["container-title"] = booktitle
        elif booktitle and dblp_key_type == "conf":
            extra["event"] = booktitle
        elif booktitle:
            dblp_extra["booktitle"] = booktitle

        if release_year and release_month:
            # TODO: release_month schema migration
            extra["release_month"] = release_month

        if dblp_extra:
            extra["dblp"] = dblp_extra

        re = fatcat_openapi_client.ReleaseEntity(
            work_id=None,
            container_id=container_id,
            release_type=release_type,
            release_stage=release_stage,
            withdrawn_status=withdrawn_status,
            title=title,
            release_year=release_year,
            # release_date,
            publisher=publisher,
            ext_ids=ext_ids,
            contribs=contribs or None,
            volume=volume,
            issue=issue,
            pages=pages,
            extra=extra or None,
        )
        re = self.biblio_hacks(re)

        if self.dump_json_mode:
            re_dict = entity_to_dict(re, api_client=self.api.api_client)
            re_dict["_dblp_ee_urls"] = self.dblp_ext_urls(xml_elem)
            re_dict["_dblp_prefix"] = dblp_prefix
            print(json.dumps(re_dict, sort_keys=True))
            return False

        if not re.container_id:
            self.counts["skip-dblp-container-missing"] += 1
            return False
        return re
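The unconditional branches of the element-type mapping above can also be expressed as a lookup table; a sketch (table name hypothetical), with the "article" family still needing the key-type and publtype refinements from parse_record:

DBLP_TYPE_MAP = {
    "inproceedings": "paper-conference",
    "book": "book",
    "incollection": "chapter",
    "data": "dataset",
    "mastersthesis": "thesis",
    "phdthesis": "thesis",
}

# DBLP_TYPE_MAP.get("phdthesis") == "thesis"; unmapped element types yield None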
Example #20
    def parse_record(self, obj: Dict[str, Any]) -> Optional[ReleaseEntity]:
        """
        Mapping datacite JSON to ReleaseEntity.
        """
        if not obj or not isinstance(obj, dict):
            return None
        if "attributes" not in obj:
            return None

        attributes = obj["attributes"]
        doi = clean_doi(attributes.get("doi", "").lower())

        if not doi:
            print("skipping record without a DOI", file=sys.stderr)
            return None

        if not doi.isascii():
            print("[{}] skipping non-ascii doi for now".format(doi),
                  file=sys.stderr)
            return None

        creators = attributes.get("creators", []) or []
        # Much fewer than creators.
        contributors = attributes.get("contributors", []) or []

        contribs = self.parse_datacite_creators(creators, doi=doi)

        # Beside creators, we have contributors in datacite. Sample:
        # ContactPerson, DataCollector, DataCurator, DataManager, Distributor,
        # Editor, Funder, HostingInstitution, Other, Producer, ProjectLeader,
        # ProjectMember, RelatedPerson, ResearchGroup, Researcher,
        # RightsHolder, Sponsor, Supervisor
        #
        # Datacite schema:
        # https://schema.datacite.org/meta/kernel-4.3/doc/DataCite-MetadataKernel_v4.3.pdf#page=32
        # -- could be used as a form of controlled vocab?
        #
        # Currently (07/2020) in release_contrib:
        #
        # select count(*), role from release_contrib group by role;
        #    count   |    role
        # -----------+------------
        #  500269665 | author
        #    4386563 | editor
        #      17871 | translator
        #   10870584 |
        # (4 rows)
        #
        # Related: https://guide.fatcat.wiki/entity_release.html -- role
        # (string, of a set): the type of contribution, from a controlled
        # vocabulary. TODO: vocabulary needs review.
        contribs_extra_contributors = self.parse_datacite_creators(
            contributors, set_index=False, doi=doi)

        # Unfortunately, creators and contributors might overlap, refs GH59.
        for cc in contribs_extra_contributors:
            if contributor_list_contains_contributor(contribs, cc):
                continue
            contribs.append(cc)

        # Title, may come with "attributes.titles[].titleType", like
        # "AlternativeTitle", "Other", "Subtitle", "TranslatedTitle"
        titles = attributes.get("titles", []) or []
        title, original_language_title, subtitle = parse_datacite_titles(
            titles)

        if title is None:
            print("[{}] skipping record w/o title: {}".format(doi, obj),
                  file=sys.stderr)
            return False

        title = clean_str(title)
        if not title:
            print("[{}] skipping record w/o title: {}".format(doi, obj),
                  file=sys.stderr)
            return False

        # check for blocklisted "spam", e.g. "FULL MOVIE"
        for rule in DATACITE_TITLE_SPAM_WORDGROUPS:
            seen = set()
            token_list: List[str] = rule.get("tokens") or []
            for token in token_list:
                if token in title.lower():
                    seen.add(token)
            if len(seen) >= rule["min"]:
                print("[{}] skipping spammy title: {}".format(doi, obj),
                      file=sys.stderr)
                return False

        if not subtitle:
            subtitle = None
        else:
            subtitle = clean_str(subtitle)

        # Dates. A few internal dates (registered, created, updated) and
        # published (0..2554). We try to work with typed date list, in
        # "attributes.dates[].dateType", values: "Accepted", "Available"
        # "Collected", "Copyrighted", "Created", "Issued", "Submitted",
        # "Updated", "Valid".
        release_date, release_month, release_year = parse_datacite_dates(
            attributes.get("dates", []))

        # block bogus far-future years/dates
        if release_year is not None and (release_year > (self.this_year + 5)
                                         or release_year < 1000):
            release_date = None
            release_month = None
            release_year = None

        # Some records do not use the "dates" field (e.g. micropub), but:
        # "attributes.published" or "attributes.publicationYear"
        if not any((release_date, release_month, release_year)):
            release_date, release_month, release_year = parse_single_date(
                attributes.get("publicationYear"))
            if not any((release_date, release_month, release_year)):
                release_date, release_month, release_year = parse_single_date(
                    attributes.get("published"))

        if not any((release_date, release_month, release_year)):
            print("[{}] record w/o date: {}".format(doi, obj), file=sys.stderr)

        # Start with clear stages, e.g. published. TODO(martin): we could
        # probably infer a bit more from the relations, e.g.
        # "IsPreviousVersionOf" or "IsNewVersionOf".
        release_stage: Optional[str] = "published"

        # TODO(martin): If 'state' is not 'findable' or 'isActive' is not true,
        # we might want something else than 'published'. See also:
        # https://support.datacite.org/docs/doi-states.

        # Publisher. A few NA values. A few bogus values.
        publisher = attributes.get("publisher")

        if publisher in UNKNOWN_MARKERS | {"Unpublished", "Unknown"}:
            publisher = None
            release_stage = None
        if publisher is not None and len(publisher) > 80:
            # Arbitrary magic value max length. TODO(martin): better heuristic,
            # but factored out; first we have to log misses. Example:
            # "ETH-Bibliothek Zürich, Bildarchiv / Fotograf: Feller,
            # Elisabeth, Empfänger, Unbekannt, Fotograf / Fel_041033-RE /
            # Unbekannt, Nutzungsrechte müssen durch den Nutzer abgeklärt
            # werden"
            publisher = None

        if publisher:
            publisher = clean_str(publisher)

        # Container. For the moment, only ISSN as container.
        container_id = None
        container_name = None

        container = attributes.get("container", {}) or {}
        if container.get("type") in DATACITE_CONTAINER_TYPE_MAP.keys():
            container_type = DATACITE_CONTAINER_TYPE_MAP.get(container["type"])
            if container.get("identifier") and container.get(
                    "identifierType") == "ISSN":
                issn = container.get("identifier")
                if issn and len(issn) == 8:
                    issn = issn[:4] + "-" + issn[4:]
                    issnl = self.issn2issnl(issn)
                else:
                    issnl = None
                if issnl is not None:
                    container_id = self.lookup_issnl(issnl)

                    if container_id is None and container.get("title"):
                        container_name = container.get("title")
                        if isinstance(container_name, list):
                            if len(container_name) > 1:
                                print("[{}] too many container titles: {}".
                                      format(doi, len(container_name)))
                            container_name = container_name[0]
                        assert isinstance(container_name, str)
                        ce = fatcat_openapi_client.ContainerEntity(
                            issnl=issnl,
                            container_type=container_type,
                            name=container_name,
                        )
                        ce_edit = self.create_container(ce)
                        container_id = ce_edit.ident
                        self._issnl_id_map[issnl] = container_id
                else:
                    # TODO(martin): factor this out into a testable function.
                    # TODO(martin): "container_name": "№1(1) (2018)" / 10.26087/inasan.2018.1.1.013
                    container_name = container.get("title")
                    if isinstance(container_name, list) and container_name:
                        if len(container_name) > 1:
                            print("[{}] too many container titles: {}".format(
                                doi, len(container_name)))
                        container_name = container_name[0]

        # Exception: https://www.micropublication.org/, see: !MR24.
        if container_id is None and container_name is None:
            if publisher and publisher.lower().startswith("micropublication"):
                container_name = publisher

        # Volume and issue.
        volume = container.get("volume")
        issue = container.get("issue")

        if volume:
            volume = clean_str(volume)

        if issue:
            issue = clean_str(issue)

        # Pages.
        pages = None

        first_page = container.get("firstPage")
        last_page = container.get("lastPage")

        if first_page and last_page:
            try:
                # the comparison only validates both pages parse as integers
                _ = int(first_page) < int(last_page)
                pages = "{}-{}".format(first_page, last_page)
            except ValueError as err:  # noqa: F841
                # TODO(martin): This is more debug than info.
                # print('[{}] {}'.format(doi, err), file=sys.stderr)
                pass

        if not pages and first_page:
            pages = first_page

        # License.
        license_slug = None
        license_extra = []

        for lic in attributes.get("rightsList", []):
            slug = datacite_lookup_license_slug(lic.get("rightsUri"))
            if slug:
                license_slug = slug
            license_extra.append(lic)

        release_type = self.datacite_release_type(doi, attributes)

        # Language values are varied ("ger", "es", "English", "ENG", "en-us",
        # "other", ...). Try to crush it with langcodes: "It may sound to you
        # like langcodes solves a pretty boring problem. At one level, that's
        # right. Sometimes you have a boring problem, and it's great when a
        # library solves it for you." -- TODO(martin): We need more of these.
        language = None

        value = attributes.get("language", "") or ""
        try:
            language = pycountry.languages.lookup(value).alpha_2
        except (LookupError, AttributeError) as err:  # noqa: F841
            pass
            # TODO(martin): Print this on debug level, only.
            # print('[{}] language lookup miss for {}: {}'.format(doi, value, err), file=sys.stderr)

        # Abstracts appear in "attributes.descriptions[].descriptionType", some
        # of the observed values: "Methods", "TechnicalInfo",
        # "SeriesInformation", "Other", "TableOfContents", "Abstract". The
        # "Other" fields might contain references or related articles (with
        # DOI). TODO(martin): maybe try to parse out some of those refs.
        abstracts = []
        descs = attributes.get("descriptions", []) or []
        for desc in descs:
            if not desc.get("descriptionType") == "Abstract":
                continue

            # Description maybe a string, int or list.
            text = desc.get("description", "")
            if not text:
                continue
            if isinstance(text, int):
                text = "{}".format(text)
            if isinstance(text, list):
                try:
                    text = "\n".join(text)
                except TypeError:
                    continue  # Bail out, if it is not a list of strings.

            # Limit length.
            if len(text) < 10:
                continue
            if len(text) > MAX_ABSTRACT_LENGTH:
                text = text[:MAX_ABSTRACT_LENGTH] + " [...]"

            # Detect language. This is fuzzy and may be removed, if too unreliable.
            lang = None
            try:
                lang = langdetect.detect(text)
            except (langdetect.lang_detect_exception.LangDetectException,
                    TypeError) as err:
                print(
                    "[{}] language detection failed with {} on {}".format(
                        doi, err, text),
                    file=sys.stderr,
                )
            abstract_text = clean_str(text)
            if not abstract_text:
                continue
            abstracts.append(
                fatcat_openapi_client.ReleaseAbstract(
                    mimetype="text/plain",
                    content=abstract_text,
                    lang=lang,
                ))

        # References and relations. Datacite include many relation types in
        # "attributes.relatedIdentifiers[].relationType", e.g.
        # "IsPartOf", "IsPreviousVersionOf", "Continues", "IsVariantFormOf",
        # "IsSupplementTo", "Cites", "IsSupplementedBy", "IsDocumentedBy", "HasVersion",
        # "IsCitedBy", "IsMetadataFor", "IsNewVersionOf", "IsIdenticalTo", "HasPart",
        # "References", "Reviews", "HasMetadata", "IsContinuedBy", "IsVersionOf",
        # "IsDerivedFrom", "IsSourceOf".
        #
        # For the moment, we only care about References.
        refs, ref_index = [], 0

        relIds = attributes.get("relatedIdentifiers", []) or []
        for rel in relIds:
            if not rel.get("relationType", "") in ("References", "Cites"):
                continue
            ref_extra = dict()
            if rel.get("relatedIdentifierType", "") == "DOI":
                ref_extra["doi"] = rel.get("relatedIdentifier")
            refs.append(
                fatcat_openapi_client.ReleaseRef(
                    index=ref_index,
                    extra=ref_extra or None,
                ))
            ref_index += 1

        # More specific release_type via the 'Reviews' relation type.
        for rel in relIds:
            if rel.get("relationType", "") != "Reviews":
                continue
            release_type = "review"

        # Extra information.
        extra_datacite: Dict[str, Any] = dict()

        if license_extra:
            extra_datacite["license"] = license_extra
        if attributes.get("subjects"):
            # these subjects with schemeUri are too much metadata, which
            # doesn't compress. filter them out.
            extra_subjects = [
                subj for subj in attributes["subjects"]
                if not subj.get("schemeUri")
            ]
            if extra_subjects:
                extra_datacite["subjects"] = extra_subjects

        # Include version information.
        metadata_version = attributes.get("metadataVersion") or ""

        if metadata_version:
            extra_datacite["metadataVersion"] = metadata_version

        # Include resource types.
        types = attributes.get("types") or {}
        resource_type = types.get("resourceType", "") or ""
        resource_type_general = types.get("resourceTypeGeneral", "") or ""

        if resource_type and resource_type.lower() not in UNKNOWN_MARKERS_LOWER:
            extra_datacite["resourceType"] = resource_type
        if resource_type_general and resource_type_general.lower() not in UNKNOWN_MARKERS_LOWER:
            extra_datacite["resourceTypeGeneral"] = resource_type_general

        # Include certain relations from relatedIdentifiers. Keeping the
        # original structure of data here, which is a list of dicts, with
        # relation type, identifier and identifier type (mostly).
        relations = []
        for rel in relIds:
            if rel.get("relationType") in (
                    "IsPartOf",
                    "Reviews",
                    "Continues",
                    "IsVariantFormOf",
                    "IsSupplementTo",
                    "HasVersion",
                    "IsMetadataFor",
                    "IsNewVersionOf",
                    "IsIdenticalTo",
                    "IsVersionOf",
                    "IsDerivedFrom",
                    "IsSourceOf",
            ):
                relations.append(rel)

        # TODO: could use many of these relations to do release/work grouping

        if relations:
            extra_datacite["relations"] = relations

        extra: Dict[str, Any] = dict()

        # "1.0.0", "v1.305.2019", "Final", "v1.0.0", "v0.3.0", "1", "0.19.0",
        # "3.1", "v1.1", "{version}", "4.0", "10329", "11672", "11555",
        # "v1.4.5", "2", "V1", "v3.0", "v0", "v0.6", "11124", "v1.0-beta", "1st
        # Edition", "20191024", "v2.0.0", "v0.9.3", "10149", "2.0", null,
        # "v0.1.1", "3.0", "1.0", "3", "v1.12.2", "20191018", "v0.3.1", "v1.0",
        # "10161", "10010691", "10780", # "Presentación"
        version = attributes.get("version") or None

        # top-level extra keys
        if not container_id and container_name:
            extra["container_name"] = container_name

        # Always include datacite key, even if value is empty (dict).
        extra["datacite"] = extra_datacite

        # Preparation for a schema update.
        if release_month:
            extra["release_month"] = release_month

        # Assemble release.
        re = fatcat_openapi_client.ReleaseEntity(
            work_id=None,
            container_id=container_id,
            release_type=release_type,
            release_stage=release_stage,
            title=title,
            subtitle=subtitle,
            original_title=original_language_title,
            release_year=release_year,
            release_date=release_date,
            publisher=publisher,
            ext_ids=fatcat_openapi_client.ReleaseExtIds(doi=doi),
            contribs=contribs,
            volume=volume,
            issue=issue,
            pages=pages,
            language=language,
            abstracts=abstracts,
            refs=refs,
            extra=extra,
            license_slug=license_slug,
            version=version,
        )
        re = self.biblio_hacks(re)
        return re
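The title blocklist check above generalizes to a small predicate; a sketch (names hypothetical; rules have the same shape as DATACITE_TITLE_SPAM_WORDGROUPS, i.e. dicts with "tokens" and "min"):

from typing import Any, Dict, List

def title_is_spam(title: str, rules: List[Dict[str, Any]]) -> bool:
    # A title counts as spam when it contains at least rule["min"] distinct
    # tokens from any single rule's token list.
    lowered = title.lower()
    for rule in rules:
        seen = {tok for tok in rule.get("tokens", []) if tok in lowered}
        if len(seen) >= rule["min"]:
            return True
    return False

# title_is_spam("FULL MOVIE watch online free",
#               [{"tokens": ["full movie", "watch online"], "min": 2}]) == True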