def get_matching_colophon_lines(
    self, query: TransliterationQuery
) -> Mapping[int, Sequence[TextLine]]:
    """Collect, keyed by manuscript id, the text lines matched by *query*.

    For each manuscript, ``self._match(query)`` yields (start, end) index
    ranges; lines within a range are kept only when ``line.source is None``.
    Manuscripts whose collected list is empty are dropped from the result.
    """
    matched_lines = {}
    for index, ranges in enumerate(self._match(query)):
        manuscript_lines = self.text_lines[index]
        collected = []
        for start, end in ranges:
            # Ranges are inclusive, hence end + 1 in the slice.
            for line in manuscript_lines[start : end + 1]:
                if line.source is None:
                    collected.append(line.line)
        matched_lines[self.manuscripts[index].id] = collected
    return pydash.omit_by(matched_lines, pydash.is_empty)
# Example no. 2
# 0
def test_omit_by(case, expected):
    """omit_by called with the unpacked *case* arguments yields *expected*."""
    result = _.omit_by(*case)
    assert result == expected
 def filter_none(self, data, **kwargs):
     """Return *data* with every None-valued entry removed (extra kwargs ignored)."""
     return pydash.omit_by(data, lambda value: value is None)
# Example no. 4
# 0
 def dump_token(self, data, **kwargs):
     """Serialization hook: drop None-valued keys from the dumped mapping."""
     return pydash.omit_by(data, pydash.is_none)
def test_create_response_dto(user):
    """create_response_dto serializes a lemmatized fragment to its API shape.

    Builds the expected dict field by field and strips None values with
    ``pydash.omit_by``, mirroring what the DTO factory is expected to emit.
    """
    lemmatized_fragment = LemmatizedFragmentFactory.build(
        joins=Joins(((JoinFactory.build(),),))
    )
    has_photo = True

    # attr.asdict filter used for the measurement fields: keep non-None only.
    drop_none = lambda _, value: value is not None

    expected = pydash.omit_by(
        {
            "museumNumber": attr.asdict(lemmatized_fragment.number),
            "accession": lemmatized_fragment.accession,
            "cdliNumber": lemmatized_fragment.cdli_number,
            "bmIdNumber": lemmatized_fragment.bm_id_number,
            "publication": lemmatized_fragment.publication,
            "description": lemmatized_fragment.description,
            "joins": JoinsSchema().dump(lemmatized_fragment.joins)["fragments"],
            "length": attr.asdict(lemmatized_fragment.length, filter=drop_none),
            "width": attr.asdict(lemmatized_fragment.width, filter=drop_none),
            "thickness": attr.asdict(
                lemmatized_fragment.thickness, filter=drop_none
            ),
            "collection": lemmatized_fragment.collection,
            "script": lemmatized_fragment.script,
            "notes": lemmatized_fragment.notes,
            "museum": lemmatized_fragment.museum,
            "signs": lemmatized_fragment.signs,
            "record": [
                {
                    "user": entry.user,
                    "type": entry.type.value,
                    "date": entry.date,
                }
                for entry in lemmatized_fragment.record.entries
            ],
            "folios": [
                attr.asdict(folio)
                for folio in lemmatized_fragment.folios.filter(user).entries
            ],
            "text": TextSchema().dump(lemmatized_fragment.text),
            "references": [
                {
                    "id": reference.id,
                    "type": reference.type.name,
                    "pages": reference.pages,
                    "notes": reference.notes,
                    "linesCited": list(reference.lines_cited),
                }
                for reference in lemmatized_fragment.references
            ],
            "uncuratedReferences": (
                [
                    attr.asdict(reference)
                    for reference in lemmatized_fragment.uncurated_references
                ]
                if lemmatized_fragment.uncurated_references is not None
                else None
            ),
            "atf": lemmatized_fragment.text.atf,
            "hasPhoto": has_photo,
            "genres": [
                {"category": genre.category, "uncertain": genre.uncertain}
                for genre in lemmatized_fragment.genres
            ],
            "lineToVec": [
                [encoding.value for encoding in encodings]
                for encodings in lemmatized_fragment.line_to_vec
            ],
        },
        pydash.is_none,
    )

    assert create_response_dto(lemmatized_fragment, user, has_photo) == expected