def test_extent_before_translation() -> None:
    with pytest.raises(ValueError):
        Text.of_iterable(
            [
                TextLine.of_iterable(LineNumber(1), [Word.of([Reading.of_name("bu")])]),
                TextLine.of_iterable(LineNumber(2), [Word.of([Reading.of_name("bu")])]),
                TranslationLine(tuple(), "en", Extent(LineNumber(1))),
            ]
        )

def test_update_lemmatization() -> None:
    line = TextLine.of_iterable(LINE_NUMBER, [Word.of([Reading.of_name("bu")])])
    lemmatization = (LemmatizationToken("bu", (WordId("nu I"),)),)
    expected = TextLine.of_iterable(
        LINE_NUMBER,
        [Word.of([Reading.of_name("bu")], unique_lemma=(WordId("nu I"),))],
    )

    assert line.update_lemmatization(lemmatization) == expected

def test_update_lemmatization_wrong_length() -> None:
    line = TextLine.of_iterable(
        LINE_NUMBER,
        [Word.of([Reading.of_name("bu")]), Word.of([Reading.of_name("bu")])],
    )
    lemmatization = (LemmatizationToken("bu", (WordId("nu I"),)),)
    with pytest.raises(LemmatizationError):
        line.update_lemmatization(lemmatization)

def test_extent_overlapping_languages() -> None:
    Text.of_iterable(
        [
            TextLine.of_iterable(LineNumber(1), [Word.of([Reading.of_name("bu")])]),
            TranslationLine(tuple(), "en", Extent(LineNumber(2))),
            TextLine.of_iterable(LineNumber(2), [Word.of([Reading.of_name("bu")])]),
            TranslationLine(tuple(), "de"),
        ]
    )

def text_with_labels():
    return Text.of_iterable(
        [
            TextLine.of_iterable(LineNumber(1), [Word.of([Reading.of_name("bu")])]),
            ColumnAtLine(ColumnLabel.from_int(1)),
            SurfaceAtLine(SurfaceLabel([], atf.Surface.SURFACE, "Stone wig")),
            ObjectAtLine(ObjectLabel([], atf.Object.OBJECT, "Stone wig")),
            TextLine.of_iterable(LineNumber(2), [Word.of([Reading.of_name("bu")])]),
        ]
    )

def test_extent_overlapping() -> None:
    with pytest.raises(ValueError):
        Text.of_iterable(
            [
                TextLine.of_iterable(LineNumber(1), [Word.of([Reading.of_name("bu")])]),
                TranslationLine(tuple(), extent=Extent(LineNumber(2))),
                TextLine.of_iterable(LineNumber(2), [Word.of([Reading.of_name("bu")])]),
                TranslationLine(tuple()),
            ]
        )

def test_updating_alignment(
    client, bibliography, sign_repository, signs, text_repository
):
    allow_signs(signs, sign_repository)
    chapter = ChapterFactory.build()
    allow_references(chapter, bibliography)
    text_repository.create_chapter(chapter)
    alignment = 0
    omitted_words = (1,)
    updated_chapter = attr.evolve(
        chapter,
        lines=(
            attr.evolve(
                chapter.lines[0],
                variants=(
                    attr.evolve(
                        chapter.lines[0].variants[0],
                        manuscripts=(
                            attr.evolve(
                                chapter.lines[0].variants[0].manuscripts[0],
                                line=TextLine.of_iterable(
                                    chapter.lines[0].variants[0].manuscripts[0].line.line_number,
                                    (
                                        Word.of(
                                            [
                                                Reading.of_name("ku"),
                                                Joiner.hyphen(),
                                                BrokenAway.open(),
                                                Reading.of_name("nu"),
                                                Joiner.hyphen(),
                                                Reading.of_name("ši"),
                                                BrokenAway.close(),
                                            ],
                                            alignment=alignment,
                                            variant=Word.of(
                                                [Logogram.of_name("KU")],
                                                language=Language.SUMERIAN,
                                            ),
                                        ),
                                    ),
                                ),
                                omitted_words=omitted_words,
                            ),
                        ),
                    ),
                ),
            ),
        ),
    )
    expected_chapter = ApiChapterSchema().dump(updated_chapter)

    post_result = client.simulate_post(
        create_chapter_url(chapter, "/alignment"), body=json.dumps(DTO)
    )

    assert post_result.status == falcon.HTTP_OK
    assert post_result.json == expected_chapter

    get_result = client.simulate_get(create_chapter_url(chapter))

    assert get_result.status == falcon.HTTP_OK
    assert get_result.json == expected_chapter

def test_text_line_atf_gloss() -> None:
    line = TextLine.of_iterable(
        LINE_NUMBER,
        [
            DocumentOrientedGloss.open(),
            Word.of([Reading.of_name("mu")]),
            Word.of([Reading.of_name("bu")]),
            DocumentOrientedGloss.close(),
        ],
    )
    assert line.atf == f"{line.line_number.atf} {{(mu bu)}}"

def test_lemmatization() -> None:
    line = TextLine.of_iterable(
        LINE_NUMBER,
        [
            Word.of([Reading.of_name("bu")], unique_lemma=(WordId("nu I"),)),
            UnknownNumberOfSigns.of(),
            Word.of([Reading.of_name("nu")]),
        ],
    )

    assert line.lemmatization == (
        LemmatizationToken("bu", (WordId("nu I"),)),
        LemmatizationToken("..."),
        LemmatizationToken("nu", tuple()),
    )

def test_text_line_atf_erasure(erasure, expected: str) -> None:
    word = Word.of([Reading.of_name("mu"), Joiner.hyphen(), Reading.of_name("mu")])
    line = TextLine.of_iterable(LINE_NUMBER, [word, *erasure, word])
    assert line.atf == f"{line.line_number.atf} {word.value} {expected} {word.value}"

def test_dump_line():
    text = Text(
        (
            TextLine.of_iterable(
                LineNumber(1),
                [
                    Word.of(
                        parts=[
                            Reading.of_name("ha"),
                            Joiner.hyphen(),
                            Reading.of_name("am"),
                        ]
                    )
                ],
            ),
            EmptyLine(),
            ControlLine("#", " comment"),
        ),
        "1.0.0",
    )

    assert TextSchema().dump(text) == {
        "lines": OneOfLineSchema().dump(text.lines, many=True),
        "parser_version": text.parser_version,
        "numberOfLines": 1,
    }

class ManuscriptLineFactory(factory.Factory):
    class Meta:
        model = ManuscriptLine

    manuscript_id = factory.Sequence(lambda n: n)
    labels = (
        SurfaceLabel.from_label(Surface.OBVERSE),
        ColumnLabel.from_label("iii", [Status.COLLATION, Status.CORRECTION]),
    )
    line = factory.Sequence(
        lambda n: TextLine.of_iterable(
            LineNumber(n),
            (
                Word.of(
                    [
                        Reading.of_name("ku"),
                        Joiner.hyphen(),
                        BrokenAway.open(),
                        Reading.of_name("nu"),
                        Joiner.hyphen(),
                        Reading.of_name("ši"),
                        BrokenAway.close(),
                    ]
                ),
            ),
        )
    )
    paratext = (NoteLine((StringPart("note"),)), RulingDollarLine(Ruling.SINGLE))
    omitted_words = (1,)

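# Usage sketch, not part of the original suite: it relies only on standard
# factory_boy semantics, namely that build() creates an in-memory instance
# and that factory.Sequence counters advance once per build, giving each
# ManuscriptLine a fresh manuscript_id and line number.
def test_manuscript_line_factory_sketch() -> None:
    first = ManuscriptLineFactory.build()
    second = ManuscriptLineFactory.build()
    assert first.manuscript_id != second.manuscript_id
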
def test_update_lemmatization() -> None:
    tokens = [list(line) for line in TEXT.lemmatization.tokens]
    tokens[0][0] = LemmatizationToken(tokens[0][0].value, (WordId("nu I"),))
    lemmatization = Lemmatization(tokens)
    expected = Text(
        (
            TextLine(
                LineNumber(1),
                (
                    Word.of(
                        unique_lemma=(WordId("nu I"),),
                        parts=[
                            Reading.of_name("ha"),
                            Joiner.hyphen(),
                            Reading.of_name("am"),
                        ],
                    ),
                ),
            ),
            RulingDollarLine(atf.Ruling.SINGLE),
        ),
        TEXT.parser_version,
    )

    assert TEXT.update_lemmatization(lemmatization) == expected

def test_parse_normalized_akkadian_shift() -> None:
    word = "ha"
    line = f"1. {word} %n {word} %sux {word}"
    expected = Text(
        (
            TextLine.of_iterable(
                LineNumber(1),
                (
                    Word.of((Reading.of_name(word),), DEFAULT_LANGUAGE),
                    LanguageShift.normalized_akkadian(),
                    AkkadianWord.of((ValueToken.of(word),)),
                    LanguageShift.of("%sux"),
                    Word.of((Reading.of_name(word),), Language.SUMERIAN),
                ),
            ),
        )
    )

    assert parse_atf_lark(line).lines == expected.lines

def test_parse_atf_language_shifts(code: str, expected_language: Language) -> None:
    word = "ha-am"
    parts = [Reading.of_name("ha"), Joiner.hyphen(), Reading.of_name("am")]
    line = f"1. {word} {code} {word} %sb {word}"
    expected = Text(
        (
            TextLine.of_iterable(
                LineNumber(1),
                (
                    Word.of(parts, DEFAULT_LANGUAGE),
                    LanguageShift.of(code),
                    Word.of(parts, expected_language),
                    LanguageShift.of("%sb"),
                    Word.of(parts, Language.AKKADIAN),
                ),
            ),
        )
    )

    assert parse_atf_lark(line).lines == expected.lines

def make_token(self, data, **kwargs):
    return Word.of(
        data["parts"],
        data["language"],
        tuple(data["unique_lemma"]),
        data["erasure"],
        data["alignment"],
        data["variant"],
    ).set_enclosure_type(frozenset(data["enclosure_type"]))

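# The hook above presumably lives on a marshmallow Schema for Word tokens and
# is registered with @post_load, so schema.load(data) yields a typed Word
# instead of a plain dict. Hypothetical wiring sketch (the schema and base
# class names are assumptions; only the @post_load mechanism is standard
# marshmallow):
#
#     class WordSchema(BaseTokenSchema):
#         @post_load
#         def make_token(self, data, **kwargs) -> Word:
#             ...
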
def test_statistics(database, fragment_repository):
    database[COLLECTION].insert_many(
        [
            SCHEMA.dump(
                FragmentFactory.build(
                    text=Text(
                        (
                            TextLine(
                                LineNumber(1),
                                (
                                    Word.of([Reading.of_name("first")]),
                                    Word.of([Reading.of_name("line")]),
                                ),
                            ),
                            ControlLine("#", "ignore"),
                            EmptyLine(),
                        )
                    )
                )
            ),
            SCHEMA.dump(
                FragmentFactory.build(
                    text=Text(
                        (
                            ControlLine("#", "ignore"),
                            TextLine(
                                LineNumber(1), (Word.of([Reading.of_name("second")]),)
                            ),
                            TextLine(
                                LineNumber(2), (Word.of([Reading.of_name("third")]),)
                            ),
                            ControlLine("#", "ignore"),
                            TextLine(
                                LineNumber(3), (Word.of([Reading.of_name("fourth")]),)
                            ),
                        )
                    )
                )
            ),
            SCHEMA.dump(FragmentFactory.build(text=Text())),
        ]
    )

    assert fragment_repository.count_transliterated_fragments() == 2
    assert fragment_repository.count_lines() == 4

def expected_transliteration(language: Language) -> Sequence[Token]:
    return (
        Word.of([Reading.of_name("bu")], language),
        LanguageShift.of("%es"),
        Word.of(
            [
                BrokenAway.open(),
                Reading.of(
                    (
                        ValueToken(
                            frozenset({EnclosureType.BROKEN_AWAY}),
                            ErasureState.NONE,
                            "kur",
                        ),
                    )
                ).set_enclosure_type(frozenset({EnclosureType.BROKEN_AWAY})),
            ],
            Language.EMESAL,
        ),
        UnknownNumberOfSigns(frozenset({EnclosureType.BROKEN_AWAY}), ErasureState.NONE),
        BrokenAway.close().set_enclosure_type(frozenset({EnclosureType.BROKEN_AWAY})),
    )

def test_query_lemmas_ignores_in_value(
    parts, expected, fragment_repository, lemma_repository
):
    fragment = FragmentFactory.build(
        text=Text.of_iterable(
            [
                TextLine.of_iterable(
                    LineNumber(1), [Word.of(parts, unique_lemma=(WordId("ana I"),))]
                )
            ]
        ),
        signs="DIŠ",
    )
    fragment_repository.create(fragment)

    assert lemma_repository.query_lemmas("ana", False) == expected

def test_updating_alignment(
    corpus,
    text_repository,
    bibliography,
    changelog,
    signs,
    sign_repository,
    user,
    when,
) -> None:
    alignment_index = 0
    omitted_words = (1,)
    updated_chapter = attr.evolve(
        CHAPTER,
        lines=(
            attr.evolve(
                CHAPTER.lines[0],
                variants=(
                    attr.evolve(
                        CHAPTER.lines[0].variants[0],
                        manuscripts=(
                            attr.evolve(
                                CHAPTER.lines[0].variants[0].manuscripts[0],
                                line=TextLine.of_iterable(
                                    CHAPTER.lines[0].variants[0].manuscripts[0].line.line_number,
                                    (
                                        Word.of(
                                            [
                                                Reading.of_name("ku"),
                                                Joiner.hyphen(),
                                                BrokenAway.open(),
                                                Reading.of_name("nu"),
                                                Joiner.hyphen(),
                                                Reading.of_name("ši"),
                                                BrokenAway.close(),
                                            ],
                                            alignment=alignment_index,
                                        ),
                                    ),
                                ),
                                omitted_words=omitted_words,
                            ),
                        ),
                    ),
                ),
            ),
        ),
    )
    expect_find_and_update_chapter(
        bibliography,
        changelog,
        CHAPTER_WITHOUT_DOCUMENTS,
        updated_chapter,
        signs,
        sign_repository,
        text_repository,
        user,
        when,
    )

    alignment = Alignment(
        (
            (
                ManuscriptLineAlignment(
                    (AlignmentToken("ku-[nu-ši]", alignment_index),), omitted_words
                ),
            ),
        )
    )

    assert corpus.update_alignment(CHAPTER.id_, alignment, user) == updated_chapter

class ManuscriptFactory(factory.Factory):
    class Meta:
        model = Manuscript

    id = factory.Sequence(lambda n: n + 1)
    siglum_disambiguator = factory.Faker("word")
    museum_number = factory.Sequence(
        lambda n: MuseumNumber("M", str(n)) if pydash.is_odd(n) else None
    )
    accession = factory.Sequence(lambda n: f"A.{n}" if pydash.is_even(n) else "")
    period_modifier = factory.fuzzy.FuzzyChoice(PeriodModifier)
    period = factory.fuzzy.FuzzyChoice(set(Period) - {Period.NONE})
    provenance = factory.fuzzy.FuzzyChoice(set(Provenance) - {Provenance.STANDARD_TEXT})
    type = factory.fuzzy.FuzzyChoice(set(ManuscriptType) - {ManuscriptType.NONE})
    notes = factory.Faker("sentence")
    colophon = Transliteration.of_iterable(
        [TextLine.of_iterable(LineNumber(1, True), (Word.of([Reading.of_name("ku")]),))]
    )
    unplaced_lines = Transliteration.of_iterable(
        [TextLine.of_iterable(LineNumber(1, True), (Word.of([Reading.of_name("nu")]),))]
    )
    references = factory.List(
        [factory.SubFactory(ReferenceFactory, with_document=True)], TupleFactory
    )

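# Usage sketch, not part of the original suite (assumes only standard
# factory_boy behavior): FuzzyChoice draws a random member on every build,
# so the set exclusions above guarantee the placeholder values never occur.
def test_manuscript_factory_sketch() -> None:
    manuscript = ManuscriptFactory.build()
    assert manuscript.period is not Period.NONE
    assert manuscript.provenance is not Provenance.STANDARD_TEXT
    assert manuscript.type is not ManuscriptType.NONE
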
def test_updating_manuscripts(
    corpus,
    text_repository,
    bibliography,
    changelog,
    signs,
    sign_repository,
    user,
    when,
) -> None:
    uncertain_fragments = (MuseumNumber.of("K.1"),)
    updated_chapter = attr.evolve(
        CHAPTER,
        manuscripts=(
            attr.evolve(
                CHAPTER.manuscripts[0],
                colophon=Transliteration.of_iterable(
                    [
                        TextLine.of_iterable(
                            LineNumber(1, True), (Word.of([Reading.of_name("ba")]),)
                        )
                    ]
                ),
                unplaced_lines=Transliteration.of_iterable(
                    [
                        TextLine.of_iterable(
                            LineNumber(1, True), (Word.of([Reading.of_name("ku")]),)
                        )
                    ]
                ),
                notes="Updated manuscript.",
            ),
        ),
        uncertain_fragments=uncertain_fragments,
        signs=("KU ABZ075 ABZ207a\\u002F207b\\u0020X\nBA\nKU",),
    )
    expect_find_and_update_chapter(
        bibliography,
        changelog,
        CHAPTER_WITHOUT_DOCUMENTS,
        updated_chapter,
        signs,
        sign_repository,
        text_repository,
        user,
        when,
    )

    manuscripts = (updated_chapter.manuscripts[0],)

    assert (
        corpus.update_manuscripts(CHAPTER.id_, manuscripts, uncertain_fragments, user)
        == updated_chapter
    )

def test_updating_lines_edit(
    corpus,
    text_repository,
    bibliography,
    changelog,
    signs,
    sign_repository,
    user,
    when,
) -> None:
    updated_chapter = attr.evolve(
        CHAPTER,
        lines=(
            attr.evolve(
                CHAPTER.lines[0],
                number=LineNumber(1, True),
                variants=(
                    attr.evolve(
                        CHAPTER.lines[0].variants[0],
                        manuscripts=(
                            attr.evolve(
                                CHAPTER.lines[0].variants[0].manuscripts[0],
                                line=TextLine.of_iterable(
                                    LineNumber(1, True),
                                    (
                                        Word.of(
                                            [
                                                Reading.of_name("nu"),
                                                Joiner.hyphen(),
                                                BrokenAway.open(),
                                                Reading.of_name("ku"),
                                                Joiner.hyphen(),
                                                Reading.of_name("ši"),
                                                BrokenAway.close(),
                                            ]
                                        ),
                                    ),
                                ),
                            ),
                        ),
                    ),
                ),
            ),
        ),
        signs=("ABZ075 KU ABZ207a\\u002F207b\\u0020X\nKU\nABZ075",),
        parser_version=ATF_PARSER_VERSION,
    )
    expect_find_and_update_chapter(
        bibliography,
        changelog,
        CHAPTER_WITHOUT_DOCUMENTS,
        updated_chapter,
        signs,
        sign_repository,
        text_repository,
        user,
        when,
    )

    assert (
        corpus.update_lines(
            CHAPTER.id_, LinesUpdate([], set(), {0: updated_chapter.lines[0]}), user
        )
        == updated_chapter
    )

def test_text_line_of_iterable(code: str, language: Language) -> None:
    tokens = [
        Word.of([Reading.of_name("first")]),
        LanguageShift.of(code),
        Word.of([Reading.of_name("second")]),
        LanguageShift.of("%sb"),
        LoneDeterminative.of([Determinative.of([Reading.of_name("third")])]),
        Word.of([BrokenAway.open(), Reading.of_name("fourth")]),
        UnknownNumberOfSigns.of(),
        BrokenAway.close(),
    ]
    expected_tokens = (
        Word.of([Reading.of_name("first")], DEFAULT_LANGUAGE),
        LanguageShift.of(code),
        Word.of([Reading.of_name("second")], language),
        LanguageShift.of("%sb"),
        LoneDeterminative.of(
            [Determinative.of([Reading.of_name("third")])], Language.AKKADIAN
        ),
        Word.of(
            [
                BrokenAway.open(),
                Reading.of(
                    (
                        ValueToken(
                            frozenset({EnclosureType.BROKEN_AWAY}),
                            ErasureState.NONE,
                            "fourth",
                        ),
                    )
                ).set_enclosure_type(frozenset({EnclosureType.BROKEN_AWAY})),
            ],
            DEFAULT_LANGUAGE,
        ),
        UnknownNumberOfSigns(frozenset({EnclosureType.BROKEN_AWAY}), ErasureState.NONE),
        BrokenAway.close().set_enclosure_type(frozenset({EnclosureType.BROKEN_AWAY})),
    )
    line = TextLine.of_iterable(LINE_NUMBER, tokens)

    assert line.line_number == LINE_NUMBER
    assert line.content == expected_tokens
    assert (
        line.key
        == f"TextLine⁞{line.atf}⟨{'⁚'.join(token.get_key() for token in expected_tokens)}⟩"
    )
    assert line.atf == f"1. first {code} second %sb {{third}} [fourth ...]"

from ebl.transliteration.domain.word_tokens import Word
from ebl.fragmentarium.domain.joins import Join, Joins

COLLECTION = "fragments"
JOINS_COLLECTION = "joins"

ANOTHER_LEMMATIZED_FRAGMENT = attr.evolve(
    TransliteratedFragmentFactory.build(),
    text=Text(
        (
            TextLine(
                LineNumber(1),
                (
                    Word.of(
                        [Logogram.of_name("GI", 6)], unique_lemma=(WordId("ginâ I"),)
                    ),
                    Word.of([Reading.of_name("ana")], unique_lemma=(WordId("ana II"),)),
                    Word.of([Reading.of_name("ana")], unique_lemma=(WordId("ana II"),)),
                    Word.of(
                        [
                            Reading.of_name("u", 4),
                            Joiner.hyphen(),
                            Reading.of_name("šu"),
                        ],
                        unique_lemma=(WordId("ūsu I"),),
                    ),
                    AkkadianWord.of(
                        [ValueToken.of("ana")], unique_lemma=(WordId("normalized I"),)
                    ),
                ),

def test_updating_lemmatization(
    client, bibliography, sign_repository, signs, text_repository
):
    allow_signs(signs, sign_repository)
    chapter: Chapter = ChapterFactory.build()
    allow_references(chapter, bibliography)
    text_repository.create_chapter(chapter)
    updated_chapter = attr.evolve(
        chapter,
        lines=(
            attr.evolve(
                chapter.lines[0],
                variants=(
                    attr.evolve(
                        chapter.lines[0].variants[0],
                        reconstruction=(
                            chapter.lines[0].variants[0].reconstruction[0],
                            chapter.lines[0].variants[0].reconstruction[1].set_unique_lemma(
                                LemmatizationToken(
                                    chapter.lines[0].variants[0].reconstruction[1].value,
                                    (WordId("aklu I"),),
                                )
                            ),
                            *chapter.lines[0].variants[0].reconstruction[2:6],
                            chapter.lines[0].variants[0].reconstruction[6].set_unique_lemma(
                                LemmatizationToken(
                                    chapter.lines[0].variants[0].reconstruction[6].value,
                                    tuple(),
                                )
                            ),
                        ),
                        manuscripts=(
                            attr.evolve(
                                chapter.lines[0].variants[0].manuscripts[0],
                                line=TextLine.of_iterable(
                                    chapter.lines[0].variants[0].manuscripts[0].line.line_number,
                                    (
                                        Word.of(
                                            [
                                                Reading.of_name("ku"),
                                                Joiner.hyphen(),
                                                BrokenAway.open(),
                                                Reading.of_name("nu"),
                                                Joiner.hyphen(),
                                                Reading.of_name("ši"),
                                                BrokenAway.close(),
                                            ],
                                            unique_lemma=[WordId("aklu I")],
                                        ),
                                    ),
                                ),
                            ),
                        ),
                    ),
                ),
            ),
        ),
    )
    expected = create_chapter_dto(updated_chapter)

    post_result = client.simulate_post(
        create_chapter_url(chapter, "/lemmatization"), body=json.dumps(DTO)
    )

    assert post_result.status == falcon.HTTP_OK
    assert post_result.json == expected

    get_result = client.simulate_get(create_chapter_url(chapter))

    assert get_result.status == falcon.HTTP_OK
    assert get_result.json == expected

from ebl.transliteration.domain.sign_tokens import Divider, Reading
from ebl.transliteration.domain.unknown_sign_tokens import UnclearSign, UnidentifiedSign
from ebl.transliteration.domain.word_tokens import ErasureState, Word

ERASURE_LEFT = Erasure.open()
ERASURE_CENTER = Erasure.center()
ERASURE_RIGHT = Erasure.close()


@pytest.mark.parametrize("parser", [parse_erasure])
@pytest.mark.parametrize(
    "atf,erased,over_erased",
    [
        (
            "°ku\\ku°",
            (Word.of(erasure=ErasureState.ERASED, parts=[Reading.of_name("ku")]),),
            (Word.of(erasure=ErasureState.OVER_ERASED, parts=[Reading.of_name("ku")]),),
        ),
        (
            "°::\\:.°",
            (Divider.of("::").set_erasure(ErasureState.ERASED),),
            (Divider.of(":.").set_erasure(ErasureState.OVER_ERASED),),
        ),
        (
            "°\\ku°",
            tuple(),
            (Word.of(erasure=ErasureState.OVER_ERASED, parts=[Reading.of_name("ku")]),),
        ),
        (

from ebl.transliteration.domain.language import Language
from ebl.transliteration.domain.markup import EmphasisPart, LanguagePart, StringPart
from ebl.transliteration.domain.note_line import NoteLine
from ebl.transliteration.domain.sign_tokens import Reading
from ebl.transliteration.domain.tokens import (
    EnclosureType,
    ErasureState,
    LanguageShift,
    Token,
    UnknownNumberOfSigns,
    ValueToken,
)
from ebl.transliteration.domain.word_tokens import Word

TRANSLITERATION: Sequence[Token] = (
    Word.of([Reading.of_name("bu")]),
    LanguageShift.of("%es"),
    Word.of([BrokenAway.open(), Reading.of_name("kur")]),
    UnknownNumberOfSigns.of(),
    BrokenAway.close(),
)
EXPECTED_ATF = "bu %es [kur ...]"


def expected_transliteration(language: Language) -> Sequence[Token]:
    return (
        Word.of([Reading.of_name("bu")], language),
        LanguageShift.of("%es"),
        Word.of(
            [
                BrokenAway.open(),
                Reading.of(
                    (
                        ValueToken(
                            frozenset({EnclosureType.BROKEN_AWAY}),
                            ErasureState.NONE,
                            "kur",
                        ),
                    )
                ).set_enclosure_type(frozenset({EnclosureType.BROKEN_AWAY})),
            ],
            Language.EMESAL,
        ),
        UnknownNumberOfSigns(frozenset({EnclosureType.BROKEN_AWAY}), ErasureState.NONE),
        BrokenAway.close().set_enclosure_type(frozenset({EnclosureType.BROKEN_AWAY})),
    )

from ebl.transliteration.domain import atf
from ebl.transliteration.domain.dollar_line import RulingDollarLine
from ebl.transliteration.domain.labels import ColumnLabel, ObjectLabel, SurfaceLabel
from ebl.transliteration.domain.line import Line
from ebl.transliteration.domain.line_number import LineNumber
from ebl.transliteration.domain.sign_tokens import Reading
from ebl.transliteration.domain.text import LineLabel, Text
from ebl.transliteration.domain.text_line import TextLine
from ebl.transliteration.domain.tokens import Joiner
from ebl.transliteration.domain.translation_line import Extent, TranslationLine
from ebl.transliteration.domain.word_tokens import Word

LINES: Sequence[Line] = (
    TextLine(
        LineNumber(1),
        (Word.of([Reading.of_name("ha"), Joiner.hyphen(), Reading.of_name("am")]),),
    ),
    RulingDollarLine(atf.Ruling.SINGLE),
)
PARSER_VERSION = "1.0.0"
TEXT: Text = Text(LINES, PARSER_VERSION)


def test_of_iterable() -> None:
    assert Text.of_iterable(LINES) == Text(LINES, atf.ATF_PARSER_VERSION)


def test_lines() -> None:
    assert TEXT.lines == LINES

from ebl.transliteration.domain.enclosure_tokens import BrokenAway, PerhapsBrokenAway
from ebl.transliteration.domain.line import ControlLine, EmptyLine, Line
from ebl.transliteration.domain.line_number import LineNumber
from ebl.transliteration.domain.sign_tokens import Reading
from ebl.transliteration.domain.text_line import TextLine
from ebl.transliteration.domain.tokens import Joiner, LanguageShift
from ebl.transliteration.domain.word_tokens import Word


@pytest.mark.parametrize(  # pyre-ignore[56]
    "old,new,expected",
    [
        (
            EmptyLine(),
            TextLine.of_iterable(LineNumber(1), [Word.of([Reading.of_name("bu")])]),
            TextLine.of_iterable(LineNumber(1), [Word.of([Reading.of_name("bu")])]),
        ),
        (
            TextLine.of_iterable(LineNumber(1), [Word.of([Reading.of_name("bu")])]),
            ControlLine("#", " comment"),
            ControlLine("#", " comment"),
        ),
        (
            TextLine.of_iterable(LineNumber(1), [Word.of([Reading.of_name("bu")])]),
            TextLine.of_iterable(LineNumber(2), [Word.of([Reading.of_name("bu")])]),
            TextLine.of_iterable(LineNumber(2),