def test_set_unique_lemma_empty() -> None:
    word = AkkadianWord.of((ValueToken.of("bu"),), unique_lemma=(WordId("nu I"),))
    lemma = LemmatizationToken("bu", tuple())
    expected = AkkadianWord.of((ValueToken.of("bu"),))

    assert word.set_unique_lemma(lemma) == expected
def test_language_shift(value, expected_language, normalized):
    shift = LanguageShift.of(value)
    equal = LanguageShift.of(value)
    other = ValueToken.of(r"%bar")

    assert shift.value == value
    assert shift.clean_value == value
    assert shift.get_key() == f"LanguageShift⁝{value}"
    assert shift.lemmatizable is False
    assert shift.normalized == normalized
    assert shift.language == expected_language

    serialized = {
        "type": "LanguageShift",
        "normalized": normalized,
        "language": shift.language.name,
    }
    assert_token_serialization(shift, serialized)

    assert shift == equal
    assert hash(shift) == hash(equal)

    assert shift != other
    assert hash(shift) != hash(other)

    assert shift != ValueToken.of(value)
class DollarLineSchema(LineBaseSchema):
    prefix = fields.Constant("$")
    content = fields.Function(
        lambda obj: [OneOfTokenSchema().dump(ValueToken.of(f" {obj.display_value}"))],
        lambda value: value,
    )
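
# Illustrative sketch (added; the helper name is ours, not part of the module):
# the dump side of the content field above wraps the dollar line's display
# value in a single space-prefixed ValueToken before serializing it.
def _example_dollar_content_dump(display_value: str) -> list:
    return [OneOfTokenSchema().dump(ValueToken.of(f" {display_value}"))]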
class ParallelLineSchema(LineBaseSchema):
    prefix = fields.Constant("//")
    content = fields.Function(
        lambda obj: [OneOfTokenSchema().dump(ValueToken.of(obj.display_value))],
        lambda value: value,
    )
    display_value = fields.String(data_key="displayValue")
    has_cf = fields.Boolean(data_key="hasCf", required=True)
def of_name(
    name: str,
    sub_index: Optional[int] = 1,
    modifiers: Sequence[str] = tuple(),
    flags: Sequence[atf.Flag] = tuple(),
    sign: Optional[Token] = None,
) -> "Reading":
    return Reading.of((ValueToken.of(name),), sub_index, modifiers, flags, sign)
def of_name(
    name: str,
    modifiers: Sequence[str] = tuple(),
    flags: Sequence[atf.Flag] = tuple(),
    sign: Optional[Token] = None,
    sub_index: int = 1,
) -> "Number":
    return Number.of((ValueToken.of(name),), modifiers, flags, sign, sub_index)
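
# Note (added for orientation; the _demo name is ours): both of_name helpers
# are sugar over the corresponding of constructor — the name string is wrapped
# in a one-element ValueToken tuple, so the two calls below build the same
# Reading.
def _demo_of_name_equivalence() -> bool:
    return Reading.of_name("kur") == Reading.of(
        (ValueToken.of("kur"),), 1, tuple(), tuple(), None
    )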
def test_text_line_of_iterable_normalized() -> None:
    tokens = [
        LanguageShift.normalized_akkadian(),
        AkkadianWord.of((ValueToken.of("kur"),)),
    ]
    expected_tokens = (
        LanguageShift.normalized_akkadian(),
        AkkadianWord.of((ValueToken.of("kur"),)),
    )
    line = TextLine.of_iterable(LINE_NUMBER, tokens)

    assert line.content == expected_tokens
    assert (
        line.key
        == f"TextLine⁞{line.atf}⟨{'⁚'.join(token.get_key() for token in expected_tokens)}⟩"
    )
    assert line.atf == "1. %n kur"
def of_name(
    name: str,
    sub_index: Optional[int] = 1,
    modifiers: Sequence[str] = tuple(),
    flags: Sequence[atf.Flag] = tuple(),
    sign: Optional[Token] = None,
    surrogate: Sequence[Token] = tuple(),
) -> "Logogram":
    return Logogram.of(
        (ValueToken.of(name),), sub_index, modifiers, flags, sign, surrogate
    )
def test_value_token():
    value = "value"
    token = ValueToken.of(value)
    equal = ValueToken.of(value)
    other = ValueToken.of("anothervalue")

    assert token.value == value
    assert token.clean_value == value
    assert token.get_key() == f"ValueToken⁝{value}"
    assert token.lemmatizable is False

    serialized = {"type": "ValueToken"}
    assert_token_serialization(token, serialized)

    assert token == equal
    assert hash(token) == hash(equal)

    assert token != other
    assert hash(token) != hash(other)
def tokens_to_value_tokens(children: Sequence) -> Sequence[EblToken]:
    return tuple(
        (
            ValueToken.of(token.value)  # pyre-ignore[16]
            if isinstance(token, Token)
            else token
        )
        for child in children
        for token in _token_to_list(child)
    )
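
# Note (added): the comprehension above flattens every child via _token_to_list
# and converts raw Lark Tokens (str subclasses carrying a .value) into eBL
# ValueTokens, while passing already-converted eBL tokens through unchanged.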
def test_variant():
    reading = Reading.of([ValueToken.of("sa"), BrokenAway.open(), ValueToken.of("l")])
    divider = Divider.of(":")
    variant = Variant.of(reading, divider)

    expected_value = "sa[l/:"
    assert variant.value == expected_value
    assert variant.clean_value == "sal/:"
    assert variant.tokens == (reading, divider)
    assert variant.parts == variant.tokens
    assert (
        variant.get_key()
        == f"Variant⁝{expected_value}⟨{'⁚'.join(token.get_key() for token in variant.tokens)}⟩"
    )
    assert variant.lemmatizable is False

    serialized = {
        "type": "Variant",
        "tokens": OneOfTokenSchema().dump([reading, divider], many=True),
    }
    assert_token_serialization(variant, serialized)
class NoteLineSchema(LineBaseSchema):
    prefix = fields.Constant("#note: ")
    content = fields.Function(
        lambda obj: OneOfTokenSchema().dump(
            [ValueToken.of(part.value) for part in obj.parts], many=True
        ),
        lambda value: value,
    )
    parts = fields.List(fields.Nested(OneOfNoteLinePartSchema), required=True)

    @post_load
    def make_line(self, data, **kwargs) -> NoteLine:
        return NoteLine(data["parts"])
class ControlLineSchema(LineBaseSchema):
    prefix = fields.String(required=True)
    content = fields.Function(
        lambda obj: [OneOfTokenSchema().dump(ValueToken.of(obj.content))],
        lambda value: OneOfTokenSchema().load(value, many=True),
        required=True,
    )

    @post_load
    def make_line(self, data, **kwargs) -> ControlLine:
        return ControlLine(
            data["prefix"], " ".join(token.value for token in data["content"])
        )
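
# Round-trip sketch (added; assumes the schema dumps every field its loader
# needs, which the paired dump/load functions above are written for): content
# is dumped as one ValueToken and re-joined from the token values on load.
def _demo_control_line_roundtrip() -> ControlLine:
    schema = ControlLineSchema()
    return schema.load(schema.dump(ControlLine("#", "a comment")))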
class LineVariantFactory(factory.Factory):
    class Meta:
        model = LineVariant

    class Params:
        manuscript_id = factory.Sequence(lambda n: n)
        manuscript = factory.SubFactory(
            ManuscriptLineFactory,
            manuscript_id=factory.SelfAttribute("..manuscript_id"),
        )

    reconstruction = (
        LanguageShift.normalized_akkadian(),
        AkkadianWord.of((ValueToken.of("buāru"),)),
        MetricalFootSeparator.uncertain(),
        BrokenAway.open(),
        UnknownNumberOfSigns.of(),
        Caesura.certain(),
        AkkadianWord.of(
            (
                UnknownNumberOfSigns.of(),
                BrokenAway.close(),
                Joiner.hyphen(),
                ValueToken.of("buāru"),
            ),
            (Flag.DAMAGE,),
        ),
    )
    note = factory.fuzzy.FuzzyChoice([None, NoteLine((StringPart("a note"),))])
    manuscripts = factory.List([factory.SelfAttribute("..manuscript")], TupleFactory)
    intertext = factory.fuzzy.FuzzyChoice([tuple(), (StringPart("bar"),)])
    parallel_lines = factory.List(
        [
            factory.SubFactory(ParallelCompositionFactory),
            factory.SubFactory(ParallelTextFactory),
            factory.SubFactory(ParallelFragmentFactory),
        ],
        TupleFactory,
    )
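
# Usage sketch (added; standard factory_boy semantics): Params attributes such
# as manuscript_id can be overridden at build time, e.g.
# LineVariantFactory.build(manuscript_id=1) builds a variant whose manuscript
# line is generated with that id via the SelfAttribute wiring above.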
def test_parse_normalized_akkadian_shift() -> None:
    word = "ha"
    line = f"1. {word} %n {word} %sux {word}"
    expected = Text(
        (
            TextLine.of_iterable(
                LineNumber(1),
                (
                    Word.of((Reading.of_name(word),), DEFAULT_LANGUAGE),
                    LanguageShift.normalized_akkadian(),
                    AkkadianWord.of((ValueToken.of(word),)),
                    LanguageShift.of("%sux"),
                    Word.of((Reading.of_name(word),), Language.SUMERIAN),
                ),
            ),
        )
    )

    assert parse_atf_lark(line).lines == expected.lines
class TranslationLineSchema(LineBaseSchema):
    prefix = fields.String()
    content = fields.Function(
        lambda obj: [
            OneOfTokenSchema().dump(
                ValueToken.of("".join(part.value for part in obj.parts))
            )
        ],
        lambda value: value,
    )
    parts = fields.List(fields.Nested(OneOfNoteLinePartSchema), required=True)
    language = fields.String(required=True)
    extent = fields.Nested(ExtentSchema, required=True, allow_none=True)

    @post_load
    def make_line(self, data, **kwargs) -> TranslationLine:
        return TranslationLine(data["parts"], data["language"], data["extent"])
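
# Note (added): unlike NoteLineSchema above, which dumps one ValueToken per
# part, this schema concatenates all part values into a single ValueToken
# before dumping.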
def test_text_line_of_iterable(code: str, language: Language) -> None:
    tokens = [
        Word.of([Reading.of_name("first")]),
        LanguageShift.of(code),
        Word.of([Reading.of_name("second")]),
        LanguageShift.of("%sb"),
        LoneDeterminative.of([Determinative.of([Reading.of_name("third")])]),
        Word.of([BrokenAway.open(), Reading.of_name("fourth")]),
        UnknownNumberOfSigns.of(),
        BrokenAway.close(),
    ]
    expected_tokens = (
        Word.of([Reading.of_name("first")], DEFAULT_LANGUAGE),
        LanguageShift.of(code),
        Word.of([Reading.of_name("second")], language),
        LanguageShift.of("%sb"),
        LoneDeterminative.of(
            [Determinative.of([Reading.of_name("third")])], Language.AKKADIAN
        ),
        Word.of(
            [
                BrokenAway.open(),
                Reading.of(
                    (
                        ValueToken(
                            frozenset({EnclosureType.BROKEN_AWAY}),
                            ErasureState.NONE,
                            "fourth",
                        ),
                    )
                ).set_enclosure_type(frozenset({EnclosureType.BROKEN_AWAY})),
            ],
            DEFAULT_LANGUAGE,
        ),
        UnknownNumberOfSigns(frozenset({EnclosureType.BROKEN_AWAY}), ErasureState.NONE),
        BrokenAway.close().set_enclosure_type(frozenset({EnclosureType.BROKEN_AWAY})),
    )
    line = TextLine.of_iterable(LINE_NUMBER, tokens)

    assert line.line_number == LINE_NUMBER
    assert line.content == expected_tokens
    assert (
        line.key
        == f"TextLine⁞{line.atf}⟨{'⁚'.join(token.get_key() for token in expected_tokens)}⟩"
    )
    assert line.atf == f"1. first {code} second %sb {{third}} [fourth ...]"
def expected_transliteration(language: Language) -> Sequence[Token]:
    return (
        Word.of([Reading.of_name("bu")], language),
        LanguageShift.of("%es"),
        Word.of(
            [
                BrokenAway.open(),
                Reading.of(
                    (
                        ValueToken(
                            frozenset({EnclosureType.BROKEN_AWAY}),
                            ErasureState.NONE,
                            "kur",
                        ),
                    )
                ).set_enclosure_type(frozenset({EnclosureType.BROKEN_AWAY})),
            ],
            Language.EMESAL,
        ),
        UnknownNumberOfSigns(frozenset({EnclosureType.BROKEN_AWAY}), ErasureState.NONE),
        BrokenAway.close().set_enclosure_type(frozenset({EnclosureType.BROKEN_AWAY})),
    )
def test_lone_determinative(language):
    value = "{mu}"
    parts = [Determinative.of([Reading.of_name("mu")])]
    lone_determinative = LoneDeterminative.of(parts, language)
    equal = LoneDeterminative.of(parts, language)
    other_language = LoneDeterminative.of(parts, Language.UNKNOWN)
    other_parts = LoneDeterminative.of(
        [Determinative.of([Reading.of_name("bu")])], language
    )

    assert lone_determinative.value == value
    assert lone_determinative.lemmatizable is False
    assert lone_determinative.language == language
    assert lone_determinative.normalized is False
    assert lone_determinative.unique_lemma == tuple()

    serialized = {
        "type": "LoneDeterminative",
        "uniqueLemma": [],
        "normalized": False,
        "language": lone_determinative.language.name,
        "lemmatizable": lone_determinative.lemmatizable,
        "alignable": lone_determinative.lemmatizable,
        "erasure": ErasureState.NONE.name,
        "parts": OneOfTokenSchema().dump(parts, many=True),
    }
    assert_token_serialization(lone_determinative, serialized)

    assert lone_determinative == equal
    assert hash(lone_determinative) == hash(equal)

    for not_equal in [other_language, other_parts]:
        assert lone_determinative != not_equal
        assert hash(lone_determinative) != hash(not_equal)

    assert lone_determinative != ValueToken.of(value)
                (
                    Word.of(
                        [Logogram.of_name("GI", 6)], unique_lemma=(WordId("ginâ I"),)
                    ),
                    Word.of([Reading.of_name("ana")], unique_lemma=(WordId("ana II"),)),
                    Word.of([Reading.of_name("ana")], unique_lemma=(WordId("ana II"),)),
                    Word.of(
                        [
                            Reading.of_name("u", 4),
                            Joiner.hyphen(),
                            Reading.of_name("šu"),
                        ],
                        unique_lemma=(WordId("ūsu I"),),
                    ),
                    AkkadianWord.of(
                        [ValueToken.of("ana")], unique_lemma=(WordId("normalized I"),)
                    ),
                ),
            ),
        )
    ),
    signs="MI DIŠ DIŠ UD ŠU",
)

SCHEMA = FragmentSchema()


def test_create(database, fragment_repository):
    fragment = LemmatizedFragmentFactory.build()
    fragment_id = fragment_repository.create(fragment)
def ebl_atf_text_line__number_name_part(self, children):
    return ValueToken.of("".join(children))
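
# Note (added): this Lark transformer callback receives the matched child
# tokens (str subclasses), so "".join(children) collapses a number name such
# as ["1", "a"] into a single ValueToken with value "1a".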
def test_set_alignment_empty() -> None:
    word = AkkadianWord.of((ValueToken.of("bu"),), alignment=1)
    expected = AkkadianWord.of((ValueToken.of("bu"),))

    assert word.set_alignment(None, None) == expected
def test_akkadian_word_invalid_modifier() -> None:
    with pytest.raises(ValueError):
        AkkadianWord.of((ValueToken.of("ibnû"),), (Flag.COLLATION,))
    BrokenAway,
    Emendation,
    PerhapsBrokenAway,
)
from ebl.transliteration.domain.normalized_akkadian import (
    AkkadianWord,
    Caesura,
    MetricalFootSeparator,
)
from ebl.transliteration.domain.tokens import Joiner, UnknownNumberOfSigns, ValueToken


@pytest.mark.parametrize(  # pyre-ignore[56]
    "word,expected,lemmatizable",
    [
        (AkkadianWord.of((ValueToken.of("ibnû"),)), "ibnû", True),
        (
            AkkadianWord.of(
                (ValueToken.of("ibnû"),),
                (Flag.UNCERTAIN, Flag.DAMAGE, Flag.CORRECTION),
            ),
            "ibnû?#!",
            True,
        ),
        (AkkadianWord.of((BrokenAway.open(), ValueToken.of("ibnû"))), "[ibnû", True),
        (
            AkkadianWord.of(
                (
                    BrokenAway.open(),
                    PerhapsBrokenAway.open(),
                    ValueToken.of("ib"),
                    PerhapsBrokenAway.close(),
                    ValueToken.of("nû"),
expected_value = "x!" assert sign.value == expected_value assert sign.clean_value == "x" assert sign.get_key() == f"UnclearSign⁝{expected_value}" assert sign.flags == tuple(flags) assert sign.lemmatizable is False serialized = {"type": "UnclearSign", "flags": ["!"]} assert_token_serialization(sign, serialized) @pytest.mark.parametrize( # pyre-ignore[56] "name_parts,sub_index,modifiers,flags,sign,expected_value,expected_clean_value," "expected_name", [ ((ValueToken.of("kur"), ), 1, [], [], None, "kur", "kur", "kur"), ((ValueToken.of("kurʾ"), ), 1, [], [], None, "kurʾ", "kurʾ", "kurʾ"), ((ValueToken.of("ʾ"), ), 1, [], [], None, "ʾ", "ʾ", "ʾ"), ( (ValueToken.of("k"), BrokenAway.open(), ValueToken.of("ur")), 1, [], [], None, "k[ur", "kur", "kur", ), ( (ValueToken.of("ku"), BrokenAway.close(), ValueToken.of("r")), 1,
def _make_sequence(values: List[str]) -> List[Token]:
    return [AkkadianWord.of((ValueToken.of(value),)) for value in values]
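
# Usage sketch (added): _make_sequence(["kur", "ra"]) yields
# [AkkadianWord.of((ValueToken.of("kur"),)), AkkadianWord.of((ValueToken.of("ra"),))],
# a convenience for building normalized-Akkadian test fixtures.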
def parts(self) -> Sequence[Token]:
    return [ValueToken.of(part) for part in self.compound_parts]
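
# Note (added): compound_parts holds plain strings, so this accessor
# materializes them as ValueTokens on each call.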
from ebl.corpus.domain.manuscript import Manuscript
from ebl.transliteration.domain.text_id import TextId
from ebl.transliteration.domain.line_number import LineNumber
from ebl.transliteration.domain.sign_tokens import Reading
from ebl.transliteration.domain.text_line import TextLine
from ebl.transliteration.domain.tokens import ValueToken
from ebl.transliteration.domain.word_tokens import Word
from ebl.transliteration.domain.genre import Genre
from ebl.transliteration.domain.labels import SurfaceLabel
from ebl.transliteration.domain.atf import Surface
from ebl.corpus.web.extant_lines import ExtantLinesSchema
from ebl.transliteration.application.line_number_schemas import OneOfLineNumberSchema

LABELS = (SurfaceLabel.from_label(Surface.OBVERSE),)
MANUSCRIPT_TEXT_1 = TextLine(
    LineNumber(2), (Word.of([Reading.of([ValueToken.of("ku")])]),)
)


def test_extant_lines_schema() -> None:
    manuscript = Manuscript(1)
    manuscript_line = ManuscriptLine(1, LABELS, MANUSCRIPT_TEXT_1)
    variant = LineVariant(tuple(), manuscripts=(manuscript_line,))
    text_line = Line(LineNumber(1), (variant,))
    chapter = Chapter(
        TextId(Genre.LITERATURE, 0, 0),
        manuscripts=(manuscript,),
        lines=(text_line,),
    )
    assert ExtantLinesSchema().dump(chapter) == {
        "extantLines": {
            str(manuscript.siglum): {
                " ".join(label.to_value() for label in manuscript_line.labels): [{
class TransliteratedFragmentFactory(FragmentFactory):
    text = Text(
        (
            TextLine.of_iterable(
                LineNumber(1, True),
                (
                    Word.of([UnidentifiedSign.of()]),
                    Word.of(
                        [
                            Logogram.of_name(
                                "BA",
                                surrogate=[
                                    Reading.of_name("ku"),
                                    Joiner.hyphen(),
                                    Reading.of_name("u", 4),
                                ],
                            )
                        ]
                    ),
                    Column.of(),
                    Tabulation.of(),
                    Word.of(
                        [
                            BrokenAway.open(),
                            UnknownNumberOfSigns.of(),
                            Joiner.hyphen(),
                            Reading.of_name("ku"),
                            BrokenAway.close(),
                            Joiner.hyphen(),
                            Reading.of_name("nu"),
                            Joiner.hyphen(),
                            Reading.of_name("ši"),
                        ]
                    ),
                    Variant.of(Divider.of(":"), Reading.of_name("ku")),
                    Word.of(
                        [
                            BrokenAway.open(),
                            UnknownNumberOfSigns.of(),
                            BrokenAway.close(),
                        ]
                    ),
                    Column.of(2),
                    Divider.of(":", ("@v",), (Flag.DAMAGE,)),
                    CommentaryProtocol.of("!qt"),
                    Word.of([Number.of_name("10", flags=[Flag.DAMAGE])]),
                ),
            ),
            TextLine.of_iterable(
                LineNumber(2, True),
                (
                    Word.of(
                        [
                            BrokenAway.open(),
                            UnknownNumberOfSigns.of(),
                            BrokenAway.close(),
                        ]
                    ),
                    Word.of([Logogram.of_name("GI", 6)]),
                    Word.of([Reading.of_name("ana")]),
                    Word.of(
                        [
                            Reading.of_name("u", 4),
                            Joiner.hyphen(),
                            Reading.of(
                                (
                                    ValueToken.of("š"),
                                    BrokenAway.open(),
                                    ValueToken.of("u"),
                                )
                            ),
                        ]
                    ),
                    Word.of([UnknownNumberOfSigns.of(), BrokenAway.close()]),
                ),
            ),
            TextLine.of_iterable(
                LineNumber(3, True),
                (
                    Word.of([BrokenAway.open(), UnknownNumberOfSigns.of()]),
                    Word.of(
                        [
                            Reading.of(
                                (
                                    ValueToken.of("k"),
                                    BrokenAway.close(),
                                    ValueToken.of("i"),
                                )
                            ),
                            Joiner.hyphen(),
                            Reading.of_name("du"),
                        ]
                    ),
                    Word.of([Reading.of_name("u")]),
                    Word.of(
                        [
                            Reading.of_name("ba"),
                            Joiner.hyphen(),
                            Reading.of_name("ma"),
                            Joiner.hyphen(),
                            Reading.of(
                                (
                                    ValueToken.of("t"),
                                    BrokenAway.open(),
                                    ValueToken.of("i"),
                                )
                            ),
                        ]
                    ),
                    Word.of([UnknownNumberOfSigns.of(), BrokenAway.close()]),
                ),
            ),
            TextLine.of_iterable(
                LineNumber(6, True),
                (
                    Word.of(
                        [
                            BrokenAway.open(),
                            UnknownNumberOfSigns.of(),
                            BrokenAway.close(),
                        ]
                    ),
                    Word.of([UnclearSign.of([Flag.DAMAGE])]),
                    Word.of([Reading.of_name("mu")]),
                    Word.of(
                        [
                            Reading.of_name("ta"),
                            Joiner.hyphen(),
                            Reading.of_name("ma"),
                            InWordNewline.of(),
                            Joiner.hyphen(),
                            Reading.of_name("tu", 2),
                        ]
                    ),
                ),
            ),
            TextLine.of_iterable(
                LineNumber(7, True),
                (
                    Word.of(
                        [
                            Variant.of(
                                Reading.of_name("šu"), CompoundGrapheme.of(["BI×IS"])
                            )
                        ]
                    ),
                    LanguageShift.normalized_akkadian(),
                    AkkadianWord.of([ValueToken.of("kur")]),
                ),
            ),
            StateDollarLine(
                atf.Qualification.AT_LEAST,
                1,
                ScopeContainer(atf.Surface.OBVERSE, ""),
                atf.State.MISSING,
                None,
            ),
            ImageDollarLine("1", None, "numbered diagram of triangle"),
            RulingDollarLine(atf.Ruling.SINGLE),
            LooseDollarLine("this is a loose line"),
            SealDollarLine(1),
            SealAtLine(1),
            HeadingAtLine(1),
            ColumnAtLine(ColumnLabel([atf.Status.COLLATION], 1)),
            SurfaceAtLine(
                SurfaceLabel([atf.Status.COLLATION], atf.Surface.SURFACE, "stone wig")
            ),
            ObjectAtLine(
                ObjectLabel([atf.Status.COLLATION], atf.Object.OBJECT, "stone wig")
            ),
            DiscourseAtLine(atf.Discourse.DATE),
            DivisionAtLine("paragraph", 5),
            CompositeAtLine(atf.Composite.DIV, "part", 1),
            NoteLine(
                (
                    StringPart("a note "),
                    EmphasisPart("italic"),
                    LanguagePart.of_transliteration(
                        Language.AKKADIAN, (Word.of([Reading.of_name("bu")]),)
                    ),
                )
            ),
            ParallelComposition(False, "my name", LineNumber(1)),
            ParallelText(
                True,
                TextId(CorpusGenre.LITERATURE, 1, 1),
                ChapterName(Stage.OLD_BABYLONIAN, "", "my name"),
                LineNumber(1),
                False,
            ),
            ParallelFragment(
                False, MuseumNumber.of("K.1"), True, Labels(), LineNumber(1), False
            ),
        )
    )
    signs = (
        "X BA KU ABZ075 ABZ207a\\u002F207b\\u0020X ABZ377n1/KU ABZ377n1 ABZ411\n"
        "MI DIŠ UD ŠU\n"
        "KI DU ABZ411 BA MA TI\n"
        "X MU TA MA UD\n"
        "ŠU/|BI×IS|"
    )
    folios = Folios((Folio("WGL", "3"), Folio("XXX", "3")))
    record = Record((RecordEntry("test", RecordType.TRANSLITERATION),))
    line_to_vec = (
        (
            LineToVecEncoding.TEXT_LINE,
            LineToVecEncoding.TEXT_LINE,
            LineToVecEncoding.TEXT_LINE,
            LineToVecEncoding.TEXT_LINE,
            LineToVecEncoding.TEXT_LINE,
            LineToVecEncoding.SINGLE_RULING,
        ),
    )
class LemmatizedFragmentFactory(TransliteratedFragmentFactory):
    text = Text(
        (
            TextLine.of_iterable(
                LineNumber(1, True),
                (
                    Word.of([UnidentifiedSign.of()]),
                    Word.of(
                        [
                            Logogram.of_name(
                                "BA",
                                surrogate=[
                                    Reading.of_name("ku"),
                                    Joiner.hyphen(),
                                    Reading.of_name("u", 4),
                                ],
                            )
                        ]
                    ),
                    Column.of(),
                    Tabulation.of(),
                    Word.of(
                        [
                            BrokenAway.open(),
                            UnknownNumberOfSigns.of(),
                            Joiner.hyphen(),
                            Reading.of_name("ku"),
                            BrokenAway.close(),
                            Joiner.hyphen(),
                            Reading.of_name("nu"),
                            Joiner.hyphen(),
                            Reading.of_name("ši"),
                        ]
                    ),
                    Variant.of(Divider.of(":"), Reading.of_name("ku")),
                    Word.of(
                        [
                            BrokenAway.open(),
                            UnknownNumberOfSigns.of(),
                            BrokenAway.close(),
                        ]
                    ),
                    Column.of(2),
                    Divider.of(":", ("@v",), (Flag.DAMAGE,)),
                    CommentaryProtocol.of("!qt"),
                    Word.of([Number.of_name("10", flags=[Flag.DAMAGE])]),
                ),
            ),
            TextLine.of_iterable(
                LineNumber(2, True),
                (
                    Word.of([BrokenAway.open(), UnknownNumberOfSigns.of()]),
                    Word.of(
                        [Logogram.of_name("GI", 6)], unique_lemma=(WordId("ginâ I"),)
                    ),
                    Word.of([Reading.of_name("ana")], unique_lemma=(WordId("ana I"),)),
                    Word.of(
                        [
                            Reading.of_name("u", 4),
                            Joiner.hyphen(),
                            Reading.of(
                                (
                                    ValueToken.of("š"),
                                    BrokenAway.open(),
                                    ValueToken.of("u"),
                                )
                            ),
                        ],
                        unique_lemma=(WordId("ūsu I"),),
                    ),
                    Word.of([UnknownNumberOfSigns.of(), BrokenAway.close()]),
                ),
            ),
            TextLine.of_iterable(
                LineNumber(3, True),
                (
                    Word.of([BrokenAway.open(), UnknownNumberOfSigns.of()]),
                    Word.of(
                        unique_lemma=(WordId("kīdu I"),),
                        parts=[
                            Reading.of(
                                (
                                    ValueToken.of("k"),
                                    BrokenAway.close(),
                                    ValueToken.of("i"),
                                )
                            ),
                            Joiner.hyphen(),
                            Reading.of_name("du"),
                        ],
                    ),
                    Word.of(
                        unique_lemma=(WordId("u I"),), parts=[Reading.of_name("u")]
                    ),
                    Word.of(
                        unique_lemma=(WordId("bamātu I"),),
                        parts=[
                            Reading.of_name("ba"),
                            Joiner.hyphen(),
                            Reading.of_name("ma"),
                            Joiner.hyphen(),
                            Reading.of(
                                (
                                    ValueToken.of("t"),
                                    BrokenAway.open(),
                                    ValueToken.of("i"),
                                )
                            ),
                        ],
                    ),
                    Word.of([UnknownNumberOfSigns.of(), BrokenAway.close()]),
                ),
            ),
            TextLine.of_iterable(
                LineNumber(6, True),
                (
                    Word.of(
                        [
                            BrokenAway.open(),
                            UnknownNumberOfSigns.of(),
                            BrokenAway.close(),
                        ]
                    ),
                    Word.of([UnclearSign.of([Flag.DAMAGE])]),
                    Word.of(
                        unique_lemma=(WordId("mu I"),), parts=[Reading.of_name("mu")]
                    ),
                    Word.of(
                        unique_lemma=(WordId("tamalāku I"),),
                        parts=[
                            Reading.of_name("ta"),
                            Joiner.hyphen(),
                            Reading.of_name("ma"),
                            InWordNewline.of(),
                            Joiner.hyphen(),
                            Reading.of_name("tu", 2),
                        ],
                    ),
                ),
            ),
            TextLine.of_iterable(
                LineNumber(7, True),
                (
                    Word.of(
                        [
                            Variant.of(
                                Reading.of_name("šu"), CompoundGrapheme.of(["BI×IS"])
                            )
                        ]
                    ),
                    LanguageShift.normalized_akkadian(),
                    AkkadianWord.of(
                        [ValueToken.of("kur")], unique_lemma=(WordId("normalized I"),)
                    ),
                ),
            ),
            StateDollarLine(
                atf.Qualification.AT_LEAST,
                1,
                ScopeContainer(atf.Surface.OBVERSE, ""),
                atf.State.MISSING,
                None,
            ),
            ImageDollarLine("1", None, "numbered diagram of triangle"),
            RulingDollarLine(atf.Ruling.SINGLE),
            LooseDollarLine("this is a loose line"),
            SealDollarLine(1),
            SealAtLine(1),
            HeadingAtLine(1),
            ColumnAtLine(ColumnLabel([atf.Status.COLLATION], 1)),
            SurfaceAtLine(
                SurfaceLabel([atf.Status.COLLATION], atf.Surface.SURFACE, "stone wig")
            ),
            ObjectAtLine(
                ObjectLabel([atf.Status.COLLATION], atf.Object.OBJECT, "stone wig")
            ),
            DiscourseAtLine(atf.Discourse.DATE),
            DivisionAtLine("paragraph", 5),
            CompositeAtLine(atf.Composite.DIV, "part", 1),
            NoteLine(
                (
                    StringPart("a note "),
                    EmphasisPart("italic"),
                    LanguagePart.of_transliteration(
                        Language.AKKADIAN, (Word.of([Reading.of_name("bu")]),)
                    ),
                )
            ),
            ParallelComposition(False, "my name", LineNumber(1)),
            ParallelText(
                True,
                TextId(CorpusGenre.LITERATURE, 1, 1),
                ChapterName(Stage.OLD_BABYLONIAN, "", "my name"),
                LineNumber(1),
                False,
            ),
            ParallelFragment(
                False, MuseumNumber.of("K.1"), True, Labels(), LineNumber(1), False
            ),
        )
    )