Example 1
def test_update_lemmatization() -> None:
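    # Swap in a new lemma for the first token of the first line and check
    # that Text.update_lemmatization rebuilds the expected Text.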
    tokens = [list(line) for line in TEXT.lemmatization.tokens]
    tokens[0][0] = LemmatizationToken(tokens[0][0].value, (WordId("nu I"),))
    lemmatization = Lemmatization(tokens)

    expected = Text(
        (
            TextLine(
                LineNumber(1),
                (
                    Word.of(
                        unique_lemma=(WordId("nu I"),),
                        parts=[
                            Reading.of_name("ha"),
                            Joiner.hyphen(),
                            Reading.of_name("am"),
                        ],
                    ),
                ),
            ),
            RulingDollarLine(atf.Ruling.SINGLE),
        ),
        TEXT.parser_version,
    )

    assert TEXT.update_lemmatization(lemmatization) == expected
Example 2
def test_update_lemmatization(
    fragment_updater, user, fragment_repository, parallel_line_injector, changelog, when
):
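    # Stub the repository and changelog, then check that the updater returns
    # the lemmatized fragment with parallel lines injected.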
    transliterated_fragment = TransliteratedFragmentFactory.build()
    number = transliterated_fragment.number
    tokens = [list(line) for line in transliterated_fragment.text.lemmatization.tokens]
    tokens[1][3] = LemmatizationToken(tokens[1][3].value, ("aklu I",))
    lemmatization = Lemmatization(tokens)
    lemmatized_fragment = transliterated_fragment.update_lemmatization(lemmatization)
    (
        when(fragment_repository)
        .query_by_museum_number(number)
        .thenReturn(transliterated_fragment)
    )
    injected_fragment = lemmatized_fragment.set_text(
        parallel_line_injector.inject_transliteration(lemmatized_fragment.text)
    )
    when(changelog).create(
        "fragments",
        user.profile,
        {"_id": str(number), **SCHEMA.dump(transliterated_fragment)},
        {"_id": str(number), **SCHEMA.dump(lemmatized_fragment)},
    ).thenReturn()
    when(fragment_repository).update_lemmatization(lemmatized_fragment).thenReturn()

    result = fragment_updater.update_lemmatization(number, lemmatization, user)
    assert result == (injected_fragment, False)
Example 3
def test_lemmatization() -> None:
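    # The lemmatization mirrors the text structure: one token tuple per line.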
    assert TEXT.lemmatization == Lemmatization(
        (
            (LemmatizationToken("ha-am", tuple()),),
            (LemmatizationToken(" single ruling"),),
        )
    )
Example 4
 def insert_lemmatization(
     self, updater: FragmentUpdater, lemmatization, museum_number
 ):
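     # Wrap the raw token lines in a Lemmatization and apply it through the
     # updater as the ATF importer user.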
     lemmatization = Lemmatization(tuple(lemmatization))
     user = AtfImporterUser(self.username)
     updater.update_lemmatization(
         parse_museum_number(museum_number), lemmatization, user
     )
Example 5
def test_update_lemmatization_not_found(
    fragment_updater, user, fragment_repository, when
):
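    # The repository raises NotFoundError, which the updater must propagate.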
    number = "K.1"
    (when(fragment_repository).query_by_museum_number(number).thenRaise(NotFoundError))

    with pytest.raises(NotFoundError):
        fragment_updater.update_lemmatization(
            number, Lemmatization(((LemmatizationToken("1.", tuple()),),)), user
        )
Example 6
def test_update_lemmatization(fragment_repository):
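    # Round-trip test: create the fragment, update its lemmatization, and
    # re-query it from the repository.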
    transliterated_fragment = TransliteratedFragmentFactory.build()
    fragment_repository.create(transliterated_fragment)
    tokens = [list(line) for line in transliterated_fragment.text.lemmatization.tokens]
    tokens[1][3] = LemmatizationToken(tokens[1][3].value, ("aklu I",))
    lemmatization = Lemmatization(tokens)
    updated_fragment = transliterated_fragment.update_lemmatization(lemmatization)

    fragment_repository.update_lemmatization(updated_fragment)
    result = fragment_repository.query_by_museum_number(transliterated_fragment.number)

    assert result == updated_fragment
Example 7
def test_update_lemmatization():
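    # Updating the lemmatization must be equivalent to evolving the fragment
    # with the re-lemmatized text.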
    transliterated_fragment = TransliteratedFragmentFactory.build()
    tokens = [list(line) for line in transliterated_fragment.text.lemmatization.tokens]
    tokens[1][3] = LemmatizationToken(tokens[1][3].value, ("nu I",))
    lemmatization = Lemmatization(tokens)
    expected = attr.evolve(
        transliterated_fragment,
        text=transliterated_fragment.text.update_lemmatization(lemmatization),
    )

    assert transliterated_fragment.update_lemmatization(lemmatization) == expected
Example 8
 @property
 def lemmatization(self) -> Lemmatization:
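     # One tuple of lemmatization tokens per line of the text.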
     return Lemmatization(tuple(line.lemmatization for line in self.lines))
Example 9
def test_update_lemmatization_incompatible():
    fragment = FragmentFactory.build()
    lemmatization = Lemmatization(((LemmatizationToken("mu", tuple()),),))
    with pytest.raises(LemmatizationError):
        fragment.update_lemmatization(lemmatization)
Example 10
import json

import falcon
import pytest

from ebl.transliteration.domain.museum_number import MuseumNumber
from ebl.fragmentarium.web.dtos import create_response_dto
from ebl.fragmentarium.web.lemmatizations import LemmatizationSchema
from ebl.lemmatization.domain.lemmatization import Lemmatization, LemmatizationToken
from ebl.tests.factories.fragment import FragmentFactory, TransliteratedFragmentFactory
from ebl.transliteration.application.lemmatization_schema import (
    LemmatizationTokenSchema,
)

TOKEN = LemmatizationToken("kur", tuple())
LEMMATIZATION = Lemmatization(((TOKEN,),))

SERIALIZED = {"lemmatization": [[LemmatizationTokenSchema().dump(TOKEN)]]}


def test_serialize_lemmatization():
    assert LemmatizationSchema().dump(LEMMATIZATION) == SERIALIZED


def test_deserialize_lemmatization():
    assert LemmatizationSchema().load(SERIALIZED) == LEMMATIZATION


def test_update_lemmatization(client, fragmentarium, user, database):
    transliterated_fragment = TransliteratedFragmentFactory.build()
    fragmentarium.create(transliterated_fragment)
    tokens = [list(line) for line in transliterated_fragment.text.lemmatization.tokens]
Example 11
def test_update_lemmatization_incompatible() -> None:
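    # A one-token lemmatization does not match TEXT's shape, so it is rejected.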
    lemmatization = Lemmatization(((LemmatizationToken("mu", tuple()),),))
    with pytest.raises(LemmatizationError):
        TEXT.update_lemmatization(lemmatization)
Example 12
def test_update_lemmatization_wrong_lines() -> None:
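    # An extra (empty) line of tokens does not match the text, so it is rejected.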
    lemmatization = Lemmatization((*TEXT.lemmatization.tokens, tuple()))

    with pytest.raises(LemmatizationError):
        TEXT.update_lemmatization(lemmatization)
Example 13
 def make_lemmatization(self, data, **kwargs):
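     # Likely a marshmallow @post_load hook on LemmatizationSchema (an
     # assumption; cf. Example 10): rebuilds the immutable token tuples
     # after deserialization.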
     return Lemmatization(tuple(tuple(line) for line in data["tokens"]))
Example 14
def test_tokens():
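    # The constructor stores the token tuples unchanged.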
    lemmatization = Lemmatization(TOKENS)

    assert lemmatization.tokens == TOKENS
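
The TOKENS fixture is defined outside this snippet. A minimal sketch consistent with the constructor calls in the other examples (the readings and lemmata below are illustrative assumptions, not the project's actual fixture):

TOKENS = (
    (
        LemmatizationToken("kur", tuple()),
        LemmatizationToken("ra", ("ra I",)),
    ),
)

With any such value, lemmatization.tokens is exactly the tuple passed to the constructor.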