def pipeline(merge_patterns=None, terminal_patterns=None):
    # get_lang_class, RussianTokenizer, detect_sentence_boundaries and the
    # *_SPECIAL_CASES constants are assumed to come from the surrounding module.
    CYRILLIC_UPPER = r'[\p{Lu}&&\p{Cyrillic}]'

    Language = get_lang_class('ru')
    # Extend the default infix rules so quotes, hyphens, slashes and
    # "Cyrillic initial + dot" abbreviations are split inside tokens.
    Language.Defaults.infixes += ('«»',)
    Language.Defaults.infixes += ('-',)
    Language.Defaults.infixes += (r'"\/',)
    Language.Defaults.infixes += ('/',)
    Language.Defaults.infixes += (r'(?<=[{au}])\.(?=\w+)'.format(au=CYRILLIC_UPPER),)

    # Token.set_extension('is_adjective', default=False, force=True)
    nlp = Language()
    russian_tokenizer = RussianTokenizer(nlp, merge_patterns=merge_patterns, terminal_patterns=terminal_patterns)

    nlp.add_pipe(detect_sentence_boundaries, name='detect_sentence_boundaries', first=True)
    # nlp.add_pipe(match_adjective, name='match_adjective', after='detect_sentence_boundaries')
    nlp.add_pipe(russian_tokenizer, name='russian_tokenizer', after='detect_sentence_boundaries')

    for case in SPECIAL_CASES:
        nlp.tokenizer.add_special_case(case, [{'ORTH': case}])

    for case in DOT_SPECIAL_CASES:
        nlp.tokenizer.add_special_case(case, [{'ORTH': case}])

    nlp.tokenizer.add_special_case('--', [{'ORTH': '—'}])
    nlp.tokenizer.add_special_case('  ', [{'ORTH': ' '}])

    return nlp
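A minimal usage sketch for the factory above, assuming the names it references (get_lang_class, RussianTokenizer, detect_sentence_boundaries, SPECIAL_CASES, DOT_SPECIAL_CASES) are importable from the surrounding module and that detect_sentence_boundaries sets token.sent_start:

nlp = pipeline()
doc = nlp('Поезд отправляется в 10 ч. вечера. Мы не опоздаем.')
for sent in doc.sents:
    print(sent.text)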
Example #2
def main(lang_id, lang_data_dir, corpora_dir, model_dir):
    model_dir = Path(model_dir)
    lang_data_dir = Path(lang_data_dir) / lang_id
    corpora_dir = Path(corpora_dir) / lang_id

    assert corpora_dir.exists()
    assert lang_data_dir.exists()

    if not model_dir.exists():
        model_dir.mkdir()

    # Build the tokenizer and vocab from the shipped language data and corpora.
    tag_map = json.load((lang_data_dir / 'tag_map.json').open())
    setup_tokenizer(lang_data_dir, model_dir / 'tokenizer')
    setup_vocab(get_lang_class(lang_id).Defaults.lex_attr_getters, tag_map, corpora_dir,
                model_dir / 'vocab')

    # Copy the tag map plus any optional resources (gazetteer, lemma rules, wordnet) alongside the model.
    if (lang_data_dir / 'gazetteer.json').exists():
        copyfile((lang_data_dir / 'gazetteer.json').as_posix(),
                 (model_dir / 'vocab' / 'gazetteer.json').as_posix())

    copyfile((lang_data_dir / 'tag_map.json').as_posix(),
             (model_dir / 'vocab' / 'tag_map.json').as_posix())

    if (lang_data_dir / 'lemma_rules.json').exists():
        copyfile((lang_data_dir / 'lemma_rules.json').as_posix(),
                 (model_dir / 'vocab' / 'lemma_rules.json').as_posix())

    if not (model_dir / 'wordnet').exists() and (corpora_dir / 'wordnet').exists():
        copytree((corpora_dir / 'wordnet' / 'dict').as_posix(),
                 (model_dir / 'wordnet').as_posix())
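A hedged invocation sketch for the setup script above; the paths are hypothetical placeholders that just have to match the layout the function checks for (a per-language folder with tag_map.json, optional gazetteer.json and lemma_rules.json, and an optional wordnet corpus):

if __name__ == '__main__':
    # Hypothetical paths; only the directory layout matters.
    main('en', 'lang_data', 'corpora', 'models/en')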
Example #3
def test_lang_initialize(lang, capfd):
    """Test that languages can be initialized."""
    nlp = get_lang_class(lang)()
    # Check for stray print statements (see #3342)
    doc = nlp("test")  # noqa: F841
    captured = capfd.readouterr()
    assert not captured.out
Example #4
def load_default_model_sentencizer(lang):
    """ Load a generic spaCy model and add the sentencizer for sentence tokenization"""
    loading_start = time.time()
    lang_class = get_lang_class(lang)
    nlp = lang_class()
    nlp.add_pipe(nlp.create_pipe('sentencizer'))
    loading_end = time.time()
    loading_time = loading_end - loading_start
    return nlp, loading_time, lang + "_default_" + 'sentencizer'
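A short usage sketch, assuming the blank pipeline plus sentencizer is enough for sentence segmentation (spaCy v2-style create_pipe API, as used above):

nlp, load_time, name = load_default_model_sentencizer('en')
doc = nlp('This is one sentence. This is another.')
print(name, round(load_time, 3), [sent.text for sent in doc.sents])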
Example #5
def pipeline(merge_patterns=None, terminal_patterns=None):
    def rules_matcher(doc):
        spans = []
        for match_id, start, end in matcher(doc):
            # The hard-coded hashes presumably correspond to the 'pattern' and
            # 'sentence_terminal' match ids registered with the matcher below.
            if match_id == 15329811787164753587:
                spans.append(doc[start:end])
            elif match_id == 7038656598907266222:
                for token in doc[start:end]:
                    if token.sent_start:
                        token.sent_start = False
        if spans:
            for span in spans:
                # try:
                #     if span.text not in EXCLUSIONS:
                #         span.merge()
                # except IndexError as error:
                #     # print(doc)
                #     # the error occurs when there is more than one hyphen within the span; basically it can be ignored
                span.merge()
        return doc

    CYRILLIC_UPPER = r'[\p{Lu}&&\p{Cyrillic}]'

    Language = get_lang_class('ru')
    Language.Defaults.infixes += ('«»',)
    Language.Defaults.infixes += ('-',)
    Language.Defaults.infixes += (r'"\/',)
    Language.Defaults.infixes += (r'(?<=[{au}])\.(?=\w+)'.format(au=CYRILLIC_UPPER),)
    # Token.set_extension('is_adjective', default=False, force=True)
    nlp = Language()
    matcher = Matcher(nlp.vocab)
    pattern = nlp.vocab.strings['pattern']
    sentence_terminal = nlp.vocab.strings['sentence_terminal']
    if merge_patterns:
        matcher.add(pattern, None, *merge_patterns)
    if terminal_patterns:
        matcher.add(sentence_terminal, None, *terminal_patterns)
    # nlp.add_pipe(match_adjective, name='match_adjective', last=True)
    nlp.add_pipe(detect_sentence_boundaries, name='detect_sentence_boundaries', first=True)
    nlp.add_pipe(rules_matcher, name='rules_matcher', after='detect_sentence_boundaries')

    for case in HYPHEN_SPICIAL_CASES:
        nlp.tokenizer.add_special_case(case, [{'ORTH': case}])

    for case in DOT_SPECIAL_CASES:
        nlp.tokenizer.add_special_case(case, [{'ORTH': case}])

    nlp.tokenizer.add_special_case('--', [{'ORTH': '—'}])
    nlp.tokenizer.add_special_case('  ', [{'ORTH': ' '}])

    return nlp
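A sketch of how the merge and terminal patterns might be supplied; they are ordinary spaCy Matcher token patterns, and the concrete rules below are illustrative assumptions rather than the patterns shipped with the original tokenizer package:

# Hypothetical patterns: merge hyphenated compounds back into one token and
# stop the abbreviation 'т. е.' from terminating a sentence.
merge_patterns = [
    [{'IS_ALPHA': True}, {'TEXT': '-'}, {'IS_ALPHA': True}],
]
terminal_patterns = [
    [{'TEXT': 'т'}, {'TEXT': '.'}, {'TEXT': 'е'}, {'TEXT': '.'}],
]
nlp = pipeline(merge_patterns=merge_patterns, terminal_patterns=terminal_patterns)
doc = nlp('Научно-технический прогресс не остановить.')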
Example #6
def ru_tokenizer():
    pytest.importorskip("pymorphy2")
    return get_lang_class("ru").Defaults.create_tokenizer()
Example #7
def pt_tokenizer():
    return get_lang_class("pt")().tokenizer
Example #8
def sr_tokenizer():
    return get_lang_class("sr")().tokenizer
Example #9
def pl_tokenizer():
    return get_lang_class("pl")().tokenizer
Example #10
def de_vocab():
    return get_lang_class("de")().vocab
Example #11
def de_tokenizer():
    return get_lang_class("de").Defaults.create_tokenizer()
Example #12
def fr_tokenizer():
    return get_lang_class("fr").Defaults.create_tokenizer()
Example #13
def tt_tokenizer():
    return get_lang_class("tt")().tokenizer
Example #14
def ky_tokenizer():
    return get_lang_class("ky")().tokenizer
Example #15
def tokenizer():
    return get_lang_class("xx")().tokenizer
Example #16
def tr_tokenizer():
    return get_lang_class("tr")().tokenizer
Example #17
def ti_tokenizer():
    return get_lang_class("ti")().tokenizer
Example #18
def th_tokenizer():
    pytest.importorskip("pythainlp")
    return get_lang_class("th")().tokenizer
Example #19
def sv_tokenizer():
    return get_lang_class("sv")().tokenizer
Example #20
def th_tokenizer():
    pytest.importorskip("pythainlp")
    return get_lang_class("th").Defaults.create_tokenizer()
Example #21
def uk_tokenizer():
    pytest.importorskip("pymorphy2")
    return get_lang_class("uk")().tokenizer
Example #22
def uk_tokenizer():
    pytest.importorskip("pymorphy2")
    pytest.importorskip("pymorphy2.lang")
    return get_lang_class("uk").Defaults.create_tokenizer()
Example #23
def uk_lemmatizer():
    pytest.importorskip("pymorphy2")
    pytest.importorskip("pymorphy2_dicts_uk")
    return get_lang_class("uk")().add_pipe("lemmatizer")
Example #24
def en_parser(en_vocab):
    nlp = get_lang_class("en")(en_vocab)
    return nlp.create_pipe("parser")
Example #25
def load_tokenizer(b):
    tok = get_lang_class("en").Defaults.create_tokenizer()
    tok.from_bytes(b)
    return tok
Example #26
def ru_lemmatizer():
    pytest.importorskip("pymorphy2")
    return get_lang_class("ru")().add_pipe("lemmatizer")
Example #27
def id_tokenizer():
    return get_lang_class("id").Defaults.create_tokenizer()
Example #28
def de_tokenizer():
    return get_lang_class("de")().tokenizer
Example #29
def nb_tokenizer():
    return get_lang_class("nb").Defaults.create_tokenizer()
Example #30
def ro_tokenizer():
    return get_lang_class("ro")().tokenizer
Example #31
def pl_tokenizer():
    return get_lang_class("pl").Defaults.create_tokenizer()
Example #32
def ur_tokenizer():
    return get_lang_class("ur")().tokenizer
Example #33
def sa_tokenizer():
    return get_lang_class("sa")().tokenizer
Example #34
def hu_tokenizer():
    return get_lang_class("hu").Defaults.create_tokenizer()
Example #35
def vi_tokenizer():
    pytest.importorskip("pyvi")
    return get_lang_class("vi")().tokenizer
Example #36
def ja_tokenizer():
    pytest.importorskip("MeCab")
    return get_lang_class("ja").Defaults.create_tokenizer()
Example #37
def yo_tokenizer():
    return get_lang_class("yo")().tokenizer
Example #38
def nl_lemmatizer(scope="session"):
    return get_lang_class("nl").Defaults.create_lemmatizer()
Example #39
def zh_tokenizer_char():
    nlp = get_lang_class("zh")()
    return nlp.tokenizer
Example #40
def ro_tokenizer():
    return get_lang_class("ro").Defaults.create_tokenizer()
Example #41
def am_tokenizer():
    return get_lang_class("am")().tokenizer
Example #42
def sv_tokenizer():
    return get_lang_class("sv").Defaults.create_tokenizer()
Example #43
def hy_tokenizer():
    return get_lang_class("hy")().tokenizer
Example #44
def tt_tokenizer():
    return get_lang_class("tt").Defaults.create_tokenizer()
Example #45
def ar_tokenizer():
    return get_lang_class("ar")().tokenizer
Example #46
def bn_tokenizer():
    return get_lang_class("bn").Defaults.create_tokenizer()
Example #47
def bg_tokenizer():
    return get_lang_class("bg")().tokenizer
Example #48
def en_vocab():
    return get_lang_class("en").Defaults.create_vocab()
Example #49
def bn_tokenizer():
    return get_lang_class("bn")().tokenizer
Example #50
def es_tokenizer():
    return get_lang_class("es").Defaults.create_tokenizer()
Example #51
def ca_tokenizer():
    return get_lang_class("ca")().tokenizer
Example #52
def ga_tokenizer():
    return get_lang_class("ga").Defaults.create_tokenizer()
Example #53
def cs_tokenizer():
    return get_lang_class("cs")().tokenizer
Example #54
def da_tokenizer():
    return get_lang_class("da")().tokenizer
Example #55
def nl_tokenizer():
    return get_lang_class("nl")().tokenizer