        gap = '*' + replace_noun_with_question(e.title, modifiers.GAP_VALUE.strip('*')) + '*'
        ret.text = re.sub(abbreviation_re, gap, ret.text, flags=re.IGNORECASE)
        return ret
    return apply
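
# The fragment above is the tail of a custom modifier: a factory returning an
# apply(e) callable that copies the explanation and rewrites one of its fields.
# A minimal sketch of that pattern, for orientation only; the name
# sketch_str_replace and the decorator-free shape are assumptions, not the
# project's actual modifiers implementation.
import copy


def sketch_str_replace(old, new, *, target_field='text'):
    def apply(e):
        ret = copy.copy(e)
        setattr(ret, target_field, getattr(e, target_field).replace(old, new))
        return ret
    return apply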

definitions_mods = [

    # there is even a board on trello for almost all of these modifiers: trello.com/b/IEP8jusD
    modifiers.strip(' -3', target_field='title'),
    modifiers.translate(
        '?і3',  # the second symbol is not Latin i but Cyrillic і (\xd1\x96 in UTF-8); see the check after this list
        'ЁЁЗ',
        '",.+:124567890',
        target_field='title'
    ),
    modifiers.str_replace('||', 'П', target_field='title'),
    modifiers.re_search_ban(r'[^-ЁёА-Яа-я]', target_field='title'),
    modifiers.normalize_title(),
    # Text OCR problems

    # modifiers.translate(
    #     '?~[]{}',
    #     'ё-()()',
    #     '|*o'  # the o is latin
    # ),
    # modifiers.re_replace(r'знай\.', 'знач.'),
    # modifiers.str_replace('3а', 'За'),
    # modifiers.re_replace(r'(?<={alph})\d+(-\d+)?'.format(alph=modifiers.ALPH_RE), ''),
    modifiers.str_replace(r' :', ':'),
    modifiers.re_replace(r'([,:])(?=[^ 0-9])', r'\1 '),
]
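
# Quick check of the lookalike letters handled by the translate() call above:
# the Cyrillic 'і' (U+0456) is a distinct character from the Latin 'i' and
# encodes as b'\xd1\x96' in UTF-8.
assert 'і' != 'i'
assert 'і'.encode('utf-8') == b'\xd1\x96'
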
__author__ = 'moskupols'

import os

from hb_res.storage import get_storage, FileExplanationStorage
from preparation import modifiers
from preparation.resources.Resource import gen_resource, applied_modifiers

CUR_DIR = os.path.dirname(os.path.abspath(__file__))
INPUT_PATH = os.path.join(CUR_DIR, 'Selected.asset')
OUTPUT_PATH = os.path.join(CUR_DIR, 'SelectedAfterMissedModifiers.asset')

missed_modifiers = [
    modifiers.str_replace('p', 'р'),  # Latin 'p' -> Cyrillic 'р'
    modifiers.re_replace(r'\s+', ' '),
    modifiers.re_replace(r'([,:])(?=[^ ])', r'\1 '),
    modifiers.str_replace(r' :', ':'),
    modifiers.str_replace(r' ,', ','),
]

with FileExplanationStorage(INPUT_PATH) as inp:
    PatchedResource = gen_resource('SelectedResource',
                                   missed_modifiers)(inp.entries)
    with FileExplanationStorage(OUTPUT_PATH) as outp:
        outp.clear()
        for e in applied_modifiers(PatchedResource()):
            outp.add_entry(e)
import itertools

from preparation.resources.Resource import gen_resource
from preparation import modifiers
from hb_res.explanations.Explanation import Explanation


sample_modifiers = (
    modifiers.str_replace('?', 'ё'),
    modifiers.shadow_cognates(length_threshold=3, sep_re='(\\s|!|\\.)+'),
    modifiers.re_replace('\\.', ' '),
    modifiers.normalize_title(),
    modifiers.calculate_key(),
)


@gen_resource('SampleResource', sample_modifiers)
def sample_parser():
    raw_expls = itertools.starmap(Explanation, (
        ('ПОРА', 'Однажды.в.студ?ную.зимнюю.пору.я.из.лесу.вышел.'),
        ('ИВАН', 'Один день Ивана Денисовича'),
        ('унылая', 'Унылая пора! очей очарованье!')
    ))
    return raw_expls
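

# A hedged usage sketch (not part of the original module): assuming the
# decorated sample_parser plays the same role as PatchedResource in the
# patching script above, the modified explanations can be iterated like this.
if __name__ == '__main__':
    from preparation.resources.Resource import applied_modifiers

    for e in applied_modifiers(sample_parser()):
        print(e.title, e.text)
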
__author__ = 'shkiper'

# noinspection PyProtectedMember
from preparation.resources.crosswords import _raw_data
from preparation.resources.Resource import gen_resource
from hb_res.explanations import Explanation
from preparation import modifiers

crosswords_mods = [
    modifiers.re_replace('p', 'р'),  # Latin 'p' -> Cyrillic 'р'
    modifiers.strip(target_field='title'),
    modifiers.strip(),
    modifiers.normalize_title(),
    modifiers.re_replace(r'\s+', ' '),
    modifiers.re_replace(r'([,:])(?=[^ ])', r'\1 '),
    modifiers.str_replace(r' :', ':'),
    modifiers.str_replace(r' ,', ','),
    modifiers.shadow_cognates(8, r'\W+', with_pronoun=True),
    modifiers.remove_to_much_gap_percentage(r'\W+', r'\*(\w+)[?]?\*', 0.5),
    modifiers.calculate_key()
]


@gen_resource('CrosswordsResource', crosswords_mods)
def read_data():
    with open(_raw_data, 'r', encoding='utf-8') as source:
        for line in source:
            tokens = line.split('$')
            word_and_text = tokens[1], tokens[2]
            yield Explanation(*word_and_text)
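
# Illustration of the '$'-delimited line format read_data() expects; the sample
# line is invented, and only the positions of the word (tokens[1]) and the clue
# (tokens[2]) follow from the parser above.
_sample_tokens = '0$СЛОВО$Подсказка к слову$'.split('$')
assert (_sample_tokens[1], _sample_tokens[2]) == ('СЛОВО', 'Подсказка к слову')
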
Example #5
from preparation import modifiers
from preparation.resources.Resource import gen_resource
from hb_res.explanations import Explanation
from ._synonyms_quality import choose_best_synonyms

# noinspection PyProtectedMember
from preparation.resources.synonyms import _raw_data  # assumed, by analogy with the crosswords resource

synonyms_mods = [
    modifiers.normalize_title(0.01, True),

    modifiers.re_replace('[^#]+? [^#]+?(#|$)', ''),  # remove multi-word synonyms (containing spaces); illustrated after this list
    modifiers.re_fullmatch_ban(''),

    modifiers.delete_cognates(4, '#'),
    modifiers.choose_normal_words_in_explanation('#'),

    choose_best_synonyms(5, '#'),
    modifiers.calculate_prior_frequency_rate('#'),

    modifiers.str_replace('#', ', '),
    modifiers.calculate_key()
]
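
# Illustration of the multi-word filter referenced above: the pattern drops any
# '#'-separated entry that contains a space. The sample synonym string is
# invented; only the regex and the '#' separator come from the modifier list.
import re

assert re.sub('[^#]+? [^#]+?(#|$)', '',
              'быстрый#скорый на руку#шустрый') == 'быстрый#шустрый'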


@gen_resource('SynonymsResource', synonyms_mods)
def read_data():
    explanations = set()
    with open(_raw_data, 'r', encoding='utf-8') as source:
        for line in source:
            [title, text] = line.split('@')
            explanations.add((title, text))
    for explanation in sorted(explanations):
        yield Explanation(explanation[0], explanation[1])
        ret.text = text
        return ret
    return apply

antonyms_mods = [
    modifiers.normalize_title(0.01, True),

    modifiers.re_replace('[^#]+ [^#]+(#|$)', ''),  # remove multi-word antonyms (containing spaces)
    modifiers.re_fullmatch_ban(''),

    modifiers.delete_cognates(6, '#'),
    modifiers.choose_normal_words_in_explanation('#'),

    modifiers.calculate_prior_frequency_rate('#'),

    modifiers.str_replace('#', ', ', target_field='text'),

    add_antonyms_common_text(),

    modifiers.calculate_key()
]


@gen_resource('AntonymsResource', antonyms_mods)
def read_data():
    explanations = set()
    with open(_raw_data, 'r', encoding='utf-8') as source:
        for line in source:
            [title, text] = line.split('@')
            explanations.add((title, text))
    for explanation in sorted(explanations):
        yield Explanation(explanation[0], explanation[1])
Example #7
        ret = copy.copy(e)
        ret.text = text
        return ret

    return apply


antonyms_mods = [
    modifiers.normalize_title(0.01, True),
    modifiers.re_replace('[^#]+ [^#]+(#|$)',
                         ''),  # remove multi-word antonyms (containing spaces)
    modifiers.re_fullmatch_ban(''),
    modifiers.delete_cognates(6, '#'),
    modifiers.choose_normal_words_in_explanation('#'),
    modifiers.calculate_prior_frequency_rate('#'),
    modifiers.str_replace('#', ', ', target_field='text'),
    add_antonyms_common_text(),
    modifiers.calculate_key()
]


@gen_resource('AntonymsResource', antonyms_mods)
def read_data():
    explanations = set()
    with open(_raw_data, 'r', encoding='utf-8') as source:
        for line in source:
            [title, text] = line.split('@')
            explanations.add((title, text))
    for explanation in sorted(explanations):
        yield Explanation(explanation[0], explanation[1])
import itertools

from preparation.resources.Resource import gen_resource
from preparation import modifiers
from hb_res.explanations.Explanation import Explanation

sample_modifiers = (
    modifiers.str_replace('?', 'ё'),
    modifiers.shadow_cognates(length_threshold=3, sep_re='(\\s|!|\\.)+'),
    modifiers.re_replace('\\.', ' '),
    modifiers.normalize_title(),
    modifiers.calculate_key(),
)


@gen_resource('SampleResource', sample_modifiers)
def sample_parser():
    raw_expls = itertools.starmap(
        Explanation,
        (('ПОРА', 'Однажды.в.студ?ную.зимнюю.пору.я.из.лесу.вышел.'),
         ('ИВАН', 'Один день Ивана Денисовича'),
         ('унылая', 'Унылая пора! очей очарованье!')))
    return raw_expls