def get_all_collocation(lines, word):
    """
    Function for finding all collocations of word and any word after it.

    :param lines: list of str
        Lines for processing.
    :param word: str
        Word for searching.
    :return: List of all valid collocations.
    """
    if not isinstance(lines, list) or not isinstance(word, str):
        raise TypeError
    gr = rule(normalized(word),
              and_(not_(yargy_type('PUNCT')), not_(yargy_type('OTHER'))))
    result_list = []
    for line in lines:
        if not isinstance(line, str):
            raise TypeError
        for match in Parser(gr).findall(line):
            result_list.append(' '.join(
                [Normalizer.normalise(token.value) for token in match.tokens]))
    return result_list
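# A minimal usage sketch for get_all_collocation; the sample lines are made
# up, and the exact output depends on the Normalizer helper used above:
#
#   lines = ['Мама мыла раму', 'Мама мыла пол']
#   get_all_collocation(lines, 'мыть')
#   # -> roughly ['мыть рама', 'мыть пол'] after normalisation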
def yargy_smart_home(msg):
    Do = fact('Entity', ['action', 'object', 'place'])
    Actions = dictionary({'Включи', 'Отключи', 'Выключи'})
    Objects = dictionary(
        {'Лампочку', 'Свет', 'Розетку', 'Видеокамеру', 'Камеру'})
    ObjectsList = or_(
        rule(Objects),
        rule(Objects, Objects),
    )
    Prep = dictionary({'в', 'на'})
    Place = dictionary({
        'Гостевой', 'Ванной', 'спальной', 'спальне', 'холле', 'коридоре',
        'кухне'
    })
    Room = {'комната'}
    ActionPhrase = or_(
        rule(Actions.interpretation(Do.action.normalized()),
             Objects.interpretation(Do.object.normalized()),
             Prep.optional(),
             Place.interpretation(Do.place.normalized()),
             rule(normalized('комната')).optional()),
        rule(Actions.interpretation(Do.action.normalized()),
             Objects.interpretation(Do.object.normalized()),
             Prep.optional(),
             Place.interpretation(Do.place.normalized())),
        rule(Prep.optional(),
             Place.interpretation(Do.place.normalized()),
             rule(normalized('комната')).optional(),
             Actions.interpretation(Do.action.normalized()),
             Objects.interpretation(Do.object.normalized()))
    ).interpretation(Do)
    res = []
    parser = Parser(ActionPhrase)
    for match in parser.findall(msg):
        res.append({
            'Действие': match.fact.action,
            'Объект': match.fact.object,
            'Место': match.fact.place,
        })
    return res
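# A quick usage sketch for yargy_smart_home; the command below is a made-up
# example, and the normalized values depend on pymorphy2's normal forms:
#
#   yargy_smart_home('Включи свет в спальне')
#   # -> roughly [{'Действие': 'включить', 'Объект': 'свет', 'Место': 'спальня'}]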
def yargy_get_genre(msg):
    Genre = fact('Genre', ['genre'])
    GENRES = {
        'ужасы', 'ужастики', 'мелодрама', 'комедия', 'боевик', 'триллер',
        'мультик', 'мультфильм', 'драма'
    }
    GENRES_NAME = dictionary(GENRES)
    GENRES_WORDS = or_(rule(normalized('жанр')), rule(normalized('раздел')))
    GENRE_PHRASE = or_(
        rule(GENRES_NAME, GENRES_WORDS.optional()),
        rule(GENRES_WORDS.optional(), GENRES_NAME)
    ).interpretation(Genre.genre.inflected()).interpretation(Genre)
    res = []
    parser = Parser(GENRE_PHRASE)
    for match in parser.findall(msg):
        res.append(match.fact.genre)
    return res
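# Usage sketch for yargy_get_genre (hypothetical message);
# Genre.genre.inflected() brings the matched genre back to its dictionary form:
#
#   yargy_get_genre('включи какую-нибудь комедию')
#   # -> roughly ['комедия']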
def test_predicate():
    tokenizer = MorphTokenizer()
    predicate = or_(normalized('московским'),
                    and_(gram('NOUN'), not_(gram('femn'))))
    predicate = predicate.activate(tokenizer)

    tokens = tokenizer('московский зоопарк')
    values = [predicate(_) for _ in tokens]
    assert values == [True, True]

    tokens = tokenizer('московская погода')
    values = [predicate(_) for _ in tokens]
    assert values == [True, False]
def yargy_get_channel(msg):
    Channel = fact('Channel', ['name'])
    CHANNELS = {
        'Первый', 'Россия', 'ТВЦ', 'НТВ', 'ТНТ', 'СТС', 'Культура', 'Дождь',
        'Спас'
    }
    CHANNELS_NAME = dictionary(CHANNELS)
    CHANNEL_WORDS = or_(rule(normalized('канал')),
                        rule(normalized('программа')))
    CHANNEL_PHRASE = or_(
        rule(CHANNEL_WORDS, CHANNELS_NAME),
        rule(CHANNELS_NAME, CHANNEL_WORDS.optional())
    ).interpretation(Channel.name.inflected()).interpretation(Channel)
    res = []
    parser = Parser(CHANNEL_PHRASE)
    for match in parser.findall(msg):
        for channel in CHANNELS:
            if channel.lower() in match.fact.name:
                res.append(channel)
    return res
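# Usage sketch for yargy_get_channel (hypothetical message); the match is
# inflected to its normal form and then mapped back to the canonical name:
#
#   yargy_get_channel('переключи на канал НТВ')
#   # -> roughly ['НТВ']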
def _abbreviate(word: str, abbrs: List[str], opt=False):
    abbrs, dashed = partition(lambda abbr: '-' in abbr, abbrs)
    dashed = map(
        lambda a: rule(*map(caseless, intersperse('-', a.split('-')))),
        dashed)
    original_word = rule(normalized(word))
    dashed_sequence = rule(or_(*dashed))
    abbr_with_dot = rule(
        or_(*map(caseless, abbrs)),
        eq('.').optional(),
    )
    result = or_(original_word, dashed_sequence, abbr_with_dot) \
        .interpretation(interpretation.const(word))
    return result.optional() if opt else result
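# A hedged example of how _abbreviate might be called; the word and its
# abbreviations here are invented for illustration:
#
#   STREET = _abbreviate('улица', ['ул'])          # matches "улица", "ул", "ул."
#   PROSPEKT = _abbreviate('проспект', ['пр-т'])   # the dashed form is split on '-'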
def test_predicate():
    tokenizer = MorphTokenizer()
    predicate = or_(
        normalized('московским'),
        and_(
            gram('NOUN'),
            not_(gram('femn'))
        )
    )
    context = Context(tokenizer)
    predicate = predicate.activate(context)

    tokens = tokenizer('московский зоопарк')
    values = [predicate(_) for _ in tokens]
    assert values == [True, True]

    tokens = tokenizer('московская погода')
    values = [predicate(_) for _ in tokens]
    assert values == [True, False]
def make_rule_from_station(title: str) -> Rule:
    title = title.replace('1', '').replace('2', '').lower().strip()
    phrase = []
    for token in title.split(' '):
        word = Abbrs.get(token) if Abbrs.is_abbr(token) \
            else normalized(token).interpretation(meaning.const(token))
        phrase.append(word.interpretation(Array.element))
    phrase = rule(*phrase).means(Array).interpretation(
        meaning.custom(lambda p: Restore.get(' '.join(p.element)))).means(
            StationTitle.value)
    if Synonyms.has(title):
        synonym = Synonyms.get(title).interpretation(
            meaning.custom(lambda p: Restore.get(p))).means(StationTitle.value)
        return or_(synonym, phrase)
    return phrase
def req_predicate(word: str = "?", predicate_type: str = "глаг"):
    # add predicate_type handling
    if predicate_type == "глаг":
        predicate = y.or_(yp.gram("VERB"), yp.gram("INFN"))
    elif predicate_type == "сущ":
        predicate = y.or_(yp.gram("INFN"), yp.gram("NOUN"))
    elif predicate_type == "любой":
        predicate = y.or_(yp.gram("VERB"), yp.gram("INFN"), yp.gram("NOUN"))
    else:
        raise ValueError("predicate_type must be 'глаг', 'сущ' or 'любой'")
    if word != "?":
        if "|" not in word:
            # single-word scope
            predicate = y.and_(yp.normalized(word), predicate)
        else:
            predicate_words = word.split("|")
            scope_rule = list(map(yp.normalized, predicate_words))
            scope_rule = y.or_(*scope_rule)
            predicate = y.and_(scope_rule, predicate)
    return predicate
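# Usage sketch (the words are hypothetical), assuming the y/yp aliases for
# yargy and yargy.predicates used above:
#
#   pred = req_predicate('писать|читать', 'глаг')   # verb forms of either word
#   PRED_RULE = y.rule(pred)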
# coding: utf-8
from __future__ import unicode_literals

from yargy import (rule, and_, or_, fact)
from yargy.predicates import (eq, in_, gram, normalized, caseless)

Money = fact('Money', ['amount', 'currency'])

EURO = normalized('евро')
DOLLARS = or_(normalized('доллар'), eq('$'))
RUBLES = or_(rule(normalized('рубль')),
             rule(or_(caseless('руб'), caseless('р')), eq('.').optional()))
CURRENCY = or_(rule(EURO), rule(DOLLARS),
               RUBLES).interpretation(Money.currency)

INT = gram('INT')
AMOUNT_ = or_(
    rule(INT),
    rule(INT, INT),
    rule(INT, INT, INT),
    rule(INT, '.', INT),
    rule(INT, '.', INT, '.', INT),
)
FRACTION_AMOUNT = rule(AMOUNT_, in_({',', '.'}), INT)
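# A minimal parsing sketch over the fragment above: running just the CURRENCY
# rule on a sample phrase (the phrase itself is hypothetical):
#
#   from yargy import Parser
#   parser = Parser(CURRENCY)
#   for match in parser.findall('около 100 руб. за штуку'):
#       print([token.value for token in match.tokens])  # ['руб', '.']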
    'июль': 7,
    'август': 8,
    'сентябрь': 9,
    'октябрь': 10,
    'ноябрь': 11,
    'декабрь': 12,
}

MONTH_NAME = dictionary(MONTHS).interpretation(
    Date.month.normalized().custom(MONTHS.__getitem__))
MONTH = and_(gte(1), lte(12)).interpretation(Date.month.custom(int))
DAY = and_(gte(1), lte(31)).interpretation(Date.day.custom(int))

YEAR_WORD = or_(rule('г', eq('.').optional()), rule(normalized('год')))
YEAR = and_(gte(1000), lte(2100)).interpretation(Date.year.custom(int))
YEAR_SHORT = and_(gte(0), lte(99)).interpretation(
    Date.year.custom(lambda _: 1900 + int(_)))
ERA_YEAR = and_(gte(1), lte(100000)).interpretation(Date.year.custom(int))
ERA_WORD = rule(
    eq('до'),
    or_(rule('н', eq('.'), 'э', eq('.').optional()),
        rule(normalized('наша'), normalized('эра')))
).interpretation(Date.current_era.const(False))
########
#
#   CURRENCY
#
##########

# EURO = or_(
#     normalized('евро'),
#     # in_(['€', 'EUR'])
#     eq('€'),
#     # eq('EUR')
# ).interpretation(
#     const(dsl.EURO)
# )
# EURO = caseless_pipeline(['евро', '€', 'eur'])  # .interpretation(const(dsl.EURO))
EURO = or_(normalized('евро'), eq('€'),
           eq('EUR')).interpretation(const(dsl.EURO))
DOLLARS = or_(normalized('доллар'), eq('$'),
              eq('USD')).interpretation(const(dsl.DOLLARS))
RUBLES = or_(
    rule(normalized('рубль')),
    rule(or_(caseless('руб'), caseless('р'), eq('₽')),
         DOT.optional())).interpretation(const(dsl.RUBLES))

CURRENCY = or_(EURO, DOLLARS, RUBLES).interpretation(Money.currency)

# TODO: kopecks and cents could also be dropped for speed
KOPEIKA = or_(rule(normalized('копейка')),
              rule(or_(caseless('коп'), caseless('к')), DOT.optional()))
    attribute('integer_max', -1),
    attribute('currency', '-'),
    attribute('multiplier', -1),
    attribute('period', '-')
])

DOT = eq('.')
INT = type('INT')

########
#
#   CURRENCY
#
##########

EURO = or_(normalized('евро'), normalized('euro'), eq('€'),
           caseless('EUR')).interpretation(const('EUR'))
DOLLARS = or_(normalized('доллар'), normalized('дол'), normalized('dollar'),
              eq('$'), caseless('USD')).interpretation(const('USD'))
RUBLES = or_(
    rule(normalized('ruble')),
    rule(normalized('рубль')),
    rule(normalized('рубл')),
    rule(
        or_(
            caseless('руб'),
            caseless('rub'),
            # caseless('rur'),
            caseless('р'),
    gte(1),
    lte(12)
).interpretation(
    Date.month.custom(int)
)
DAY = and_(
    gte(1),
    lte(31)
).interpretation(
    Date.day.custom(int)
)
YEAR_WORD = or_(
    rule('г', eq('.').optional()),
    rule(normalized('год'))
)
YEAR = and_(
    gte(1000),
    lte(2100)
).interpretation(
    Date.year.custom(int)
)
YEAR_SHORT = and_(
    gte(0),
    lte(99)
).interpretation(
    Date.year.custom(lambda _: 1900 + int(_))
)
from yargy import or_, rule
from yargy.interpretation import attribute, fact
import yargy.interpretation as meaning
from yargy.predicates import caseless, eq, in_, in_caseless, normalized

from .common import Array
from .literal import LIST_OF_NUMERALS
from .station_title import STATION_TITLE

Station = fact('Station', ['name', attribute('num', default=[])])

STATION_WORD = or_(
    rule(caseless('ст'), '.'),
    rule(normalized('станция')),
)
METRO_WORD = or_(
    rule(caseless('м'), '.'),
    rule(normalized('метро')),
)

__quotes = "„“”‚‘’'\""
LEFT_QUOTE = in_("«" + __quotes)
RIGHT_QUOTE = in_("»" + __quotes)

STATION = rule(
    STATION_WORD.optional(),
    METRO_WORD.optional(),
    LEFT_QUOTE.optional(),
    STATION_TITLE.interpretation(
        meaning.custom(lambda p: p.value)).interpretation(Station.name),
    'апрель': 4,
    'май': 5,
    'июнь': 6,
    'июль': 7,
    'август': 8,
    'сентябрь': 9,
    'октябрь': 10,
    'ноябрь': 11,
    'декабрь': 12,
}

MONTH_NAME = dictionary(MONTHS).interpretation(Date.month.normalized())
MONTH = and_(gte(1), lte(12)).interpretation(Date.month)
DAY = and_(gte(1), lte(31)).interpretation(Date.day)

YEAR_WORD = or_(rule('г', eq('.').optional()), rule(normalized('год')))
YEAR = and_(gte(1900), lte(2100)).interpretation(Date.year)
YEAR_SHORT = and_(gte(0), lte(99)).interpretation(Date.year)

DATE = or_(
    rule(DAY, '.', MONTH, '.', or_(YEAR, YEAR_SHORT), YEAR_WORD.optional()),
    rule(YEAR, YEAR_WORD),
    rule(DAY, MONTH_NAME),
    rule(MONTH_NAME, YEAR, YEAR_WORD.optional()),
    rule(DAY, MONTH_NAME, YEAR, YEAR_WORD.optional()),
).interpretation(Date)
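# A quick check of the DATE rule above (the sample string is hypothetical;
# Date is the fact defined earlier in this module):
#
#   from yargy import Parser
#   parser = Parser(DATE)
#   for match in parser.findall('8 января 2014 г. и 01.02.15'):
#       print(match.fact)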
def extract(text):
    with open(os.path.join(os.getcwd(), 'list_diseases\\diseases'),
              encoding='UTF-8') as f:
        diseases = f.read().split('\n')
    text = text.replace('\ufeff', '')
    text = text.replace('\n', ' \n ')
    text = text.replace('\\', ' ')

    symptoms = ['Дата рождения', 'Дата осмотра', 'Дата заболевания',
                'Возраст', 'Болен дней', 'Болен часов', 'Возраст в днях',
                'Время поступления', 'Время заболевания', 'рост', 'вес',
                'IMT', 'давление диаст', 'давление сист',
                'температура поступления', 'мах температура', 'Т-Ан01',
                'Т-Ан03', 'пол', 'др заболевания в анамнезе', 'кем направлен',
                'побочное действие лекартсв', 'аллергическая реакция',
                'озноб', 'слабость', 'вялость', 'головная боль',
                'нарушение сна', 'нарушение аппетита', 'ломота', 'тошнота',
                'нарушение сознания', 'Судороги', 'Парестезии', 'эритема',
                'с четкими границами', 'валик', 'боль', 'Гиперемия', 'Отек',
                'Лимфаденит', 'Лимфангит', 'квартира, дом',
                'контакт с зараженными', 'речная рыба',
                'провоцирущие факторы', 'предрасполагающие факторы',
                'кол-во сопут заболеваний', 'соц категория',
                'сопутствующий диагноз', 'основной диагноз',
                'контакт с зараженными', 'пищевой анамнез', 'раневые ворота',
                'аллергия на лекарства', 'клещ', 'географический анамнез',
                'вредные привычки', 'домашние животные', 'условия труда',
                'избыточное питание', 'ППТ', 'ЛПТ', 'бытовые условия',
                'питание', 'интоксикация', 'ЧСС']
    dict_symp = dict.fromkeys(symptoms)

    dates_lst = []
    DAY = and_(gte(1), lte(31))
    MONTH = and_(gte(1), lte(12))
    YEAR = and_(gte(1), lte(19))
    YEARFULL = and_(gte(1900), lte(2020))
    DATE = or_(
        rule(YEAR, '.', MONTH, '.', DAY),
        rule(DAY, '.', MONTH, '.', YEAR),
        rule(DAY, '.', MONTH, '.', YEARFULL),
        rule(DAY, '.', MONTH),
        rule(DAY, '.', MONTH, YEARFULL),
        rule(DAY, '.', MONTH, YEAR))
    parser = Parser(DATE)
    for match in parser.findall(text):
        dates_lst.append(''.join([_.value for _ in match.tokens]))
    if int(dates_lst[1][-2:]) - int(dates_lst[0][-2:]) < 0:
        dict_symp['Дата рождения'] = dates_lst[0]
        dict_symp['Дата осмотра'] = dates_lst[1]
        dict_symp['Дата заболевания'] = dates_lst[2]
    else:
        birth = None
        dict_symp['Дата осмотра'] = dates_lst[0]
        dict_symp['Дата заболевания'] = dates_lst[1]
    if len(dict_symp['Дата заболевания']) == 5:
        dict_symp['Дата заболевания'] += dict_symp['Дата осмотра'][
            dict_symp['Дата осмотра'].rfind('.'):]

    TYPE = morph_pipeline(['дней'])
    parser = Parser(TYPE)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0 and dict_symp['Дата заболевания'] is None:
        dict_symp['Дата заболевания'] = text[lst[0][0][0] - 20:
                                             lst[0][0][0] + 20]
        dict_symp['Дата заболевания'] = re.findall(
            r'\d+', dict_symp['Дата заболевания'])[0]
        dict_symp['Дата заболевания'] = str(
            int(dict_symp['Дата осмотра'][:2]) -
            int(dict_symp['Дата заболевания']))
        dict_symp['Дата заболевания'] = (dict_symp['Дата заболевания'] +
                                         dict_symp['Дата осмотра'][2:])

    age_lst = []
    AGE = and_(gte(0), lte(100))
    AGE_RULE = or_(rule("(", AGE, ")"), rule(gram('ADJF'), ",", AGE))
    parser = Parser(AGE_RULE)
    for match in parser.findall(text):
        s = ''.join([_.value for _ in match.tokens])
        age_lst.append((re.findall(r'\d+', s)[0]))
    if len(age_lst) > 0:
        dict_symp['Возраст'] = age_lst[-1]

    try:
        d1 = datetime.strptime(dict_symp['Дата осмотра'], '%d.%m.%Y')
    except:
        d1 = datetime.strptime(dict_symp['Дата осмотра'], '%d.%m.%y')
        d1 = d1.strftime('%d.%m.%Y')
        d1 = datetime.strptime(d1, '%d.%m.%Y')
    try:
        d2 = datetime.strptime(dict_symp['Дата заболевания'], '%d.%m.%Y')
    except:
        d2 = datetime.strptime(dict_symp['Дата заболевания'], '%d.%m.%y')
        d2 = d2.strftime('%d.%m.%Y')
        d2 = datetime.strptime(d2, '%d.%m.%Y')
    dict_symp['Болен дней'] = (d1 - d2).days
    dict_symp['Болен часов'] = (int(dict_symp['Болен дней']) - 1) * 24
    if dict_symp['Дата рождения'] is None:
        dict_symp['Возраст в днях'] = int(dict_symp['Возраст']) * 365
    else:
        d1 = datetime.strptime(dict_symp['Дата осмотра'], '%d.%m.%Y')
        d2 = datetime.strptime(dict_symp['Дата рождения'], '%d.%m.%Y')
        dict_symp['Возраст в днях'] = (d1 - d2).days

    time_lst = []
    HOURS = and_(gte(0), lte(24))
    MINUTES = and_(gte(0), lte(59))
    TIME = or_(rule(HOURS, ':', MINUTES),
               rule(not_(normalized('.')), HOURS, normalized('час')),)
    parser = Parser(TIME)
    for match in parser.findall(text):
        s = (''.join([_.value for _ in match.tokens]))
        s = s.replace('часов', ':00')
        s = s.replace('час', ':00')
        time_lst.append(s)
    if len(time_lst) > 0:
        dict_symp['Время поступления'] = time_lst[0]
        dict_symp['Время заболевания'] = time_lst[0]
    if len(time_lst) > 1:
        dict_symp['Время заболевания'] = time_lst[1]
        t1 = dict_symp['Время поступления']
        t2 = dict_symp['Время заболевания']
        delta = int(t1[:t1.find(':')]) + 24 - int(t2[:t2.find(':')])
        dict_symp['Болен часов'] = dict_symp['Болен часов'] + delta

    HEIGHT = and_(gte(50), lte(250))
    WEIGHT = and_(gte(10), lte(150))
    HEIGHT_RULE = or_(rule(normalized('рост'), '-', HEIGHT),
                      rule(normalized('рост'), ':', HEIGHT),
                      rule(normalized('рост'), HEIGHT))
    WEIGHT_RULE = or_(rule(normalized('вес'), '-', WEIGHT),
                      rule(normalized('вес'), ':', WEIGHT),
                      rule(normalized('вес'), WEIGHT))
    s = ''
    parser = Parser(HEIGHT_RULE)
    for match in parser.findall(text):
        s = (''.join([_.value for _ in match.tokens]))
        s = re.findall(r'\d+', s)[0]
    if s != '':
        dict_symp['рост'] = int(s)
    s = ''
    parser = Parser(WEIGHT_RULE)
    for match in parser.findall(text):
        s = (''.join([_.value for _ in match.tokens]))
        s = re.findall(r'\d+', s)[0]
    if s != '':
        dict_symp['вес'] = int(s)
    if (dict_symp['рост'] is not None) and (dict_symp['вес'] is not None):
        dict_symp['IMT'] = round(
            dict_symp['вес'] /
            (dict_symp['рост'] / 100 * dict_symp['рост'] / 100), 2)

    ADSIST = and_(gte(50), lte(250))
    ADDIAST = and_(gte(20), lte(200))
    PRES = or_(rule('АД', ADSIST, '/', ADDIAST),
               rule('АД', ADSIST, ADDIAST),
               rule('АД', ADSIST, ':', ADDIAST),
               rule('АД', '-', ADSIST, '/', ADDIAST),
               rule('А/Д', ADSIST, '/', ADDIAST),
               rule('А/Д', ADSIST, ADDIAST),
               rule('А/Д', ' ', ADSIST, '/', ADDIAST),
               rule(ADSIST, '/', ADDIAST))
    s = ''
    parser = Parser(PRES)
    for match in parser.findall(text):
        s = (''.join([_.value for _ in match.tokens]))
        s = re.findall(r'\d+', s)
    if len(s) > 1:
        dict_symp['давление сист'] = s[0]
        dict_symp['давление диаст'] = s[1]

    PULSE = and_(gte(40), lte(150))
    PRES = or_(rule('ЧСС', '-', PULSE),
               rule('ЧСС', PULSE),
               rule('ЧСС', '/', PULSE),
               rule('пульс', PULSE),)
    s = ''
    parser = Parser(PRES)
    for match in parser.findall(text):
        s = (''.join([_.value for _ in match.tokens]))
        s = re.findall(r'\d+', s)
    if len(s) > 0:
        dict_symp['ЧСС'] = s[0]

    status = text[text.find('Объективный статус'):
                  text.find('Объективный статус') +
                  text[text.find('Объективный статус') + 1:].find(' \n \n')]
    DEGREES = and_(gte(34), lte(42))
    SUBDEGREES = and_(gte(0), lte(9))
    TEMP = or_(rule(DEGREES, ',', SUBDEGREES),
               rule(DEGREES, '.', SUBDEGREES),
               rule(DEGREES))
    temp_lst = []
    parser = Parser(TEMP)
    for match in parser.findall(status):
        temp_lst.append(''.join([_.value for _ in match.tokens]))
    if len(temp_lst) > 0:
        dict_symp['температура поступления'] = temp_lst[0]
    temp_lst = []
    parser = Parser(TEMP)
    for match in parser.findall(text):
        temp_lst.append(''.join([_.value for _ in match.tokens]))
    if len(temp_lst) > 0:
        if dict_symp['температура поступления'] is None:
            dict_symp['температура поступления'] = temp_lst[0]
    dict_symp['мах температура'] = max(
        [float(i.replace(',', '.')) for i in temp_lst])
    if dict_symp['мах температура'] > 38:
        dict_symp['Т-Ан01'] = 1
    else:
        dict_symp['Т-Ан01'] = 0
    if dict_symp['мах температура'] > 40:
        dict_symp['Т-Ан03'] = 3
    elif dict_symp['мах температура'] > 39:
        dict_symp['Т-Ан03'] = 2
    elif dict_symp['мах температура'] > 38:
        dict_symp['Т-Ан03'] = 1
    else:
        dict_symp['Т-Ан03'] = 0

    sex_lst = []
    SEX_RULE = or_(rule(normalized('женский')), rule(normalized('мужской')))
    parser = Parser(SEX_RULE)
    for match in parser.findall(text):
        sex_lst.append(''.join([_.value for _ in match.tokens]))
    dict_symp['пол'] = sex_lst[0]
    dict_symp['пол'] = dict_symp['пол'].lower().replace('женский', '2')
    dict_symp['пол'] = dict_symp['пол'].lower().replace('мужской', '1')

    TYPE = morph_pipeline(diseases[:-1])
    anamnez = text[text.find('Анамнез'):
                   text.find('Анамнез') +
                   text[text.find('Анамнез') + 1:].rfind('Анамнез')]
    anamnez = anamnez.replace('туберкулез', ' ')
    anamnez = anamnez.replace('туберкулёз', ' ')
    family = anamnez[anamnez.find('Семейный'):anamnez.find('Семейный') + 60]
    anamnez = anamnez.replace(family, ' ')
    anamnez = anamnez.replace('описторхоз', ' ')
    dis_lst = []
    parser = Parser(TYPE)
    for match in parser.findall(anamnez):
        dis_lst.append(' '.join([_.value for _ in match.tokens]))

    op_rule = or_(rule(normalized('описторхоз'), not_(normalized('не'))))
    parser = Parser(op_rule)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        dis_lst.append(' описторхоз')

    tub_rule = rule(normalized('туберкулез'), not_(normalized('отрицает')))
    parser = Parser(tub_rule)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        dis_lst.append(' туберкулез')
    dict_symp['др заболевания в анамнезе'] = ', '.join(dis_lst)
    dict_symp['др заболевания в анамнезе'] = morph.parse(
        dict_symp['др заболевания в анамнезе'])[0].normal_form

    TYPE = morph_pipeline(['Поликлиника', "скорая помощь", "ск/помощь",
                           'СМП', "обратился"])
    napr = None
    napr_lst = []
    parser = Parser(TYPE)
    for match in parser.findall(text):
        napr_lst.append(' '.join([_.value for _ in match.tokens]))
    if len(napr_lst) > 0:
        napr = napr_lst[-1]
        napr = morph.parse(napr)[0].normal_form
    if napr == "обратиться":
        dict_symp['кем направлен'] = 3
    elif napr in ("скорая помощь", "ск/помощь", 'смп', "ск / помощь",
                  "скорой помощь", "скорую помощь"):
        dict_symp['кем направлен'] = 1
    elif napr == "поликлиника":
        dict_symp['кем направлен'] = 2

    ALLERG_RULE = or_(
        rule(normalized('Аллергическая'), normalized('реакция'),
             normalized('на'), gram('NOUN').optional().repeatable(),
             gram('ADJF').optional().repeatable()),
        rule(normalized('Аллергическая'), normalized('реакция'),
             normalized('на'), gram('NOUN').optional().repeatable(),
             gram('ADJF').optional().repeatable(), '"',
             gram('ADJF').optional().repeatable(),
             gram('NOUN').optional().repeatable(), '"'),
        rule(normalized('Аллергическая'), normalized('реакция'),
             normalized('на'), gram('NOUN').optional().repeatable(),
             gram('ADJF').optional().repeatable(), ',',
             gram('NOUN').optional().repeatable(),
             gram('ADJF').optional().repeatable(), ',',
             gram('NOUN').optional().repeatable(),
             gram('ADJF').optional().repeatable()),
        rule(normalized('Аллергическая'), normalized('реакция'),
             normalized('на'), gram('ADJF').optional(),
             gram('NOUN').optional().repeatable()))
    parser = Parser(ALLERG_RULE)
    for match in parser.findall(text):
        item = (' '.join([_.value for _ in match.tokens]))
        dict_symp['аллергическая реакция'] = item[item.find('на') + 3:]
    if dict_symp['аллергическая реакция'] is not None:
        dict_symp['побочное действие лекартсв'] = 1
        dict_symp['аллергия на лекарства'] = 1

    symptoms = [['озноб', 'познабливание'], 'слабость', 'вялость',
                'головная боль', 'нарушение сна', 'нарушение аппетита',
                'ломота', 'тошнота', 'нарушение сознания', 'Судороги',
                'Парестезии', 'эритема',
                ['с четкими границами', 'границами четкими',
                 'четкими неровными краями', 'с четкими краями',
                 'краями четкими', 'четкими неровными краями'],
                'валик', 'боль', ['Гиперемия', 'гиперемирована'], 'Отек',
                'Лимфаденит', 'Лимфангит']
    for i in symptoms:
        lst = []
        if isinstance(i, str):
            TYPE = morph_pipeline([i])
        else:
            TYPE = morph_pipeline(i)
        parser = Parser(TYPE)
        for match in parser.findall(text):
            lst.append(' '.join([_.value for _ in match.tokens]))
        if len(lst) > 0:
            if isinstance(i, str):
                dict_symp[i] = 1
            else:
                dict_symp[i[0]] = 1
        else:
            if isinstance(i, str):
                dict_symp[i] = 0
            else:
                dict_symp[i[0]] = 0

    TYPE = morph_pipeline(['географический', 'выезжал'])
    parser = Parser(TYPE)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        text_fish = text[match.span[1] - 40:match.span[1] + 40]
        geo_rule = rule(not_(normalized('не')), normalized('выезжал'))
        parser = Parser(geo_rule)
        lst = []
        for match in parser.findall(text_fish):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if len(lst) > 0:
            dict_symp['географический анамнез'] = 1
        else:
            dict_symp['географический анамнез'] = 0

    TYPE = morph_pipeline(['бытовые'])
    parser = Parser(TYPE)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        text_fish = text[match.span[1] - 30:match.span[1] + 30]
        cond_rule = rule(not_(normalized('не')),
                         normalized('удовлетворительные'))
        parser = Parser(cond_rule)
        lst = []
        for match in parser.findall(text_fish):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if len(lst) > 0:
            dict_symp['бытовые условия'] = 1
        else:
            dict_symp['бытовые условия'] = 0

    TYPE = morph_pipeline(['условия труда'])
    parser = Parser(TYPE)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        text_fish = text[match.span[1] - 20:match.span[1] + 20]
        cond_rule = rule(not_(normalized('не')),
                         normalized('удовлетворительные'))
        parser = Parser(cond_rule)
        lst = []
        for match in parser.findall(text_fish):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if len(lst) > 0:
            dict_symp['условия труда'] = 1
        else:
            dict_symp['условия труда'] = 0

    TYPE = morph_pipeline(['питание'])
    parser = Parser(TYPE)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        text_fish = text[match.span[1] - 20:match.span[1] + 20]
        food_rule = or_(
            rule(not_(normalized('не')), normalized('удовлетворительное')),
            rule(not_(normalized('не')), normalized('полноценное')))
        parser = Parser(food_rule)
        lst = []
        for match in parser.findall(text_fish):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if len(lst) > 0:
            dict_symp['питание'] = 1
        else:
            dict_symp['питание'] = 0
        food_rule = rule(not_(normalized('не')), normalized('избыточное'))
        parser = Parser(food_rule)
        lst = []
        for match in parser.findall(text_fish):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if len(lst) > 0:
            dict_symp['избыточное питание'] = 1
        else:
            dict_symp['избыточное питание'] = 0

    TYPE = morph_pipeline(['рыба'])
    parser = Parser(TYPE)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        text_fish = text[match.span[1] - 40:match.span[1] + 40]
        TYPE = morph_pipeline(['да', 'постоянно'])
        parser = Parser(TYPE)
        lst = []
        for match in parser.findall(text_fish):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if len(lst) > 0:
            dict_symp['речная рыба'] = 1
        fish_rule = rule(not_(normalized('не')), normalized('употребляет'))
        parser = Parser(fish_rule)
        lst = []
        for match in parser.findall(text_fish):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if len(lst) > 0:
            dict_symp['речная рыба'] = 1
        else:
            dict_symp['речная рыба'] = 0

    TYPE = morph_pipeline(['контакт'])
    parser = Parser(TYPE)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        text_fish = text[match.span[1] - 40:match.span[1] + 40]
        TYPE = morph_pipeline(['да'])
        parser = Parser(TYPE)
        lst = []
        for match in parser.findall(text_fish):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if len(lst) > 0:
            dict_symp['контакт с зараженными'] = 1
        else:
            dict_symp['контакт с зараженными'] = 0

    lst = []
    TYPE = morph_pipeline(['рана', "раневые ворота", "входные ворота"])
    parser = Parser(TYPE)
    for match in parser.findall(text):
        lst.append(' '.join([_.value for _ in match.tokens]))
    if len(lst) > 0:
        dict_symp["раневые ворота"] = 1
    else:
        dict_symp["раневые ворота"] = 0

    lst = []
    TYPE = morph_pipeline(['интоксикация'])
    parser = Parser(TYPE)
    for match in parser.findall(text):
        lst.append(' '.join([_.value for _ in match.tokens]))
    if len(lst) > 0:
        dict_symp["интоксикация"] = 1
    else:
        dict_symp["интоксикация"] = 0

    lst = []
    TYPE = morph_pipeline(['клещ', "присасывание"])
    parser = Parser(TYPE)
    for match in parser.findall(text):
        lst.append(' '.join([_.value for _ in match.tokens]))
    if len(lst) > 0:
        dict_symp["клещ"] = 1
    else:
        dict_symp["клещ"] = 0

    TYPE = morph_pipeline(['сырой воды'])
    parser = Parser(TYPE)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        text_fish = text[match.span[1] - 80:match.span[1] + 80]
        TYPE = morph_pipeline(['не было', 'отрицает', 'нет'])
        parser = Parser(TYPE)
        lst = []
        for match in parser.findall(text_fish):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if len(lst) > 0:
            dict_symp['пищевой анамнез'] = 0
        else:
            dict_symp['пищевой анамнез'] = 1

    TYPE = morph_pipeline(['вредные привычки', 'алкоголь'])
    parser = Parser(TYPE)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        text_fish = text[match.span[1] - 80:match.span[1] + 80]
        TYPE = morph_pipeline(['не было', 'отрицает', 'нет',
                               'не употребляет'])
        parser = Parser(TYPE)
        lst = []
        for match in parser.findall(text_fish):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if len(lst) > 0:
            dict_symp['вредные привычки'] = 0
        else:
            dict_symp['вредные привычки'] = 1
        smoke_rule = or_(
            rule(not_(normalized('не')), normalized('курит')),
            rule(not_(normalized('не')), normalized('употребляет')))
        parser = Parser(smoke_rule)
        lst = []
        for match in parser.findall(text_fish):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if len(lst) > 0:
            dict_symp['вредные привычки'] = 1

    home = None
    home_types = [['бездомный'], ['дом благоустроенный'],
                  ['дом не благоустроенный', 'дом неблагоустроенный'],
                  ['квартира не благоустроенная',
                   'квартира неблагоустроенная'],
                  ['квартира благоустроенная'],]
    for i in range(len(home_types)):
        home_lst = []
        TYPE = morph_pipeline(home_types[i])
        parser = Parser(TYPE)
        for match in parser.findall(text):
            home_lst.append(' '.join([_.value for _ in match.tokens]))
        if len(home_lst) > 0:
            home = i
    dict_symp['квартира, дом'] = home

    pets = []
    pet_types = [['кошка'], ['собака'], ['корова', 'коза']]
    for i in range(len(pet_types)):
        pet_lst = []
        TYPE = morph_pipeline(pet_types[i])
        parser = Parser(TYPE)
        for match in parser.findall(text):
            pet_lst.append(' '.join([_.value for _ in match.tokens]))
        if len(pet_lst) > 0:
            pets.append(i + 1)
    if len(pets) > 1:
        pets = 4
    elif len(pets) > 0:
        pets = pets[0]
    else:
        pets = 0
    dict_symp['домашние животные'] = pets

    factors = []
    factor_types = [['ссадины', "царапины", "раны", "расчесы", "уколы",
                     "потертости", "трещины", 'вскрытие'],
                    ['ушибы'],
                    ['переохлаждение', 'перегревание', 'смена температуры'],
                    ['инсоляция'], ['стресс'], ['переутомление']]
    for i in range(len(factor_types)):
        factor_lst = []
        TYPE = morph_pipeline(factor_types[i])
        parser = Parser(TYPE)
        for match in parser.findall(text):
            factor_lst.append(' '.join([_.value for _ in match.tokens]))
        if len(factor_lst) > 0:
            factors.append(i + 1)
    dict_symp['провоцирущие факторы'] = factors

    factors = []
    factor_types = [['микоз', "диабет", "ожирение", "варикоз",
                     "недостаточность", "лимфостаз", "экзема"],
                    ['тонзилит', "отит", "синусит", "кариес", "пародонтоз",
                     "остеомиелит", "тромбофлебит", "трофические язвы"],
                    ['резиновая обувь', 'загрязнения кожных'],
                    ['соматические заболевания']]
    for i in range(len(factor_types)):
        factor_lst = []
        TYPE = morph_pipeline(factor_types[i])
        parser = Parser(TYPE)
        for match in parser.findall(text):
            factor_lst.append(' '.join([_.value for _ in match.tokens]))
        if len(factor_lst) > 0:
            factors.append(i + 1)
    dict_symp['предрасполагающие факторы'] = factors

    lst = []
    TYPE = morph_pipeline(['работает'])
    parser = Parser(TYPE)
    for match in parser.findall(text):
        lst.append(' '.join([_.value for _ in match.tokens]))
    if len(lst) > 0:
        dict_symp['соц категория'] = 0
    soc_rule = rule(not_(normalized('не')), normalized('работает'))
    parser = Parser(soc_rule)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        dict_symp['соц категория'] = 1

    DIAGNOZ_RULE = or_(
        rule(normalized('сопутствующий'), not_(or_(gram('NOUN')))),
        rule(normalized('сопутствующий'), normalized('диагноз')),
        rule(normalized('диагноз'), normalized('сопутствующий')),)
    rules = ['сопутствующий', 'сопутствующий диагноз',
             'диагноз сопутствующий']
    TYPE = morph_pipeline(rules)
    parser = Parser(DIAGNOZ_RULE)
    lst = []
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        dict_symp['сопутствующий диагноз'] = text[
            match.span[1] + 2:match.span[1] +
            text[match.span[1]:].find(' \n \n')]
        dict_symp['кол-во сопут заболеваний'] = \
            dict_symp['сопутствующий диагноз'].count('\n')
        if dict_symp['кол-во сопут заболеваний'] == 0:
            dict_symp['кол-во сопут заболеваний'] = 1

    DIAGNOZ_RULE = or_(
        rule(normalized('диагноз'), normalized('при'),
             normalized('поступлении')),
        rule(normalized('клинический'), normalized('диагноз')),
        rule(normalized('диагноз'), normalized('клинический')),
        rule(normalized('основной'), normalized('диагноз')),
        rule(normalized('диагноз'), normalized('основной')),
        rule(normalized('Ds')),
        rule(not_(or_(gram('ADJF'), gram('NOUN'))), normalized('диагноз'),
             not_(or_(gram('ADJF'), gram('PREP')))))
    lst = []
    parser = Parser(DIAGNOZ_RULE)
    for match in parser.findall(text):
        lst.append((match.span, [_.value for _ in match.tokens]))
        last = match.span[1] + text[match.span[1]:].find(' \n \n')
        if last == match.span[1] - 1:
            last = len(text) - 1
        dict_symp['основной диагноз'] = text[match.span[1] + 1:last]

    TYPE = morph_pipeline(['левая', 'слева'])
    parser = Parser(TYPE)
    lst = []
    for match in parser.findall(dict_symp['основной диагноз']):
        lst.append((match.span, [_.value for _ in match.tokens]))
    TYPE = morph_pipeline(['правая', 'справа'])
    parser = Parser(TYPE)
    for match in parser.findall(dict_symp['основной диагноз']):
        lst.append((match.span, [_.value for _ in match.tokens]))
    part = dict_symp['основной диагноз']
    if len(lst) == 0:
        parser = Parser(DIAGNOZ_RULE)
        for match in parser.findall(text):
            lst.append((match.span, [_.value for _ in match.tokens]))
        match = lst[0][0][1]
        last = match + text[match:].find(' \n \n')
        if last == match - 1:
            last = len(text) - 1
        dict_symp['основной диагноз'] = text[match + 1:last]
        part = text[text.find('Диагноз'):]

    TYPE = morph_pipeline(['левая', 'слева'])
    parser = Parser(TYPE)
    left_rozha = []
    lst = []
    rozha_types = [['волосистая часть головы', 'волостистой части головы'],
                   ['лицо', "ушная раковина"], ['нос', 'губы'],
                   ['верняя часть туловища', 'верхняя конечность'],
                   ['нижняя часть туловища'], ['пах', 'половые органы'],
                   ['верняя часть спины'], ['нижняя часть спины'],
                   ['плечо'], ['предплечье'], ['кисть'], ['бедро'],
                   ['голень'], ['стопа'], ['голеностоп']]
    for match in parser.findall(part):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        for i in range(len(rozha_types)):
            rozha_lst = []
            TYPE = morph_pipeline(rozha_types[i])
            parser = Parser(TYPE)
            for match in parser.findall(part):
                rozha_lst.append(' '.join([_.value for _ in match.tokens]))
            if len(rozha_lst) > 0:
                left_rozha.append(i + 1)
    dict_symp['ЛПТ'] = left_rozha

    TYPE = morph_pipeline(['правая', 'справа'])
    parser = Parser(TYPE)
    right_rozha = []
    lst = []
    for match in parser.findall(part):
        lst.append((match.span, [_.value for _ in match.tokens]))
    if len(lst) > 0:
        for i in range(len(rozha_types)):
            rozha_lst = []
            TYPE = morph_pipeline(rozha_types[i])
            parser = Parser(TYPE)
            for match in parser.findall(part):
                rozha_lst.append(' '.join([_.value for _ in match.tokens]))
            if len(rozha_lst) > 0:
                right_rozha.append(i + 1)
    dict_symp['ППТ'] = right_rozha
    return dict_symp
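# Rough usage sketch for extract(); the file name is hypothetical, and the
# returned dict maps every feature from the symptoms list to its value:
#
#   with open('card.txt', encoding='utf-8') as f:
#       features = extract(f.read())
#   print(features['Возраст'], features['основной диагноз'])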
def extract(text):
    with open(os.path.join(os.getcwd(), 'list_diseases\\diseases'),
              encoding='UTF-8') as f:
        diseases = f.read().split('\n')
    text = text.replace('\ufeff', '')
    text = text.replace('\n', ' \n ')
    text = text.replace('\\', ' ')

    symptoms = ['Дата рождения', 'Дата осмотра', 'Дата заболевания',
                'Возраст', 'Болен дней', 'Болен часов', 'Возраст в днях',
                'Время поступления', 'Время заболевания', 'рост', 'вес',
                'IMT', 'давление диаст', 'давление сист',
                'температура поступления', 'мах температура', 'Т-Ан01',
                'Т-Ан03', 'пол', 'др заболевания в анамнезе', 'кем направлен',
                'побочное действие лекартсв', 'аллергическая реакция',
                'озноб', 'слабость', 'вялость', 'головная боль',
                'нарушение сна', 'нарушение аппетита', 'ломота', 'тошнота',
                'нарушение сознания', 'Судороги', 'Парестезии', 'эритема',
                'с четкими границами', 'валик', 'боль', 'Гиперемия', 'Отек',
                'Лимфаденит', 'Лимфангит', 'квартира, дом',
                'контакт с зараженными', 'речная рыба',
                'провоцирущие факторы', 'предрасполагающие факторы',
                'кол-во сопут заболеваний', 'соц категория',
                'сопутствующий диагноз', 'основной диагноз',
                'контакт с зараженными', 'пищевой анамнез', 'раневые ворота',
                'аллергия на лекарства', 'клещ', 'географический анамнез',
                'вредные привычки', 'домашние животные', 'условия труда',
                'избыточное питание', 'ППТ', 'ЛПТ', 'бытовые условия',
                'питание', 'интоксикация', 'ЧСС',
                'болезненность лимфоузлов', 'увеличенность лимфоузлов',
                'размер лимфоузлов', 'острое начало']
    dict_symp = dict.fromkeys(symptoms)
    dict_index = dict.fromkeys(symptoms)

    dates_lst = []
    dates_spans = []
    # Rule for detecting dates
    DAY = and_(gte(1), lte(31))
    MONTH = and_(gte(1), lte(12))
    YEAR = and_(gte(1), lte(19))
    YEARFULL = and_(gte(1900), lte(2020))
    DATE = or_(
        rule(YEAR, '.', MONTH, '.', DAY),
        rule(DAY, '.', MONTH, '.', YEAR),
        rule(DAY, '.', MONTH, '.', YEARFULL),
        rule(DAY, '.', MONTH),
        rule(DAY, '.', MONTH, YEARFULL),
        rule(DAY, '.', MONTH, YEAR))
    parser = Parser(DATE)
    for match in parser.findall(text):
        dates_lst.append(''.join([_.value for _ in match.tokens]))
        dates_spans.append(match.span)
    # Sometimes we don't have information about the birthday, so we check the
    # difference between the years of the first two dates to determine
    # whether the birthday is present or not
    if int(dates_lst[1][-2:]) - int(dates_lst[0][-2:]) < 0:
        # In medical cards the dates follow this order
        dict_symp['Дата рождения'] = dates_lst[0]
        dict_symp['Дата осмотра'] = dates_lst[1]
        dict_symp['Дата заболевания'] = dates_lst[2]
        dict_index['Дата рождения'] = dates_spans[0]
        dict_index['Дата осмотра'] = dates_spans[1]
        dict_index['Дата заболевания'] = dates_spans[2]
    else:
        birth = None
        dict_symp['Дата осмотра'] = dates_lst[0]
        dict_symp['Дата заболевания'] = dates_lst[1]
        dict_index['Дата осмотра'] = dates_spans[0]
        dict_index['Дата заболевания'] = dates_spans[1]
    # If the date was written without a year, take the year from the
    # previous date
    if len(dict_symp['Дата заболевания']) == 5:
        dict_symp['Дата заболевания'] += dict_symp['Дата осмотра'][
            dict_symp['Дата осмотра'].rfind('.'):]

    # Rule for detecting phrases like "болен 5 дней"
    DAY_RULE = morph_pipeline(['дней'])
    parser = Parser(DAY_RULE)
    day_lst = []
    for match in parser.findall(text):
        day_lst.append((match.span, [_.value for _ in match.tokens]))
    if day_lst and dict_symp['Дата заболевания'] is None:
        dict_symp['Дата заболевания'] = text[day_lst[0][0][0] - 20:
                                             day_lst[0][0][0] + 20]
        dict_symp['Дата заболевания'] = re.findall(
            r'\d+', dict_symp['Дата заболевания'])[0]
        dict_symp['Дата заболевания'] = str(
            int(dict_symp['Дата осмотра'][:2]) -
            int(dict_symp['Дата заболевания']))
        dict_symp['Дата заболевания'] = (dict_symp['Дата заболевания'] +
                                         dict_symp['Дата осмотра'][2:])
        dict_index['Дата заболевания'] = day_lst[0][0]

    # Rule for detecting age
    age_lst = []
    age_spans = []
    AGE = and_(gte(0), lte(100))
    AGE_RULE = or_(rule("(", AGE, ")"), rule(gram('ADJF'), ",", AGE))
    parser = Parser(AGE_RULE)
    for match in parser.findall(text):
        s = ''.join([_.value for _ in match.tokens])
        age_lst.append((re.findall(r'\d+', s)[0]))
        age_spans.append(match.span)
    if age_lst:
        dict_symp['Возраст'] = int(age_lst[-1])
        dict_index['Возраст'] = age_spans[-1]

    # Transform dates to datetime format to make calculations
    try:
        d1 = datetime.strptime(dict_symp['Дата осмотра'], '%d.%m.%Y')
    except:
        d1 = datetime.strptime(dict_symp['Дата осмотра'], '%d.%m.%y')
        d1 = d1.strftime('%d.%m.%Y')
        d1 = datetime.strptime(d1, '%d.%m.%Y')
    try:
        d2 = datetime.strptime(dict_symp['Дата заболевания'], '%d.%m.%Y')
    except:
        d2 = datetime.strptime(dict_symp['Дата заболевания'], '%d.%m.%y')
        d2 = d2.strftime('%d.%m.%Y')
        d2 = datetime.strptime(d2, '%d.%m.%Y')
    dict_symp['Болен дней'] = (d1 - d2).days
    dict_symp['Болен часов'] = (int(dict_symp['Болен дней']) - 1) * 24
    if dict_symp['Дата рождения'] is None:
        dict_symp['Возраст в днях'] = int(dict_symp['Возраст']) * 365
    else:
        d1 = datetime.strptime(dict_symp['Дата осмотра'], '%d.%m.%Y')
        d2 = datetime.strptime(dict_symp['Дата рождения'], '%d.%m.%Y')
        dict_symp['Возраст в днях'] = (d1 - d2).days

    # Rule for detecting time
    time_lst = []
    time_spans = []
    HOURS = and_(gte(0), lte(24))
    MINUTES = and_(gte(0), lte(59))
    TIME = or_(rule(HOURS, ':', MINUTES),
               rule(HOURS, normalized('час')),)
    parser = Parser(TIME)
    for match in parser.findall(text):
        s = (''.join([_.value for _ in match.tokens]))
        time_spans.append(match.span)
        s = s.replace('часов', ':00')
        s = s.replace('час', ':00')
        time_lst.append(s)
    # If we only have one time, 'Время поступления' = 'Время заболевания'
    if time_lst:
        dict_symp['Время поступления'] = time_lst[0]
        dict_symp['Время заболевания'] = time_lst[0]
        dict_index['Время поступления'] = time_spans[0]
        dict_index['Время заболевания'] = time_spans[0]
    if len(time_lst) > 1:
        dict_symp['Время заболевания'] = time_lst[1]
        dict_index['Время заболевания'] = time_spans[1]
        t1 = dict_symp['Время поступления']
        t2 = dict_symp['Время заболевания']
        delta = int(t1[:t1.find(':')]) + 24 - int(t2[:t2.find(':')])
        dict_symp['Болен часов'] = dict_symp['Болен часов'] + delta

    # Rules for detecting weight, height and IMT
    HEIGHT = and_(gte(50), lte(250))
    WEIGHT = and_(gte(10), lte(150))
    HEIGHT_RULE = or_(rule(normalized('рост'), '-', HEIGHT),
                      rule(normalized('рост'), '–', HEIGHT),
                      rule(normalized('рост'), ':', HEIGHT),
                      rule(normalized('рост'), HEIGHT))
    WEIGHT_RULE = or_(rule(normalized('вес'), '-', WEIGHT),
                      rule(normalized('вес'), '–', WEIGHT),
                      rule(normalized('вес'), ':', WEIGHT),
                      rule(normalized('вес'), WEIGHT))
    height = None
    parser = Parser(HEIGHT_RULE)
    for match in parser.findall(text):
        height = (''.join([_.value for _ in match.tokens]))
        height_spans = match.span
        height = re.findall(r'\d+', height)[0]
    if height:
        dict_symp['рост'] = int(height)
        dict_index['рост'] = height_spans
    weight = None
    parser = Parser(WEIGHT_RULE)
    for match in parser.findall(text):
        weight = (''.join([_.value for _ in match.tokens]))
        weight = re.findall(r'\d+', weight)[0]
        weight_spans = match.span
    if weight:
        dict_symp['вес'] = int(weight)
        dict_index['вес'] = weight_spans
    if (dict_symp['рост'] is not None) and (dict_symp['вес'] is not None):
        dict_symp['IMT'] = round(
            dict_symp['вес'] /
            (dict_symp['рост'] / 100 * dict_symp['рост'] / 100), 2)

    # Rules for detecting pressure
    ADSIST = and_(gte(50), lte(250))
    ADDIAST = and_(gte(20), lte(200))
    PRES_RULE = or_(rule('АД', ADSIST, '/', ADDIAST),
                    rule('АД', ADSIST, ADDIAST),
                    rule('АД', ADSIST, ':', ADDIAST),
                    rule('АД', '-', ADSIST, '/', ADDIAST),
                    rule('А/Д', ADSIST, '/', ADDIAST),
                    rule('А/Д', ADSIST, ADDIAST),
                    rule('А/Д', ' ', ADSIST, '/', ADDIAST),
                    rule(ADSIST, '/', ADDIAST))
    pres = None
    parser = Parser(PRES_RULE)
    for match in parser.findall(text):
        pres = (''.join([_.value for _ in match.tokens]))
        pres = re.findall(r'\d+', pres)
        pres_spans = match.span
    if pres:
        dict_symp['давление сист'] = int(pres[0])
        dict_symp['давление диаст'] = int(pres[1])
        dict_index['давление сист'] = pres_spans
        dict_index['давление диаст'] = pres_spans

    # Rule for detecting pulse
    PULSE = and_(gte(40), lte(150))
    PULSE_RULE = or_(rule('ЧСС', '-', PULSE),
                     rule('ЧСС', PULSE),
                     rule('ЧСС', '/', PULSE),
                     rule('пульс', PULSE),)
    pulse = None
    parser = Parser(PULSE_RULE)
    for match in parser.findall(text):
        pulse = (''.join([_.value for _ in match.tokens]))
        pulse = re.findall(r'\d+', pulse)
        pulse_spans = match.span
    if pulse:
        dict_symp['ЧСС'] = int(pulse[0])
        dict_index['ЧСС'] = pulse_spans

    # Rules for detecting temperatures
    DEGREES = and_(gte(34), lte(42))
    SUBDEGREES = and_(gte(0), lte(9))
    TEMP_RULE = or_(rule(DEGREES, ',', SUBDEGREES),
                    rule(DEGREES, '.', SUBDEGREES),
                    rule(DEGREES))
    # Find 'Объективный статус', because this part contains information
    # about 'температура поступления'
    status = text[text.find('Объективный статус'):
                  text.find('Объективный статус') +
                  text[text.find('Объективный статус') + 1:].find(' \n \n')]
    temp_lst = []
    temp_spans = []
    parser = Parser(TEMP_RULE)
    for match in parser.findall(status):
        temp_lst.append(''.join([_.value for _ in match.tokens]))
        temp_spans.append(match.span)
    if temp_lst:
        dict_symp['температура поступления'] = temp_lst[0]
        dict_index['температура поступления'] = temp_spans[0]
    # Find temperatures in the whole text
    temp_text = text[text.find('Жалобы'):]
    temp_lst = []
    temp_spans = []
    parser = Parser(TEMP_RULE)
    for match in parser.findall(temp_text):
        temp_lst.append(''.join([_.value for _ in match.tokens]))
        temp_spans.append(match.span)
    if temp_lst:
        if dict_symp['температура поступления'] is None:
            dict_symp['температура поступления'] = temp_lst[0]
            dict_index['температура поступления'] = temp_spans[0]
        dict_symp['мах температура'] = max(
            [float(i.replace(',', '.')) for i in temp_lst])
        if dict_symp['мах температура'] >= 38:
            dict_symp['Т-Ан01'] = 1
        else:
            dict_symp['Т-Ан01'] = 0
        if dict_symp['мах температура'] >= 40:
            dict_symp['Т-Ан03'] = 3
        elif dict_symp['мах температура'] >= 39:
            dict_symp['Т-Ан03'] = 2
        elif dict_symp['мах температура'] >= 38:
            dict_symp['Т-Ан03'] = 1
        else:
            dict_symp['Т-Ан03'] = 0

    # Rule for detecting sex
    sex_lst = []
    sex_spans = []
    SEX_RULE = or_(rule(normalized('женский')), rule(normalized('мужской')))
    parser = Parser(SEX_RULE)
    for match in parser.findall(text):
        sex_lst.append(''.join([_.value for _ in match.tokens]))
        sex_spans.append(match.span)
    if sex_lst:
        dict_symp['пол'] = sex_lst[0]
        dict_index['пол'] = sex_spans[0]
        dict_symp['пол'] = dict_symp['пол'].lower().replace('женский', '2')
        dict_symp['пол'] = dict_symp['пол'].lower().replace('мужской', '1')
        dict_symp['пол'] = int(dict_symp['пол'])

    # Rule for detecting diseases
    DISEASES_RULE = morph_pipeline(diseases[:-1])
    # anamnez contains information about the patient's diseases, but the
    # family anamnesis describes relatives' diseases, so we remove that part
    anamnez = text[text.find('Анамнез'):
                   text.find('Анамнез') +
                   text[text.find('Анамнез') + 1:].rfind('Анамнез')]
    family = anamnez[anamnez.find('Семейный'):anamnez.find('Семейный') + 60]
    if family:
        anamnez = anamnez.replace(family, ' ')
    anamnez = anamnez[:anamnez.rfind('Диагноз')]
    dis_lst = []
    dis_spans = []
    parser = Parser(DISEASES_RULE)
    for match in parser.findall(anamnez):
        dis_lst.append(' '.join([_.value for _ in match.tokens]))
        dis_spans.append(match.span)
    # Special rule for описторхоз
    OP_RULE = or_(rule(normalized('описторхоз'), not_(normalized('не'))))
    parser = Parser(OP_RULE)
    op_lst = []
    for match in parser.findall(anamnez):  # text
        op_lst.append((match.span, [_.value for _ in match.tokens]))
    if op_lst:
        dis_lst.append(' описторхоз')
        dis_spans.append(match.span)
    # Special rule for туберкулез
    TUB_RULE = rule(normalized('туберкулез'), not_(normalized('отрицает')))
    parser = Parser(TUB_RULE)
    tub_lst = []
    for match in parser.findall(anamnez):  # text
        tub_lst.append((match.span, [_.value for _ in match.tokens]))
    if tub_lst:
        dis_lst.append(' туберкулез')
        dis_spans.append(match.span)
    # Special rule for ВИЧ
    VICH_RULE = morph_pipeline(['ВИЧ'])
    parser = Parser(VICH_RULE)
    vich_lst = []
    for match in parser.findall(anamnez):  # text
        vich_lst.append((match.span, [_.value for _ in match.tokens]))
    if vich_lst:
        text_vich = text[match.span[1] - 30:match.span[1] + 30]
        TYPE = morph_pipeline(['отрицает'])
        parser = Parser(TYPE)
        vich_lst = []
        for match in parser.findall(text_vich):
            vich_lst.append((match.span, [_.value for _ in match.tokens]))
        if not vich_lst:
            dis_lst.append(' ВИЧ')
            dis_spans.append(match.span)
    if dis_lst:
        dis_lst = list(set(dis_lst))
        dict_symp['др заболевания в анамнезе'] = ', '.join(dis_lst)
        dict_index['др заболевания в анамнезе'] = dis_spans
        dict_symp['др заболевания в анамнезе'] = morph.parse(
            dict_symp['др заболевания в анамнезе'])[0].normal_form

    # Rules for detecting information about л/у
    LU_RULE = morph_pipeline(['лимфатические узлы', "лимфоузлы", "лу", "л/у"])
    parser = Parser(LU_RULE)
    lu_lst = []
    lu_spans = []
    for match in parser.findall(text):
        lu_lst.append((match.span, [_.value for _ in match.tokens]))
    if lu_lst:
        dict_symp['Лимфаденит'] = 0
        dict_index['Лимфаденит'] = lu_spans
        text_lu = text[match.span[1] - 70:match.span[1] + 70]
        TYPE = morph_pipeline(["болезненны", "болезненные", "болезнены"])
        parser = Parser(TYPE)
        lu_lst = []
        for match in parser.findall(text_lu):
            lu_lst.append((match.span, [_.value for _ in match.tokens]))
        if lu_lst:
            dict_symp['болезненность лимфоузлов'] = 1
            dict_index['болезненность лимфоузлов'] = match.span
            dict_symp['Лимфаденит'] = 1
        else:
            dict_symp['болезненность лимфоузлов'] = 0
        TYPE = morph_pipeline(['Увеличены', 'увеличенные'])
        parser = Parser(TYPE)
        lu_lst = []
        for match in parser.findall(text_lu):
            lu_lst.append((match.span, [_.value for _ in match.tokens]))
        if lu_lst:
            dict_symp['увеличенность лимфоузлов'] = 1
            dict_index['увеличенность лимфоузлов'] = match.span
            dict_symp['Лимфаденит'] = 1
        else:
            dict_symp['увеличенность лимфоузлов'] = 0
        number = and_(gte(0), lte(9))
        LU_SIZE_RULE = or_(rule(number, '.', number),
                           rule(number, ',', number))
        lu_lst = []
        lu_spans = []
        parser = Parser(LU_SIZE_RULE)
        for match in parser.findall(text_lu):
            lu_lst.append(''.join([_.value for _ in match.tokens]))
            lu_spans.append(match.span)
        if lu_lst:
            dict_symp['размер лимфоузлов'] = lu_lst[0]
            dict_index['размер лимфоузлов'] = lu_spans[0]

    # Rule for 'кем направлен'
    NAPR_RULE = morph_pipeline(['Поликлиника', "скорая помощь", "ск/помощь",
                                'СМП', "обратился"])
    napr = None
    napr_lst = []
    napr_spans = []
    parser = Parser(NAPR_RULE)
    for match in parser.findall(text):
        napr_lst.append(' '.join([_.value for _ in match.tokens]))
        napr_spans.append(match.span)
    if napr_lst:
        dict_index['кем направлен'] = napr_spans[0]
        napr = napr_lst[-1]
        napr = morph.parse(napr)[0].normal_form
    if napr == "обратиться":
        dict_symp['кем направлен'] = 3
    elif napr in ("скорая помощь", "ск/помощь", 'смп', "ск / помощь",
                  "скорой помощь", "скорую помощь"):
        dict_symp['кем направлен'] = 1
    elif napr == "поликлиника":
        dict_symp['кем направлен'] = 2

    # Rule for allergy
    ALLERG_RULE = or_(
        rule(normalized('Аллергическая'), normalized('реакция'),
             normalized('на')),
        rule(normalized('не'), normalized('переносит')))
    all_lst = []
    parser = Parser(ALLERG_RULE)
    for match in parser.findall(text):
        all_lst.append((match.span, [_.value for _ in match.tokens]))
    if all_lst:
        index = all_lst[0][0][1]
        dict_symp['аллергическая реакция'] = text[
            index:text[index:].find('.') + index]
        dict_index['аллергическая реакция'] = [
            all_lst[0][0][0], text[index:].find('.') + index]

    # Rules for different symptoms
    symptoms = [['озноб', 'познабливание'], 'слабость',
                ['вялость', 'разбитость'], 'головная боль', 'нарушение сна',
                'нарушение аппетита', 'ломота', 'тошнота',
                'нарушение сознания', 'Судороги', 'Парестезии',
                ['эритема', 'эритематозная', 'эритематозно'],
                ['с четкими границами', 'границами четкими',
                 'четкими неровными краями', 'с четкими краями',
                 'краями четкими', 'четкими неровными краями',
                 'четкими контурами', 'языков пламени'],
                ['валик', 'вал'], 'боль', ['Гиперемия', 'гиперемирована'],
                'Отек', 'Лимфангит',
                ['рана', "раневые ворота", "входные ворота"],
                ['клещ', "присасывание"], 'интоксикация', 'острое начало']
    for i in symptoms:
        sym_lst = []
        sym_spans = []
        if isinstance(i, str):
            SYM_RULE = morph_pipeline([i])
            parser = Parser(SYM_RULE)
            for match in parser.findall(text):
                sym_lst.append(' '.join([_.value for _ in match.tokens]))
                sym_spans.append(match.span)
            if sym_lst:
                dict_symp[i] = 1
                dict_index[i] = sym_spans[0]
            else:
                dict_symp[i] = 0
        else:
            SYM_RULE = morph_pipeline(i)
            parser = Parser(SYM_RULE)
            for match in parser.findall(text):
                sym_lst.append(' '.join([_.value for _ in match.tokens]))
                sym_spans.append(match.span)
            if sym_lst:
                dict_symp[i[0]] = 1
                dict_index[i[0]] = sym_spans[0]
            else:
                dict_symp[i[0]] = 0

    # This function is used for features that share the same rule pattern
    def find_feature(feature, RULE, RULE2, space=[40, 40]):
        parser = Parser(RULE)
        lst = []
        for match in parser.findall(text):
            lst.append((match.span, [_.value for _ in match.tokens]))
        if lst:
            dict_index[feature] = match.span
            add_text = text[match.span[1] - space[0]:
                            match.span[1] + space[1]]
            parser = Parser(RULE2)
            lst = []
            for match in parser.findall(add_text):
                lst.append((match.span, [_.value for _ in match.tokens]))
            if lst:
                dict_symp[feature] = 1
                dict_index[feature] = match.span
            else:
                dict_symp[feature] = 0

    GEO_RULE = morph_pipeline(['географический', 'выезжал'])
    GEO_RULE2 = rule(not_(normalized('не')), normalized('выезжал'))
    geo_space = [40, 40]
    COND_RULE = morph_pipeline(['бытовые'])
    COND_RULE2 = rule(not_(normalized('не')),
                      normalized('удовлетворительные'))
    cond_space = [0, 60]
    SEC_COND_RULE = morph_pipeline(['Социально-бытовые'])
    sec_cond_space = [0, 60]
    WORK_COND_RULE = morph_pipeline(['условия труда'])
    work_cond_space = [20, 20]
    CONTACT_RULE = morph_pipeline(['контакт'])
    CONTACT_RULE2 = morph_pipeline(['да'])
    contact_space = [0, 40]
    WATER_RULE = morph_pipeline(['сырой воды'])
    WATER_RULE2 = morph_pipeline(['не было', 'отрицает', 'нет'])
    water_space = [80, 80]
    features = ['географический анамнез', 'бытовые условия',
                'бытовые условия', 'условия труда', 'контакт с зараженными',
                'пищевой анамнез']
    rules = [GEO_RULE, COND_RULE, SEC_COND_RULE, WORK_COND_RULE,
             CONTACT_RULE, WATER_RULE]
    sec_rules = [GEO_RULE2, COND_RULE2, COND_RULE2, COND_RULE2,
                 CONTACT_RULE2, WATER_RULE2]
    spaces = [geo_space, cond_space, sec_cond_space, work_cond_space,
              contact_space, water_space]
    for i in range(len(features)):
        find_feature(features[i], rules[i], sec_rules[i], spaces[i])

    # Rules for bad habits
    HAB_RULE = morph_pipeline(['вредные привычки', 'алкоголь'])
    parser = Parser(HAB_RULE)
    hab_lst = []
    for match in parser.findall(text):
        hab_lst.append((match.span, [_.value for _ in match.tokens]))
    if hab_lst:
        dict_index['вредные привычки'] = match.span
        text_hab = text[match.span[1] - 80:match.span[1] + 80]
        HAB_RULE = morph_pipeline(['не было', 'отрицает', 'нет',
                                   'не употребляет'])
        parser = Parser(HAB_RULE)
        hab_lst = []
        for match in parser.findall(text_hab):
            hab_lst.append((match.span, [_.value for _ in match.tokens]))
        if hab_lst:
            dict_symp['вредные привычки'] = 0
            dict_index['вредные привычки'] = match.span
        else:
            dict_symp['вредные привычки'] = 1
        SMOKE_RULE = or_(
            rule(not_(normalized('не')), normalized('курит')),
            rule(not_(normalized('не')), normalized('употребляет')))
        parser = Parser(SMOKE_RULE)
        hab_lst = []
        for match in parser.findall(text):
            hab_lst.append((match.span, [_.value for _ in match.tokens]))
        if hab_lst:
            dict_symp['вредные привычки'] = 1
            dict_index['вредные привычки'] = match.span

    # Rules for work
    work_lst = []
    WORK_RULE = morph_pipeline(['работает'])
    parser = Parser(WORK_RULE)
    for match in parser.findall(text):
        work_lst.append((match.span, [_.value for _ in match.tokens]))
    if work_lst:
        dict_symp['соц категория'] = 0
        dict_index['соц категория'] = match.span
    WORK_RULE = rule(not_(normalized('не')), normalized('работает'))
    parser = Parser(WORK_RULE)
    work_lst = []
    for match in parser.findall(text):
        work_lst.append((match.span, [_.value for _ in match.tokens]))
    if work_lst:
        dict_symp['соц категория'] = 1
        dict_index['соц категория'] = match.span
    # If the patient has условия труда, they probably have a job
    if dict_symp['условия труда'] is not None:
        dict_symp['соц категория'] = 1

    # Rule for food
    FOOD_RULE = morph_pipeline(['питание'])
    parser = Parser(FOOD_RULE)
    food_lst = []
    for match in parser.findall(text):
        food_lst.append((match.span, [_.value for _ in match.tokens]))
    if food_lst:
        dict_index['избыточное питание'] = match.span
        text_food = text[match.span[1] - 20:match.span[1] + 20]
        FOOD_RULE = or_(
            rule(not_(normalized('не')), normalized('удовлетворительное')),
            rule(not_(normalized('не')), normalized('полноценное')))
        parser = Parser(FOOD_RULE)
        food_lst = []
        for match in parser.findall(text_food):
            food_lst.append((match.span, [_.value for _ in match.tokens]))
        if food_lst:
            dict_symp['питание'] = 1
            dict_index['питание'] = match.span
        else:
            dict_symp['питание'] = 0
        FOOD_RULE = rule(not_(normalized('не')), normalized('избыточное'))
        parser = Parser(FOOD_RULE)
        food_lst = []
        for match in parser.findall(text_food):
            food_lst.append((match.span, [_.value for _ in match.tokens]))
        if food_lst:
            dict_index['избыточное питание'] = match.span
            dict_symp['избыточное питание'] = 1
        else:
            dict_symp['избыточное питание'] = 0

    # Rule for fish
    FISH_RULE = morph_pipeline(['рыба'])
    parser = Parser(FISH_RULE)
    fish_lst = []
    for match in parser.findall(text):
        fish_lst.append((match.span, [_.value for _ in match.tokens]))
    if fish_lst:
        dict_index['речная рыба'] = match.span
        text_fish = text[match.span[1] - 40:match.span[1] + 40]
        FISH_RULE = morph_pipeline(['да', 'постоянно'])
        parser = Parser(FISH_RULE)
        fish_lst = []
        for match in parser.findall(text_fish):
            fish_lst.append((match.span, [_.value for _ in match.tokens]))
        if fish_lst:
            dict_symp['речная рыба'] = 1
        FISH_RULE = rule(not_(normalized('не')), normalized('употребляет'))
        parser = Parser(FISH_RULE)
        fish_lst = []
        for match in parser.findall(text_fish):
            fish_lst.append((match.span, [_.value for _ in match.tokens]))
        if fish_lst:
            dict_symp['речная рыба'] = 0
            dict_index['речная рыба'] = match.span

    # Rule for home
    home = None
    home_span = None
    home_types = [['бездомный'], ['дом благоустроенный', 'частный дом'],
                  ['дом не благоустроенный', 'дом неблагоустроенный'],
                  ['квартира не благоустроенная',
                   'квартира неблагоустроенная'],
                  ['квартира благоустроенная', 'благоустроенная квартира'],]
    for i in range(len(home_types)):
        home_lst = []
        HOME_RULE = morph_pipeline(home_types[i])
        parser = Parser(HOME_RULE)
        for match in parser.findall(text):
            home_lst.append((match.span, [_.value for _ in match.tokens]))
        if home_lst:
            home = i
            home_span = match.span
    dict_symp['квартира, дом'] = home
    dict_index['квартира, дом'] = home_span

    # Rule for pets
    pets = []
    pets_span = []
    pet_types = [['кошка'], ['собака'], ['корова', 'коза']]
    for i in range(len(pet_types)):
        pet_lst = []
        PET_RULE = morph_pipeline(pet_types[i])
        parser = Parser(PET_RULE)
        for match in parser.findall(text):
            pet_lst.append(' '.join([_.value for _ in match.tokens]))
            pets_span.append(match.span)
        if pet_lst:
            pets.append(i + 1)
    if len(pets) > 1:
        pets = 4
    elif pets:
        pets = pets[0]
    else:
        pets = 0
    dict_symp['домашние животные'] = pets
    dict_index['домашние животные'] = pets_span

    # Rules for different factors
    factors = []
    factors_span = []
    factor_types = [['ссадины', "царапины", "раны", "расчесы", "уколы",
                     "потертости", "трещины", 'вскрытие'],
                    ['ушибы'],
                    ['переохлаждение', 'перегревание', 'смена температуры'],
                    ['инсоляция'], ['стресс'], ['переутомление']]

    def find_factors(factor_types):
        for i in range(len(factor_types)):
            factor_lst = []
            FACT_RULE = morph_pipeline(factor_types[i])
            parser = Parser(FACT_RULE)
            for match in parser.findall(text):
                factor_lst.append(' '.join([_.value for _ in match.tokens]))
                factors_span.append(match.span)
            if factor_lst:
                factors.append(i + 1)

    find_factors(factor_types)
    if factors:
        dict_symp['провоцирущие факторы'] = factors
        dict_index['провоцирущие факторы'] = factors_span
    factors = []
    factors_span = []
    factor_types = [['микоз', "диабет", "ожирение", "варикоз",
                     "недостаточность", "лимфостаз", "экзема"],
                    ['тонзилит', "отит", "синусит", "кариес", "пародонтоз",
                     "остеомиелит", "тромбофлебит", "трофические язвы"],
                    ['резиновая обувь', 'загрязнения кожных'],
                    ['соматические заболевания']]
    find_factors(factor_types)
    if factors:
        dict_symp['предрасполагающие факторы'] = factors
        dict_index['предрасполагающие факторы'] = factors_span

    # Rule for detecting the second diagnosis
    DIAGNOZ_RULE = or_(
        rule(normalized('сопутствующий'), not_(or_(gram('NOUN')))),
        rule(normalized('сопутствующий'), normalized('диагноз')),
        rule(normalized('диагноз'), normalized('сопутствующий')),)
    parser = Parser(DIAGNOZ_RULE)
    diag_lst = []
    for match in parser.findall(text):
        diag_lst.append((match.span, [_.value for _ in match.tokens]))
    if diag_lst:
        dict_symp['сопутствующий диагноз'] = text[
            match.span[1] + 2:match.span[1] +
            text[match.span[1]:].find(' \n \n')]
        dict_index['сопутствующий диагноз'] = [
            match.span[1] + 2,
            match.span[1] + text[match.span[1]:].find(' \n \n')]
        dict_symp['кол-во сопут заболеваний'] = \
            dict_symp['сопутствующий диагноз'].count('\n')
        if dict_symp['кол-во сопут заболеваний'] == 0:
            dict_symp['кол-во сопут заболеваний'] = 1

    # Rule for detecting the first diagnosis
or_(rule(normalized('диагноз'),normalized('при'),normalized('поступлении')), rule(normalized('клинический'),normalized('диагноз')), rule(normalized('диагноз'),normalized('клинический')), rule(normalized('основной'),normalized('диагноз')), rule(normalized('диагноз'),normalized('основной')), rule(normalized('Ds')), rule(normalized('Ds:')), rule(not_(or_(gram('ADJF'),gram('NOUN'))),normalized('диагноз'),not_(or_(gram('ADJF'),gram('PREP'))))) diag_lst = [] parser = Parser(DIAGNOZ_RULE) for match in parser.findall(text): diag_lst.append((match.span, [_.value for _ in match.tokens])) last = match.span[1]+text[match.span[1]:].find(' \n \n') if last == match.span[1]-1: last = len(text)-1 dict_symp['основной диагноз'] = text[match.span[1]+1:last] dict_index['основной диагноз'] = [match.span[1]+1,last] # Rules for detecting ЛПТ and ППТ LEFT_RULE = morph_pipeline(['левая', 'слева']) parser = Parser(LEFT_RULE) side_lst = [] for match in parser.findall(dict_symp['основной диагноз']): side_lst.append((match.span, [_.value for _ in match.tokens])) RIGHT_RULE = morph_pipeline(['правая', 'справа']) parser = Parser(RIGHT_RULE) for match in parser.findall(dict_symp['основной диагноз']): side_lst.append((match.span, [_.value for _ in match.tokens])) # If we dont have information about side in 'основной диагноз', check other diagnosis DIAGNOZ_RULE = or_(rule(normalized('Обоснование'),normalized('Диагноза'))) part = dict_symp['основной диагноз'] if len(side_lst) == 0: part = text[text.find('Диагноз'):] side_lst = [] parser = Parser(DIAGNOZ_RULE) for match in parser.findall(part): side_lst.append((match.span, [_.value for _ in match.tokens])) last = match.span[1]+part[match.span[1]:].find(' \n \n') if last == match.span[1]-1: last = len(part)-1 explaining = part[match.span[1]+1:last] if len(explaining)>1: part = part.replace(explaining,' ') # If we dont have information about side in diagnosis, check other 'Жалобы' DIAGNOZ_RULE = or_(rule(normalized('Жалобы'))) comp_lst = [] parser = Parser(DIAGNOZ_RULE) for match in parser.findall(text): comp_lst.append((match.span, [_.value for _ in match.tokens])) last = comp_lst[0][0][1]+text[comp_lst[0][0][1]:].find(' \n \n') if last == comp_lst[0][0][1]-1: last = len(text)-1 zhalobi = text[comp_lst[0][0][1]+1:last] rozha_types = [['волосистая часть головы', 'волостистой части головы'], ['лицо','щека','лоб','глаз'], ['нос','губы'],['верняя часть туловища', 'верхняя конечность'],['нижняя часть туловища'], ['пах', 'половые органы'],['верняя часть спины'],['нижняя часть спины'], ['плечо'],['предплечье'],['кисть'],['бедро'],['голень'],['стопа'],['голеностоп'], ["ушная раковина"]] def find_side(parser, sidetext): rozha = [] lst = [] for match in parser.findall(sidetext): lst.append((match.span, [_.value for _ in match.tokens])) if lst: for i in range(len(rozha_types)): rozha_lst = [] TYPE = morph_pipeline(rozha_types[i]) parser = Parser(TYPE) for match in parser.findall(sidetext):#part): rozha_lst.append(' '.join([_.value for _ in match.tokens])) if rozha_lst: if i ==15: rozha.append('2.1') else: rozha.append(i+1) return(rozha) parser = Parser(LEFT_RULE) dict_symp['ЛПТ'] = find_side(parser, part) parser = Parser(RIGHT_RULE) dict_symp['ППТ'] = find_side(parser, part) if not dict_symp['ППТ'] and not dict_symp['ЛПТ']: parser = Parser(LEFT_RULE) dict_symp['ЛПТ'] = find_side(parser, zhalobi) parser = Parser(RIGHT_RULE) dict_symp['ППТ'] = find_side(parser, zhalobi) # Special rule for detecting face face_lst = [] FACE_RULE = morph_pipeline(['нос','губы']) parser = Parser(FACE_RULE) for 
match in parser.findall(part): face_lst.append((match.span, [_.value for _ in match.tokens])) if face_lst: dict_symp['ППТ'].append(3) dict_symp['ЛПТ'].append(3) dict_symp['ЛПТ'] = list(set(dict_symp['ЛПТ'])) dict_symp['ППТ'] = list(set(dict_symp['ППТ'])) if not dict_symp['ППТ']: dict_symp['ППТ'] = None if not dict_symp['ЛПТ']: dict_symp['ЛПТ'] = None return dict_symp, dict_index
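
The extractor above leans heavily on one idiom: a rule that matches a keyword only when it is not preceded by the negation 'не'. A minimal, self-contained sketch of that pattern; the sample sentence and expected output are assumptions for illustration:

from yargy import Parser, rule, not_
from yargy.predicates import normalized

# Match 'курит' only when the token before it is not 'не'.
SMOKE_RULE = rule(not_(normalized('не')), normalized('курит'))
parser = Parser(SMOKE_RULE)
for match in parser.findall('Пациент активно курит.'):
    print([token.value for token in match.tokens])
# expected: ['активно', 'курит']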
from yargy import rule, and_, or_, not_
from yargy.predicates import eq, type as _type, normalized, custom
from yargy.pipelines import morph_pipeline
from yargy.interpretation import fact

CityFact = fact('city', ['prefix', 'title'])

CityTitle = morph_pipeline({
    'липецк', 'сургут', 'нальчик', 'москва', 'санкт-петербург',
    'питер', 'нижний новгород', 'видное'
}).interpretation(CityFact.title.normalized())

CityRule = rule(
    normalized('город').optional().interpretation(CityFact.prefix),
    CityTitle,
    eq(';').optional()
).interpretation(CityFact)
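
A minimal usage sketch for CityRule; the sample phrase and the printed fact are assumptions, not output from the original author:

from yargy import Parser

parser = Parser(CityRule)
for match in parser.findall('город Москва;'):
    # morph_pipeline normalizes the title, so this should yield
    # something like city(prefix='город', title='москва')
    print(match.fact)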
    'девятьсот': 900,
    'тысяча': 10**3,
    'миллион': 10**6,
    'миллиард': 10**9,
    'триллион': 10**12,
}

DOT = eq('.')
INT = type('INT')

THOUSANDTH = rule(caseless_pipeline(['тысячных', 'тысячная'])).interpretation(const(10**-3))
HUNDREDTH = rule(caseless_pipeline(['сотых', 'сотая'])).interpretation(const(10**-2))
TENTH = rule(caseless_pipeline(['десятых', 'десятая'])).interpretation(const(10**-1))

THOUSAND = or_(
    rule(caseless('т'), DOT),
    rule(caseless('тыс'), DOT.optional()),
    rule(normalized('тысяча')),
    rule(normalized('тыща'))
).interpretation(const(10**3))

MILLION = or_(
    rule(caseless('млн'), DOT.optional()),
    rule(normalized('миллион'))
).interpretation(const(10**6))

MILLIARD = or_(
    rule(caseless('млрд'), DOT.optional()),
    rule(normalized('миллиард'))
).interpretation(const(10**9))

TRILLION = or_(
    rule(caseless('трлн'), DOT.optional()),
    rule(normalized('триллион'))
).interpretation(const(10**12))

MULTIPLIER = or_(
    THOUSANDTH, HUNDREDTH, TENTH,
    THOUSAND, MILLION, MILLIARD, TRILLION
).interpretation(Number.multiplier)

NUM_RAW = rule(
    morph_pipeline(NUMS_RAW).interpretation(
        Number.int.normalized().custom(NUMS_RAW.get)))

NUM_INT = rule(INT).interpretation(Number.int.custom(int))

NUM = or_(NUM_RAW, NUM_INT).interpretation(Number.int)

NUMBER = or_(rule(NUM, MULTIPLIER.optional())).interpretation(Number)
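
A usage sketch for the NUMBER rule above, assuming the Number fact and the NUMS_RAW mapping defined earlier in this (truncated) file; the sample string is an assumption:

from yargy import Parser

parser = Parser(NUMBER)
for match in parser.findall('5 тыс. рублей'):
    # NUM_INT converts '5' to int and MULTIPLIER contributes 10**3,
    # so this should yield something like Number(int=5, multiplier=1000)
    print(match.fact)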
    'июль': 7,
    'август': 8,
    'сентябрь': 9,
    'октябрь': 10,
    'ноябрь': 11,
    'декабрь': 12,
}

MONTH_NAME = dictionary(MONTHS).interpretation(
    FullDateFact.month.normalized().custom(MONTHS.__getitem__))
MONTH = and_(gte(1), lte(12)).interpretation(FullDateFact.month.custom(int))
DAY = and_(gte(1), lte(31)).interpretation(FullDateFact.day.custom(int))

YEAR_WORD = or_(
    rule('г', eq('.').optional()),
    rule(normalized('год')))
YEAR = and_(gte(1000), lte(2100)).interpretation(FullDateFact.year.custom(int))
YEAR_SHORT = and_(length_eq(2), gte(0), lte(99)).interpretation(
    FullDateFact.year.custom(lambda _: 1900 + int(_)))

ERA_YEAR = and_(gte(1), lte(100000)).interpretation(FullDateFact.year.custom(int))
ERA_WORD = rule(
    eq('до'),
    or_(
        rule('н', eq('.'), 'э', eq('.').optional()),
        rule(normalized('наша'), normalized('эра')))
).interpretation(FullDateFact.current_era.const(False))
from yargy import rule, or_, Parser
from yargy.interpretation import fact
from yargy.predicates import normalized
from yargy.predicates import type as y_type

RelativeDate = fact('RelativeDate', ['relDay', 'relWeek', 'relMonth', 'relYear'])

INT = y_type('INT')

day_rule = rule(INT.interpretation(RelativeDate.relDay),
                rule(normalized('день'))).interpretation(RelativeDate)
week_rule = rule(INT.interpretation(RelativeDate.relWeek),
                 rule(normalized('неделя'))).interpretation(RelativeDate)
month_rule = rule(INT.interpretation(RelativeDate.relMonth),
                  rule(normalized('месяц'))).interpretation(RelativeDate)
year_rule = rule(INT.interpretation(RelativeDate.relYear),
                 rule(or_(normalized('год'), normalized('лет')))).interpretation(RelativeDate)

date_rule = rule(or_(day_rule, week_rule, month_rule, year_rule)).interpretation(RelativeDate)

rel_date_parser = Parser(date_rule)
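
A usage sketch for rel_date_parser; the sample phrase is an assumption:

for match in rel_date_parser.findall('через 3 дня'):
    # '3 дня' matches day_rule; relDay holds the raw token value '3'
    print(match.fact)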
    ),
    rule(
        caseless('северо'),
        DASH.optional(),
        dictionary({
            'западный',
            'кавказский'
        })
    )
).interpretation(
    Region.name
)

FED_OKRUG_WORDS = or_(
    rule(
        normalized('федеральный'),
        normalized('округ')
    ),
    rule(caseless('фо'))
).interpretation(
    Region.type.const('федеральный округ')
)

FED_OKRUG = rule(
    FED_OKRUG_WORDS,
    FED_OKRUG_NAME
).interpretation(
    Region
)
from typing import Union, List, NamedTuple, Optional

import yargy
from yargy import rule, Parser, or_
from yargy.predicates import normalized

from tg_dobby.grammar.natural_dates import RULE_MOMENT
from tg_dobby.grammar.yargy_utils import FactDefinition


class ReminderPreamble(FactDefinition):
    target: str


RULE_REMINDER_PREAMBLE = rule(
    normalized("напомни"),
    normalized("мне").optional().interpretation(ReminderPreamble.target),
).interpretation(ReminderPreamble)


class TokenFact(FactDefinition):
    nested_fact: FactDefinition


class PhraseToken:
    def __init__(self, text, match=None):
        self._match = match  # type: yargy.parser.Match
        self._text = text

    @property
    def text(self) -> str:
        'subsection', 'n6', 'section', 'n7', 'chapter', 'n8', 'type', 'codex']
)

COURT_ = fact(
    'Court',
    ['smth', 'type', 'court', 'rf']
)

NUM = and_(gte(1), lte(10000))
NUMBERS = rule(NUM, rule(eq('.').optional(), NUM).repeatable().optional())

CODEX = rule(
    or_(rule(normalized('пункт')),
        rule('п', eq('.').optional())
        ).repeatable().optional().interpretation(COD.point),
    NUMBERS.repeatable().optional().interpretation(COD.n1),
    or_(rule(normalized('подпункт')),
        rule('пп', eq('.').optional())
        ).repeatable().optional().interpretation(COD.subpoint),
    NUMBERS.repeatable().optional().interpretation(COD.n2),
    or_(rule(normalized('часть')),
        rule('ч', eq('.').optional())
        ).repeatable().optional().interpretation(COD.part),
    return dsl.Range(min, max)


DOT = eq('.')
INT = type('INT')


########
#
#   CURRENCY
#
##########


EURO = or_(
    normalized('евро'),
    eq('€')
).interpretation(
    const(dsl.EURO)
)

DOLLARS = or_(
    normalized('доллар'),
    eq('$')
).interpretation(
    const(dsl.DOLLARS)
)

RUBLES = or_(
    rule(normalized('рубль')),
    rule(
    gte(1),
    lte(12)
).interpretation(
    Date.month.custom(int)
)

DAY = and_(
    gte(1),
    lte(31)
).interpretation(
    Date.day.custom(int)
)

YEAR_WORD = or_(
    rule('г', eq('.').optional()),
    rule(normalized('год'))
)

YEAR = and_(
    gte(1000),
    lte(2100)
).interpretation(
    Date.year.custom(int)
)

YEAR_SHORT = and_(
    length_eq(2),
    gte(0),
    lte(99)
).interpretation(
    Date.year.custom(lambda _: 1900 + int(_))
from yargy import Parser, rule, and_, or_
from yargy.predicates import gram, is_capitalized, dictionary, in_, normalized, eq, caseless, type
from yargy.interpretation import fact

DOT = eq('.')

POSITION = rule(
    or_(eq('врид'), eq('врио')).optional(),
    or_(rule('пом', DOT.optional()),
        rule('зам', DOT.optional()),
        rule('ст', DOT.optional()),
        rule(dictionary({'заместитель'}))).optional(),
    normalized('полковой').optional(),
    or_(
        rule('сотр', DOT.optional()),
        rule(
            dictionary({
                'оперуполномоченный', 'сотрудник', 'командир', 'уполномоченный',
                'шофер', 'следователь', 'наркома', 'работник', 'инспектор',
                'комендант', 'разведчик', 'начальник', 'секретарь',
                'особоуполномоченный', 'председатель', 'фельдъегерь',
                'сотрудница', 'лейтенант', 'референт', 'слушатель',
                'руководитель', 'переводчик', 'управляющий'
            })),
        rule(caseless('нач'), DOT),
        rule(normalized('министра'), 'внутренних', 'дел')),
    or_(rule(eq('контрразведки')), rule(eq('особой'), eq('роты'))).optional())

pos_parser = Parser(POSITION)


def parse(d):
    for match in pos_parser.findall(d):
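
The parse() helper above is cut off; independent of it, a usage sketch for pos_parser (the sample phrase is an assumption):

for match in pos_parser.findall('ст. оперуполномоченный контрразведки'):
    print([token.value for token in match.tokens])
# expected: ['ст', '.', 'оперуполномоченный', 'контрразведки']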
    amount: Union[int, Attribute]


class Moment(FactDefinition):
    effective_date: Union[RelativeDay, DayOfWeek, RelativeInterval, Attribute]


# RULES

RULE_DAY_TIME = rule(
    rule("в").optional(),
    or_(
        rule(
            HOUR_OF_A_DAY.interpretation(DayTime.hour.normalized().custom(
                lambda val: int(WORDS_HOUR_OF_A_DAY.get(val, val)))),
            normalized("час").optional(),
            AM_PM.optional().interpretation(
                DayTime.am_pm.normalized().custom(normalize_am_pm))),
        rule(
            and_(
                gte(0),
                lte(23),
            ).interpretation(DayTime.hour.custom(int)),
            eq(":").interpretation(
                DayTime.strict_format.custom(lambda _: True)),
            and_(gte(0), lte(59)).interpretation(
                DayTime.minute.normalized().custom(int)),
            rule(
                ":",
                and_(gte(0), lte(59)).interpretation(DayTime.second.custom(
                    int))).optional()))).interpretation(DayTime)
from yargy import rule, and_, or_, not_
from yargy.predicates import eq, type as _type, normalized, custom
from yargy.pipelines import morph_pipeline
from yargy.interpretation import fact

from .Building import BuildingRule

AppartmentFact = fact(
    'appartment',
    ['appartment']
)

AppartmentRule = or_(
    rule(
        BuildingRule,
        _type('INT').interpretation(AppartmentFact.appartment)
    ),
    rule(
        normalized('квартира'),
        _type('INT').interpretation(AppartmentFact.appartment)
    )
).interpretation(
    AppartmentFact
)
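
A usage sketch for AppartmentRule, assuming the relative Building import resolves; the second alternative of the rule needs no building context, so a bare 'квартира 5' should match:

from yargy import Parser

parser = Parser(AppartmentRule)
for match in parser.findall('квартира 5'):
    print(match.fact.appartment)  # expected: '5' (the raw INT token value)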
    'сентябрь': 9,
    'октябрь': 10,
    'ноябрь': 11,
    'декабрь': 12,
}

MONTH_NAME = dictionary(MONTHS).interpretation(Date.month.normalized())
MONTH = and_(gte(1), lte(12)).interpretation(Date.month)
DAY = and_(gte(1), lte(31)).interpretation(Date.day)

YEAR_WORD = or_(
    rule('г', eq('.').optional()),
    rule(normalized('г.'), eq('.').optional()),
    rule(normalized('год')),
    rule(normalized('гг')),
    rule(')'),
)

YEAR_PREFIX = or_(
    rule('в '),
    rule('c '),
    rule(', '),
    rule('('),
)

YEAR_POSTFIX = or_(
    rule('е'),
    rule('го'),
def __init__(self):
    super(NerTimeCount, self).__init__()
    self.last_request = None
    self.last_result = [None, None, None]
    self.name = 'TimeCount'

    # spell out the numbers 1..59 as words
    n2t60 = [n2t(i) for i in range(1, 60)]

    # keep cardinal numerals, excluding collective ones (NUMR without the Coll tag)
    def not_coll_numbers(x):
        return ('NUMR' in str(morph.parse(x)[0].tag)
                and ('Coll' not in str(morph.parse(x)[0].tag))) or x == 'один'

    # hours in words
    hours_t = and_(dictionary(n2t60[:24] + ["полтора", "полдень"]),
                   custom(not_coll_numbers))
    # minutes in words
    minutes_t = dictionary(n2t60)
    coll_numbers_dic = dictionary(
        ["двое", "трое", "четверо", "пятеро", "шестеро", "семеро"])
    list_0n = {"00", "01", "02", "03", "04", "05", "06", "07", "08", "09"}
    # hours in digits
    hours_n = or_(and_(gte(1), lte(23)), in_(list_0n))
    # minutes in digits
    minutes_n = or_(and_(gte(1), lte(59)), in_(list_0n))
    # separators in hh:mm
    two_points = dictionary([":"])
    separator = dictionary([":", "."])

    # prepositions
    pr_v = rule("в")
    pr_ok = rule("около")
    pr_vrayone = morph_pipeline(["В районе"])
    pr_k = rule("к")
    pr_na = rule("на")
    pr_c = rule("с")
    start_prepositions = or_(pr_ok, pr_v, pr_k, pr_na, pr_c, pr_vrayone)
    pr_vtech = morph_pipeline(["в течение"])
    pr_do = rule("до")
    pr_po = rule("по")
    duration_prepositions = or_(pr_vtech, pr_do, pr_po)

    # parts of the day
    day_periods = or_(rule(normalized("утро")), rule(normalized("день")),
                      rule(normalized("вечер")))
    # 'час' is a special case: on its own it can denote either a specific time
    # or a duration (similarly for 'человек')
    hour = rule(normalized("час"))
    people = rule(normalized("человек"))

    # words preceding the start time
    start_syn = dictionary([
        "начало", "старт", "встреча", "переговорную", "переговорку", "пропуск"
    ])
    start_verbs = dictionary([
        "начать", "прийти", "заказать", "забронировать", "выделить", "состоится"
    ])
    # words preceding the duration
    duration_verbs = dictionary(["займёт", "продлится"])
    # words preceding the end time
    end_verbs = dictionary(["закончить", "уйдём", "завершим"])
    end_syn = dictionary(["конец", "окончание", "завершение"])

    # for finding a start time marked with ':' or '-'
    start_with_separator = or_(rule("начало"), rule("старт"), rule("время"),
                               morph_pipeline(["начало встречи"]),
                               morph_pipeline(["старт встречи"]),
                               morph_pipeline(["время встречи"]))
    duration_with_separator = or_(
        rule("продолжительность"),
        morph_pipeline(["продолжительность встречи"]))
    end_with_separator = or_(rule("конец"), rule("окончание"),
                             rule("завершение"),
                             morph_pipeline(["конец встречи"]),
                             morph_pipeline(["окончание встречи"]),
                             morph_pipeline(["завершение встречи"]))

    # relative day pointers (relative to today)
    day_pointer = or_(rule("понедельник"), morph_pipeline(["пн."]), rule("пн"),
                      rule("вторник"), morph_pipeline(["вт."]), rule("вт"),
                      rule("среда"), rule("среду"), morph_pipeline(["ср."]), rule("ср"),
                      rule("четверг"), morph_pipeline(["чт."]), rule("чт"),
                      rule("пятница"), rule("пятницу"), morph_pipeline(["пт."]), rule("пт"),
                      rule("суббота"), rule("субботу"), morph_pipeline(["сб."]), rule("сб"),
                      rule("воскресение"), rule("воскресенье"), morph_pipeline(["вс."]), rule("вс"),
                      rule("завтра"), rule("послезавтра"), rule("сегодня"))

    # words that signal a non-time context
    self._foreignWords = [
        "этаж", "январь", "февраль", "март", "апрель", "май", "июнь",
        "июль", "август", "сентябрь", "октябрь", "ноябрь", "декабрь"
    ]

    # map numerals to counts
    self._Counts = {
        "человек": 1, "1": 1, "один": 1,
        "два": 2, "2": 2, "двое": 2, "вдвоём": 2,
        "трое": 3, "три": 3, "3": 3, "втроём": 3,
        "четверо": 4, "четыре": 4, "4": 4, "вчетвером": 4,
        "5": 5, "пять": 5, "пятеро": 5, "впятером": 5,
        "6": 6, "шесть": 6, "шестеро": 6,
        "7": 7, "семь": 7, "семеро": 7,
        "8": 8, "восемь": 8,
        "9": 9, "девять": 9,
        "10": 10, "десять": 10
    }

    # normalize times to a canonical form
    self._ToNormalHours = {
        "08.00": "08:00", "8": "08:00", "восемь": "08:00",
        "09.00": "09:00", "9": "09:00", "девять": "09:00",
        "10.00": "10:00", "10": "10:00", "десять": "10:00",
        "11.00": "11:00", "11": "11:00", "одиннадцать": "11:00",
        "12.00": "12:00", "12": "12:00", "двенадцать": "12:00", "полдень": "12:00",
        "13.00": "13:00", "1": "13:00", "13": "13:00", "один": "13:00",
        "час": "13:00", "часу": "13:00",
        "14.00": "14:00", "2": "14:00", "14": "14:00", "два": "14:00",
        "15.00": "15:00", "3": "15:00", "15": "15:00", "три": "15:00",
        "16.00": "16:00", "4": "16:00", "16": "16:00", "четыре": "16:00",
        "17.00": "17:00", "5": "17:00", "17": "17:00", "пять": "17:00",
        "18.00": "18:00", "6": "18:00", "18": "18:00", "шесть": "18:00",
        "19.00": "19:00", "7": "19:00", "19": "19:00", "семь": "19:00"
    }

    # normalize durations to a canonical form
    self._ToNormalDelta = {
        "1": "01:00", "один": "01:00", "час": "01:00",
        "1:5": "01:30", "полтора": "01:30",
        "2": "02:00", "два": "02:00",
        "3": "03:00", "три": "03:00",
        "4": "04:00", "четыре": "04:00",
        "5": "05:00", "пять": "05:00",
        "6": "06:00", "шесть": "06:00",
        "7": "07:00", "семь": "07:00"
    }

    # rules for the "from time to time" format
    self._rulesFromTO = [
        # from time to time
        rule(start_prepositions, or_(hour, rule(or_(hours_t, hours_n))),
             separator.optional(), minutes_n.optional(),
             or_(day_periods, hour).optional(), duration_prepositions,
             or_(hours_t, hours_n), separator.optional(), minutes_n.optional()),
        # hh:mm - hh:mm
        rule(hours_n, separator, minutes_n, "-", hours_n, separator, minutes_n),
        # day time to time
        rule(day_pointer, rule(or_(hours_t, hours_n)), separator.optional(),
             minutes_n.optional(), or_(day_periods, hour).optional(),
             duration_prepositions, or_(hours_t, hours_n),
             separator.optional(), minutes_n.optional())
    ]

    # rules for the "from time on time" format
    self._rulesFromOn = [
        # from time on n hours
        rule(start_prepositions, or_(hours_t, hours_n), separator.optional(),
             minutes_n.optional(), or_(day_periods, hour).optional(), pr_na,
             or_(hours_t, hours_n), hour.optional()),
        # from time on hour
        rule(start_prepositions, or_(hours_t, hours_n), separator.optional(),
             minutes_n.optional(), or_(day_periods, hour).optional(), pr_na, hour)
    ]

    # rules for the "on time from time" format
    self._rulesOnFrom = [
        # on n hours from time
        rule(pr_na, or_(hours_t, hours_n), hour, start_prepositions,
             or_(hours_t, hours_n), separator.optional(), minutes_n.optional(),
             or_(day_periods, hour).optional()),
        # on hour from time
        rule(pr_na, hour, start_prepositions, or_(hours_t, hours_n),
             separator.optional(), minutes_n.optional(),
             or_(day_periods, hour).optional())
    ]

    # rules for the "from time" format
    self._rulesFrom = [
        # day or start or start verb in time
        rule(or_(day_pointer, rule(start_syn), rule(start_verbs)),
             start_prepositions, or_(hours_t, hours_n),
             separator.optional(), minutes_n.optional()),
        # start with separator
        rule(start_with_separator, two_points,
             or_(rule(hours_t), rule(hours_n)),
             separator.optional(), minutes_n.optional()),
        # since time day or hour
        rule(pr_c, or_(rule(hours_t), rule(hours_n)), separator.optional(),
             minutes_n.optional(), or_(day_periods, hour)),
        # since hour
        rule(pr_c, hour),
        # on n hours day
        rule(pr_na, or_(hours_t, hours_n), hour.optional(), day_periods),
        # on hour day
        rule(pr_na, hour, day_periods)
    ]

    # rules for end time and duration
    self._rulesTo = [
        # end or end verb in time
        rule(or_(end_syn, end_verbs), start_prepositions,
             or_(rule(hours_t), rule(hours_n), hour),
             separator.optional(), minutes_n.optional()),
        # duration verb time-time
        rule(duration_verbs, hours_n.optional(),
             dictionary(["."]).optional(), minutes_n.optional(), "-",
             hours_n.optional(), dictionary(["."]).optional(), hour),
        # duration verb time
        rule(duration_verbs, or_(hours_t, hours_n),
             dictionary(["."]).optional(), minutes_n.optional(), hour),
        # end with separator
        rule(end_with_separator, two_points,
             or_(rule(hours_t), rule(hours_n)),
             separator.optional(), minutes_n.optional()),
        # duration with separator
        rule(duration_with_separator, two_points,
             or_(rule(hours_t), rule(hours_n)),
             separator.optional(), minutes_n.optional())
    ]

    # shared rules for start time, end time and duration
    self._rulesCommon = [
        # in time + hour or day period
        rule(or_(pr_v, pr_vrayone, pr_k), or_(hours_t, hours_n),
             or_(hour, day_periods)),
        # on time + day period
        rule(pr_na, or_(hours_t, hours_n), or_(day_periods)),
        # in hh:mm
        rule(pr_v, hours_n, separator, minutes_n),
        # hh:mm
        rule(hours_n, two_points, minutes_n, or_(day_periods, hour).optional()),
        # on n hours
        rule(pr_na, or_(hours_t, hours_n), hour),
        # on hour
        rule(pr_na, hour)
    ]

    # rules for the number of people
    self._rulesCount = [
        # collective numeral
        rule(coll_numbers_dic),
        # n people
        rule(or_(hours_t, hours_n).optional())
    ]

    # rules used in follow-up requests
    self._rulesTime = [
        # all supported time formats
        rule(or_(rule(hours_t), rule(hours_n), hour),
             separator.optional(), minutes_n.optional())
    ]
    self._rulesPeriod = [
        # all supported time intervals
        rule(or_(rule(hours_t), rule(hours_n), hour),
             dictionary(["."]).optional(), minutes_n.optional())
    ]
    self._rulesCountPeople = [
        # collective numerals
        rule(coll_numbers_dic),
        # n people
        rule(or_(hours_t, hours_n).optional(), people)
    ]
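
A self-contained mini-sketch of the simplest pattern from _rulesCommon above, the "on hour" case; the sample request is an assumption:

from yargy import Parser, rule
from yargy.predicates import normalized

# 'на' followed by a normalized 'час', e.g. '... на час'
parser = Parser(rule('на', normalized('час')))
for match in parser.findall('забронируй переговорку на час'):
    print([token.value for token in match.tokens])
# expected: ['на', 'час']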
)

BASIC = rule(
    ADJF_PREFIX,
    TYPE,
)

NAMED = rule(
    or_(
        QUOTED,
        QUOTED_WITH_ADJF_PREFIX,
        BASIC,
    ),
    GENT_GROUP,
    or_(
        rule(normalized('имя')),
        rule(caseless('им'), eq('.').optional()),
    ),
    or_(
        NAME,
        PERSON,
    ),
)

LATIN = rule(
    TYPE,
    or_(
        rule(
            and_(
                type('LATIN'),
                is_capitalized(),