def tryParse(t : 'Token', loc_onto : 'IntOntologyCollection') -> 'NamedItemToken':
    if (t is None): 
        return None
    if (isinstance(t, ReferentToken)): 
        r = t.getReferent()
        if ((r.type_name == "PERSON" or r.type_name == "PERSONPROPERTY" or (isinstance(r, GeoReferent))) or r.type_name == "ORGANIZATION"): 
            return NamedItemToken._new1635(t, t, r, t.morph)
        return None
    typ = NamedItemToken.__m_types.tryParse(t, TerminParseAttr.NO)
    nam = NamedItemToken.__m_names.tryParse(t, TerminParseAttr.NO)
    if (typ is not None): 
        if (not ((isinstance(t, TextToken)))): 
            return None
        res = NamedItemToken._new1636(typ.begin_token, typ.end_token, typ.morph, typ.chars)
        res.kind = (Utils.valToEnum(typ.termin.tag, NamedEntityKind))
        res.type_value = typ.termin.canonic_text
        if ((nam is not None and nam.end_token == typ.end_token and not t.chars.is_all_lower) and (Utils.valToEnum(nam.termin.tag, NamedEntityKind)) == res.kind): 
            res.name_value = nam.termin.canonic_text
            res.is_wellknown = True
        return res
    if (nam is not None): 
        if (nam.begin_token.chars.is_all_lower): 
            return None
        res = NamedItemToken._new1636(nam.begin_token, nam.end_token, nam.morph, nam.chars)
        res.kind = (Utils.valToEnum(nam.termin.tag, NamedEntityKind))
        res.name_value = nam.termin.canonic_text
        ok = True
        if (not t.is_whitespace_before and t.previous is not None): 
            ok = False
        elif (not t.is_whitespace_after and t.next0_ is not None): 
            if (t.next0_.isCharOf(",.;!?") and t.next0_.is_whitespace_after): 
                pass
            else: 
                ok = False
        if (ok): 
            res.is_wellknown = True
            res.type_value = (Utils.asObjectOrNull(nam.termin.tag2, str))
        return res
    adj = MiscLocationHelper.tryAttachNordWest(t)
    if (adj is not None): 
        if (adj.morph.class0_.is_noun): 
            if (adj.end_token.isValue("ВОСТОК", None)): 
                if (adj.begin_token == adj.end_token): 
                    return None
                re = NamedItemToken._new1638(t, adj.end_token, adj.morph)
                re.kind = NamedEntityKind.LOCATION
                re.name_value = MiscHelper.getTextValue(t, adj.end_token, GetTextAttr.FIRSTNOUNGROUPTONOMINATIVE)
                re.is_wellknown = True
                return re
            return None
        if (adj.whitespaces_after_count > 2): 
            return None
        if ((isinstance(adj.end_token.next0_, ReferentToken)) and (isinstance(adj.end_token.next0_.getReferent(), GeoReferent))): 
            re = NamedItemToken._new1638(t, adj.end_token.next0_, adj.end_token.next0_.morph)
            re.kind = NamedEntityKind.LOCATION
            re.name_value = MiscHelper.getTextValue(t, adj.end_token.next0_, GetTextAttr.FIRSTNOUNGROUPTONOMINATIVE)
            re.is_wellknown = True
            re.ref = adj.end_token.next0_.getReferent()
            return re
        res = NamedItemToken.tryParse(adj.end_token.next0_, loc_onto)
        if (res is not None and res.kind == NamedEntityKind.LOCATION): 
            s = adj.getNormalCaseText(MorphClass.ADJECTIVE, True, res.morph.gender, False)
            if (s is not None): 
                if (res.name_value is None): 
                    res.name_value = s.upper()
                else: 
                    res.name_value = "{0} {1}".format(s.upper(), res.name_value)
                res.type_value = (None)
            res.begin_token = t
            res.chars = t.chars
            res.is_wellknown = True
            return res
    if (t.chars.is_capital_upper and not MiscHelper.canBeStartOfSentence(t)): 
        npt = NounPhraseHelper.tryParse(t, NounPhraseParseAttr.NO, 0)
        if (npt is not None and len(npt.adjectives) > 0): 
            test = NamedItemToken.tryParse(npt.noun.begin_token, loc_onto)
            if (test is not None and test.end_token == npt.end_token and test.type_value is not None): 
                test.begin_token = t
                tmp = io.StringIO()
                for a in npt.adjectives: 
                    s = a.getNormalCaseText(MorphClass.ADJECTIVE, True, test.morph.gender, False)
                    if (tmp.tell() > 0): 
                        print(' ', end="", file=tmp)
                    print(s, end="", file=tmp)
                test.name_value = Utils.toStringStringIO(tmp)
                test.chars = t.chars
                if (test.kind == NamedEntityKind.LOCATION): 
                    test.is_wellknown = True
                return test
    if ((BracketHelper.isBracket(t, True) and t.next0_ is not None and t.next0_.chars.is_letter) and not t.next0_.chars.is_all_lower): 
        br = BracketHelper.tryParse(t, BracketParseAttr.NO, 100)
        if (br is not None): 
            res = NamedItemToken(t, br.end_token)
            res.is_in_bracket = True
            res.name_value = MiscHelper.getTextValue(t, br.end_token, GetTextAttr.NO)
            nam = NamedItemToken.__m_names.tryParse(t.next0_, TerminParseAttr.NO)
            if (nam is not None and nam.end_token == br.end_token.previous): 
                res.kind = (Utils.valToEnum(nam.termin.tag, NamedEntityKind))
                res.is_wellknown = True
                res.name_value = nam.termin.canonic_text
            return res
    if (((isinstance(t, TextToken)) and t.chars.is_letter and not t.chars.is_all_lower) and t.length_char > 2): 
        res = NamedItemToken._new1638(t, t, t.morph)
        str0_ = (t).term
        if (str0_.endswith("О") or str0_.endswith("И") or str0_.endswith("Ы")): 
            res.name_value = str0_
        else: 
            res.name_value = t.getNormalCaseText(None, False, MorphGender.UNDEFINED, False)
        res.chars = t.chars
        if (((not t.is_whitespace_after and t.next0_ is not None and t.next0_.is_hiphen) and (isinstance(t.next0_.next0_, TextToken)) and not t.next0_.next0_.is_whitespace_after) and t.chars.is_cyrillic_letter == t.next0_.next0_.chars.is_cyrillic_letter): 
            res.end_token = t.next0_.next0_
            t = res.end_token
            res.name_value = "{0}-{1}".format(res.name_value, t.getNormalCaseText(None, False, MorphGender.UNDEFINED, False))
        return res
    return None
def process(self, kit : 'AnalysisKit') -> None:
    ad = kit.getAnalyzerData(self)
    models = TerminCollection()
    objs_by_model = dict()
    obj_by_names = TerminCollection()
    t = kit.first_token
    first_pass3158 = True
    while True:
        if first_pass3158: first_pass3158 = False
        else: t = t.next0_
        if (not (t is not None)): break
        its = WeaponItemToken.tryParseList(t, 10)
        if (its is None): 
            continue
        rts = self.__tryAttach(its, False)
        if (rts is not None): 
            for rt in rts: 
                rt.referent = ad.registerReferent(rt.referent)
                kit.embedToken(rt)
                t = (rt)
                for s in rt.referent.slots: 
                    if (s.type_name == WeaponReferent.ATTR_MODEL): 
                        mod = str(s.value)
                        for k in range(2):
                            if (not str.isdigit(mod[0])): 
                                li = []
                                wrapli2638 = RefOutArgWrapper(None)
                                inoutres2639 = Utils.tryGetValue(objs_by_model, mod, wrapli2638)
                                li = wrapli2638.value
                                if (not inoutres2639): 
                                    li = list()
                                    objs_by_model[mod] = li
                                if (not rt.referent in li): 
                                    li.append(rt.referent)
                                models.addStr(mod, li, None, False)
                            if (k > 0): 
                                break
                            brand = rt.referent.getStringValue(WeaponReferent.ATTR_BRAND)
                            if (brand is None): 
                                break
                            mod = "{0} {1}".format(brand, mod)
                    elif (s.type_name == WeaponReferent.ATTR_NAME): 
                        obj_by_names.add(Termin._new117(str(s.value), rt.referent))
    if (len(objs_by_model) == 0 and len(obj_by_names.termins) == 0): 
        return
    t = kit.first_token
    first_pass3159 = True
    while True:
        if first_pass3159: first_pass3159 = False
        else: t = t.next0_
        if (not (t is not None)): break
        br = BracketHelper.tryParse(t, BracketParseAttr.NO, 10)
        if (br is not None): 
            toks = obj_by_names.tryParse(t.next0_, TerminParseAttr.NO)
            if (toks is not None and toks.end_token.next0_ == br.end_token): 
                rt0 = ReferentToken(Utils.asObjectOrNull(toks.termin.tag, Referent), br.begin_token, br.end_token)
                kit.embedToken(rt0)
                t = (rt0)
                continue
        if (not ((isinstance(t, TextToken)))): 
            continue
        if (not t.chars.is_letter): 
            continue
        tok = models.tryParse(t, TerminParseAttr.NO)
        if (tok is None): 
            if (not t.chars.is_all_lower): 
                tok = obj_by_names.tryParse(t, TerminParseAttr.NO)
            if (tok is None): 
                continue
        if (not tok.is_whitespace_after): 
            if (tok.end_token.next0_ is None or not tok.end_token.next0_.isCharOf(",.)")): 
                if (not BracketHelper.isBracket(tok.end_token.next0_, False)): 
                    continue
        tr = None
        li = Utils.asObjectOrNull(tok.termin.tag, list)
        if (li is not None and len(li) == 1): 
            tr = li[0]
        else: 
            tr = (Utils.asObjectOrNull(tok.termin.tag, Referent))
        if (tr is not None): 
            tit = WeaponItemToken.tryParse(tok.begin_token.previous, None, False, True)
            if (tit is not None and tit.typ == WeaponItemToken.Typs.BRAND): 
                tr.addSlot(WeaponReferent.ATTR_BRAND, tit.value, False, 0)
                tok.begin_token = tit.begin_token
            rt0 = ReferentToken(tr, tok.begin_token, tok.end_token)
            kit.embedToken(rt0)
            t = (rt0)
            continue
def createNickname(pr : 'PersonReferent', t : 'Token') -> 'Token':
    """ Extract a nickname (alias)
    Args:
        pr(PersonReferent): the person the nickname is attached to
        t(Token): the start token
    Returns:
        Token: if not None, the last token of the nickname; the nickname itself is written into pr
    """
    has_keyw = False
    is_br = False
    # first_pass3097 emulates a do-while loop: t is advanced at the top of every pass except the first
    first_pass3097 = True
    while True:
        if first_pass3097: first_pass3097 = False
        else: t = t.next0_
        if (not (t is not None)): break
        if (t.is_hiphen or t.is_comma or t.isCharOf(".:;")): 
            continue
        if (t.morph.class0_.is_preposition): 
            continue
        if (t.isChar('(')): 
            is_br = True
            continue
        if ((t.isValue("ПРОЗВИЩЕ", "ПРІЗВИСЬКО") or t.isValue("КЛИЧКА", None) or t.isValue("ПСЕВДОНИМ", "ПСЕВДОНІМ")) or t.isValue("ПСЕВДО", None) or t.isValue("ПОЗЫВНОЙ", "ПОЗИВНИЙ")): 
            has_keyw = True
            continue
        break
    if (not has_keyw or t is None): 
        return None
    if (BracketHelper.isBracket(t, True)): 
        br = BracketHelper.tryParse(t, BracketParseAttr.NO, 100)
        if (br is not None): 
            ni = MiscHelper.getTextValue(br.begin_token.next0_, br.end_token.previous, GetTextAttr.NO)
            if (ni is not None): 
                pr.addSlot(PersonReferent.ATTR_NICKNAME, ni, False, 0)
                t = br.end_token
                tt = t.next0_
                first_pass3098 = True
                while True:
                    if first_pass3098: first_pass3098 = False
                    else: tt = tt.next0_
                    if (not (tt is not None)): break
                    if (tt.is_comma_and): 
                        continue
                    if (not BracketHelper.isBracket(tt, True)): 
                        break
                    br = BracketHelper.tryParse(tt, BracketParseAttr.NO, 100)
                    if (br is None): 
                        break
                    ni = MiscHelper.getTextValue(br.begin_token.next0_, br.end_token.previous, GetTextAttr.NO)
                    if (ni is not None): 
                        pr.addSlot(PersonReferent.ATTR_NICKNAME, ni, False, 0)
                        tt = br.end_token
                        t = tt
                if (is_br and t.next0_ is not None and t.next0_.isChar(')')): 
                    t = t.next0_
                return t
    else: 
        pli = PersonItemToken.tryAttachList(t, None, PersonItemToken.ParseAttr.NO, 10)
        if (pli is not None and ((len(pli) == 1 or len(pli) == 2))): 
            ni = MiscHelper.getTextValue(pli[0].begin_token, pli[len(pli) - 1].end_token, GetTextAttr.NO)
            if (ni is not None): 
                pr.addSlot(PersonReferent.ATTR_NICKNAME, ni, False, 0)
                t = pli[len(pli) - 1].end_token
                if (is_br and t.next0_ is not None and t.next0_.isChar(')')): 
                    t = t.next0_
                return t
    return None
def __try_parse(t : 'Token', prev : 'WeaponItemToken', after_conj : bool, attach_high : bool=False) -> 'WeaponItemToken': if (t is None): return None if (BracketHelper.is_bracket(t, True)): wit = WeaponItemToken.__try_parse(t.next0_, prev, after_conj, attach_high) if (wit is not None): if (wit.end_token.next0_ is None): wit.begin_token = t return wit if (BracketHelper.is_bracket(wit.end_token.next0_, True)): wit.begin_token = t wit.end_token = wit.end_token.next0_ return wit tok = WeaponItemToken.M_ONTOLOGY.try_parse(t, TerminParseAttr.NO) if (tok is not None): res = WeaponItemToken(t, tok.end_token) res.typ = (Utils.valToEnum(tok.termin.tag, WeaponItemToken.Typs)) if (res.typ == WeaponItemToken.Typs.NOUN): res.value = tok.termin.canonic_text if (tok.termin.tag2 is not None): res.is_doubt = True tt = res.end_token.next0_ first_pass3426 = True while True: if first_pass3426: first_pass3426 = False else: tt = tt.next0_ if (not (tt is not None)): break if (tt.whitespaces_before_count > 2): break wit = WeaponItemToken.__try_parse(tt, None, False, False) if (wit is not None): if (wit.typ == WeaponItemToken.Typs.BRAND): res.__inner_tokens.append(wit) tt = wit.end_token res.end_token = tt continue break if (not (isinstance(tt, TextToken))): break mc = tt.get_morph_class_in_dictionary() if (mc == MorphClass.ADJECTIVE): if (res.alt_value is None): res.alt_value = res.value if (res.alt_value.endswith(res.value)): res.alt_value = res.alt_value[0:0+len(res.alt_value) - len(res.value)] res.alt_value = "{0}{1} {2}".format(res.alt_value, tt.term, res.value) res.end_token = tt continue break return res if (res.typ == WeaponItemToken.Typs.BRAND or res.typ == WeaponItemToken.Typs.NAME): res.value = tok.termin.canonic_text return res if (res.typ == WeaponItemToken.Typs.MODEL): res.value = tok.termin.canonic_text if (isinstance(tok.termin.tag2, list)): li = Utils.asObjectOrNull(tok.termin.tag2, list) for to in li: wit = WeaponItemToken._new2758(t, tok.end_token, Utils.valToEnum(to.tag, WeaponItemToken.Typs), to.canonic_text, tok.begin_token == tok.end_token) res.__inner_tokens.append(wit) if (to.additional_vars is not None and len(to.additional_vars) > 0): wit.alt_value = to.additional_vars[0].canonic_text res.__correct_model() return res nnn = MiscHelper.check_number_prefix(t) if (nnn is not None): tit = TransItemToken._attach_number(nnn, True) if (tit is not None): res = WeaponItemToken._new2759(t, tit.end_token, WeaponItemToken.Typs.NUMBER) res.value = tit.value res.alt_value = tit.alt_value return res if (((isinstance(t, TextToken)) and t.chars.is_letter and t.chars.is_all_upper) and (t.length_char < 4)): if ((t.next0_ is not None and ((t.next0_.is_hiphen or t.next0_.is_char('.'))) and (t.next0_.whitespaces_after_count < 2)) and (isinstance(t.next0_.next0_, NumberToken))): res = WeaponItemToken._new2760(t, t.next0_, WeaponItemToken.Typs.MODEL, True) res.value = t.term res.__correct_model() return res if ((isinstance(t.next0_, NumberToken)) and not t.is_whitespace_after): res = WeaponItemToken._new2760(t, t, WeaponItemToken.Typs.MODEL, True) res.value = t.term res.__correct_model() return res if (t.term == "СП" and (t.whitespaces_after_count < 3) and (isinstance(t.next0_, TextToken))): pp = WeaponItemToken.__try_parse(t.next0_, None, False, False) if (pp is not None and ((pp.typ == WeaponItemToken.Typs.MODEL or pp.typ == WeaponItemToken.Typs.BRAND))): res = WeaponItemToken._new2759(t, t, WeaponItemToken.Typs.NOUN) res.value = "ПИСТОЛЕТ" res.alt_value = "СЛУЖЕБНЫЙ ПИСТОЛЕТ" return res if (((isinstance(t, 
TextToken)) and t.chars.is_letter and not t.chars.is_all_lower) and t.length_char > 2): ok = False if (prev is not None and ((prev.typ == WeaponItemToken.Typs.NOUN or prev.typ == WeaponItemToken.Typs.MODEL or prev.typ == WeaponItemToken.Typs.BRAND))): ok = True elif (prev is None and t.previous is not None and t.previous.is_comma_and): ok = True if (ok): res = WeaponItemToken._new2760(t, t, WeaponItemToken.Typs.NAME, True) res.value = t.term if ((t.next0_ is not None and t.next0_.is_hiphen and (isinstance(t.next0_.next0_, TextToken))) and t.next0_.next0_.chars == t.chars): res.value = "{0}-{1}".format(res.value, t.next0_.next0_.term) res.end_token = t.next0_.next0_ if (prev is not None and prev.typ == WeaponItemToken.Typs.NOUN): res.typ = WeaponItemToken.Typs.BRAND if (res.end_token.next0_ is not None and res.end_token.next0_.is_hiphen and (isinstance(res.end_token.next0_.next0_, NumberToken))): res.typ = WeaponItemToken.Typs.MODEL res.__correct_model() elif (not res.end_token.is_whitespace_after and (isinstance(res.end_token.next0_, NumberToken))): res.typ = WeaponItemToken.Typs.MODEL res.__correct_model() return res if (t.is_value("МАРКА", None)): res = WeaponItemToken.__try_parse(t.next0_, prev, after_conj, False) if (res is not None and res.typ == WeaponItemToken.Typs.BRAND): res.begin_token = t return res if (BracketHelper.can_be_start_of_sequence(t.next0_, True, False)): br = BracketHelper.try_parse(t.next0_, BracketParseAttr.NO, 100) if (br is not None): return WeaponItemToken._new2764(t, br.end_token, WeaponItemToken.Typs.BRAND, MiscHelper.get_text_value(br.begin_token, br.end_token, GetTextAttr.NO)) if (((isinstance(t, TextToken)) and (isinstance(t.next0_, TextToken)) and t.next0_.length_char > 1) and not t.next0_.chars.is_all_lower): return WeaponItemToken._new2764(t, t.next0_, WeaponItemToken.Typs.BRAND, t.term) if (t.is_value("КАЛИБР", "КАЛІБР")): tt1 = t.next0_ if (tt1 is not None and ((tt1.is_hiphen or tt1.is_char(':')))): tt1 = tt1.next0_ num = NumbersWithUnitToken.try_parse(tt1, None, False, False, False, False) if (num is not None and num.single_val is not None): return WeaponItemToken._new2764(t, num.end_token, WeaponItemToken.Typs.CALIBER, NumberHelper.double_to_string(num.single_val)) if (isinstance(t, NumberToken)): num = NumbersWithUnitToken.try_parse(t, None, False, False, False, False) if (num is not None and num.single_val is not None): if (len(num.units) == 1 and num.units[0].unit is not None and num.units[0].unit.name_cyr == "мм"): return WeaponItemToken._new2764(t, num.end_token, WeaponItemToken.Typs.CALIBER, NumberHelper.double_to_string(num.single_val)) if (num.end_token.next0_ is not None and num.end_token.next0_.is_value("КАЛИБР", "КАЛІБР")): return WeaponItemToken._new2764(t, num.end_token.next0_, WeaponItemToken.Typs.CALIBER, NumberHelper.double_to_string(num.single_val)) if (t.is_value("ПРОИЗВОДСТВО", "ВИРОБНИЦТВО")): tt1 = t.next0_ if (tt1 is not None and ((tt1.is_hiphen or tt1.is_char(':')))): tt1 = tt1.next0_ if (isinstance(tt1, ReferentToken)): if ((isinstance(tt1.get_referent(), OrganizationReferent)) or (isinstance(tt1.get_referent(), GeoReferent))): return WeaponItemToken._new2769(t, tt1, WeaponItemToken.Typs.DEVELOPER, tt1.get_referent()) return None
def tryParse(self, t0 : 'Token', pars : 'TerminParseAttr'=TerminParseAttr.NO) -> 'TerminToken':
    """ Try to match this term starting from the given token
    Args:
        t0(Token): the start token
        pars(TerminParseAttr): parse attributes
    Returns:
        TerminToken: the matched token range, or None
    """
    from pullenti.ner.core.MiscHelper import MiscHelper
    from pullenti.ner.core.BracketHelper import BracketHelper
    if (t0 is None): 
        return None
    term = None
    if (isinstance(t0, TextToken)): 
        term = (t0).term
    if (self.acronym_smart is not None and (((pars) & (TerminParseAttr.FULLWORDSONLY))) == (TerminParseAttr.NO) and term is not None): 
        if (self.acronym_smart == term): 
            if (t0.next0_ is not None and t0.next0_.isChar('.') and not t0.is_whitespace_after): 
                return TerminToken._new606(t0, t0.next0_, self)
            else: 
                return TerminToken._new606(t0, t0, self)
        t1 = Utils.asObjectOrNull(t0, TextToken)
        tt = Utils.asObjectOrNull(t0, TextToken)
        i = 0
        while i < len(self.acronym): 
            if (tt is None): 
                break
            term1 = tt.term
            if (len(term1) != 1 or tt.is_whitespace_after): 
                break
            if (i > 0 and tt.is_whitespace_before): 
                break
            if (term1[0] != self.acronym[i]): 
                break
            if (tt.next0_ is None or not tt.next0_.isChar('.')): 
                break
            t1 = (Utils.asObjectOrNull(tt.next0_, TextToken))
            tt = (Utils.asObjectOrNull(tt.next0_.next0_, TextToken))
            i += 1
        if (i >= len(self.acronym)): 
            return TerminToken._new606(t0, t1, self)
    if (self.acronym is not None and term is not None and self.acronym == term): 
        if (t0.chars.is_all_upper or self.acronym_can_be_lower or ((not t0.chars.is_all_lower and len(term) >= 3))): 
            return TerminToken._new606(t0, t0, self)
    if (self.acronym is not None and t0.chars.is_last_lower and t0.length_char > 3): 
        if (t0.isValue(self.acronym, None)): 
            return TerminToken._new606(t0, t0, self)
    cou = 0
    i = 0
    while i < len(self.terms): 
        if (self.terms[i].is_hiphen): 
            cou -= 1
        else: 
            cou += 1
        i += 1
    if (len(self.terms) > 0 and ((not self.ignore_terms_order or cou == 1))): 
        t1 = t0
        tt = t0
        e0_ = None
        eup = None
        ok = True
        mc = None
        dont_change_mc = False
        i = 0
        first_pass2812 = True
        while True:
            if first_pass2812: first_pass2812 = False
            else: i += 1
            if (not (i < len(self.terms))): break
            if (self.terms[i].is_hiphen): 
                continue
            if (tt is not None and tt.is_hiphen and i > 0): 
                tt = tt.next0_
            if (i > 0 and tt is not None): 
                if ((((pars) & (TerminParseAttr.IGNOREBRACKETS))) != (TerminParseAttr.NO) and not tt.chars.is_letter and BracketHelper.isBracket(tt, False)): 
                    tt = tt.next0_
            if (((((pars) & (TerminParseAttr.CANBEGEOOBJECT))) != (TerminParseAttr.NO) and i > 0 and (isinstance(tt, ReferentToken))) and tt.getReferent().type_name == "GEO"): 
                tt = tt.next0_
            if ((isinstance(tt, ReferentToken)) and e0_ is None): 
                eup = tt
                e0_ = (tt).end_token
                tt = (tt).begin_token
            if (tt is None): 
                ok = False
                break
            if (not self.terms[i].checkByToken(tt)): 
                if (tt.next0_ is not None and tt.isChar('.') and self.terms[i].checkByToken(tt.next0_)): 
                    tt = tt.next0_
                elif (((i > 0 and tt.next0_ is not None and (isinstance(tt, TextToken))) and ((tt.morph.class0_.is_preposition or MiscHelper.isEngArticle(tt))) and self.terms[i].checkByToken(tt.next0_)) and not self.terms[i - 1].is_pattern_any): 
                    tt = tt.next0_
                else: 
                    ok = False
                    if (((i + 2) < len(self.terms)) and self.terms[i + 1].is_hiphen and self.terms[i + 2].checkByPrefToken(self.terms[i], Utils.asObjectOrNull(tt, TextToken))): 
                        i += 2
                        ok = True
                    elif (((not tt.is_whitespace_after and tt.next0_ is not None and (isinstance(tt, TextToken))) and (tt).length_char == 1 and tt.next0_.isCharOf("\"'`’“”")) and not tt.next0_.is_whitespace_after and (isinstance(tt.next0_.next0_, TextToken))): 
                        if (self.terms[i].checkByStrPrefToken((tt).term, Utils.asObjectOrNull(tt.next0_.next0_, TextToken))): 
                            ok = True
                            tt = tt.next0_.next0_
                    if (not ok): 
                        if (i > 0 and (((pars) & (TerminParseAttr.IGNORESTOPWORDS))) != (TerminParseAttr.NO)): 
                            if (isinstance(tt, TextToken)): 
                                if (not tt.chars.is_letter): 
                                    tt = tt.next0_
                                    i -= 1
                                    continue
                                mc1 = tt.getMorphClassInDictionary()
                                if (mc1.is_conjunction or mc1.is_preposition): 
                                    tt = tt.next0_
                                    i -= 1
                                    continue
                            if (isinstance(tt, NumberToken)): 
                                tt = tt.next0_
                                i -= 1
                                continue
                        break
            if (tt.morph.items_count > 0 and not dont_change_mc): 
                mc = MorphCollection(tt.morph)
                if (((mc.class0_.is_noun or mc.class0_.is_verb)) and not mc.class0_.is_adjective): 
                    if (((i + 1) < len(self.terms)) and self.terms[i + 1].is_hiphen): 
                        pass
                    else: 
                        dont_change_mc = True
            if (tt.morph.class0_.is_preposition or tt.morph.class0_.is_conjunction): 
                dont_change_mc = True
            if (tt == e0_): 
                tt = eup
                eup = (None)
                e0_ = (None)
            if (e0_ is None): 
                t1 = tt
            tt = tt.next0_
        if (ok and i >= len(self.terms)): 
            if (t1.next0_ is not None and t1.next0_.isChar('.') and self.abridges is not None): 
                for a in self.abridges: 
                    if (a.tryAttach(t0) is not None): 
                        t1 = t1.next0_
                        break
            if (t0 != t1 and t0.morph.class0_.is_adjective): 
                npt = NounPhraseHelper.tryParse(t0, NounPhraseParseAttr.NO, 0)
                if (npt is not None and npt.end_char <= t1.end_char): 
                    mc = npt.morph
            return TerminToken._new611(t0, t1, mc)
    if (len(self.terms) > 1 and self.ignore_terms_order): 
        terms_ = list(self.terms)
        t1 = t0
        tt = t0
        while len(terms_) > 0: 
            if (tt != t0 and tt is not None and tt.is_hiphen): 
                tt = tt.next0_
            if (tt is None): 
                break
            j = 0
            while j < len(terms_): 
                if (terms_[j].checkByToken(tt)): 
                    break
                j += 1
            if (j >= len(terms_)): 
                if (tt != t0 and (((pars) & (TerminParseAttr.IGNORESTOPWORDS))) != (TerminParseAttr.NO)): 
                    if (isinstance(tt, TextToken)): 
                        if (not tt.chars.is_letter): 
                            tt = tt.next0_
                            continue
                        mc1 = tt.getMorphClassInDictionary()
                        if (mc1.is_conjunction or mc1.is_preposition): 
                            tt = tt.next0_
                            continue
                    if (isinstance(tt, NumberToken)): 
                        tt = tt.next0_
                        continue
                break
            del terms_[j]
            t1 = tt
            tt = tt.next0_
        for i in range(len(terms_) - 1, -1, -1):
            if (terms_[i].is_hiphen): 
                del terms_[i]
        if (len(terms_) == 0): 
            return TerminToken(t0, t1)
    if (self.abridges is not None and (((pars) & (TerminParseAttr.FULLWORDSONLY))) == (TerminParseAttr.NO)): 
        res = None
        for a in self.abridges: 
            r = a.tryAttach(t0)
            if (r is None): 
                continue
            if (r.abridge_without_point and len(self.terms) > 0): 
                if (not ((isinstance(t0, TextToken)))): 
                    continue
                if (a.parts[0].value != (t0).term): 
                    continue
            if (res is None or (res.length_char < r.length_char)): 
                res = r
        if (res is not None): 
            return res
    return None
def __try_parse_ru(first: 'Token', typ: 'NounPhraseParseAttr', max_char_pos: int, def_noun: 'NounPhraseItem' = None) -> 'NounPhraseToken': if (first is None): return None items = None adverbs = None prep = None kak = False t0 = first if ((((typ) & (NounPhraseParseAttr.PARSEPREPOSITION))) != (NounPhraseParseAttr.NO) and t0.is_value("КАК", None)): t0 = t0.next0_ prep = PrepositionHelper.try_parse(t0) if (prep is not None): t0 = prep.end_token.next0_ kak = True internal_noun_prase = None conj_before = False t = t0 first_pass3041 = True while True: if first_pass3041: first_pass3041 = False else: t = t.next0_ if (not (t is not None)): break if (max_char_pos > 0 and t.begin_char > max_char_pos): break if ((t.morph.class0_.is_conjunction and not t.morph.class0_.is_adjective and not t.morph.class0_.is_pronoun) and not t.morph.class0_.is_noun): if (conj_before): break if ((((typ) & (NounPhraseParseAttr.CANNOTHASCOMMAAND))) != (NounPhraseParseAttr.NO)): break if (items is not None and ((t.is_and or t.is_or))): conj_before = True if ((t.next0_ is not None and t.next0_.is_char_of("\\/") and t.next0_.next0_ is not None) and t.next0_.next0_.is_or): t = t.next0_.next0_ if (((t.next0_ is not None and t.next0_.is_char('(') and t.next0_.next0_ is not None) and t.next0_.next0_.is_or and t.next0_.next0_.next0_ is not None) and t.next0_.next0_.next0_.is_char(')')): t = t.next0_.next0_.next0_ continue break elif (t.is_comma): if (conj_before or items is None): break if ((((typ) & (NounPhraseParseAttr.CANNOTHASCOMMAAND))) != (NounPhraseParseAttr.NO)): break mc = t.previous.get_morph_class_in_dictionary() if (mc.is_proper_surname or mc.is_proper_secname): break conj_before = True if (kak and t.next0_ is not None and t.next0_.is_value("ТАК", None)): t = t.next0_ if (t.next0_ is not None and t.next0_.is_and): t = t.next0_ pr = PrepositionHelper.try_parse(t.next0_) if (pr is not None): t = pr.end_token if (items[len(items) - 1].can_be_noun and items[len(items) - 1].end_token.morph.class0_.is_pronoun): break continue elif (t.is_char('(')): if (items is None): return None brr = BracketHelper.try_parse(t, BracketParseAttr.NO, 100) if (brr is None): break if (brr.length_char > 100): break t = brr.end_token continue if (isinstance(t, ReferentToken)): if ((((typ) & (NounPhraseParseAttr.REFERENTCANBENOUN))) == ( NounPhraseParseAttr.NO)): break elif (t.chars.is_latin_letter): break it = NounPhraseItem.try_parse(t, items, typ) if (it is None or ((not it.can_be_adj and not it.can_be_noun))): if (((it is not None and items is not None and t.chars.is_capital_upper) and (t.whitespaces_before_count < 3) and t.length_char > 3) and not t.get_morph_class_in_dictionary().is_noun and not t.get_morph_class_in_dictionary().is_adjective): it.can_be_noun = True items.append(it) break if ((((typ) & (NounPhraseParseAttr.PARSEADVERBS))) != (NounPhraseParseAttr.NO) and (isinstance(t, TextToken)) and t.morph.class0_.is_adverb): if (adverbs is None): adverbs = list() adverbs.append(Utils.asObjectOrNull(t, TextToken)) continue break it.conj_before = conj_before conj_before = False if (not it.can_be_adj and not it.can_be_noun): break if (t.is_newline_before and t != first): if ((((typ) & (NounPhraseParseAttr.MULTILINES))) != (NounPhraseParseAttr.NO)): pass elif (items is not None and t.chars != items[len(items) - 1].chars): if (t.chars.is_all_lower and items[len(items) - 1].chars.is_capital_upper): pass else: break if (items is None): items = list() else: it0 = items[len(items) - 1] if (it0.can_be_noun and it0.is_personal_pronoun): if 
(it.is_pronoun): break if ((it0.begin_token.previous is not None and it0.begin_token.previous. get_morph_class_in_dictionary().is_verb and not it0.begin_token.previous. get_morph_class_in_dictionary().is_adjective) and not it0.begin_token.previous. get_morph_class_in_dictionary().is_preposition): if (t.morph.case_.is_nominative or t.morph.case_.is_accusative): pass else: break if (it.can_be_noun and it.is_verb): if (it0.previous is None): pass elif ((isinstance(it0.previous, TextToken)) and not it0.previous.chars.is_letter): pass else: break items.append(it) t = it.end_token if (t.is_newline_after and not t.chars.is_all_lower): mc = t.get_morph_class_in_dictionary() if (mc.is_proper_surname): break if (t.morph.class0_.is_proper_surname and mc.is_undefined): break if (items is None): return None tt1 = None if (len(items) == 1 and items[0].can_be_adj): and0_ = False tt1 = items[0].end_token.next0_ first_pass3042 = True while True: if first_pass3042: first_pass3042 = False else: tt1 = tt1.next0_ if (not (tt1 is not None)): break if (tt1.is_and or tt1.is_or): and0_ = True break if (tt1.is_comma or tt1.is_value("НО", None) or tt1.is_value("ТАК", None)): continue break if (and0_): if (items[0].can_be_noun and items[0].is_personal_pronoun): and0_ = False if (and0_): tt2 = tt1.next0_ if (tt2 is not None and tt2.morph.class0_.is_preposition): tt2 = tt2.next0_ npt1 = _NounPraseHelperInt.__try_parse_ru( tt2, typ, max_char_pos, None) if (npt1 is not None and len(npt1.adjectives) > 0): ok1 = False for av in items[0].adj_morph: for v in npt1.noun.noun_morph: if (v.check_accord(av, False, False)): items[0].morph.add_item(av) ok1 = True if (ok1): npt1.begin_token = items[0].begin_token npt1.end_token = tt1.previous npt1.adjectives.clear() npt1.adjectives.append(items[0]) return npt1 if (def_noun is not None): items.append(def_noun) last1 = items[len(items) - 1] check = True for it in items: if (not it.can_be_adj): check = False break elif (it.can_be_noun and it.is_personal_pronoun): check = False break tt1 = last1.end_token.next0_ if ((tt1 is not None and check and ((tt1.morph.class0_.is_preposition or tt1.morph.case_.is_instrumental))) and (tt1.whitespaces_before_count < 2)): inp = NounPhraseHelper.try_parse( tt1, Utils.valToEnum((typ) | (NounPhraseParseAttr.PARSEPREPOSITION), NounPhraseParseAttr), max_char_pos, None) if (inp is not None): tt1 = inp.end_token.next0_ npt1 = _NounPraseHelperInt.__try_parse_ru( tt1, typ, max_char_pos, None) if (npt1 is not None): ok = True ii = 0 first_pass3043 = True while True: if first_pass3043: first_pass3043 = False else: ii += 1 if (not (ii < len(items))): break it = items[ii] if (NounPhraseItem.try_accord_adj_and_noun( it, Utils.asObjectOrNull(npt1.noun, NounPhraseItem))): continue if (ii > 0): inp2 = NounPhraseHelper.try_parse( it.begin_token, typ, max_char_pos, None) if (inp2 is not None and inp2.end_token == inp.end_token): del items[ii:ii + len(items) - ii] inp = inp2 break ok = False break if (ok): if (npt1.morph.case_.is_genitive and not inp.morph.case_.is_instrumental): ok = False if (ok): i = 0 while i < len(items): npt1.adjectives.insert(i, items[i]) i += 1 npt1.internal_noun = inp mmm = MorphCollection(npt1.morph) for it in items: mmm.remove_items(it.adj_morph[0], False) if (mmm.gender != MorphGender.UNDEFINED or mmm.number != MorphNumber.UNDEFINED or not mmm.case_.is_undefined): npt1.morph = mmm if (adverbs is not None): if (npt1.adverbs is None): npt1.adverbs = adverbs else: npt1.adverbs[0:0] = adverbs npt1.begin_token = first return npt1 if (tt1 is not 
None and tt1.morph.class0_.is_noun and not tt1.morph.case_.is_genitive): it = NounPhraseItem.try_parse(tt1, items, typ) if (it is not None and it.can_be_noun): internal_noun_prase = inp inp.begin_token = items[0].end_token.next0_ items.append(it) i = 0 first_pass3044 = True while True: if first_pass3044: first_pass3044 = False else: i += 1 if (not (i < len(items))): break if (items[i].can_be_adj and items[i].begin_token.morph.class0_.is_verb): it = items[i].begin_token if (not it.get_morph_class_in_dictionary().is_verb): continue if (it.is_value("УПОЛНОМОЧЕННЫЙ", None)): continue if ((((typ) & (NounPhraseParseAttr.PARSEVERBS))) == ( NounPhraseParseAttr.NO)): continue inp = _NounPraseHelperInt.__try_parse_ru( items[i].end_token.next0_, NounPhraseParseAttr.NO, max_char_pos, None) if (inp is None): continue if (inp.anafor is not None and i == (len(items) - 1) and NounPhraseItem.try_accord_adj_and_noun( items[i], Utils.asObjectOrNull(inp.noun, NounPhraseItem))): inp.begin_token = first ii = 0 while ii < len(items): inp.adjectives.insert(ii, items[ii]) ii += 1 return inp if (inp.end_token.whitespaces_after_count > 3): continue npt1 = _NounPraseHelperInt.__try_parse_ru( inp.end_token.next0_, NounPhraseParseAttr.NO, max_char_pos, None) if (npt1 is None): continue ok = True j = 0 while j <= i: if (not NounPhraseItem.try_accord_adj_and_noun( items[j], Utils.asObjectOrNull(npt1.noun, NounPhraseItem))): ok = False break j += 1 if (not ok): continue verb = VerbPhraseHelper.try_parse(it, True, False, False) if (verb is None): continue vlinks = SemanticHelper.try_create_links(verb, inp, None) nlinks = SemanticHelper.try_create_links(inp, npt1, None) if (len(vlinks) == 0 and len(nlinks) > 0): continue j = 0 while j <= i: npt1.adjectives.insert(j, items[j]) j += 1 items[i].end_token = inp.end_token mmm = MorphCollection(npt1.morph) bil = list() j = 0 while j <= i: bil.clear() for m in items[j].adj_morph: bil.append(m) mmm.remove_items_list_cla(bil, None) j += 1 if (mmm.gender != MorphGender.UNDEFINED or mmm.number != MorphNumber.UNDEFINED or not mmm.case_.is_undefined): npt1.morph = mmm if (adverbs is not None): if (npt1.adverbs is None): npt1.adverbs = adverbs else: npt1.adverbs[0:0] = adverbs npt1.begin_token = first return npt1 ok2 = False if ((len(items) == 1 and (((typ) & (NounPhraseParseAttr.ADJECTIVECANBELAST))) != (NounPhraseParseAttr.NO) and (items[0].whitespaces_after_count < 3)) and not items[0].is_adverb): if (not items[0].can_be_adj): ok2 = True elif (items[0].is_personal_pronoun and items[0].can_be_noun): ok2 = True if (ok2): it = NounPhraseItem.try_parse(items[0].end_token.next0_, None, typ) if (it is not None and it.can_be_adj and it.begin_token.chars.is_all_lower): ok2 = True if (it.is_adverb or it.is_verb): ok2 = False if (it.is_pronoun and items[0].is_pronoun): ok2 = False if (it.can_be_adj_for_personal_pronoun and items[0].is_personal_pronoun): ok2 = True if (ok2 and NounPhraseItem.try_accord_adj_and_noun( it, items[0])): npt1 = _NounPraseHelperInt.__try_parse_ru( it.begin_token, typ, max_char_pos, None) if (npt1 is not None and ((npt1.end_char > it.end_char or len(npt1.adjectives) > 0))): pass else: items.insert(0, it) noun = None adj_after = None for i in range(len(items) - 1, -1, -1): if (items[i].can_be_noun): if (items[i].conj_before): continue if (i > 0 and not items[i - 1].can_be_adj): continue if (i > 0 and items[i - 1].can_be_noun): if (items[i - 1].is_doubt_adjective): continue if (items[i - 1].is_pronoun and items[i].is_pronoun): if (items[i].is_pronoun and items[i - 
1].can_be_adj_for_personal_pronoun): pass else: continue noun = items[i] del items[i:i + len(items) - i] if (adj_after is not None): items.append(adj_after) elif (len(items) > 0 and items[0].can_be_noun and not items[0].can_be_adj): noun = items[0] items.clear() break if (noun is None): return None res = NounPhraseToken._new466(first, noun.end_token, prep) if (adverbs is not None): for a in adverbs: if (a.begin_char < noun.begin_char): if (len(items) == 0 and prep is None): return None if (res.adverbs is None): res.adverbs = list() res.adverbs.append(a) res.noun = (noun) res.multi_nouns = noun.multi_nouns if (kak): res.multi_nouns = True res.internal_noun = internal_noun_prase for v in noun.noun_morph: noun.morph.add_item(v) res.morph = noun.morph if (res.morph.case_.is_nominative and first.previous is not None and first.previous.morph.class0_.is_preposition): res.morph.case_ = (res.morph.case_) ^ MorphCase.NOMINATIVE if ((((typ) & (NounPhraseParseAttr.PARSEPRONOUNS))) == (NounPhraseParseAttr.NO) and ((res.morph.class0_.is_pronoun or res.morph.class0_.is_personal_pronoun))): return None stat = None if (len(items) > 1): stat = dict() need_update_morph = False if (len(items) > 0): ok_list = list() is_num_not = False for vv in noun.noun_morph: i = 0 v = vv i = 0 while i < len(items): ok = False for av in items[i].adj_morph: if (v.check_accord(av, False, False)): ok = True if (not ((av.case_) & v.case_).is_undefined and av.case_ != v.case_): v.case_ = av.case_ = (av.case_) & v.case_ break if (not ok): if (items[i].can_be_numeric_adj and items[i].try_accord_var(v, False)): ok = True v1 = NounPhraseItemTextVar() v1.copy_from_item(v) v1.number = MorphNumber.PLURAL is_num_not = True v1.case_ = MorphCase() for a in items[i].adj_morph: v1.case_ = (v1.case_) | a.case_ v = v1 else: break i += 1 if (i >= len(items)): ok_list.append(v) if (len(ok_list) > 0 and (((len(ok_list) < res.morph.items_count) or is_num_not))): res.morph = MorphCollection() for v in ok_list: res.morph.add_item(v) if (not is_num_not): noun.morph = res.morph i = 0 first_pass3045 = True while True: if first_pass3045: first_pass3045 = False else: i += 1 if (not (i < len(items))): break for av in items[i].adj_morph: for v in noun.noun_morph: if (v.check_accord(av, False, False)): if (not ((av.case_) & v.case_).is_undefined and av.case_ != v.case_): v.case_ = av.case_ = (av.case_) & v.case_ need_update_morph = True items[i].morph.add_item(av) if (stat is not None and av.normal_value is not None and len(av.normal_value) > 1): last = av.normal_value[len(av.normal_value) - 1] if (not last in stat): stat[last] = 1 else: stat[last] += 1 if (items[i].is_pronoun or items[i].is_personal_pronoun): res.anafor = items[i].begin_token if ((((typ) & (NounPhraseParseAttr.PARSEPRONOUNS))) == ( NounPhraseParseAttr.NO)): continue tt = Utils.asObjectOrNull(items[i].begin_token, TextToken) if (tt is not None and not tt.term.startswith("ВЫСШ")): err = False for wf in tt.morph.items: if (wf.class0_.is_adjective): if (wf.contains_attr("прев.", None)): if ((((typ) & (NounPhraseParseAttr.IGNOREADJBEST))) != (NounPhraseParseAttr.NO)): err = True if (wf.contains_attr("к.ф.", None) and tt.morph.class0_.is_personal_pronoun): return None if (err): continue if (res.morph.case_.is_nominative): v = MiscHelper.get_text_value_of_meta_token( items[i], GetTextAttr.KEEPQUOTES) if (not Utils.isNullOrEmpty(v)): if (items[i].get_normal_case_text( None, MorphNumber.UNDEFINED, MorphGender.UNDEFINED, False) != v): wf = NounPhraseItemTextVar(items[i].morph, None) wf.normal_value 
= v wf.class0_ = MorphClass.ADJECTIVE wf.case_ = res.morph.case_ if (res.morph.case_.is_prepositional or res.morph.gender == MorphGender.NEUTER or res.morph.gender == MorphGender.FEMINIE): items[i].morph.add_item(wf) else: items[i].morph.insert_item(0, wf) res.adjectives.append(items[i]) if (items[i].end_char > res.end_char): res.end_token = items[i].end_token i = 0 first_pass3046 = True while True: if first_pass3046: first_pass3046 = False else: i += 1 if (not (i < (len(res.adjectives) - 1))): break if (res.adjectives[i].whitespaces_after_count > 5): if (res.adjectives[i].chars != res.adjectives[i + 1].chars): if (not res.adjectives[i + 1].chars.is_all_lower): return None if (res.adjectives[i].chars.is_all_upper and res.adjectives[i + 1].chars.is_capital_upper): return None if (res.adjectives[i].chars.is_capital_upper and res.adjectives[i + 1].chars.is_all_upper): return None if (res.adjectives[i].whitespaces_after_count > 10): if (res.adjectives[i].newlines_after_count == 1): if (res.adjectives[i].chars.is_capital_upper and i == 0 and res.adjectives[i + 1].chars.is_all_lower): continue if (res.adjectives[i].chars == res.adjectives[ i + 1].chars): continue return None if (need_update_morph): noun.morph = MorphCollection() for v in noun.noun_morph: noun.morph.add_item(v) res.morph = noun.morph if (len(res.adjectives) > 0): if (noun.begin_token.previous is not None): if (noun.begin_token.previous.is_comma_and): if (res.adjectives[0].begin_char > noun.begin_char): pass else: return None zap = 0 and0_ = 0 cou = 0 last_and = False i = 0 while i < (len(res.adjectives) - 1): te = res.adjectives[i].end_token.next0_ if (te is None): return None if (te.is_char('(')): pass elif (te.is_comma): zap += 1 last_and = False elif (te.is_and or te.is_or): and0_ += 1 last_and = True if (not res.adjectives[i].begin_token.morph.class0_.is_pronoun ): cou += 1 i += 1 if ((zap + and0_) > 0): if (and0_ > 1): return None elif (and0_ == 1 and not last_and): return None if ((zap + and0_) != cou): if (and0_ == 1): pass else: return None last = Utils.asObjectOrNull( res.adjectives[len(res.adjectives) - 1], NounPhraseItem) if (last.is_pronoun and not last_and): return None if (stat is not None): for adj in items: if (adj.morph.items_count > 1): w1 = Utils.asObjectOrNull(adj.morph.get_indexer_item(0), NounPhraseItemTextVar) w2 = Utils.asObjectOrNull(adj.morph.get_indexer_item(1), NounPhraseItemTextVar) if ((len(w1.normal_value) < 2) or (len(w2.normal_value) < 2)): break l1 = w1.normal_value[len(w1.normal_value) - 1] l2 = w2.normal_value[len(w2.normal_value) - 1] i1 = 0 i2 = 0 wrapi1468 = RefOutArgWrapper(0) Utils.tryGetValue(stat, l1, wrapi1468) i1 = wrapi1468.value wrapi2467 = RefOutArgWrapper(0) Utils.tryGetValue(stat, l2, wrapi2467) i2 = wrapi2467.value if (i1 < i2): adj.morph.remove_item(1) adj.morph.insert_item(0, w2) if (res.begin_token.get_morph_class_in_dictionary().is_verb and len(items) > 0): if (not res.begin_token.chars.is_all_lower or res.begin_token.previous is None): pass elif (res.begin_token.previous.morph.class0_.is_preposition): pass else: comma = False tt = res.begin_token.previous first_pass3047 = True while True: if first_pass3047: first_pass3047 = False else: tt = tt.previous if (not (tt is not None and tt.end_char <= res.end_char)): break if (tt.morph.class0_.is_adverb): continue if (tt.is_char_of(".;")): break if (tt.is_comma): comma = True continue if (tt.is_value("НЕ", None)): continue if (((tt.morph.class0_.is_noun or tt.morph.class0_.is_proper)) and comma): for it in 
res.begin_token.morph.items: if (it.class0_.is_verb and (isinstance(it, MorphWordForm))): if (tt.morph.check_accord(it, False, False)): if (res.morph.case_.is_instrumental): return None break if (res.begin_token == res.end_token): mc = res.begin_token.get_morph_class_in_dictionary() if (mc.is_adverb): if (res.begin_token.previous is not None and res.begin_token.previous.morph.class0_.is_preposition): pass elif (mc.is_noun and not mc.is_preposition and not mc.is_conjunction): pass elif (res.begin_token.is_value("ВЕСЬ", None)): pass else: return None if (def_noun is not None and def_noun.end_token == res.end_token and len(res.adjectives) > 0): res.end_token = res.adjectives[len(res.adjectives) - 1].end_token return res
def tryParse(t: 'Token', add_units: 'TerminCollection', can_be_set: bool = True, can_units_absent: bool = False) -> 'MeasureToken': """ Выделение вместе с наименованием Args: t(Token): """ if (not ((isinstance(t, TextToken)))): return None if (t.is_table_control_char): return None t0 = t whd = None minmax = 0 wrapminmax1516 = RefOutArgWrapper(minmax) tt = NumbersWithUnitToken._isMinOrMax(t0, wrapminmax1516) minmax = wrapminmax1516.value if (tt is not None): t = tt.next0_ npt = NounPhraseHelper.tryParse( t, Utils.valToEnum((NounPhraseParseAttr.PARSEPREPOSITION) | (NounPhraseParseAttr.IGNOREBRACKETS), NounPhraseParseAttr), 0) if (npt is None): whd = NumbersWithUnitToken._tryParseWHL(t) if (whd is not None): npt = NounPhraseToken(t0, whd.end_token) elif (t0.isValue("КПД", None)): npt = NounPhraseToken(t0, t0) elif ((isinstance(t0, TextToken)) and t0.length_char > 3 and t0.getMorphClassInDictionary().is_undefined): npt = NounPhraseToken(t0, t0) else: return None elif (NumberHelper.tryParseRealNumber(t, True) is not None): return None else: dtok = DateItemToken.tryAttach(t, None) if (dtok is not None): return None t1 = npt.end_token t = npt.end_token name_ = MetaToken._new561(npt.begin_token, npt.end_token, npt.morph) units = None units2 = None internals_ = list() not0_ = False tt = t1.next0_ first_pass3037 = True while True: if first_pass3037: first_pass3037 = False else: tt = tt.next0_ if (not (tt is not None)): break if (tt.is_newline_before): break if (tt.is_table_control_char): break wrapminmax1510 = RefOutArgWrapper(minmax) tt2 = NumbersWithUnitToken._isMinOrMax(tt, wrapminmax1510) minmax = wrapminmax1510.value if (tt2 is not None): tt = tt2 t = tt t1 = t continue if ((tt.isValue("БЫТЬ", None) or tt.isValue("ДОЛЖЕН", None) or tt.isValue("ДОЛЖНЫЙ", None)) or tt.isValue("МОЖЕТ", None) or ((tt.isValue("СОСТАВЛЯТЬ", None) and not tt.getMorphClassInDictionary().is_adjective))): t = tt t1 = t if (tt.previous.isValue("НЕ", None)): not0_ = True continue www = NumbersWithUnitToken._tryParseWHL(tt) if (www is not None): whd = www tt = www.end_token t = tt t1 = t continue if (len(internals_) > 0 and tt.is_comma_and): continue if (tt.isValue("ПРИ", None) or len(internals_) > 0): mt1 = MeasureToken.tryParse(tt.next0_, add_units, False, False) if (mt1 is not None and mt1.reliable): internals_.append(mt1) tt = mt1.end_token t = tt t1 = t continue if ((isinstance(tt, NumberToken)) and (tt).typ == NumberSpellingType.WORDS): npt3 = NounPhraseHelper.tryParse( tt, NounPhraseParseAttr.PARSENUMERICASADJECTIVE, 0) if (npt3 is not None): tt = npt3.end_token t1 = tt if (len(internals_) == 0): name_.end_token = t1 continue mt0 = NumbersWithUnitToken.tryParse(tt, add_units, False, False) if (mt0 is not None): break if (((tt.is_comma or tt.isChar('('))) and tt.next0_ is not None): www = NumbersWithUnitToken._tryParseWHL(tt.next0_) if (www is not None): whd = www tt = www.end_token t = tt t1 = t if (tt.next0_ is not None and tt.next0_.is_comma): tt = tt.next0_ t1 = tt if (tt.next0_ is not None and tt.next0_.isChar(')')): tt = tt.next0_ t1 = tt continue uu = UnitToken.tryParseList(tt.next0_, add_units, False) if (uu is not None): t = uu[len(uu) - 1].end_token t1 = t units = uu if (tt.isChar('(') and t1.next0_ is not None and t1.next0_.isChar(')')): tt = t1.next0_ t = tt t1 = t continue elif (t1.next0_ is not None and t1.next0_.isChar('(')): uu = UnitToken.tryParseList(t1.next0_.next0_, add_units, False) if (uu is not None and uu[len(uu) - 1].end_token.next0_ is not None and uu[len(uu) - 
1].end_token.next0_.isChar(')')): units2 = uu tt = uu[len(uu) - 1].end_token.next0_ t = tt t1 = t continue if (uu is not None and len(uu) > 0 and not uu[0].is_doubt): break if (BracketHelper.canBeStartOfSequence(tt, False, False)): br = BracketHelper.tryParse(tt, BracketParseAttr.NO, 100) if (br is not None): tt = br.end_token t = tt t1 = t continue if (tt.isValue("НЕ", None) and tt.next0_ is not None): mc = tt.next0_.getMorphClassInDictionary() if (mc.is_adverb or mc.is_misc): break continue if (tt.isValue("ЯМЗ", None)): pass npt2 = NounPhraseHelper.tryParse( tt, Utils.valToEnum((NounPhraseParseAttr.PARSEPREPOSITION) | (NounPhraseParseAttr.IGNOREBRACKETS), NounPhraseParseAttr), 0) if (npt2 is None): if (tt.morph.class0_.is_preposition or tt.morph.class0_.is_conjunction): to = NumbersWithUnitToken.M_TERMINS.tryParse( tt, TerminParseAttr.NO) if (to is not None): if ((isinstance(to.end_token.next0_, TextToken)) and to.end_token.next0_.is_letters): pass else: break t1 = tt continue mc = tt.getMorphClassInDictionary() if (((isinstance(tt, TextToken)) and tt.chars.is_letter and tt.length_char > 1) and (((tt.chars.is_all_upper or mc.is_adverb or mc.is_undefined) or mc.is_adjective))): uu = UnitToken.tryParseList(tt, add_units, False) if (uu is not None): if (uu[0].length_char > 2 or len(uu) > 1): units = uu t = uu[len(uu) - 1].end_token t1 = t break t = tt t1 = t if (len(internals_) == 0): name_.end_token = tt continue if (tt.is_comma): continue if (tt.isChar('.')): if (not MiscHelper.canBeStartOfSentence(tt.next0_)): continue uu = UnitToken.tryParseList(tt.next0_, add_units, False) if (uu is not None): if (uu[0].length_char > 2 or len(uu) > 1): units = uu t = uu[len(uu) - 1].end_token t1 = t break break tt = npt2.end_token t = tt t1 = t if (len(internals_) > 0): pass elif (t.isValue("ПРЕДЕЛ", None) or t.isValue("ГРАНИЦА", None) or t.isValue("ДИАПАЗОН", None)): pass elif (t.chars.is_letter): name_.end_token = t1 t1 = t1.next0_ first_pass3038 = True while True: if first_pass3038: first_pass3038 = False else: t1 = t1.next0_ if (not (t1 is not None)): break if (t1.is_table_control_char): pass elif (t1.isCharOf(":,_")): www = NumbersWithUnitToken._tryParseWHL(t1.next0_) if (www is not None): whd = www t = www.end_token t1 = t continue elif (t1.is_hiphen and t1.is_whitespace_after and t1.is_whitespace_before): pass else: break if (t1 is None): return None mts = NumbersWithUnitToken.tryParseMulti(t1, add_units, False, not0_) if (mts is None): return None mt = mts[0] if (name_.begin_token.morph.class0_.is_preposition): name_.begin_token = name_.begin_token.next0_ if (len(mts) > 1 and len(internals_) == 0): if (len(mt.units) == 0): if (units is not None): for m in mts: m.units = units res1 = MeasureToken._new1511(t0, mts[len(mts) - 1].end_token, name_.morph, True) res1.name = MiscHelper.getTextValueOfMetaToken( name_, GetTextAttr.FIRSTNOUNGROUPTONOMINATIVE) k = 0 while k < len(mts): ttt = MeasureToken._new1506(mts[k].begin_token, mts[k].end_token, mts[k]) if (whd is not None): nams = Utils.asObjectOrNull(whd.tag, list) if (k < len(nams)): ttt.name = nams[k] res1.internals.append(ttt) k += 1 tt1 = res1.end_token.next0_ if (tt1 is not None and tt1.isChar('±')): nn = NumbersWithUnitToken._tryParse(tt1, add_units, True, False) if (nn is not None and nn.plus_minus_percent): res1.end_token = nn.end_token res1.nums = nn return res1 if (not mt.is_whitespace_before): if (mt.begin_token.previous is None): return None if (mt.begin_token.previous.isCharOf(":),") or mt.begin_token.previous.is_table_control_char): 
pass else: return None if (len(mt.units) == 0 and units is not None): mt.units = units if (mt.div_num is not None and len(units) > 1 and len(mt.div_num.units) == 0): i = 1 while i < len(units): if (units[i].pow0_ == -1): j = i while j < len(units): mt.div_num.units.append(units[j]) units[j].pow0_ = (-units[j].pow0_) j += 1 del mt.units[i:i + len(units) - i] break i += 1 if ((minmax < 0) and mt.single_val is not None): mt.from_val = mt.single_val mt.from_include = True mt.single_val = (None) if (minmax > 0 and mt.single_val is not None): mt.to_val = mt.single_val mt.to_include = True mt.single_val = (None) if (len(mt.units) == 0): units = UnitToken.tryParseList(mt.end_token.next0_, add_units, True) if (units is None): if (can_units_absent): pass else: return None else: mt.units = units res = MeasureToken._new1513(t0, mt.end_token, name_.morph, internals_) if (((not t0.is_whitespace_before and t0.previous is not None and t0 == name_.begin_token) and t0.previous.is_hiphen and not t0.previous.is_whitespace_before) and (isinstance(t0.previous.previous, TextToken))): name_.begin_token = res.begin_token = name_.begin_token.previous.previous res.name = MiscHelper.getTextValueOfMetaToken( name_, GetTextAttr.FIRSTNOUNGROUPTONOMINATIVE) res.nums = mt for u in res.nums.units: if (u.keyword is not None): if (u.keyword.begin_char >= res.begin_char): res.reliable = True res.__parseInternals(add_units) if (len(res.internals) > 0 or not can_be_set): return res t1 = res.end_token.next0_ if (t1 is not None and t1.is_comma_and): t1 = t1.next0_ mts1 = NumbersWithUnitToken.tryParseMulti(t1, add_units, False, False) if ((mts1 is not None and len(mts1) == 1 and (t1.whitespaces_before_count < 3)) and len(mts1[0].units) > 0 and not UnitToken.canBeEquals(mts[0].units, mts1[0].units)): res.is_set = True res.nums = (None) res.internals.append( MeasureToken._new1506(mt.begin_token, mt.end_token, mt)) res.internals.append( MeasureToken._new1506(mts1[0].begin_token, mts1[0].end_token, mts1[0])) res.end_token = mts1[0].end_token return res
def __calc_rank_and_value(self, min_newlines_count: int) -> bool: self.rank = 0 if (self.begin_token.chars.is_all_lower): self.rank -= 30 words = 0 up_words = 0 notwords = 0 line_number = 0 tstart = self.begin_token tend = self.end_token t = self.begin_token first_pass3396 = True while True: if first_pass3396: first_pass3396 = False else: t = t.next0_ if (not (t != self.end_token.next0_ and t is not None and t.end_char <= self.end_token.end_char)): break if (t.is_newline_before): pass tit = TitleItemToken.try_attach(t) if (tit is not None): if (tit.typ == TitleItemToken.Types.THEME or tit.typ == TitleItemToken.Types.TYPANDTHEME): if (t != self.begin_token): if (line_number > 0): return False notwords = 0 up_words = notwords words = up_words tstart = tit.end_token.next0_ t = tit.end_token if (t.next0_ is None): return False if (t.next0_.chars.is_letter and t.next0_.chars.is_all_lower): self.rank += 20 else: self.rank += 100 tstart = t.next0_ if (tit.typ == TitleItemToken.Types.TYPANDTHEME): self.type_value = tit.value continue if (tit.typ == TitleItemToken.Types.TYP): if (t == self.begin_token): if (tit.end_token.is_newline_after): self.type_value = tit.value self.rank += 5 tstart = tit.end_token.next0_ t = tit.end_token words += 1 if (tit.begin_token != tit.end_token): words += 1 if (tit.chars.is_all_upper): up_words += 1 continue if (tit.typ == TitleItemToken.Types.DUST or tit.typ == TitleItemToken.Types.SPECIALITY): if (t == self.begin_token): return False self.rank -= 20 if (tit.typ == TitleItemToken.Types.SPECIALITY): self.speciality = tit.value t = tit.end_token continue if (tit.typ == TitleItemToken.Types.CONSULTANT or tit.typ == TitleItemToken.Types.BOSS or tit.typ == TitleItemToken.Types.EDITOR): t = tit.end_token if (t.next0_ is not None and ((t.next0_.is_char_of(":") or t.next0_.is_hiphen or t.whitespaces_after_count > 4))): self.rank -= 10 else: self.rank -= 2 continue return False blt = BookLinkToken.try_parse(t, 0) if (blt is not None): if (blt.typ == BookLinkTyp.MISC or blt.typ == BookLinkTyp.N or blt.typ == BookLinkTyp.PAGES): self.rank -= 10 elif (blt.typ == BookLinkTyp.N or blt.typ == BookLinkTyp.PAGERANGE): self.rank -= 20 if (t == self.begin_token and BookLinkToken.try_parse_author( t, FioTemplateType.UNDEFINED) is not None): self.rank -= 20 if (t.is_newline_before and t != self.begin_token): line_number += 1 if (line_number > 4): return False if (t.chars.is_all_lower): self.rank += 10 elif (t.previous.is_char('.')): self.rank -= 10 elif (t.previous.is_char_of(",-")): self.rank += 10 else: npt = NounPhraseHelper.try_parse(t.previous, NounPhraseParseAttr.NO, 0, None) if (npt is not None and npt.end_char >= t.end_char): self.rank += 10 if (t != self.begin_token and t.newlines_before_count > min_newlines_count): self.rank -= (t.newlines_before_count - min_newlines_count) bst = BracketHelper.try_parse(t, BracketParseAttr.NO, 100) if (bst is not None and bst.is_quote_type and bst.end_token.end_char <= self.end_token.end_char): if (words == 0): tstart = bst.begin_token self.rank += 10 if (bst.end_token == self.end_token): tend = self.end_token self.rank += 10 rli = t.get_referents() if (rli is not None): for r in rli: if (isinstance(r, OrganizationReferent)): if (t.is_newline_before): self.rank -= 10 else: self.rank -= 4 continue if ((isinstance(r, GeoReferent)) or (isinstance(r, PersonReferent))): if (t.is_newline_before): self.rank -= 5 if (t.is_newline_after or t.next0_ is None): self.rank -= 20 elif (t.next0_.is_hiphen or (isinstance(t.next0_, NumberToken)) or 
(isinstance(t.next0_.get_referent(), DateReferent))): self.rank -= 20 elif (t != self.begin_token): self.rank -= 20 continue if ((isinstance(r, GeoReferent)) or (isinstance(r, DenominationReferent))): continue if ((isinstance(r, UriReferent)) or (isinstance(r, PhoneReferent))): return False if (t.is_newline_before): self.rank -= 4 else: self.rank -= 2 if (t == self.begin_token and (isinstance( self.end_token.get_referent(), PersonReferent))): self.rank -= 10 words += 1 if (t.chars.is_all_upper): up_words += 1 if (t == self.begin_token): if (t.is_newline_after): self.rank -= 10 elif (t.next0_ is not None and t.next0_.is_char('.') and t.next0_.is_newline_after): self.rank -= 10 continue if (isinstance(t, NumberToken)): if (t.typ == NumberSpellingType.WORDS): words += 1 if (t.chars.is_all_upper): up_words += 1 else: notwords += 1 continue pat = PersonAttrToken.try_attach( t, None, PersonAttrToken.PersonAttrAttachAttrs.NO) if (pat is not None): if (t.is_newline_before): if (not pat.morph.case_.is_undefined and not pat.morph.case_.is_nominative): pass elif (pat.chars.is_all_upper): pass else: self.rank -= 20 elif (t.chars.is_all_lower): self.rank -= 1 while t is not None: words += 1 if (t.chars.is_all_upper): up_words += 1 if (t == pat.end_token): break t = t.next0_ continue oitt = OrgItemTypeToken.try_attach(t, True, None) if (oitt is not None): if (oitt.morph.number != MorphNumber.PLURAL and not oitt.is_doubt_root_word): if (not oitt.morph.case_.is_undefined and not oitt.morph.case_.is_nominative): words += 1 if (t.chars.is_all_upper): up_words += 1 else: self.rank -= 4 if (t == self.begin_token): self.rank -= 5 else: words += 1 if (t.chars.is_all_upper): up_words += 1 t = oitt.end_token continue tt = Utils.asObjectOrNull(t, TextToken) if (tt is not None): if (tt.is_char('©')): self.rank -= 10 if (tt.is_char('_')): self.rank -= 1 if (tt.chars.is_letter): if (tt.length_char > 2): words += 1 if (t.chars.is_all_upper): up_words += 1 elif (not tt.is_char(',')): notwords += 1 if (tt.is_pure_verb): self.rank -= 30 words -= 1 break if (tt == self.end_token): if (tt.morph.class0_.is_preposition or tt.morph.class0_.is_conjunction): self.rank -= 10 elif (tt.is_char('.')): self.rank += 5 elif (tt.is_char_of("._")): self.rank -= 5 self.rank += words self.rank -= notwords if ((words < 1) and (self.rank < 50)): return False if (tstart is None or tend is None): return False if (tstart.end_char > tend.end_char): return False tit1 = TitleItemToken.try_attach(self.end_token.next0_) if (tit1 is not None and ((tit1.typ == TitleItemToken.Types.TYP or tit1.typ == TitleItemToken.Types.SPECIALITY))): if (tit1.end_token.is_newline_after): self.rank += 15 else: self.rank += 10 if (tit1.typ == TitleItemToken.Types.SPECIALITY): self.speciality = tit1.value if (up_words > 4 and up_words > (math.floor((0.8 * (words))))): if (tstart.previous is not None and (isinstance(tstart.previous.get_referent(), PersonReferent))): self.rank += (5 + up_words) self.begin_name_token = tstart self.end_name_token = tend return True
def try_attach_requisites(t : 'Token', cur : 'InstrumentParticipantReferent', other : 'InstrumentParticipantReferent', cant_be_empty : bool=False) -> 'ReferentToken': if (t is None or cur is None): return None if (t.is_table_control_char): return None err = 0 spec_chars = 0 rt = None t0 = t is_in_tab_cell = False cou = 0 tt = t.next0_ while tt is not None and (cou < 300): if (tt.is_table_control_char): is_in_tab_cell = True break tt = tt.next0_; cou += 1 first_pass3286 = True while True: if first_pass3286: first_pass3286 = False else: t = t.next0_ if (not (t is not None)): break if (t.begin_char == 8923): pass if (t.is_table_control_char): if (t != t0): if (rt is not None): rt.end_token = t.previous elif (not cant_be_empty): rt = ReferentToken(cur, t0, t.previous) break else: continue if ((t.is_char_of(":.") or t.is_value("М", None) or t.is_value("M", None)) or t.is_value("П", None)): if (rt is not None): rt.end_token = t continue pp = ParticipantToken.try_attach_to_exist(t, cur, other) if (pp is not None): if (pp.referent != cur): break if (rt is None): rt = ReferentToken(cur, t, t) rt.end_token = pp.end_token err = 0 continue if (t.is_newline_before): iii = InstrToken.parse(t, 0, None) if (iii is not None): if (iii.typ == ILTypes.APPENDIX): break if (t.whitespaces_before_count > 25 and not is_in_tab_cell): if (t != t0): if (t.previous is not None and t.previous.is_char_of(",;")): pass elif (t.newlines_before_count > 1): break if ((isinstance(t.get_referent(), PersonReferent)) or (isinstance(t.get_referent(), OrganizationReferent))): if (not cur._contains_ref(t.get_referent())): break if ((t.is_char_of(";:,.") or t.is_hiphen or t.morph.class0_.is_preposition) or t.morph.class0_.is_conjunction): continue if (t.is_char_of("_/\\")): spec_chars += 1 if (spec_chars > 10 and rt is None): rt = ReferentToken(cur, t0, t) if (rt is not None): rt.end_token = t continue if (t.is_newline_before and (isinstance(t, NumberToken))): break if (t.is_value("ОФИС", None)): if (BracketHelper.can_be_start_of_sequence(t.next0_, True, False)): br = BracketHelper.try_parse(t.next0_, BracketParseAttr.NO, 100) if (br is not None): t = br.end_token continue if ((isinstance(t.next0_, TextToken)) and not t.next0_.chars.is_all_lower): t = t.next0_ continue r = t.get_referent() if ((((isinstance(r, PersonReferent)) or (isinstance(r, AddressReferent)) or (isinstance(r, UriReferent))) or (isinstance(r, OrganizationReferent)) or (isinstance(r, PhoneReferent))) or (isinstance(r, PersonIdentityReferent)) or (isinstance(r, BankDataReferent))): if (other is not None and other.find_slot(None, r, True) is not None): if (not (isinstance(r, UriReferent))): break if (rt is None): rt = ReferentToken(cur, t, t) if (cur.find_slot(InstrumentParticipantReferent.ATTR_DELEGATE, r, True) is not None): pass else: cur.add_slot(InstrumentParticipantReferent.ATTR_REF, r, False, 0) rt.end_token = t err = 0 else: if ((isinstance(t, TextToken)) and t.length_char > 1): err += 1 if (is_in_tab_cell and rt is not None): if (err > 300): break elif (err > 4): break return rt
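# --- Illustrative sketch (not part of the Pullenti sources) ------------------
# try_attach_requisites above walks forward from a participant, attaching any
# person / organization / address / URI / phone / bank-data referent to the
# current participant, and stops when it meets a referent already owned by the
# other party or after several unrecognized long tokens (err > 4).  A
# simplified, self-contained version of that control flow (names hypothetical):
def _sketch_collect_requisites(items, other_party):
    # items: list of (kind, value); other_party: set of values owned elsewhere
    collected = []
    err = 0
    for kind, value in items:
        if kind in ("PERSON", "ORG", "ADDRESS", "URI", "PHONE", "BANK"):
            if value in other_party:
                break            # requisite of the other participant: stop
            collected.append(value)
            err = 0
        elif kind == "WORD":
            err += 1
            if err > 4:          # too much unrelated text: stop
                break
    return collected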
def _process(begin : 'Token', max_char_pos : int, kit : 'AnalysisKit', end_token : 'Token') -> 'TitlePageReferent': end_token.value = begin res = TitlePageReferent() term = None lines = Line.parse(begin, 30, 1500, max_char_pos) if (len(lines) < 1): return None cou = len(lines) min_newlines_count = 10 lines_count_stat = dict() i = 0 while i < len(lines): if (TitleNameToken.can_be_start_of_text_or_content(lines[i].begin_token, lines[i].end_token)): cou = i break j = lines[i].newlines_before_count if (i > 0 and j > 0): if (not j in lines_count_stat): lines_count_stat[j] = 1 else: lines_count_stat[j] += 1 i += 1 max0_ = 0 for kp in lines_count_stat.items(): if (kp[1] > max0_): max0_ = kp[1] min_newlines_count = kp[0] end_char = (lines[cou - 1].end_char if cou > 0 else 0) if (max_char_pos > 0 and end_char > max_char_pos): end_char = max_char_pos names = list() i = 0 while i < cou: if (i == 6): pass j = i while (j < cou) and (j < (i + 5)): if (i == 6 and j == 8): pass if (j > i): if (lines[j - 1].is_pure_en and lines[j].is_pure_ru): break if (lines[j - 1].is_pure_ru and lines[j].is_pure_en): break if (lines[j].newlines_before_count >= (min_newlines_count * 2)): break ttt = TitleNameToken.try_parse(lines[i].begin_token, lines[j].end_token, min_newlines_count) if (ttt is not None): if (lines[i].is_pure_en): ttt.morph.language = MorphLang.EN elif (lines[i].is_pure_ru): ttt.morph.language = MorphLang.RU names.append(ttt) j += 1 i += 1 TitleNameToken.sort(names) name_rt = None if (len(names) > 0): i0 = 0 if (names[i0].morph.language.is_en): ii = 1 while ii < len(names): if (names[ii].morph.language.is_ru and names[ii].rank > 0): i0 = ii break ii += 1 term = res._add_name(names[i0].begin_name_token, names[i0].end_name_token) if (names[i0].type_value is not None): res._add_type(names[i0].type_value) if (names[i0].speciality is not None): res.speciality = names[i0].speciality rt = ReferentToken(res, names[i0].begin_token, names[i0].end_token) if (kit is not None): kit.embed_token(rt) else: res.add_occurence(TextAnnotation(rt.begin_token, rt.end_token)) end_token.value = rt.end_token name_rt = rt if (begin.begin_char == rt.begin_char): begin = (rt) if (term is not None and kit is not None): t = kit.first_token first_pass3397 = True while True: if first_pass3397: first_pass3397 = False else: t = t.next0_ if (not (t is not None)): break tok = term.try_parse(t, TerminParseAttr.NO) if (tok is None): continue t0 = t t1 = tok.end_token if (t1.next0_ is not None and t1.next0_.is_char('.')): t1 = t1.next0_ if (BracketHelper.can_be_start_of_sequence(t0.previous, False, False) and BracketHelper.can_be_end_of_sequence(t1.next0_, False, None, False)): t0 = t0.previous t1 = t1.next0_ rt = ReferentToken(res, t0, t1) kit.embed_token(rt) t = (rt) pr = PersonRelations() pers_typ = TitleItemToken.Types.UNDEFINED pers_types = pr.rel_types t = begin first_pass3398 = True while True: if first_pass3398: first_pass3398 = False else: t = t.next0_ if (not (t is not None)): break if (max_char_pos > 0 and t.begin_char > max_char_pos): break if (t == name_rt): continue tpt = TitleItemToken.try_attach(t) if (tpt is not None): pers_typ = TitleItemToken.Types.UNDEFINED if (tpt.typ == TitleItemToken.Types.TYP): if (len(res.types) == 0): res._add_type(tpt.value) elif (len(res.types) == 1): ty = res.types[0].upper() if (ty == "РЕФЕРАТ"): res._add_type(tpt.value) elif (ty == "АВТОРЕФЕРАТ"): if (tpt.value == "КАНДИДАТСКАЯ ДИССЕРТАЦИЯ"): res.add_slot(TitlePageReferent.ATTR_TYPE, "автореферат кандидатской диссертации", True, 0) elif 
(tpt.value == "ДОКТОРСКАЯ ДИССЕРТАЦИЯ"): res.add_slot(TitlePageReferent.ATTR_TYPE, "автореферат докторской диссертации", True, 0) elif (tpt.value == "МАГИСТЕРСКАЯ ДИССЕРТАЦИЯ"): res.add_slot(TitlePageReferent.ATTR_TYPE, "автореферат магистерской диссертации", True, 0) elif (tpt.value == "КАНДИДАТСЬКА ДИСЕРТАЦІЯ"): res.add_slot(TitlePageReferent.ATTR_TYPE, "автореферат кандидатської дисертації", True, 0) elif (tpt.value == "ДОКТОРСЬКА ДИСЕРТАЦІЯ"): res.add_slot(TitlePageReferent.ATTR_TYPE, "автореферат докторської дисертації", True, 0) elif (tpt.value == "МАГІСТЕРСЬКА ДИСЕРТАЦІЯ"): res.add_slot(TitlePageReferent.ATTR_TYPE, "автореферат магістерської дисертації", True, 0) else: res._add_type(tpt.value) elif (tpt.value == "РЕФЕРАТ" or tpt.value == "АВТОРЕФЕРАТ"): if (not tpt.value in ty): res._add_type(tpt.value) elif (tpt.typ == TitleItemToken.Types.SPECIALITY): if (res.speciality is None): res.speciality = tpt.value elif (tpt.typ in pers_types): pers_typ = tpt.typ t = tpt.end_token if (t.end_char > end_token.value.end_char): end_token.value = t if (t.next0_ is not None and t.next0_.is_char_of(":-")): t = t.next0_ continue if (t.end_char > end_char): break rli = t.get_referents() if (rli is None): continue if (not t.is_newline_before and (isinstance(t.previous, TextToken))): s = t.previous.term if (s == "ИМЕНИ" or s == "ИМ"): continue if (s == "." and t.previous.previous is not None and t.previous.previous.is_value("ИМ", None)): continue for r in rli: if (isinstance(r, PersonReferent)): if (r != rli[0]): continue p = Utils.asObjectOrNull(r, PersonReferent) if (pers_typ != TitleItemToken.Types.UNDEFINED): if (t.previous is not None and t.previous.is_char('.')): pers_typ = TitleItemToken.Types.UNDEFINED typ = pr.calc_typ_from_attrs(p) if (typ != TitleItemToken.Types.UNDEFINED): pr.add(p, typ, 1) pers_typ = typ elif (pers_typ != TitleItemToken.Types.UNDEFINED): pr.add(p, pers_typ, 1) elif (t.previous is not None and t.previous.is_char('©')): pers_typ = TitleItemToken.Types.WORKER pr.add(p, pers_typ, 1) else: tt = t.next0_ first_pass3399 = True while True: if first_pass3399: first_pass3399 = False else: tt = tt.next0_ if (not (tt is not None)): break rr = tt.get_referent() if (rr == res): pers_typ = TitleItemToken.Types.WORKER break if (isinstance(rr, PersonReferent)): if (pr.calc_typ_from_attrs(Utils.asObjectOrNull(r, PersonReferent)) != TitleItemToken.Types.UNDEFINED): break else: continue if (rr is not None): break tpt = TitleItemToken.try_attach(tt) if (tpt is not None): if (tpt.typ != TitleItemToken.Types.TYP and tpt.typ != TitleItemToken.Types.TYPANDTHEME): break tt = tpt.end_token if (tt.end_char > end_token.value.end_char): end_token.value = tt continue if (pers_typ == TitleItemToken.Types.UNDEFINED): tt = t.previous while tt is not None: rr = tt.get_referent() if (rr == res): pers_typ = TitleItemToken.Types.WORKER break if (rr is not None): break if ((tt.is_value("СТУДЕНТ", None) or tt.is_value("СТУДЕНТКА", None) or tt.is_value("СЛУШАТЕЛЬ", None)) or tt.is_value("ДИПЛОМНИК", None) or tt.is_value("ИСПОЛНИТЕЛЬ", None)): pers_typ = TitleItemToken.Types.WORKER break tpt = TitleItemToken.try_attach(tt) if (tpt is not None and tpt.typ != TitleItemToken.Types.TYP): break tt = tt.previous if (pers_typ != TitleItemToken.Types.UNDEFINED): pr.add(p, pers_typ, 1) else: pr.add(p, pers_typ, 0.5) if (t.end_char > end_token.value.end_char): end_token.value = t continue if (r == rli[0]): pers_typ = TitleItemToken.Types.UNDEFINED if (isinstance(r, DateReferent)): if (res.date is None): res.date = 
Utils.asObjectOrNull(r, DateReferent) if (t.end_char > end_token.value.end_char): end_token.value = t elif (isinstance(r, GeoReferent)): if (res.city is None and r.is_city): res.city = Utils.asObjectOrNull(r, GeoReferent) if (t.end_char > end_token.value.end_char): end_token.value = t if (isinstance(r, OrganizationReferent)): org0_ = Utils.asObjectOrNull(r, OrganizationReferent) if ("курс" in org0_.types and org0_.number is not None): i = 0 wrapi2673 = RefOutArgWrapper(0) inoutres2674 = Utils.tryParseInt(org0_.number, wrapi2673) i = wrapi2673.value if (inoutres2674): if (i > 0 and (i < 8)): res.student_year = i while org0_.higher is not None: if (org0_.kind != OrganizationKind.DEPARTMENT): break org0_ = org0_.higher if (org0_.kind != OrganizationKind.DEPARTMENT): if (res.org0_ is None): res.org0_ = org0_ elif (OrganizationReferent.can_be_higher(res.org0_, org0_)): res.org0_ = org0_ if (t.end_char > end_token.value.end_char): end_token.value = t if ((isinstance(r, UriReferent)) or (isinstance(r, GeoReferent))): if (t.end_char > end_token.value.end_char): end_token.value = t for ty in pers_types: for p in pr.get_persons(ty): if (pr.get_attr_name_for_type(ty) is not None): res.add_slot(pr.get_attr_name_for_type(ty), p, False, 0) if (res.get_slot_value(TitlePageReferent.ATTR_AUTHOR) is None): for p in pr.get_persons(TitleItemToken.Types.UNDEFINED): res.add_slot(TitlePageReferent.ATTR_AUTHOR, p, False, 0) break if (res.city is None and res.org0_ is not None): s = res.org0_.find_slot(OrganizationReferent.ATTR_GEO, None, True) if (s is not None and (isinstance(s.value, GeoReferent))): if (s.value.is_city): res.city = Utils.asObjectOrNull(s.value, GeoReferent) if (res.date is None): t = begin first_pass3400 = True while True: if first_pass3400: first_pass3400 = False else: t = t.next0_ if (not (t is not None and t.end_char <= end_char)): break city = Utils.asObjectOrNull(t.get_referent(), GeoReferent) if (city is None): continue if (isinstance(t.next0_, TextToken)): if (t.next0_.is_char_of(":,") or t.next0_.is_hiphen): t = t.next0_ rt = t.kit.process_referent(DateAnalyzer.ANALYZER_NAME, t.next0_) if (rt is not None): rt.save_to_local_ontology() res.date = Utils.asObjectOrNull(rt.referent, DateReferent) if (kit is not None): kit.embed_token(rt) break if (len(res.slots) == 0): return None else: return res
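# --- Illustrative sketch (not part of the Pullenti sources) ------------------
# _process above first builds a histogram of the newline gaps between the
# parsed lines and takes the most frequent gap as min_newlines_count, which is
# later used to penalize title candidates that span unusually large gaps.
# The same statistic computed on plain integers:
def _sketch_typical_gap(newline_gaps):
    stat = {}
    for g in newline_gaps:
        if g > 0:
            stat[g] = stat.get(g, 0) + 1
    best, best_count = 10, 0     # 10 is the default used above
    for gap, count in stat.items():
        if count > best_count:
            best, best_count = gap, count
    return best

# _sketch_typical_gap([1, 1, 2, 1, 3]) -> 1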
def create_nickname(pr : 'PersonReferent', t : 'Token') -> 'Token': has_keyw = False is_br = False first_pass3367 = True while True: if first_pass3367: first_pass3367 = False else: t = t.next0_ if (not (t is not None)): break if (t.is_hiphen or t.is_comma or t.is_char_of(".:;")): continue if (t.morph.class0_.is_preposition): continue if (t.is_char('(')): is_br = True continue if ((t.is_value("ПРОЗВИЩЕ", "ПРІЗВИСЬКО") or t.is_value("КЛИЧКА", None) or t.is_value("ПСЕВДОНИМ", "ПСЕВДОНІМ")) or t.is_value("ПСЕВДО", None) or t.is_value("ПОЗЫВНОЙ", "ПОЗИВНИЙ")): has_keyw = True continue break if (not has_keyw or t is None): return None if (BracketHelper.is_bracket(t, True)): br = BracketHelper.try_parse(t, BracketParseAttr.NO, 100) if (br is not None): ni = MiscHelper.get_text_value(br.begin_token.next0_, br.end_token.previous, GetTextAttr.NO) if (ni is not None): pr.add_slot(PersonReferent.ATTR_NICKNAME, ni, False, 0) t = br.end_token tt = t.next0_ first_pass3368 = True while True: if first_pass3368: first_pass3368 = False else: tt = tt.next0_ if (not (tt is not None)): break if (tt.is_comma_and): continue if (not BracketHelper.is_bracket(tt, True)): break br = BracketHelper.try_parse(tt, BracketParseAttr.NO, 100) if (br is None): break ni = MiscHelper.get_text_value(br.begin_token.next0_, br.end_token.previous, GetTextAttr.NO) if (ni is not None): pr.add_slot(PersonReferent.ATTR_NICKNAME, ni, False, 0) tt = br.end_token t = tt if (is_br and t.next0_ is not None and t.next0_.is_char(')')): t = t.next0_ return t else: ret = None first_pass3369 = True while True: if first_pass3369: first_pass3369 = False else: t = t.next0_ if (not (t is not None)): break if (t.is_comma_and): continue if (ret is not None and t.chars.is_all_lower): break if (t.whitespaces_before_count > 2): break pli = PersonItemToken.try_attach_list(t, None, PersonItemToken.ParseAttr.NO, 10) if (pli is not None and ((len(pli) == 1 or len(pli) == 2))): ni = MiscHelper.get_text_value(pli[0].begin_token, pli[len(pli) - 1].end_token, GetTextAttr.NO) if (ni is not None): pr.add_slot(PersonReferent.ATTR_NICKNAME, ni, False, 0) t = pli[len(pli) - 1].end_token if (is_br and t.next0_ is not None and t.next0_.is_char(')')): t = t.next0_ ret = t continue if ((isinstance(t, ReferentToken)) and not t.chars.is_all_lower and t.begin_token == t.end_token): val = MiscHelper.get_text_value_of_meta_token(Utils.asObjectOrNull(t, ReferentToken), GetTextAttr.NO) pr.add_slot(PersonReferent.ATTR_NICKNAME, val, False, 0) if (is_br and t.next0_ is not None and t.next0_.is_char(')')): t = t.next0_ ret = t continue break return ret return None
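# --- Illustrative sketch (not part of the Pullenti sources) ------------------
# create_nickname above skips punctuation, requires one of the keywords
# "прозвище" / "кличка" / "псевдоним" / "псевдо" / "позывной", and then takes
# either the quoted text or the following capitalized items as the nickname
# slot.  A rough string-level equivalent using only the standard library:
import re

def _sketch_extract_nickname(text):
    m = re.search(r'(?:прозвище|кличка|псевдоним|псевдо|позывной)\s+[«"]([^»"]+)[»"]',
                  text, re.IGNORECASE)
    return m.group(1) if m else None

# _sketch_extract_nickname('Иванов, позывной «Сокол», ...') -> 'Сокол'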
def __tryParse(t: 'Token', is_in_lit: bool, max_char: int = 0) -> typing.List['ReferentToken']: if (t is None): return None is_bracket_regime = False if (t.previous is not None and t.previous.isChar('(')): is_bracket_regime = True blt = BookLinkToken.tryParse(t, 0) if (blt is None): blt = BookLinkToken.tryParseAuthor(t, FioTemplateType.UNDEFINED) if (blt is None and not is_bracket_regime): return None t0 = t coef = 0 is_electr_res = False decree = None regtyp = BookLinkAnalyzer.RegionTyp.UNDEFINED num = None spec_see = None book_prev = None if (is_bracket_regime): regtyp = BookLinkAnalyzer.RegionTyp.AUTHORS elif (blt.typ == BookLinkTyp.PERSON): if (not is_in_lit): return None regtyp = BookLinkAnalyzer.RegionTyp.AUTHORS elif (blt.typ == BookLinkTyp.NUMBER): num = blt.value t = blt.end_token.next0_ if (t is None or t.is_newline_before): return None if (not t.is_whitespace_before): if (isinstance(t, NumberToken)): n = (t).value if ((((n == "3" or n == "0")) and not t.is_whitespace_after and (isinstance(t.next0_, TextToken))) and t.next0_.chars.is_all_lower): pass else: return None elif (not ((isinstance(t, TextToken))) or t.chars.is_all_lower): r = t.getReferent() if (isinstance(r, PersonReferent)): pass elif (is_in_lit and r is not None and r.type_name == "DECREE"): pass else: return None first_pass2757 = True while True: if first_pass2757: first_pass2757 = False else: t = t.next0_ if (not (t is not None)): break if (isinstance(t, NumberToken)): break if (not ((isinstance(t, TextToken)))): break if (BracketHelper.canBeStartOfSequence(t, True, False)): break if (not t.chars.is_letter): continue bbb = BookLinkToken.tryParse(t, 0) if (bbb is not None): if (bbb.typ == BookLinkTyp.TAMZE): spec_see = bbb t = bbb.end_token.next0_ break if (bbb.typ == BookLinkTyp.SEE): t = bbb.end_token continue break if (spec_see is not None and spec_see.typ == BookLinkTyp.TAMZE): coef += 1 max0_ = 1000 tt = t0 while tt is not None and max0_ > 0: if (isinstance(tt.getReferent(), BookLinkRefReferent)): book_prev = (tt.getReferent()).book break tt = tt.previous max0_ -= 1 blt1 = BookLinkToken.tryParseAuthor(t, FioTemplateType.UNDEFINED) if (blt1 is not None and blt1.typ == BookLinkTyp.PERSON): regtyp = BookLinkAnalyzer.RegionTyp.AUTHORS else: ok = False tt = t first_pass2758 = True while True: if first_pass2758: first_pass2758 = False else: tt = (None if tt is None else tt.next0_) if (not (tt is not None)): break if (tt.is_newline_before): break if (is_in_lit and tt.getReferent() is not None and tt.getReferent().type_name == "DECREE"): ok = True decree = tt break bbb = BookLinkToken.tryParse(tt, 0) if (bbb is None): continue if (bbb.typ == BookLinkTyp.ELECTRONRES): is_electr_res = True ok = True break if (bbb.typ == BookLinkTyp.DELIMETER): tt = bbb.end_token.next0_ if (BookLinkToken.tryParseAuthor( tt, FioTemplateType.UNDEFINED) is not None): ok = True break bbb = BookLinkToken.tryParse(tt, 0) if (bbb is not None): if (bbb.typ == BookLinkTyp.EDITORS or bbb.typ == BookLinkTyp.TRANSLATE or bbb.typ == BookLinkTyp.SOSTAVITEL): ok = True break if (not ok and not is_in_lit): if (BookLinkToken.checkLinkBefore(t0, num)): pass else: return None regtyp = BookLinkAnalyzer.RegionTyp.NAME else: return None res = BookLinkReferent() corr_authors = list() t00 = t blt00 = None start_of_name = None prev_pers_templ = FioTemplateType.UNDEFINED if (regtyp == BookLinkAnalyzer.RegionTyp.AUTHORS): first_pass2759 = True while True: if first_pass2759: first_pass2759 = False else: t = t.next0_ if (not (t is not None)): break if (max_char > 0 
and t.begin_char >= max_char): break if (t.isCharOf(".;") or t.is_comma_and): continue if (t.isChar('/')): break if ((t.isChar('(') and t.next0_ is not None and t.next0_.isValue("EDS", None)) and t.next0_.next0_ is not None and t.next0_.next0_.isChar(')')): t = t.next0_.next0_.next0_ break blt = BookLinkToken.tryParseAuthor(t, prev_pers_templ) if (blt is None and t.previous is not None and t.previous.is_and): blt = BookLinkToken.tryParseAuthor( t.previous, FioTemplateType.UNDEFINED) if (blt is None): if ((isinstance(t.getReferent(), OrganizationReferent)) and blt00 is not None): bbb2 = BookLinkToken.tryParse(t.next0_, 0) if (bbb2 is not None): if (bbb2.typ == BookLinkTyp.YEAR): res.addSlot(BookLinkReferent.ATTR_AUTHOR, t.getReferent(), False, 0) res.year = int(bbb2.value) coef += .5 t = bbb2.end_token.next0_ break if (blt.typ == BookLinkTyp.PERSON): tt2 = blt.end_token.next0_ bbb2 = BookLinkToken.tryParse(tt2, 0) if (bbb2 is not None): if (bbb2.typ == BookLinkTyp.YEAR): res.year = int(bbb2.value) coef += .5 blt.end_token = bbb2.end_token blt00 = (None) if (blt00 is not None and ((blt00.end_token.next0_ == blt.begin_token or blt.begin_token.previous.isChar('.')))): tt11 = blt.end_token.next0_ nex = BookLinkToken.tryParse(tt11, 0) if (nex is not None and nex.typ == BookLinkTyp.ANDOTHERS): pass else: if (tt11 is None): break if (tt11.isChar('/') and tt11.next0_ is not None and tt11.next0_.isChar('/')): break if (tt11.isChar(':')): break if ((str(blt).find('.') < 0) and str(blt00).find('.') > 0): break if ((isinstance(tt11, TextToken)) and tt11.chars.is_all_lower): break if (tt11.isCharOf(",.;") and tt11.next0_ is not None): tt11 = tt11.next0_ nex = BookLinkToken.tryParse(tt11, 0) if (nex is not None and nex.typ != BookLinkTyp.PERSON and nex.typ != BookLinkTyp.ANDOTHERS): break elif ( (blt00 is not None and blt00.person_template != FioTemplateType.UNDEFINED and blt.person_template != blt00.person_template) and blt.person_template == FioTemplateType.NAMESURNAME): if (blt.end_token.next0_ is None or not blt.end_token.next0_.is_comma_and): break if (BookLinkToken.tryParseAuthor( blt.end_token.next0_.next0_, FioTemplateType.UNDEFINED) is not None): pass else: break if (blt00 is None and blt.person_template == FioTemplateType.NAMESURNAME): tt = blt.end_token.next0_ if (tt is not None and tt.is_hiphen): tt = tt.next0_ if (isinstance(tt, NumberToken)): break BookLinkAnalyzer.__addAuthor(res, blt) coef += 1 t = blt.end_token if (isinstance(t.getReferent(), PersonReferent)): corr_authors.append( Utils.asObjectOrNull(t, ReferentToken)) blt00 = blt prev_pers_templ = blt.person_template start_of_name = blt.start_of_name if ((start_of_name) is not None): t = t.next0_ break continue if (blt.typ == BookLinkTyp.ANDOTHERS): coef += .5 t = blt.end_token.next0_ res.authors_and_other = True break break if (t is None): return None if ((t.is_newline_before and t != t0 and num is None) and res.findSlot( BookLinkReferent.ATTR_AUTHOR, None, True) is None): return None if (start_of_name is None): if (t.chars.is_all_lower): coef -= (1) if (t.chars.is_latin_letter and not is_electr_res and num is None): if (res.getSlotValue(BookLinkReferent.ATTR_AUTHOR) is None): return None tn0 = t tn1 = None uri = None next_num = None wrapnn393 = RefOutArgWrapper(0) inoutres394 = Utils.tryParseInt(Utils.ifNotNull(num, ""), wrapnn393) nn = wrapnn393.value if (inoutres394): next_num = str((nn + 1)) br = (BracketHelper.tryParse( t, Utils.valToEnum( (BracketParseAttr.CANCONTAINSVERBS) | (BracketParseAttr.CANBEMANYLINES), BracketParseAttr), 
100) if BracketHelper.canBeStartOfSequence(t, True, False) else None) if (br is not None): t = t.next0_ pages = None first_pass2760 = True while True: if first_pass2760: first_pass2760 = False else: t = t.next0_ if (not (t is not None)): break if (max_char > 0 and t.begin_char >= max_char): break if (br is not None and br.end_token == t): tn1 = t break tit = TitleItemToken.tryAttach(t) if (tit is not None): if ((tit.typ == TitleItemToken.Types.TYP and tn0 == t and br is None) and BracketHelper.canBeStartOfSequence( tit.end_token.next0_, True, False)): br = BracketHelper.tryParse(tit.end_token.next0_, BracketParseAttr.NO, 100) if (br is not None): coef += (1) if (num is not None): coef += 1 tn0 = br.begin_token tn1 = br.end_token res.typ = tit.value.lower() t = br.end_token.next0_ break if (t.is_newline_before and t != tn0): if (br is not None and (t.end_char < br.end_char)): pass elif (not MiscHelper.canBeStartOfSentence(t)): pass else: if (t.newlines_before_count > 1): break if ((isinstance(t, NumberToken)) and num is not None and (t).int_value is not None): if (num == str(((t).int_value - 1))): break elif (num is not None): pass else: nnn = NounPhraseHelper.tryParse( t.previous, Utils.valToEnum( ((NounPhraseParseAttr.PARSEPREPOSITION) | (NounPhraseParseAttr.PARSEADVERBS) | (NounPhraseParseAttr.PARSENUMERICASADJECTIVE)) | (NounPhraseParseAttr.MULTILINES), NounPhraseParseAttr), 0) if (nnn is not None and nnn.end_char >= t.end_char): pass else: break if (t.isCharOf(".;") and t.whitespaces_after_count > 0): tit = TitleItemToken.tryAttach(t.next0_) if ((tit) is not None): if (tit.typ == TitleItemToken.Types.TYP): break stop = True words = 0 notwords = 0 tt = t.next0_ first_pass2761 = True while True: if first_pass2761: first_pass2761 = False else: tt = tt.next0_ if (not (tt is not None)): break blt0 = BookLinkToken.tryParse(tt, 0) if (blt0 is None): if (tt.is_newline_before): break if ((isinstance(tt, TextToken)) and not tt.getMorphClassInDictionary().is_undefined ): words += 1 else: notwords += 1 if (words > 6 and words > (notwords * 4)): stop = False break continue if ((blt0.typ == BookLinkTyp.DELIMETER or blt0.typ == BookLinkTyp.TRANSLATE or blt0.typ == BookLinkTyp.TYPE) or blt0.typ == BookLinkTyp.GEO or blt0.typ == BookLinkTyp.PRESS): stop = False break if (br is not None and br.end_token.previous.end_char > t.end_char): stop = False if (stop): break if (t == decree): t = t.next0_ break blt = BookLinkToken.tryParse(t, 0) if (blt is None): tn1 = t continue if (blt.typ == BookLinkTyp.DELIMETER): break if (((blt.typ == BookLinkTyp.MISC or blt.typ == BookLinkTyp.TRANSLATE or blt.typ == BookLinkTyp.NAMETAIL) or blt.typ == BookLinkTyp.TYPE or blt.typ == BookLinkTyp.VOLUME) or blt.typ == BookLinkTyp.PAGERANGE or blt.typ == BookLinkTyp.PAGES): coef += 1 break if (blt.typ == BookLinkTyp.GEO or blt.typ == BookLinkTyp.PRESS): if (t.previous.is_hiphen or t.previous.isCharOf(".;") or blt.add_coef > 0): break if (blt.typ == BookLinkTyp.YEAR): if (t.previous is not None and t.previous.is_comma): break if (blt.typ == BookLinkTyp.ELECTRONRES): is_electr_res = True break if (blt.typ == BookLinkTyp.URL): if (t == tn0 or t.previous.isCharOf(":.")): is_electr_res = True break tn1 = t if (tn1 is None and start_of_name is None): if (is_electr_res): uri_re = BookLinkReferent() rt0 = ReferentToken(uri_re, t00, t) rts0 = list() bref0 = BookLinkRefReferent._new389(uri_re) if (num is not None): bref0.number = num rt01 = ReferentToken(bref0, t0, rt0.end_token) ok = False while t is not None: if (t.is_newline_before): 
break blt0 = BookLinkToken.tryParse(t, 0) if (blt0 is not None): if (isinstance(blt0.ref, UriReferent)): uri_re.addSlot( BookLinkReferent.ATTR_URL, Utils.asObjectOrNull(blt0.ref, UriReferent), False, 0) ok = True t = blt0.end_token rt0.end_token = rt01.end_token = t t = t.next0_ if (ok): rts0.append(rt01) rts0.append(rt0) return rts0 if (decree is not None and num is not None): rts0 = list() bref0 = BookLinkRefReferent._new389(decree.getReferent()) if (num is not None): bref0.number = num rt01 = ReferentToken(bref0, t0, decree) t = decree.next0_ while t is not None: if (t.is_newline_before): break if (isinstance(t, TextToken)): if ((t).is_pure_verb): return None rt01.end_token = t t = t.next0_ rts0.append(rt01) return rts0 if (book_prev is not None): tt = t while tt is not None and ((tt.isCharOf(",.") or tt.is_hiphen)): tt = tt.next0_ blt0 = BookLinkToken.tryParse(tt, 0) if (blt0 is not None and blt0.typ == BookLinkTyp.PAGERANGE): rts0 = list() bref0 = BookLinkRefReferent._new389(book_prev) if (num is not None): bref0.number = num bref0.pages = blt0.value rt00 = ReferentToken(bref0, t0, blt0.end_token) rts0.append(rt00) return rts0 return None if (br is not None and ((tn1 == br.end_token or tn1 == br.end_token.previous))): tn0 = tn0.next0_ tn1 = tn1.previous if (start_of_name is None): while tn0 is not None: if (tn0.isCharOf(":,~")): tn0 = tn0.next0_ else: break while tn1 is not None and tn1.begin_char > tn0.begin_char: if (tn1.isCharOf(".;,:(~") or tn1.is_hiphen or tn1.isValue("РЕД", None)): pass else: break tn1 = tn1.previous nam = MiscHelper.getTextValue( tn0, tn1, Utils.valToEnum( (GetTextAttr.KEEPQUOTES) | (GetTextAttr.KEEPREGISTER), GetTextAttr)) if (start_of_name is not None): if (nam is None or (len(nam) < 3)): nam = start_of_name else: nam = "{0}{1}{2}".format( start_of_name, (" " if tn0.is_whitespace_before else ""), nam) if (nam is None): return None res.name = nam if (num is None and not is_in_lit): if (len(nam) < 20): return None coef -= (2) if (len(nam) > 500): coef -= (math.floor(len(nam) / 500)) if (is_bracket_regime): coef -= 1 if (len(nam) > 200): if (num is None): return None if (res.findSlot(BookLinkReferent.ATTR_AUTHOR, None, True) is None and not BookLinkToken.checkLinkBefore(t0, num)): return None en = 0 ru = 0 ua = 0 cha = 0 nocha = 0 chalen = 0 lt0 = tn0 lt1 = tn1 if (tn1 is None): if (t is None): return None lt0 = t0 lt1 = t tn1 = t.previous tt = lt0 while tt is not None and tt.end_char <= lt1.end_char: if ((isinstance(tt, TextToken)) and tt.chars.is_letter): if (tt.chars.is_latin_letter): en += 1 elif (tt.morph.language.is_ua): ua += 1 elif (tt.morph.language.is_ru): ru += 1 if (tt.length_char > 2): cha += 1 chalen += tt.length_char elif (not ((isinstance(tt, ReferentToken)))): nocha += 1 tt = tt.next0_ if (ru > (ua + en)): res.lang = "RU" elif (ua > (ru + en)): res.lang = "UA" elif (en > (ru + ua)): res.lang = "EN" if (nocha > 3 and nocha > cha and start_of_name is None): if (nocha > (math.floor(chalen / 3))): coef -= (2) if (res.lang == "EN"): tt = tn0.next0_ first_pass2762 = True while True: if first_pass2762: first_pass2762 = False else: tt = tt.next0_ if (not (tt is not None and (tt.end_char < tn1.end_char))): break if (tt.is_comma and tt.next0_ is not None and ((not tt.next0_.chars.is_all_lower or (isinstance(tt.next0_, ReferentToken))))): if (tt.next0_.next0_ is not None and tt.next0_.next0_.is_comma_and): if (isinstance(tt.next0_, ReferentToken)): pass else: continue nam = MiscHelper.getTextValue( tn0, tt.previous, Utils.valToEnum((GetTextAttr.KEEPQUOTES) 
| (GetTextAttr.KEEPREGISTER), GetTextAttr)) if (nam is not None and len(nam) > 15): res.name = nam break rt = ReferentToken(res, t00, tn1) authors = True edits = False br = (None) first_pass2763 = True while True: if first_pass2763: first_pass2763 = False else: t = t.next0_ if (not (t is not None)): break if (max_char > 0 and t.begin_char >= max_char): break if (BracketHelper.canBeStartOfSequence(t, False, False)): br = BracketHelper.tryParse(t, BracketParseAttr.CANBEMANYLINES, 100) if (br is not None and br.length_char > 300): br = (None) blt = BookLinkToken.tryParse(t, 0) if (t.is_newline_before and not t.isChar('/') and not t.previous.isChar('/')): if (blt is not None and blt.typ == BookLinkTyp.NUMBER): break if (t.previous.isCharOf(":")): pass elif (blt is not None and (( ((blt.typ == BookLinkTyp.DELIMETER or blt.typ == BookLinkTyp.PAGERANGE or blt.typ == BookLinkTyp.PAGES) or blt.typ == BookLinkTyp.GEO or blt.typ == BookLinkTyp.PRESS) or blt.typ == BookLinkTyp.N))): pass elif (num is not None and BookLinkToken.tryParseAuthor( t, FioTemplateType.UNDEFINED) is not None): pass elif (num is not None and blt is not None and blt.typ != BookLinkTyp.NUMBER): pass elif (br is not None and (t.end_char < br.end_char) and t.begin_char > br.begin_char): pass else: ok = False mmm = 50 tt = t.next0_ while tt is not None and mmm > 0: if (tt.is_newline_before): blt2 = BookLinkToken.tryParse(tt, 0) if (blt2 is not None and blt2.typ == BookLinkTyp.NUMBER and blt2.value == next_num): ok = True break if (blt2 is not None): if (blt2.typ == BookLinkTyp.PAGES or blt2.typ == BookLinkTyp.GEO or blt2.typ == BookLinkTyp.PRESS): ok = True break tt = tt.next0_ mmm -= 1 if (not ok): npt = NounPhraseHelper.tryParse( t.previous, Utils.valToEnum( ((NounPhraseParseAttr.MULTILINES) | (NounPhraseParseAttr.PARSEADVERBS) | (NounPhraseParseAttr.PARSEPREPOSITION)) | (NounPhraseParseAttr.PARSEVERBS) | (NounPhraseParseAttr.PARSEPRONOUNS), NounPhraseParseAttr), 0) if (npt is not None and npt.end_char >= t.end_char): ok = True if (not ok): break rt.end_token = t if (blt is not None): rt.end_token = blt.end_token if (t.isCharOf(".,") or t.is_hiphen): continue if (t.isValue("С", None)): pass if (regtyp == BookLinkAnalyzer.RegionTyp.FIRST and blt is not None and blt.typ == BookLinkTyp.EDITORS): edits = True t = blt.end_token coef += 1 continue if (regtyp == BookLinkAnalyzer.RegionTyp.FIRST and blt is not None and blt.typ == BookLinkTyp.SOSTAVITEL): edits = False t = blt.end_token coef += 1 continue if (regtyp == BookLinkAnalyzer.RegionTyp.FIRST and authors): blt2 = BookLinkToken.tryParseAuthor(t, prev_pers_templ) if (blt2 is not None and blt2.typ == BookLinkTyp.PERSON): prev_pers_templ = blt2.person_template if (not edits): BookLinkAnalyzer.__addAuthor(res, blt2) coef += 1 t = blt2.end_token continue if (blt2 is not None and blt2.typ == BookLinkTyp.ANDOTHERS): if (not edits): res.authors_and_other = True coef += 1 t = blt2.end_token continue authors = False if (blt is None): continue if (blt.typ == BookLinkTyp.ELECTRONRES or blt.typ == BookLinkTyp.URL): is_electr_res = True if (blt.typ == BookLinkTyp.ELECTRONRES): coef += 1.5 else: coef += .5 if (isinstance(blt.ref, UriReferent)): res.addSlot(BookLinkReferent.ATTR_URL, Utils.asObjectOrNull(blt.ref, UriReferent), False, 0) elif (blt.typ == BookLinkTyp.YEAR): if (res.year == 0): res.year = int(blt.value) coef += .5 elif (blt.typ == BookLinkTyp.DELIMETER): coef += 1 if (blt.length_char == 2): regtyp = BookLinkAnalyzer.RegionTyp.SECOND else: regtyp = BookLinkAnalyzer.RegionTyp.FIRST 
elif ( (((blt.typ == BookLinkTyp.MISC or blt.typ == BookLinkTyp.TYPE or blt.typ == BookLinkTyp.PAGES) or blt.typ == BookLinkTyp.NAMETAIL or blt.typ == BookLinkTyp.TRANSLATE) or blt.typ == BookLinkTyp.PRESS or blt.typ == BookLinkTyp.VOLUME) or blt.typ == BookLinkTyp.N): coef += 1 elif (blt.typ == BookLinkTyp.PAGERANGE): pages = blt coef += 1 if (is_bracket_regime and blt.end_token.next0_ is not None and blt.end_token.next0_.isChar(')')): coef += (2) if (res.name is not None and res.findSlot(BookLinkReferent.ATTR_AUTHOR, None, True) is not None): coef = (10) elif (blt.typ == BookLinkTyp.GEO and ((regtyp == BookLinkAnalyzer.RegionTyp.SECOND or regtyp == BookLinkAnalyzer.RegionTyp.FIRST))): coef += 1 elif (blt.typ == BookLinkTyp.GEO and t.previous is not None and t.previous.isChar('.')): coef += 1 elif (blt.typ == BookLinkTyp.ANDOTHERS): coef += 1 if (authors): res.authors_and_other = True coef += blt.add_coef t = blt.end_token if ((coef < 2.5) and num is not None): if (BookLinkToken.checkLinkBefore(t0, num)): coef += (2) elif (BookLinkToken.checkLinkAfter(rt.end_token, num)): coef += (1) if (rt.length_char > 500): return None if (is_in_lit): coef += 1 if (coef < 2.5): if (is_electr_res and uri is not None): pass elif (coef >= 2 and is_in_lit): pass else: return None for rr in corr_authors: pits0 = PersonItemToken.tryAttachList( rr.begin_token, None, PersonItemToken.ParseAttr.CANINITIALBEDIGIT, 10) if (pits0 is None or (len(pits0) < 2)): continue if (pits0[0].typ == PersonItemToken.ItemType.VALUE): exi = False for i in range(len(rr.referent.slots) - 1, -1, -1): s = rr.referent.slots[i] if (s.type_name == PersonReferent.ATTR_LASTNAME): ln = Utils.asObjectOrNull(s.value, str) if (ln is None): continue if (ln == pits0[0].value): exi = True continue if (ln.find('-') > 0): ln = ln[0:0 + ln.find('-')] if (pits0[0].begin_token.isValue(ln, None)): del rr.referent.slots[i] if (not exi): rr.referent.addSlot(PersonReferent.ATTR_LASTNAME, pits0[0].value, False, 0) rts = list() bref = BookLinkRefReferent._new389(res) if (num is not None): bref.number = num rt1 = ReferentToken(bref, t0, rt.end_token) if (pages is not None): if (pages.value is not None): bref.pages = pages.value rt.end_token = pages.begin_token.previous rts.append(rt1) rts.append(rt) return rts
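# --- Illustrative sketch (not part of the Pullenti sources) ------------------
# __tryParse above is a scoring parser: every recognized bibliographic
# fragment (author, year, page range, publisher, URL, "и др." ...) raises the
# confidence value coef, cross-references to the item number raise it further,
# and the candidate is kept only when the final score is high enough.  The
# acceptance rule in isolation (a simplification of the checks above):
def _sketch_accept_book_link(coef, in_literature_block, is_electronic, has_uri):
    if in_literature_block:
        coef += 1
    if coef >= 2.5:
        return True
    if is_electronic and has_uri:
        return True
    return coef >= 2 and in_literature_block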
def process(self, kit: 'AnalysisKit') -> None: ad = kit.getAnalyzerData(self) is_lit_block = 0 refs_by_num = dict() rts = [] t = kit.first_token first_pass2754 = True while True: if first_pass2754: first_pass2754 = False else: t = t.next0_ if (not (t is not None)): break if (t.isChar('(')): br = BracketHelper.tryParse(t, BracketParseAttr.NO, 100) if (br is not None and br.length_char > 70 and (br.length_char < 400)): if (br.is_newline_after or ((br.end_token.next0_ is not None and br.end_token.next0_.isCharOf(".;")))): rts = BookLinkAnalyzer.__tryParse( t.next0_, False, br.end_char) if (rts is not None and len(rts) >= 1): if (len(rts) > 1): rts[1].referent = ad.registerReferent( rts[1].referent) kit.embedToken(rts[1]) (rts[0].referent).book = Utils.asObjectOrNull( rts[1].referent, BookLinkReferent) if (rts[0].begin_char == rts[1].begin_char): rts[0].begin_token = rts[1] if (rts[0].end_char == rts[1].end_char): rts[0].end_token = rts[1] rts[0].begin_token = t rts[0].end_token = br.end_token (rts[0].referent).typ = BookLinkRefType.INLINE rts[0].referent = ad.registerReferent( rts[0].referent) kit.embedToken(rts[0]) t = (rts[0]) continue if (not t.is_newline_before): continue if (is_lit_block <= 0): tt = BookLinkToken.parseStartOfLitBlock(t) if (tt is not None): is_lit_block = 5 t = tt continue rts = BookLinkAnalyzer.__tryParse(t, is_lit_block > 0, 0) if (rts is None or (len(rts) < 1)): is_lit_block -= 1 if ((is_lit_block) < 0): is_lit_block = 0 continue is_lit_block += 1 if ((is_lit_block) > 5): is_lit_block = 5 if (len(rts) > 1): rts[1].referent = ad.registerReferent(rts[1].referent) kit.embedToken(rts[1]) (rts[0].referent).book = Utils.asObjectOrNull( rts[1].referent, BookLinkReferent) if (rts[0].begin_char == rts[1].begin_char): rts[0].begin_token = rts[1] if (rts[0].end_char == rts[1].end_char): rts[0].end_token = rts[1] re = Utils.asObjectOrNull(rts[0].referent, BookLinkRefReferent) re = (Utils.asObjectOrNull(ad.registerReferent(re), BookLinkRefReferent)) rts[0].referent = (re) kit.embedToken(rts[0]) t = (rts[0]) if (re.number is not None): li = [] wrapli385 = RefOutArgWrapper(None) inoutres386 = Utils.tryGetValue(refs_by_num, re.number, wrapli385) li = wrapli385.value if (not inoutres386): li = list() refs_by_num[re.number] = li li.append(re) t = kit.first_token first_pass2755 = True while True: if first_pass2755: first_pass2755 = False else: t = t.next0_ if (not (t is not None)): break if (not ((isinstance(t, TextToken)))): continue rt = BookLinkAnalyzer.__tryParseShortInline(t) if (rt is None): continue re = Utils.asObjectOrNull(rt.referent, BookLinkRefReferent) li = [] wrapli387 = RefOutArgWrapper(None) inoutres388 = Utils.tryGetValue(refs_by_num, Utils.ifNotNull(re.number, ""), wrapli387) li = wrapli387.value if (not inoutres388): continue i = 0 while i < len(li): if (t.begin_char < li[i].occurrence[0].begin_char): break i += 1 if (i >= len(li)): continue re.book = li[i].book if (re.pages is None): re.pages = li[i].pages re.typ = BookLinkRefType.INLINE re = (Utils.asObjectOrNull(ad.registerReferent(re), BookLinkRefReferent)) rt.referent = (re) kit.embedToken(rt) t = (rt)
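# --- Illustrative sketch (not part of the Pullenti sources) ------------------
# process above makes two passes: the first registers full reference entries
# and groups them by their list number (refs_by_num), the second resolves
# short inline citations by number, binding each citation to the first
# registered entry with that number that occurs later in the text.  The same
# lookup on plain data (names hypothetical):
def _sketch_resolve_inline_citation(cite_pos, cite_num, refs_by_num):
    # refs_by_num: {number: [(entry_pos, entry), ...]} in document order
    entries = refs_by_num.get(cite_num)
    if not entries:
        return None
    for entry_pos, entry in entries:
        if cite_pos < entry_pos:   # first full entry located after the citation
            return entry
    return None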
def try_attach(self, t : 'Token', for_ontology : bool=False) -> 'ReferentToken': if (t is None): return None rt0 = self.__try_attach_spec(t) if (rt0 is not None): return rt0 if (t.chars.is_all_lower): if (not t.is_whitespace_after and (isinstance(t.next0_, NumberToken))): if (t.previous is None or t.is_whitespace_before or t.previous.is_char_of(",:")): pass else: return None else: return None tmp = io.StringIO() t1 = t hiph = False ok = True nums = 0 chars = 0 w = t1.next0_ first_pass3148 = True while True: if first_pass3148: first_pass3148 = False else: w = w.next0_ if (not (w is not None)): break if (w.is_whitespace_before and not for_ontology): break if (w.is_char_of("/\\_") or w.is_hiphen): hiph = True print('-', end="", file=tmp) continue hiph = False nt = Utils.asObjectOrNull(w, NumberToken) if (nt is not None): if (nt.typ != NumberSpellingType.DIGIT): break t1 = (nt) print(nt.get_source_text(), end="", file=tmp) nums += 1 continue tt = Utils.asObjectOrNull(w, TextToken) if (tt is None): break if (tt.length_char > 3): ok = False break if (not str.isalpha(tt.term[0])): if (tt.is_char_of(",:") or BracketHelper.can_be_end_of_sequence(tt, False, None, False)): break if (not tt.is_char_of("+*&^#@!")): ok = False break chars += 1 t1 = (tt) print(tt.get_source_text(), end="", file=tmp) if (not for_ontology): if ((tmp.tell() < 1) or not ok or hiph): return None if (tmp.tell() > 12): return None last = Utils.getCharAtStringIO(tmp, tmp.tell() - 1) if (last == '!'): return None if ((nums + chars) == 0): return None if (not self.__check_attach(t, t1)): return None new_dr = DenominationReferent() new_dr._add_value(t, t1) return ReferentToken(new_dr, t, t1)
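# --- Illustrative sketch (not part of the Pullenti sources) ------------------
# try_attach above glues the head token with the following digits, short
# letter groups and '-'/'/'-style separators (no whitespace in between) and
# then rejects the result if it is empty, longer than 12 characters, ends
# with '!', or picked up no extra letters or digits at all.  Roughly the same
# validation applied to a ready-made string:
def _sketch_is_valid_denomination(tail):
    if not tail or len(tail) > 12:
        return False
    if tail.endswith('!'):
        return False
    return any(ch.isalnum() for ch in tail)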
def __try_attach(t: 'Token', prev: typing.List['DateItemToken'], detail_regime: bool) -> 'DateItemToken': from pullenti.ner.measure.internal.MeasureToken import MeasureToken if (t is None): return None nt = Utils.asObjectOrNull(t, NumberToken) begin = t end = t is_in_brack = False if ((BracketHelper.can_be_start_of_sequence(t, False, False) and t.next0_ is not None and (isinstance(t.next0_, NumberToken))) and BracketHelper.can_be_end_of_sequence( t.next0_.next0_, False, None, False)): nt = (Utils.asObjectOrNull(t.next0_, NumberToken)) end = t.next0_.next0_ is_in_brack = True if ((t.is_newline_before and BracketHelper.is_bracket(t, False) and (isinstance(t.next0_, NumberToken))) and BracketHelper.is_bracket(t.next0_.next0_, False)): nt = (Utils.asObjectOrNull(t.next0_, NumberToken)) end = t.next0_.next0_ is_in_brack = True if (nt is not None): if (nt.int_value is None): return None if (nt.typ == NumberSpellingType.WORDS): if (nt.morph.class0_.is_noun and not nt.morph.class0_.is_adjective): if (t.next0_ is not None and ((t.next0_.is_value("КВАРТАЛ", None) or t.next0_.is_value("ПОЛУГОДИЕ", None) or t.next0_.is_value("ПІВРІЧЧЯ", None)))): pass else: return None if (NumberHelper.try_parse_age(nt) is not None): return None tt = None res = DateItemToken._new628(begin, end, DateItemToken.DateItemType.NUMBER, nt.int_value, nt.morph) if ((res.int_value == 20 and (isinstance(nt.next0_, NumberToken)) and nt.next0_.int_value is not None) and nt.next0_.length_char == 2 and prev is not None): num = 2000 + nt.next0_.int_value if ((num < 2030) and len(prev) > 0 and prev[len(prev) - 1].typ == DateItemToken.DateItemType.MONTH): ok = False if (nt.whitespaces_after_count == 1): ok = True elif (nt.is_newline_after and nt.is_newline_after): ok = True if (ok): nt = (Utils.asObjectOrNull(nt.next0_, NumberToken)) res.end_token = nt res.int_value = num if (res.int_value == 20 or res.int_value == 201): tt = t.next0_ if (tt is not None and tt.is_char('_')): while tt is not None: if (not tt.is_char('_')): break tt = tt.next0_ tt = DateItemToken.__test_year_rus_word(tt, False) if (tt is not None): res.int_value = 0 res.end_token = tt res.typ = DateItemToken.DateItemType.YEAR return res if (res.int_value <= 12 and t.next0_ is not None and (t.whitespaces_after_count < 3)): tt = t.next0_ if (tt.is_value("ЧАС", None)): if (((isinstance(t.previous, TextToken)) and not t.previous.chars.is_letter and not t.is_whitespace_before) and (isinstance(t.previous.previous, NumberToken)) and not t.previous.is_whitespace_before): pass else: res.typ = DateItemToken.DateItemType.HOUR res.end_token = tt tt = tt.next0_ if (tt is not None and tt.is_char('.')): res.end_token = tt tt = tt.next0_ first_pass3072 = True while True: if first_pass3072: first_pass3072 = False else: tt = tt.next0_ if (not (tt is not None)): break if (tt.is_value("УТРО", "РАНОК")): res.end_token = tt res.typ = DateItemToken.DateItemType.HOUR return res if (tt.is_value("ВЕЧЕР", "ВЕЧІР")): res.end_token = tt res.int_value += 12 res.typ = DateItemToken.DateItemType.HOUR return res if (tt.is_value("ДЕНЬ", None)): res.end_token = tt if (res.int_value < 10): res.int_value += 12 res.typ = DateItemToken.DateItemType.HOUR return res if (tt.is_value("НОЧЬ", "НІЧ")): res.end_token = tt if (res.int_value == 12): res.int_value = 0 elif (res.int_value > 9): res.int_value += 12 res.typ = DateItemToken.DateItemType.HOUR return res if (tt.is_comma or tt.morph.class0_.is_adverb): continue break if (res.typ == DateItemToken.DateItemType.HOUR): return res can_be_year_ = True if (prev is 
not None and len(prev) > 0 and prev[len(prev) - 1].typ == DateItemToken.DateItemType.MONTH): pass elif ((prev is not None and len(prev) >= 4 and prev[len(prev) - 1].typ == DateItemToken.DateItemType.DELIM) and prev[len(prev) - 2].can_by_month): pass elif (nt.next0_ is not None and ((nt.next0_.is_value("ГОД", None) or nt.next0_.is_value("РІК", None)))): if (res.int_value < 1000): can_be_year_ = False tt = DateItemToken.__test_year_rus_word(nt.next0_, False) if (tt is not None and DateItemToken.__is_new_age(tt.next0_)): res.typ = DateItemToken.DateItemType.YEAR res.end_token = tt elif (can_be_year_): if (res.can_be_year or res.typ == DateItemToken.DateItemType.NUMBER): tt = DateItemToken.__test_year_rus_word( nt.next0_, res.is_newline_before) if ((tt) is not None): if ((tt.is_value("Г", None) and not tt.is_whitespace_before and t.previous is not None) and ((t.previous.is_value("КОРПУС", None) or t.previous.is_value("КОРП", None)))): pass elif ( (((nt.next0_.is_value("Г", None) and (t.whitespaces_before_count < 3) and t.previous is not None) and t.previous.is_value("Я", None) and t.previous.previous is not None) and t.previous.previous.is_char_of("\\/") and t.previous.previous.previous is not None) and t.previous.previous.previous.is_value( "А", None)): return None elif (nt.next0_.length_char == 1 and not res.can_be_year and ((prev is None or ((len(prev) > 0 and prev[len(prev) - 1].typ != DateItemToken.DateItemType.DELIM))))): pass else: res.end_token = tt res.typ = DateItemToken.DateItemType.YEAR res.lang = tt.morph.language elif (tt is not None and (nt.whitespaces_after_count < 2) and (nt.end_char - nt.begin_char) == 1): res.end_token = tt res.typ = DateItemToken.DateItemType.YEAR res.lang = tt.morph.language if (nt.previous is not None): if (nt.previous.is_value("В", "У") or nt.previous.is_value("К", None) or nt.previous.is_value("ДО", None)): tt = DateItemToken.__test_year_rus_word(nt.next0_, False) if ((tt) is not None): ok = False if ((res.int_value < 100) and (isinstance(tt, TextToken)) and ((tt.term == "ГОДА" or tt.term == "РОКИ"))): pass else: ok = True if (nt.previous.is_value("ДО", None) and nt.next0_.is_value("Г", None)): cou = 0 ttt = nt.previous.previous while ttt is not None and (cou < 10): mt = MeasureToken.try_parse( ttt, None, False, False, False, False) if (mt is not None and mt.end_char > nt.end_char): ok = False break ttt = ttt.previous cou += 1 if (ok): res.end_token = tt res.typ = DateItemToken.DateItemType.YEAR res.lang = tt.morph.language res.begin_token = nt.previous elif (((nt.previous.is_value("IN", None) or nt.previous.is_value("SINCE", None))) and res.can_be_year): uu = (NumbersWithUnitToken.try_parse( nt, None, False, False, False, False) if nt.previous.is_value("IN", None) else None) if (uu is not None and len(uu.units) > 0): pass else: res.typ = DateItemToken.DateItemType.YEAR res.begin_token = nt.previous elif (nt.previous.is_value("NEL", None) or nt.previous.is_value("DEL", None)): if (res.can_be_year): res.typ = DateItemToken.DateItemType.YEAR res.lang = MorphLang.IT res.begin_token = nt.previous elif (nt.previous.is_value("IL", None) and res.can_be_day): res.lang = MorphLang.IT res.begin_token = nt.previous t1 = res.end_token.next0_ if (t1 is not None): if (t1.is_value("ЧАС", "ГОДИНА") or t1.is_value("HOUR", None)): if ((((prev is not None and len(prev) == 2 and prev[0].can_be_hour) and prev[1].typ == DateItemToken.DateItemType.DELIM and not prev[1].is_whitespace_after) and not prev[1].is_whitespace_after and res.int_value >= 0) and (res.int_value < 59)): 
prev[0].typ = DateItemToken.DateItemType.HOUR res.typ = DateItemToken.DateItemType.MINUTE res.end_token = t1 elif (res.int_value < 24): if (t1.next0_ is not None and t1.next0_.is_char('.')): t1 = t1.next0_ res.typ = DateItemToken.DateItemType.HOUR res.end_token = t1 elif ((res.int_value < 60) and ((t1.is_value("МИНУТА", "ХВИЛИНА") or t1.is_value( "МИН", None) or t.is_value("MINUTE", None)))): if (t1.next0_ is not None and t1.next0_.is_char('.')): t1 = t1.next0_ res.typ = DateItemToken.DateItemType.MINUTE res.end_token = t1 elif ( (res.int_value < 60) and ((t1.is_value("СЕКУНДА", None) or t1.is_value("СЕК", None) or t1.is_value("SECOND", None)))): if (t1.next0_ is not None and t1.next0_.is_char('.')): t1 = t1.next0_ res.typ = DateItemToken.DateItemType.SECOND res.end_token = t1 elif ((res.int_value < 30) and ((t1.is_value("ВЕК", "ВІК") or t1.is_value("СТОЛЕТИЕ", "СТОЛІТТЯ")))): res.typ = DateItemToken.DateItemType.CENTURY res.end_token = t1 elif (res.int_value <= 4 and t1.is_value("КВАРТАЛ", None)): res.typ = DateItemToken.DateItemType.QUARTAL res.end_token = t1 elif (res.int_value <= 2 and ((t1.is_value("ПОЛУГОДИЕ", None) or t1.is_value("ПІВРІЧЧЯ", None)))): res.typ = DateItemToken.DateItemType.HALFYEAR res.end_token = t1 return res t0 = Utils.asObjectOrNull(t, TextToken) if (t0 is None): return None txt = t0.get_source_text() if ((txt[0] == 'I' or txt[0] == 'X' or txt[0] == 'Х') or txt[0] == 'V'): lat = NumberHelper.try_parse_roman(t) if (lat is not None and lat.end_token.next0_ is not None and lat.int_value is not None): val = lat.int_value tt = lat.end_token.next0_ if (tt.is_value("КВАРТАЛ", None) and val > 0 and val <= 4): return DateItemToken._new629( t, tt, DateItemToken.DateItemType.QUARTAL, val) if (tt.is_value("ПОЛУГОДИЕ", "ПІВРІЧЧЯ") and val > 0 and val <= 2): return DateItemToken._new629( t, lat.end_token.next0_, DateItemToken.DateItemType.HALFYEAR, val) if (tt.is_value("ВЕК", "ВІК") or tt.is_value("СТОЛЕТИЕ", "СТОЛІТТЯ")): return DateItemToken._new629( t, lat.end_token.next0_, DateItemToken.DateItemType.CENTURY, val) if (tt.is_value("В", None) and tt.next0_ is not None and tt.next0_.is_char('.')): if (prev is not None and len(prev) > 0 and prev[len(prev) - 1].typ == DateItemToken.DateItemType.POINTER): return DateItemToken._new629( t, tt.next0_, DateItemToken.DateItemType.CENTURY, val) if (DateItemToken.__is_new_age(tt.next0_.next0_)): return DateItemToken._new629( t, tt.next0_, DateItemToken.DateItemType.CENTURY, val) if (tt.is_hiphen): lat2 = NumberHelper.try_parse_roman(tt.next0_) if (lat2 is not None and lat2.int_value is not None and lat2.end_token.next0_ is not None): if (lat2.end_token.next0_.is_value("ВЕК", "ВІК") or lat2.end_token.next0_.is_value( "СТОЛЕТИЕ", "СТОЛІТТЯ")): ddd = DateItemToken.try_attach( tt.next0_, None, False) return DateItemToken._new634( t, lat.end_token, DateItemToken.DateItemType.CENTURY, val, ((ddd.new_age if ddd is not None else 0))) if (t is not None and t.is_value("НАПРИКІНЦІ", None)): return DateItemToken._new635(t, t, DateItemToken.DateItemType.POINTER, "конец") if (t is not None and t.is_value("ДОНЕДАВНА", None)): return DateItemToken._new635(t, t, DateItemToken.DateItemType.POINTER, "сегодня") if (prev is None): if (t is not None): if (t.is_value("ОКОЛО", "БІЛЯ") or t.is_value("ПРИМЕРНО", "ПРИБЛИЗНО") or t.is_value("ABOUT", None)): return DateItemToken._new635( t, t, DateItemToken.DateItemType.POINTER, "около") if (t.is_value("ОК", None) or t.is_value("OK", None)): if (t.next0_ is not None and t.next0_.is_char('.')): return 
DateItemToken._new635( t, t.next0_, DateItemToken.DateItemType.POINTER, "около") return DateItemToken._new635( t, t, DateItemToken.DateItemType.POINTER, "около") tok = DateItemToken.M_SEASONS.try_parse(t, TerminParseAttr.NO) if ((tok is not None and (Utils.valToEnum(tok.termin.tag, DatePointerType)) == DatePointerType.SUMMER and t.morph.language.is_ru) and (isinstance(t, TextToken))): str0_ = t.term if (str0_ != "ЛЕТОМ" and str0_ != "ЛЕТА" and str0_ != "ЛЕТО"): tok = (None) if (tok is not None): return DateItemToken._new629( t, tok.end_token, DateItemToken.DateItemType.POINTER, Utils.valToEnum(tok.termin.tag, DatePointerType)) npt = NounPhraseHelper.try_parse(t, NounPhraseParseAttr.NO, 0, None) if (npt is not None): tok = DateItemToken.M_SEASONS.try_parse(npt.end_token, TerminParseAttr.NO) if ((tok is not None and (Utils.valToEnum(tok.termin.tag, DatePointerType)) == DatePointerType.SUMMER and t.morph.language.is_ru) and (isinstance(t, TextToken))): str0_ = t.term if (str0_ != "ЛЕТОМ" and str0_ != "ЛЕТА" and str0_ != "ЛЕТО"): tok = (None) if (tok is not None): return DateItemToken._new629( t, tok.end_token, DateItemToken.DateItemType.POINTER, Utils.valToEnum(tok.termin.tag, DatePointerType)) typ_ = DateItemToken.DateItemType.NUMBER if (npt.noun.is_value("КВАРТАЛ", None)): typ_ = DateItemToken.DateItemType.QUARTAL elif (npt.end_token.is_value("ПОЛУГОДИЕ", None) or npt.end_token.is_value("ПІВРІЧЧЯ", None)): typ_ = DateItemToken.DateItemType.HALFYEAR elif (npt.end_token.is_value("НАЧАЛО", None) or npt.end_token.is_value("ПОЧАТОК", None)): return DateItemToken._new635( t, npt.end_token, DateItemToken.DateItemType.POINTER, "начало") elif (npt.end_token.is_value("СЕРЕДИНА", None)): return DateItemToken._new635( t, npt.end_token, DateItemToken.DateItemType.POINTER, "середина") elif (npt.end_token.is_value("КОНЕЦ", None) or npt.end_token.is_value("КІНЕЦЬ", None) or npt.end_token.is_value("НАПРИКІНЕЦЬ", None)): return DateItemToken._new635( t, npt.end_token, DateItemToken.DateItemType.POINTER, "конец") elif (npt.end_token.is_value("ВРЕМЯ", None) and len(npt.adjectives) > 0 and npt.end_token.previous.is_value("НАСТОЯЩЕЕ", None)): return DateItemToken._new635( t, npt.end_token, DateItemToken.DateItemType.POINTER, "сегодня") elif (npt.end_token.is_value("ЧАС", None) and len(npt.adjectives) > 0 and npt.end_token.previous.is_value("ДАНИЙ", None)): return DateItemToken._new635( t, npt.end_token, DateItemToken.DateItemType.POINTER, "сегодня") if (typ_ != DateItemToken.DateItemType.NUMBER or detail_regime): delta = 0 if (len(npt.adjectives) > 0): if (npt.adjectives[0].is_value("ПОСЛЕДНИЙ", "ОСТАННІЙ")): return DateItemToken._new629( t0, npt.end_token, typ_, (4 if typ_ == DateItemToken.DateItemType.QUARTAL else 2)) if (npt.adjectives[0].is_value("ПРЕДЫДУЩИЙ", "ПОПЕРЕДНІЙ") or npt.adjectives[0].is_value("ПРОШЛЫЙ", None)): delta = -1 elif (npt.adjectives[0].is_value("СЛЕДУЮЩИЙ", None) or npt.adjectives[0].is_value("ПОСЛЕДУЮЩИЙ", None) or npt.adjectives[0].is_value("НАСТУПНИЙ", None)): delta = 1 else: return None cou = 0 tt = t.previous first_pass3073 = True while True: if first_pass3073: first_pass3073 = False else: tt = tt.previous if (not (tt is not None)): break if (cou > 200): break dr = Utils.asObjectOrNull(tt.get_referent(), DateRangeReferent) if (dr is None): continue if (typ_ == DateItemToken.DateItemType.QUARTAL): ii = dr.quarter_number if (ii < 1): continue ii += delta if ((ii < 1) or ii > 4): continue return DateItemToken._new629(t0, npt.end_token, typ_, ii) if (typ_ == 
DateItemToken.DateItemType.HALFYEAR): ii = dr.halfyear_number if (ii < 1): continue ii += delta if ((ii < 1) or ii > 2): continue return DateItemToken._new629(t0, npt.end_token, typ_, ii) term = t0.term if (not str.isalnum(term[0])): if (t0.is_char_of(".\\/:") or t0.is_hiphen): return DateItemToken._new635(t0, t0, DateItemToken.DateItemType.DELIM, term) elif (t0.is_char(',')): return DateItemToken._new635(t0, t0, DateItemToken.DateItemType.DELIM, term) else: return None if (term == "O" or term == "О"): if ((isinstance(t.next0_, NumberToken)) and not t.is_whitespace_after and len(t.next0_.value) == 1): return DateItemToken._new629(t, t.next0_, DateItemToken.DateItemType.NUMBER, t.next0_.int_value) if (str.isalpha(term[0])): inf = DateItemToken.M_MONTHES.try_parse(t, TerminParseAttr.NO) if (inf is not None and inf.termin.tag is None): inf = DateItemToken.M_MONTHES.try_parse( inf.end_token.next0_, TerminParseAttr.NO) if (inf is not None and (isinstance(inf.termin.tag, int))): return DateItemToken._new653(inf.begin_token, inf.end_token, DateItemToken.DateItemType.MONTH, inf.termin.tag, inf.termin.lang) return None
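# --- Illustrative sketch (not part of the Pullenti sources) ------------------
# __try_attach above turns a bare number into a dated item by looking at the
# word that follows it: "час" gives an hour (value < 24), "минута"/"мин" a
# minute (< 60), "квартал" a quarter (<= 4), "полугодие" a half-year (<= 2),
# "век"/"столетие" a century (< 30), and so on.  The same dispatch, stripped
# of all token handling (word lists abbreviated):
def _sketch_classify_number(value, next_word):
    w = next_word.lower()
    if w in ("час", "часа", "часов", "година") and value < 24:
        return "HOUR"
    if w in ("минута", "мин", "хвилина") and value < 60:
        return "MINUTE"
    if w in ("секунда", "сек") and value < 60:
        return "SECOND"
    if w == "квартал" and 1 <= value <= 4:
        return "QUARTAL"
    if w in ("полугодие", "півріччя") and 1 <= value <= 2:
        return "HALFYEAR"
    if w in ("век", "вік", "столетие", "століття") and value < 30:
        return "CENTURY"
    return "NUMBER"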
def attach_first(self, p : 'InstrumentParticipantReferent', min_char : int, max_char : int) -> 'ReferentToken': t = None tt0 = self.begin_token refs = list() t = tt0.previous first_pass3287 = True while True: if first_pass3287: first_pass3287 = False else: t = t.previous if (not (t is not None and t.begin_char >= min_char)): break if (t.is_newline_after): if (t.newlines_after_count > 1): break if (isinstance(t.next0_, NumberToken)): break tt = ParticipantToken.__try_attach_contract_ground(t, p, False) if (tt is not None): continue r = t.get_referent() if (((((isinstance(r, OrganizationReferent)) or (isinstance(r, PhoneReferent)) or (isinstance(r, PersonReferent))) or (isinstance(r, PersonPropertyReferent)) or (isinstance(r, AddressReferent))) or (isinstance(r, UriReferent)) or (isinstance(r, PersonIdentityReferent))) or (isinstance(r, BankDataReferent))): if (not r in refs): refs.insert(0, r) tt0 = t if (len(refs) > 0): for r in refs: if (r != refs[0] and (isinstance(refs[0], OrganizationReferent)) and (((isinstance(r, PersonReferent)) or (isinstance(r, PersonPropertyReferent))))): p.add_slot(InstrumentParticipantReferent.ATTR_DELEGATE, r, False, 0) else: p.add_slot(InstrumentParticipantReferent.ATTR_REF, r, False, 0) rt = ReferentToken(p, tt0, self.end_token) t = self.end_token.next0_ if (BracketHelper.is_bracket(t, False)): t = t.next0_ if (t is not None and t.is_char(',')): t = t.next0_ first_pass3288 = True while True: if first_pass3288: first_pass3288 = False else: t = t.next0_ if (not (t is not None and ((max_char == 0 or t.begin_char <= max_char)))): break if (t.is_value("СТОРОНА", None)): break r = t.get_referent() if (((((isinstance(r, OrganizationReferent)) or (isinstance(r, PhoneReferent)) or (isinstance(r, PersonReferent))) or (isinstance(r, PersonPropertyReferent)) or (isinstance(r, AddressReferent))) or (isinstance(r, UriReferent)) or (isinstance(r, PersonIdentityReferent))) or (isinstance(r, BankDataReferent))): if ((((isinstance(r, PersonPropertyReferent)) and t.next0_ is not None and t.next0_.is_comma) and (isinstance(t.next0_.next0_, ReferentToken)) and (isinstance(t.next0_.next0_.get_referent(), PersonReferent))) and not t.next0_.is_newline_after): pe = Utils.asObjectOrNull(t.next0_.next0_.get_referent(), PersonReferent) pe.add_slot(PersonReferent.ATTR_ATTR, r, False, 0) r = (pe) t = t.next0_.next0_ is_delegate = False if (t.previous.is_value("ЛИЦО", None) or t.previous.is_value("ИМЯ", None)): is_delegate = True if (t.previous.is_value("КОТОРЫЙ", None) and t.previous.previous is not None and ((t.previous.previous.is_value("ИМЯ", None) or t.previous.previous.is_value("ЛИЦО", None)))): is_delegate = True p.add_slot((InstrumentParticipantReferent.ATTR_DELEGATE if (((isinstance(r, PersonReferent)) or (isinstance(r, PersonPropertyReferent)))) and is_delegate else InstrumentParticipantReferent.ATTR_REF), r, False, 0) rt.end_token = t continue tt = ParticipantToken.__try_attach_contract_ground(t, p, False) if (tt is not None): rt.end_token = tt t = rt.end_token if (rt.begin_char == tt.begin_char): rt.begin_token = tt continue if (t.is_value("В", None) and t.next0_ is not None and t.next0_.is_value("ЛИЦО", None)): t = t.next0_ continue if (t.is_value("ОТ", None) and t.next0_ is not None and t.next0_.is_value("ИМЯ", None)): t = t.next0_ continue if (t.is_value("ПО", None) and t.next0_ is not None and t.next0_.is_value("ПОРУЧЕНИЕ", None)): t = t.next0_ continue if (t.is_newline_before): break if (t.get_morph_class_in_dictionary() == MorphClass.VERB): if ((not 
t.is_value("УДОСТОВЕРЯТЬ", None) and not t.is_value("ПРОЖИВАТЬ", None) and not t.is_value("ЗАРЕГИСТРИРОВАТЬ", None)) and not t.is_value("ДЕЙСТВОВАТЬ", None)): break if (t.is_and and t.previous is not None and t.previous.is_comma): break if (t.is_and and t.next0_.get_referent() is not None): if (isinstance(t.next0_.get_referent(), OrganizationReferent)): break pe = Utils.asObjectOrNull(t.next0_.get_referent(), PersonReferent) if (pe is not None): has_ip = False for s in pe.slots: if (s.type_name == PersonReferent.ATTR_ATTR): if (str(s.value).startswith("индивидуальный предприниматель")): has_ip = True break if (has_ip): break t = rt.begin_token while t is not None and t.end_char <= rt.end_char: tt = ParticipantToken.__try_attach_contract_ground(t, p, True) if (tt is not None): if (tt.end_char > rt.end_char): rt.end_token = tt t = tt t = t.next0_ return rt
def tryAttachOrg(t : 'Token', can_be_cyr : bool=False) -> 'ReferentToken': from pullenti.ner.org.internal.OrgItemNameToken import OrgItemNameToken if (t is None): return None br = False if (t.isChar('(') and t.next0_ is not None): t = t.next0_ br = True if (isinstance(t, NumberToken)): if ((t).typ == NumberSpellingType.WORDS and t.morph.class0_.is_adjective and t.chars.is_capital_upper): pass else: return None else: if (t.chars.is_all_lower): return None if ((t.length_char < 3) and not t.chars.is_letter): return None if (not t.chars.is_latin_letter): if (not can_be_cyr or not t.chars.is_cyrillic_letter): return None t0 = t t1 = t0 nam_wo = 0 tok = None geo_ = None add_typ = None first_pass3043 = True while True: if first_pass3043: first_pass3043 = False else: t = t.next0_ if (not (t is not None)): break if (t != t0 and t.whitespaces_before_count > 1): break if (t.isChar(')')): break if (t.isChar('(') and t.next0_ is not None): if ((isinstance(t.next0_.getReferent(), GeoReferent)) and t.next0_.next0_ is not None and t.next0_.next0_.isChar(')')): geo_ = (Utils.asObjectOrNull(t.next0_.getReferent(), GeoReferent)) t = t.next0_.next0_ continue typ = OrgItemTypeToken.tryAttach(t.next0_, True, None) if ((typ is not None and typ.end_token.next0_ is not None and typ.end_token.next0_.isChar(')')) and typ.chars.is_latin_letter): add_typ = typ t = typ.end_token.next0_ continue if (((isinstance(t.next0_, TextToken)) and t.next0_.next0_ is not None and t.next0_.next0_.isChar(')')) and t.next0_.chars.is_capital_upper): t = t.next0_.next0_ t1 = t continue break tok = OrgItemEngItem.tryAttach(t, can_be_cyr) if (tok is None and t.isCharOf(".,") and t.next0_ is not None): tok = OrgItemEngItem.tryAttach(t.next0_, can_be_cyr) if (tok is None and t.next0_.isCharOf(",.")): tok = OrgItemEngItem.tryAttach(t.next0_.next0_, can_be_cyr) if (tok is not None): if (tok.length_char == 1 and t0.chars.is_cyrillic_letter): return None break if (t.is_hiphen and not t.is_whitespace_after and not t.is_whitespace_before): continue if (t.isCharOf("&+") or t.is_and): continue if (t.isChar('.')): if (t.previous is not None and t.previous.length_char == 1): continue elif (MiscHelper.canBeStartOfSentence(t.next0_)): break if (not t.chars.is_latin_letter): if (not can_be_cyr or not t.chars.is_cyrillic_letter): break if (t.chars.is_all_lower): if (t.morph.class0_.is_preposition or t.morph.class0_.is_conjunction): continue if (br): continue break mc = t.getMorphClassInDictionary() if (mc.is_verb): if (t.next0_ is not None and t.next0_.morph.class0_.is_preposition): break if (t.next0_ is not None and t.next0_.isValue("OF", None)): break if (isinstance(t, TextToken)): nam_wo += 1 t1 = t if (tok is None): return None if (t0 == tok.begin_token): br2 = BracketHelper.tryParse(tok.end_token.next0_, BracketParseAttr.NO, 100) if (br2 is not None): org1 = OrganizationReferent() if (tok.short_value is not None): org1.addTypeStr(tok.short_value) org1.addTypeStr(tok.full_value) nam1 = MiscHelper.getTextValue(br2.begin_token, br2.end_token, GetTextAttr.NO) if (nam1 is not None): org1.addName(nam1, True, None) return ReferentToken(org1, t0, br2.end_token) return None org0_ = OrganizationReferent() te = tok.end_token if (tok.is_bank): t1 = tok.end_token if (tok.full_value == "company" and (tok.whitespaces_after_count < 3)): tok1 = OrgItemEngItem.tryAttach(tok.end_token.next0_, can_be_cyr) if (tok1 is not None): t1 = tok.end_token tok = tok1 te = tok.end_token if (tok.full_value == "company"): if (nam_wo == 0): return None nam = 
MiscHelper.getTextValue(t0, t1, GetTextAttr.IGNOREARTICLES) if (nam == "STOCK" and tok.full_value == "company"): return None alt_nam = None if (Utils.isNullOrEmpty(nam)): return None if (nam.find('(') > 0): i1 = nam.find('(') i2 = nam.find(')') if (i1 < i2): alt_nam = nam tai = None if ((i2 + 1) < len(nam)): tai = nam[i2:].strip() nam = nam[0:0+i1].strip() if (tai is not None): nam = "{0} {1}".format(nam, tai) if (tok.is_bank): org0_.addTypeStr(("bank" if tok.kit.base_language.is_en else "банк")) org0_.addProfile(OrgProfile.FINANCE) if ((t1.next0_ is not None and t1.next0_.isValue("OF", None) and t1.next0_.next0_ is not None) and t1.next0_.next0_.chars.is_latin_letter): nam0 = OrgItemNameToken.tryAttach(t1.next0_, None, False, False) if (nam0 is not None): te = nam0.end_token else: te = t1.next0_.next0_ nam = MiscHelper.getTextValue(t0, te, GetTextAttr.NO) if (isinstance(te.getReferent(), GeoReferent)): org0_._addGeoObject(Utils.asObjectOrNull(te.getReferent(), GeoReferent)) elif (t0 == t1): return None else: if (tok.short_value is not None): org0_.addTypeStr(tok.short_value) org0_.addTypeStr(tok.full_value) if (Utils.isNullOrEmpty(nam)): return None org0_.addName(nam, True, None) if (alt_nam is not None): org0_.addName(alt_nam, True, None) res = ReferentToken(org0_, t0, te) t = te while t.next0_ is not None: if (t.next0_.isCharOf(",.")): t = t.next0_ else: break if (t.whitespaces_after_count < 2): tok = OrgItemEngItem.tryAttach(t.next0_, can_be_cyr) if (tok is not None): if (tok.short_value is not None): org0_.addTypeStr(tok.short_value) org0_.addTypeStr(tok.full_value) res.end_token = tok.end_token if (geo_ is not None): org0_._addGeoObject(geo_) if (add_typ is not None): org0_.addType(add_typ, False) if (not br): return res t = res.end_token if (t.next0_ is None or t.next0_.isChar(')')): res.end_token = t.next0_ else: return None return res
def __try_attach_contract_ground(t : 'Token', ip : 'InstrumentParticipantReferent', can_be_passport : bool=False) -> 'Token': ok = False first_pass3289 = True while True: if first_pass3289: first_pass3289 = False else: t = t.next0_ if (not (t is not None)): break if (t.is_char(',') or t.morph.class0_.is_preposition): continue if (t.is_char('(')): br = BracketHelper.try_parse(t, BracketParseAttr.NO, 100) if (br is not None): t = br.end_token continue if (t.is_value("ОСНОВАНИЕ", None) or t.is_value("ДЕЙСТВОВАТЬ", None) or t.is_value("ДЕЙСТВУЮЩИЙ", None)): ok = True if (t.next0_ is not None and t.next0_.is_char('(')): br = BracketHelper.try_parse(t.next0_, BracketParseAttr.NO, 100) if (br is not None and (br.length_char < 10)): t = br.end_token continue dr = Utils.asObjectOrNull(t.get_referent(), DecreeReferent) if (dr is not None): ip.ground = dr return t pir = Utils.asObjectOrNull(t.get_referent(), PersonIdentityReferent) if (pir is not None and can_be_passport): if (pir.typ is not None and not "паспорт" in pir.typ): ip.ground = pir return t if (t.is_value("УСТАВ", None)): ip.ground = t.get_normal_case_text(MorphClass.NOUN, MorphNumber.SINGULAR, MorphGender.UNDEFINED, False) return t if (t.is_value("ДОВЕРЕННОСТЬ", None)): dts = DecreeToken.try_attach_list(t.next0_, None, 10, False) if (dts is None): has_spec = False ttt = t.next0_ first_pass3290 = True while True: if first_pass3290: first_pass3290 = False else: ttt = ttt.next0_ if (not (ttt is not None and ((ttt.end_char - t.end_char) < 200))): break if (ttt.is_comma): continue if (ttt.is_value("УДОСТОВЕРИТЬ", None) or ttt.is_value("УДОСТОВЕРЯТЬ", None)): has_spec = True continue dt = DecreeToken.try_attach(ttt, None, False) if (dt is not None): if (dt.typ == DecreeToken.ItemType.DATE or dt.typ == DecreeToken.ItemType.NUMBER): dts = DecreeToken.try_attach_list(ttt, None, 10, False) break npt = NounPhraseHelper.try_parse(ttt, NounPhraseParseAttr.NO, 0, None) if (npt is not None): if (npt.end_token.is_value("НОТАРИУС", None)): ttt = npt.end_token has_spec = True continue if (ttt.get_referent() is not None): if (has_spec): continue break if (dts is not None and len(dts) > 0): t0 = t dr = DecreeReferent() dr.typ = "ДОВЕРЕННОСТЬ" for d in dts: if (d.typ == DecreeToken.ItemType.DATE): dr._add_date(d) t = d.end_token elif (d.typ == DecreeToken.ItemType.NUMBER): dr._add_number(d) t = d.end_token else: break ad = t.kit.get_analyzer_data_by_analyzer_name(InstrumentAnalyzer.ANALYZER_NAME) ip.ground = ad.register_referent(dr) rt = ReferentToken(Utils.asObjectOrNull(ip.ground, Referent), t0, t) t.kit.embed_token(rt) return rt ip.ground = "ДОВЕРЕННОСТЬ" return t break return None
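# A minimal standalone sketch (not pullenti code) of the ground scan in
# __try_attach_contract_ground above: commas and prepositions are skipped,
# trigger words such as ОСНОВАНИЕ / ДЕЙСТВОВАТЬ mark that a ground may follow,
# and УСТАВ or ДОВЕРЕННОСТЬ become the ground value; the word sets below are
# only the subset visible in the method, everything else is illustrative.
_GROUND_TRIGGERS = {"ОСНОВАНИЕ", "ДЕЙСТВОВАТЬ", "ДЕЙСТВУЮЩИЙ"}
_GROUND_WORDS = {"УСТАВ", "ДОВЕРЕННОСТЬ"}
_SKIPPED = {",", "НА", "В", "ОТ", "ПО"}

def _find_ground(words):
    for w in words:
        up = w.upper()
        if up in _SKIPPED:
            continue
        if up in _GROUND_TRIGGERS:
            continue
        if up in _GROUND_WORDS:
            return up
        break
    return None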
def try_attach_territory( li: typing.List['TerrItemToken'], ad: 'AnalyzerData', attach_always: bool = False, cits: typing.List['CityItemToken'] = None, exists: typing.List['GeoReferent'] = None) -> 'ReferentToken': if (li is None or len(li) == 0): return None ex_obj = None new_name = None adj_list = list() noun = None add_noun = None rt = TerrAttachHelper.__try_attach_moscowao(li, ad) if (rt is not None): return rt if (li[0].termin_item is not None and li[0].termin_item.canonic_text == "ТЕРРИТОРИЯ"): res2 = TerrAttachHelper.__try_attach_pure_terr(li, ad) return res2 if (len(li) == 2): if (li[0].rzd is not None and li[1].rzd_dir is not None): rzd = GeoReferent() rzd._add_name(li[1].rzd_dir) rzd._add_typ_ter(li[0].kit.base_language) rzd.add_slot(GeoReferent.ATTR_REF, li[0].rzd.referent, False, 0) rzd.add_ext_referent(li[0].rzd) return ReferentToken(rzd, li[0].begin_token, li[1].end_token) if (li[1].rzd is not None and li[0].rzd_dir is not None): rzd = GeoReferent() rzd._add_name(li[0].rzd_dir) rzd._add_typ_ter(li[0].kit.base_language) rzd.add_slot(GeoReferent.ATTR_REF, li[1].rzd.referent, False, 0) rzd.add_ext_referent(li[1].rzd) return ReferentToken(rzd, li[0].begin_token, li[1].end_token) can_be_city_before = False adj_terr_before = False if (cits is not None): if (cits[0].typ == CityItemToken.ItemType.CITY): can_be_city_before = True elif (cits[0].typ == CityItemToken.ItemType.NOUN and len(cits) > 1): can_be_city_before = True k = 0 k = 0 while k < len(li): if (li[k].onto_item is not None): if (ex_obj is not None or new_name is not None): break if (noun is not None): if (k == 1): if (noun.termin_item.canonic_text == "РАЙОН" or noun.termin_item.canonic_text == "ОБЛАСТЬ" or noun.termin_item.canonic_text == "СОЮЗ"): if (isinstance(li[k].onto_item.referent, GeoReferent)): if (li[k].onto_item.referent.is_state): break ok = False tt = li[k].end_token.next0_ if (tt is None): ok = True elif (tt.is_char_of(",.")): ok = True if (not ok): ok = MiscLocationHelper.check_geo_object_before( li[0].begin_token) if (not ok): adr = AddressItemToken.try_parse( tt, None, False, False, None) if (adr is not None): if (adr.typ == AddressItemToken.ItemType.STREET): ok = True if (not ok): break if (li[k].onto_item is not None): if (noun.begin_token.is_value("МО", None) or noun.begin_token.is_value("ЛО", None)): return None ex_obj = li[k] elif (li[k].termin_item is not None): if (noun is not None): break if (li[k].termin_item.is_always_prefix and k > 0): break if (k > 0 and li[k].is_doubt): if (li[k].begin_token == li[k].end_token and li[k].begin_token.is_value("ЗАО", None)): break if (li[k].termin_item.is_adjective or li[k].is_geo_in_dictionary): adj_list.append(li[k]) else: if (ex_obj is not None): geo_ = Utils.asObjectOrNull(ex_obj.onto_item.referent, GeoReferent) if (geo_ is None): break if (ex_obj.is_adjective and ((li[k].termin_item.canonic_text == "СОЮЗ" or li[k].termin_item.canonic_text == "ФЕДЕРАЦИЯ"))): str0_ = str(ex_obj.onto_item) if (not li[k].termin_item.canonic_text in str0_): return None if (li[k].termin_item.canonic_text == "РАЙОН" or li[k].termin_item.canonic_text == "ОКРУГ" or li[k].termin_item.canonic_text == "КРАЙ"): tmp = io.StringIO() for s in geo_.slots: if (s.type_name == GeoReferent.ATTR_TYPE): print("{0};".format(s.value), end="", file=tmp, flush=True) if (not li[k].termin_item.canonic_text in Utils.toStringStringIO(tmp).upper()): if (k != 1 or new_name is not None): break new_name = li[0] new_name.is_adjective = True new_name.onto_item = (None) ex_obj = (None) noun = li[k] if (k == 0): 
tt = TerrItemToken.try_parse( li[k].begin_token.previous, None, True, False, None) if (tt is not None and tt.morph.class0_.is_adjective): adj_terr_before = True else: if (ex_obj is not None): break if (new_name is not None): break new_name = li[k] k += 1 name = None alt_name = None full_name = None morph_ = None if (ex_obj is not None): if (ex_obj.is_adjective and not ex_obj.morph.language.is_en and noun is None): if (attach_always and ex_obj.end_token.next0_ is not None): npt = NounPhraseHelper.try_parse(ex_obj.begin_token, NounPhraseParseAttr.NO, 0, None) if (ex_obj.end_token.next0_.is_comma_and): pass elif (npt is None): pass else: str0_ = StreetItemToken.try_parse( ex_obj.end_token.next0_, None, False, None, False) if (str0_ is not None): if (str0_.typ == StreetItemType.NOUN and str0_.end_token == npt.end_token): return None else: cit = CityItemToken.try_parse(ex_obj.end_token.next0_, None, False, None) if (cit is not None and ((cit.typ == CityItemToken.ItemType.NOUN or cit.typ == CityItemToken.ItemType.CITY))): npt = NounPhraseHelper.try_parse( ex_obj.begin_token, NounPhraseParseAttr.NO, 0, None) if (npt is not None and npt.end_token == cit.end_token): pass else: return None elif (ex_obj.begin_token.is_value("ПОДНЕБЕСНЫЙ", None)): pass else: return None if (noun is None and ex_obj.can_be_city): cit0 = CityItemToken.try_parse_back( ex_obj.begin_token.previous) if (cit0 is not None and cit0.typ != CityItemToken.ItemType.PROPERNAME): return None if (ex_obj.is_doubt and noun is None): ok2 = False if (TerrAttachHelper.__can_be_geo_after( ex_obj.end_token.next0_)): ok2 = True elif (not ex_obj.can_be_surname and not ex_obj.can_be_city): if ((ex_obj.end_token.next0_ is not None and ex_obj.end_token.next0_.is_char(')') and ex_obj.begin_token.previous is not None) and ex_obj.begin_token.previous.is_char('(')): ok2 = True elif (ex_obj.chars.is_latin_letter and ex_obj.begin_token.previous is not None): if (ex_obj.begin_token.previous.is_value("IN", None)): ok2 = True elif (ex_obj.begin_token.previous.is_value( "THE", None) and ex_obj.begin_token.previous.previous is not None and ex_obj.begin_token.previous.previous.is_value( "IN", None)): ok2 = True if (not ok2): cit0 = CityItemToken.try_parse_back( ex_obj.begin_token.previous) if (cit0 is not None and cit0.typ != CityItemToken.ItemType.PROPERNAME): pass elif (MiscLocationHelper.check_geo_object_before( ex_obj.begin_token.previous)): pass else: return None name = ex_obj.onto_item.canonic_text morph_ = ex_obj.morph elif (new_name is not None): if (noun is None): return None j = 1 while j < k: if (li[j].is_newline_before and not li[0].is_newline_before): if (BracketHelper.can_be_start_of_sequence( li[j].begin_token, False, False)): pass else: return None j += 1 morph_ = noun.morph if (new_name.is_adjective): if (noun.termin_item.acronym == "АО"): if (noun.begin_token != noun.end_token): return None if (new_name.morph.gender != MorphGender.FEMINIE): return None geo_before = None tt0 = li[0].begin_token.previous if (tt0 is not None and tt0.is_comma_and): tt0 = tt0.previous if (not li[0].is_newline_before and tt0 is not None): geo_before = (Utils.asObjectOrNull(tt0.get_referent(), GeoReferent)) if (Utils.indexOfList(li, noun, 0) < Utils.indexOfList( li, new_name, 0)): if (noun.termin_item.is_state): return None if (new_name.can_be_surname and geo_before is None): if (((noun.morph.case_) & new_name.morph.case_).is_undefined): return None if (MiscHelper.is_exists_in_dictionary( new_name.begin_token, new_name.end_token, (MorphClass.ADJECTIVE) | 
MorphClass.PRONOUN | MorphClass.VERB)): if (noun.begin_token != new_name.begin_token): if (geo_before is None): if (len(li) == 2 and TerrAttachHelper.__can_be_geo_after( li[1].end_token.next0_)): pass elif (len(li) == 3 and li[2].termin_item is not None and TerrAttachHelper.__can_be_geo_after( li[2].end_token.next0_)): pass elif (new_name.is_geo_in_dictionary): pass elif (new_name.end_token.is_newline_after): pass else: return None npt = NounPhraseHelper.try_parse( new_name.end_token, NounPhraseParseAttr.PARSEPRONOUNS, 0, None) if (npt is not None and npt.end_token != new_name.end_token): if (len(li) >= 3 and li[2].termin_item is not None and npt.end_token == li[2].end_token): add_noun = li[2] else: return None rtp = new_name.kit.process_referent( "PERSON", new_name.begin_token) if (rtp is not None): return None name = ProperNameHelper.get_name_ex( new_name.begin_token, new_name.end_token, MorphClass.ADJECTIVE, MorphCase.UNDEFINED, noun.termin_item.gender, False, False) else: ok = False if (((k + 1) < len(li)) and li[k].termin_item is None and li[k + 1].termin_item is not None): ok = True elif ((k < len(li)) and li[k].onto_item is not None): ok = True elif (k == len(li) and not new_name.is_adj_in_dictionary): ok = True elif (MiscLocationHelper.check_geo_object_before( li[0].begin_token) or can_be_city_before): ok = True elif (MiscLocationHelper.check_geo_object_after( li[k - 1].end_token, False)): ok = True elif (len(li) == 3 and k == 2): cit = CityItemToken.try_parse(li[2].begin_token, None, False, None) if (cit is not None): if (cit.typ == CityItemToken.ItemType.CITY or cit.typ == CityItemToken.ItemType.NOUN): ok = True elif (len(li) == 2): ok = TerrAttachHelper.__can_be_geo_after( li[len(li) - 1].end_token.next0_) if (not ok and not li[0].is_newline_before and not li[0].chars.is_all_lower): rt00 = li[0].kit.process_referent( "PERSONPROPERTY", li[0].begin_token.previous) if (rt00 is not None): ok = True if (noun.termin_item is not None and noun.termin_item.is_strong and new_name.is_adjective): ok = True if (noun.is_doubt and len(adj_list) == 0 and geo_before is None): return None name = ProperNameHelper.get_name_ex( new_name.begin_token, new_name.end_token, MorphClass.ADJECTIVE, MorphCase.UNDEFINED, noun.termin_item.gender, False, False) if (not ok and not attach_always): if (MiscHelper.is_exists_in_dictionary( new_name.begin_token, new_name.end_token, (MorphClass.ADJECTIVE) | MorphClass.PRONOUN | MorphClass.VERB)): if (exists is not None): for e0_ in exists: if (e0_.find_slot(GeoReferent.ATTR_NAME, name, True) is not None): ok = True break if (not ok): return None full_name = "{0} {1}".format( ProperNameHelper.get_name_ex(li[0].begin_token, noun.begin_token.previous, MorphClass.ADJECTIVE, MorphCase.UNDEFINED, noun.termin_item.gender, False, False), noun.termin_item.canonic_text) else: if (not attach_always or ((noun.termin_item is not None and noun.termin_item.canonic_text == "ФЕДЕРАЦИЯ"))): is_latin = noun.chars.is_latin_letter and new_name.chars.is_latin_letter if (Utils.indexOfList(li, noun, 0) > Utils.indexOfList( li, new_name, 0)): if (not is_latin): return None if (not new_name.is_district_name and not BracketHelper.can_be_start_of_sequence( new_name.begin_token, False, False)): if (len(adj_list) == 0 and MiscHelper.is_exists_in_dictionary( new_name.begin_token, new_name.end_token, (MorphClass.NOUN) | MorphClass.PRONOUN)): if (len(li) == 2 and noun.is_city_region and (noun.whitespaces_after_count < 2)): pass else: return None if (not is_latin): if ((noun.termin_item.is_region and 
not attach_always and ((not adj_terr_before or new_name.is_doubt))) and not noun.is_city_region and not noun.termin_item.is_specific_prefix): if (not MiscLocationHelper. check_geo_object_before( noun.begin_token)): if (not noun.is_doubt and noun.begin_token != noun.end_token): pass elif ((noun.termin_item.is_always_prefix and len(li) == 2 and li[0] == noun) and li[1] == new_name): pass else: return None if (noun.is_doubt and len(adj_list) == 0): if (noun.termin_item.acronym == "МО" or noun.termin_item.acronym == "ЛО"): if (k == (len(li) - 1) and li[k].termin_item is not None): add_noun = li[k] k += 1 elif (len(li) == 2 and noun == li[0] and str(new_name).endswith("совет")): pass else: return None else: return None pers = new_name.kit.process_referent( "PERSON", new_name.begin_token) if (pers is not None): return None name = MiscHelper.get_text_value(new_name.begin_token, new_name.end_token, GetTextAttr.NO) if (new_name.begin_token != new_name.end_token): ttt = new_name.begin_token.next0_ while ttt is not None and ttt.end_char <= new_name.end_char: if (ttt.chars.is_letter): ty = TerrItemToken.try_parse( ttt, None, False, False, None) if ((ty is not None and ty.termin_item is not None and noun is not None) and ((noun.termin_item.canonic_text in ty.termin_item.canonic_text or ty.termin_item.canonic_text in noun.termin_item.canonic_text))): name = MiscHelper.get_text_value( new_name.begin_token, ttt.previous, GetTextAttr.NO) break ttt = ttt.next0_ if (len(adj_list) > 0): npt = NounPhraseHelper.try_parse(adj_list[0].begin_token, NounPhraseParseAttr.NO, 0, None) if (npt is not None and npt.end_token == noun.end_token): alt_name = "{0} {1}".format( npt.get_normal_case_text(None, MorphNumber.UNDEFINED, MorphGender.UNDEFINED, False), name) else: if ((len(li) == 1 and noun is not None and noun.end_token.next0_ is not None) and (isinstance( noun.end_token.next0_.get_referent(), GeoReferent))): g = Utils.asObjectOrNull(noun.end_token.next0_.get_referent(), GeoReferent) if (noun.termin_item is not None): tyy = noun.termin_item.canonic_text.lower() ooo = False if (g.find_slot(GeoReferent.ATTR_TYPE, tyy, True) is not None): ooo = True elif (tyy.endswith("район") and g.find_slot( GeoReferent.ATTR_TYPE, "район", True) is not None): ooo = True if (ooo): return ReferentToken._new734(g, noun.begin_token, noun.end_token.next0_, noun.begin_token.morph) if ((len(li) == 1 and noun == li[0] and li[0].termin_item is not None) and TerrItemToken.try_parse(li[0].end_token.next0_, None, True, False, None) is None and TerrItemToken.try_parse(li[0].begin_token.previous, None, True, False, None) is None): if (li[0].morph.number == MorphNumber.PLURAL): return None cou = 0 str0_ = li[0].termin_item.canonic_text.lower() tt = li[0].begin_token.previous first_pass3158 = True while True: if first_pass3158: first_pass3158 = False else: tt = tt.previous if (not (tt is not None)): break if (tt.is_newline_after): cou += 10 else: cou += 1 if (cou > 500): break g = Utils.asObjectOrNull(tt.get_referent(), GeoReferent) if (g is None): continue ok = True cou = 0 tt = li[0].end_token.next0_ first_pass3159 = True while True: if first_pass3159: first_pass3159 = False else: tt = tt.next0_ if (not (tt is not None)): break if (tt.is_newline_before): cou += 10 else: cou += 1 if (cou > 500): break tee = TerrItemToken.try_parse(tt, None, True, False, None) if (tee is None): continue ok = False break if (ok): ii = 0 while g is not None and (ii < 3): if (g.find_slot(GeoReferent.ATTR_TYPE, str0_, True) is not None): return ReferentToken._new734( g, 
li[0].begin_token, li[0].end_token, noun.begin_token.morph) g = g.higher ii += 1 break return None ter = None if (ex_obj is not None and (isinstance(ex_obj.tag, GeoReferent))): ter = (Utils.asObjectOrNull(ex_obj.tag, GeoReferent)) else: ter = GeoReferent() if (ex_obj is not None): geo_ = Utils.asObjectOrNull(ex_obj.onto_item.referent, GeoReferent) if (geo_ is not None and not geo_.is_city): ter._merge_slots2(geo_, li[0].kit.base_language) else: ter._add_name(name) if (noun is None and ex_obj.can_be_city): ter._add_typ_city(li[0].kit.base_language) else: pass elif (new_name is not None): ter._add_name(name) if (alt_name is not None): ter._add_name(alt_name) if (noun is not None): if (noun.termin_item.canonic_text == "АО"): ter._add_typ( ("АВТОНОМНИЙ ОКРУГ" if li[0].kit.base_language.is_ua else "АВТОНОМНЫЙ ОКРУГ")) elif (noun.termin_item.canonic_text == "МУНИЦИПАЛЬНОЕ СОБРАНИЕ" or noun.termin_item.canonic_text == "МУНІЦИПАЛЬНЕ ЗБОРИ"): ter._add_typ(("МУНІЦИПАЛЬНЕ УТВОРЕННЯ" if li[0].kit.base_language.is_ua else "МУНИЦИПАЛЬНОЕ ОБРАЗОВАНИЕ")) elif (noun.termin_item.acronym == "МО" and add_noun is not None): ter._add_typ(add_noun.termin_item.canonic_text) else: if (noun.termin_item.canonic_text == "СОЮЗ" and ex_obj is not None and ex_obj.end_char > noun.end_char): return ReferentToken._new734(ter, ex_obj.begin_token, ex_obj.end_token, ex_obj.morph) ter._add_typ(noun.termin_item.canonic_text) if (noun.termin_item.is_region and ter.is_state): ter._add_typ_reg(li[0].kit.base_language) if (ter.is_state and ter.is_region): for a in adj_list: if (a.termin_item.is_region): ter._add_typ_reg(li[0].kit.base_language) break if (ter.is_state): if (full_name is not None): ter._add_name(full_name) res = ReferentToken(ter, li[0].begin_token, li[k - 1].end_token) if (noun is not None and noun.morph.class0_.is_noun): res.morph = noun.morph else: res.morph = MorphCollection() ii = 0 while ii < k: for v in li[ii].morph.items: bi = MorphBaseInfo() bi.copy_from(v) if (noun is not None): if (bi.class0_.is_adjective): bi.class0_ = MorphClass.NOUN res.morph.add_item(bi) ii += 1 if (li[0].termin_item is not None and li[0].termin_item.is_specific_prefix): res.begin_token = li[0].end_token.next0_ if (add_noun is not None and add_noun.end_char > res.end_char): res.end_token = add_noun.end_token if ((isinstance(res.begin_token.previous, TextToken)) and (res.whitespaces_before_count < 2)): tt = Utils.asObjectOrNull(res.begin_token.previous, TextToken) if (tt.term == "АР"): for ty in ter.typs: if ("республика" in ty or "республіка" in ty): res.begin_token = tt break return res
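# A minimal standalone sketch (not pullenti code) of the morphology merge at
# the end of try_attach_territory above: every variant collected from the
# matched items is copied into the result, and when the geo noun is known the
# adjective variants are recast as nouns so the referent declines with it.
from dataclasses import dataclass, replace

@dataclass(frozen=True)
class _MorphVariant:
    class0_: str   # e.g. "adjective" or "noun"
    case_: str

def _merge_morph(item_variants, has_noun):
    merged = []
    for variants in item_variants:
        for v in variants:
            if has_noun and v.class0_ == "adjective":
                v = replace(v, class0_="noun")
            merged.append(v)
    return merged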
def try_attach(t : 'Token', p1 : 'InstrumentParticipantReferent'=None, p2 : 'InstrumentParticipantReferent'=None, is_contract : bool=False) -> 'ParticipantToken': if (t is None): return None tt = t br = False if (p1 is None and p2 is None and is_contract): r1 = t.get_referent() if ((r1 is not None and t.next0_ is not None and t.next0_.is_comma_and) and (isinstance(t.next0_.next0_, ReferentToken))): r2 = t.next0_.next0_.get_referent() if (r1.type_name == r2.type_name): ttt = t.next0_.next0_.next0_ refs = list() refs.append(r1) refs.append(r2) first_pass3282 = True while True: if first_pass3282: first_pass3282 = False else: ttt = ttt.next0_ if (not (ttt is not None)): break if ((ttt.is_comma_and and ttt.next0_ is not None and ttt.next0_.get_referent() is not None) and ttt.next0_.get_referent().type_name == r1.type_name): ttt = ttt.next0_ if (not ttt.get_referent() in refs): refs.append(ttt.get_referent()) continue break first_pass3283 = True while True: if first_pass3283: first_pass3283 = False else: ttt = ttt.next0_ if (not (ttt is not None)): break if (ttt.is_comma or ttt.morph.class0_.is_preposition): continue if ((ttt.is_value("ИМЕНОВАТЬ", None) or ttt.is_value("ДАЛЬНЕЙШИЙ", None) or ttt.is_value("ДАЛЕЕ", None)) or ttt.is_value("ТЕКСТ", None)): continue if (ttt.is_value("ДОГОВАРИВАТЬСЯ", None)): continue npt = NounPhraseHelper.try_parse(ttt, NounPhraseParseAttr.NO, 0, None) if (npt is not None and npt.noun.is_value("СТОРОНА", None) and npt.morph.number != MorphNumber.SINGULAR): re = ParticipantToken._new1573(t, npt.end_token, ParticipantToken.Kinds.NAMEDASPARTS) re.parts = refs return re break if ((isinstance(r1, OrganizationReferent)) or (isinstance(r1, PersonReferent))): has_br = False has_named = False if (isinstance(r1, PersonReferent)): if (t.previous is not None and t.previous.is_value("ЛИЦО", None)): return None elif (t.previous is not None and ((t.previous.is_value("ВЫДАВАТЬ", None) or t.previous.is_value("ВЫДАТЬ", None)))): return None ttt = t.begin_token while ttt is not None and (ttt.end_char < t.end_char): if (ttt.is_char('(')): has_br = True elif ((ttt.is_value("ИМЕНОВАТЬ", None) or ttt.is_value("ДАЛЬНЕЙШИЙ", None) or ttt.is_value("ДАЛЕЕ", None)) or ttt.is_value("ТЕКСТ", None)): has_named = True elif ((ttt.is_comma or ttt.morph.class0_.is_preposition or ttt.is_hiphen) or ttt.is_char(':')): pass elif (isinstance(ttt, ReferentToken)): pass elif (has_br or has_named): npt = NounPhraseHelper.try_parse(ttt, NounPhraseParseAttr.REFERENTCANBENOUN, 0, None) if (npt is None): break if (has_br): if (npt.end_token.next0_ is None or not npt.end_token.next0_.is_char(')')): break if (not has_named): if (ParticipantToken.M_ONTOLOGY.try_parse(ttt, TerminParseAttr.NO) is None): break re = ParticipantToken._new1573(t, t, ParticipantToken.Kinds.NAMEDAS) re.typ = npt.get_normal_case_text(None, MorphNumber.SINGULAR, MorphGender.UNDEFINED, False) re.parts = list() re.parts.append(r1) return re ttt = ttt.next0_ has_br = False has_named = False end_side = None brr = None add_refs = None ttt = t.next0_ first_pass3284 = True while True: if first_pass3284: first_pass3284 = False else: ttt = ttt.next0_ if (not (ttt is not None)): break if ((isinstance(ttt, NumberToken)) and (isinstance(ttt.next0_, TextToken)) and ttt.next0_.term == "СТОРОНЫ"): ttt = ttt.next0_ end_side = ttt if (ttt.next0_ is not None and ttt.next0_.is_comma): ttt = ttt.next0_ if (ttt.next0_ is not None and ttt.next0_.is_and): break if (brr is not None and ttt.begin_char > brr.end_char): brr = (None) if 
(BracketHelper.can_be_start_of_sequence(ttt, False, False)): brr = BracketHelper.try_parse(ttt, BracketParseAttr.NO, 100) if (brr is not None and (brr.length_char < 7) and ttt.is_char('(')): ttt = brr.end_token brr = (None) continue elif ((ttt.is_value("ИМЕНОВАТЬ", None) or ttt.is_value("ДАЛЬНЕЙШИЙ", None) or ttt.is_value("ДАЛЕЕ", None)) or ttt.is_value("ТЕКСТ", None)): has_named = True elif ((ttt.is_comma or ttt.morph.class0_.is_preposition or ttt.is_hiphen) or ttt.is_char(':')): pass elif (brr is not None or has_named): if (BracketHelper.can_be_start_of_sequence(ttt, True, False)): ttt = ttt.next0_ npt = NounPhraseHelper.try_parse(ttt, NounPhraseParseAttr.REFERENTCANBENOUN, 0, None) typ22 = None if (npt is not None): ttt = npt.end_token if (npt.end_token.is_value("ДОГОВОР", None)): continue else: ttok = None if (isinstance(ttt, MetaToken)): ttok = ParticipantToken.M_ONTOLOGY.try_parse(ttt.begin_token, TerminParseAttr.NO) if (ttok is not None): typ22 = ttok.termin.canonic_text elif (has_named and ttt.morph.class0_.is_adjective): typ22 = ttt.get_normal_case_text(MorphClass.ADJECTIVE, MorphNumber.UNDEFINED, MorphGender.UNDEFINED, False) elif (brr is not None): continue else: break if (BracketHelper.can_be_end_of_sequence(ttt.next0_, True, None, False)): ttt = ttt.next0_ if (brr is not None): if (ttt.next0_ is None): ttt = brr.end_token continue ttt = ttt.next0_ if (not has_named and typ22 is None): if (ParticipantToken.M_ONTOLOGY.try_parse(npt.begin_token, TerminParseAttr.NO) is None): break re = ParticipantToken._new1573(t, ttt, ParticipantToken.Kinds.NAMEDAS) re.typ = (Utils.ifNotNull(typ22, npt.get_normal_case_text(None, MorphNumber.SINGULAR, MorphGender.UNDEFINED, False))) re.parts = list() re.parts.append(r1) return re elif ((ttt.is_value("ЗАРЕГИСТРИРОВАННЫЙ", None) or ttt.is_value("КАЧЕСТВО", None) or ttt.is_value("ПРОЖИВАЮЩИЙ", None)) or ttt.is_value("ЗАРЕГ", None)): pass elif (ttt.get_referent() == r1): pass elif ((isinstance(ttt.get_referent(), PersonIdentityReferent)) or (isinstance(ttt.get_referent(), AddressReferent))): if (add_refs is None): add_refs = list() add_refs.append(ttt.get_referent()) else: prr = ttt.kit.process_referent("PERSONPROPERTY", ttt) if (prr is not None): ttt = prr.end_token continue if (isinstance(ttt.get_referent(), GeoReferent)): continue npt = NounPhraseHelper.try_parse(ttt, NounPhraseParseAttr.NO, 0, None) if (npt is not None): if ((npt.noun.is_value("МЕСТО", None) or npt.noun.is_value("ЖИТЕЛЬСТВО", None) or npt.noun.is_value("ПРЕДПРИНИМАТЕЛЬ", None)) or npt.noun.is_value("ПОЛ", None) or npt.noun.is_value("РОЖДЕНИЕ", None)): ttt = npt.end_token continue if (ttt.is_newline_before): break if (ttt.length_char < 3): continue mc = ttt.get_morph_class_in_dictionary() if (mc.is_adverb or mc.is_adjective): continue if (ttt.chars.is_all_upper): continue break if (end_side is not None or ((add_refs is not None and t.previous is not None and t.previous.is_and))): re = ParticipantToken._new1573(t, Utils.ifNotNull(end_side, t), ParticipantToken.Kinds.NAMEDAS) re.typ = (None) re.parts = list() re.parts.append(r1) if (add_refs is not None): re.parts.extend(add_refs) return re too = ParticipantToken.M_ONTOLOGY.try_parse(t, TerminParseAttr.NO) if (too is not None): if ((isinstance(t.previous, TextToken)) and t.previous.is_value("ЛИЦО", None)): too = (None) if (too is not None and too.termin.tag is not None and too.termin.canonic_text != "СТОРОНА"): tt1 = too.end_token.next0_ if (tt1 is not None): if (tt1.is_hiphen or tt1.is_char(':')): tt1 = tt1.next0_ if 
(isinstance(tt1, ReferentToken)): r1 = tt1.get_referent() if ((isinstance(r1, PersonReferent)) or (isinstance(r1, OrganizationReferent))): re = ParticipantToken._new1573(t, tt1, ParticipantToken.Kinds.NAMEDAS) re.typ = too.termin.canonic_text re.parts = list() re.parts.append(r1) return re add_typ1 = (None if p1 is None else p1.typ) add_typ2 = (None if p2 is None else p2.typ) if (BracketHelper.can_be_start_of_sequence(tt, False, False) and tt.next0_ is not None): br = True tt = tt.next0_ term1 = None term2 = None if (add_typ1 is not None and add_typ1.find(' ') > 0 and not add_typ1.startswith("СТОРОНА")): term1 = Termin(add_typ1) if (add_typ2 is not None and add_typ2.find(' ') > 0 and not add_typ2.startswith("СТОРОНА")): term2 = Termin(add_typ2) named = False typ_ = None t1 = None t0 = tt first_pass3285 = True while True: if first_pass3285: first_pass3285 = False else: tt = tt.next0_ if (not (tt is not None)): break if (tt.morph.class0_.is_preposition and typ_ is not None): continue if (tt.is_char_of("(:)") or tt.is_hiphen): continue if (tt.is_table_control_char): break if (tt.is_newline_before and tt != t0): if (isinstance(tt, NumberToken)): break if ((isinstance(tt, TextToken)) and (isinstance(tt.previous, TextToken))): if (tt.previous.is_value(tt.term, None)): break if (BracketHelper.is_bracket(tt, False)): continue tok = (ParticipantToken.M_ONTOLOGY.try_parse(tt, TerminParseAttr.NO) if ParticipantToken.M_ONTOLOGY is not None else None) if (tok is not None and (isinstance(tt.previous, TextToken))): if (tt.previous.is_value("ЛИЦО", None)): return None if (tok is None): if (add_typ1 is not None and ((MiscHelper.is_not_more_than_one_error(add_typ1, tt) or (((isinstance(tt, MetaToken)) and tt.begin_token.is_value(add_typ1, None)))))): if (typ_ is not None): if (not ParticipantToken.__is_types_equal(add_typ1, typ_)): break typ_ = add_typ1 t1 = tt continue if (add_typ2 is not None and ((MiscHelper.is_not_more_than_one_error(add_typ2, tt) or (((isinstance(tt, MetaToken)) and tt.begin_token.is_value(add_typ2, None)))))): if (typ_ is not None): if (not ParticipantToken.__is_types_equal(add_typ2, typ_)): break typ_ = add_typ2 t1 = tt continue if (tt.chars.is_letter): if (term1 is not None): tok1 = term1.try_parse(tt, TerminParseAttr.NO) if (tok1 is not None): if (typ_ is not None): if (not ParticipantToken.__is_types_equal(add_typ1, typ_)): break typ_ = add_typ1 tt = tok1.end_token t1 = tt continue if (term2 is not None): tok2 = term2.try_parse(tt, TerminParseAttr.NO) if (tok2 is not None): if (typ_ is not None): if (not ParticipantToken.__is_types_equal(add_typ2, typ_)): break typ_ = add_typ2 tt = tok2.end_token t1 = tt continue if (named and tt.get_morph_class_in_dictionary().is_noun): if (not tt.chars.is_all_lower or BracketHelper.is_bracket(tt.previous, True)): if (DecreeToken.is_keyword(tt, False) is None): val = tt.get_normal_case_text(MorphClass.NOUN, MorphNumber.SINGULAR, MorphGender.UNDEFINED, False) if (typ_ is not None): if (not ParticipantToken.__is_types_equal(typ_, val)): break typ_ = val t1 = tt continue if (named and typ_ is None and is_contract): if ((isinstance(tt, TextToken)) and tt.chars.is_cyrillic_letter and tt.chars.is_capital_upper): dc = tt.get_morph_class_in_dictionary() if (dc.is_undefined or dc.is_noun): dt = DecreeToken.try_attach(tt, None, False) ok = True if (dt is not None): ok = False elif (tt.is_value("СТОРОНА", None)): ok = False if (ok): typ_ = tt.lemma t1 = tt continue if (dc.is_adjective): npt = NounPhraseHelper.try_parse(tt, NounPhraseParseAttr.NO, 0, None) 
if (npt is not None and len(npt.adjectives) > 0 and npt.noun.get_morph_class_in_dictionary().is_noun): typ_ = npt.get_normal_case_text(None, MorphNumber.SINGULAR, MorphGender.UNDEFINED, False) t1 = npt.end_token continue if (tt == t): break if ((isinstance(tt, NumberToken)) or tt.is_char('.')): break if (tt.length_char < 4): if (typ_ is not None): continue break if (tok.termin.tag is None): named = True else: if (typ_ is not None): break if (tok.termin.canonic_text == "СТОРОНА"): tt1 = tt.next0_ if (tt1 is not None and tt1.is_hiphen): tt1 = tt1.next0_ if (not (isinstance(tt1, NumberToken))): break if (tt1.is_newline_before): break typ_ = "{0} {1}".format(tok.termin.canonic_text, tt1.value) t1 = tt1 else: typ_ = tok.termin.canonic_text t1 = tok.end_token break tt = tok.end_token if (typ_ is None): return None if (not named and t1 != t and not typ_.startswith("СТОРОНА")): if (not ParticipantToken.__is_types_equal(typ_, add_typ1) and not ParticipantToken.__is_types_equal(typ_, add_typ2)): return None if (BracketHelper.can_be_end_of_sequence(t1.next0_, False, None, False)): t1 = t1.next0_ if (not t.is_whitespace_before and BracketHelper.can_be_start_of_sequence(t.previous, False, False)): t = t.previous elif (BracketHelper.can_be_start_of_sequence(t, False, False) and BracketHelper.can_be_end_of_sequence(t1.next0_, True, t, True)): t1 = t1.next0_ if (br and t1.next0_ is not None and BracketHelper.can_be_end_of_sequence(t1.next0_, False, None, False)): t1 = t1.next0_ res = ParticipantToken._new1578(t, t1, (ParticipantToken.Kinds.NAMEDAS if named else ParticipantToken.Kinds.PURE), typ_) if (t.is_char(':')): res.begin_token = t.next0_ return res
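# A minimal standalone sketch (not the pullenti implementation) of the
# tolerant comparison the code above relies on through
# MiscHelper.is_not_more_than_one_error: two strings are treated as equal if
# they differ by at most one substitution, insertion or deletion.
def _not_more_than_one_error(a: str, b: str) -> bool:
    a, b = a.upper(), b.upper()
    if abs(len(a) - len(b)) > 1:
        return False
    if len(a) > len(b):
        a, b = b, a
    if len(a) == len(b):
        return sum(1 for x, y in zip(a, b) if x != y) <= 1
    # lengths differ by one: allow a single extra character in the longer string
    i = j = errors = 0
    while i < len(a) and j < len(b):
        if a[i] == b[j]:
            i += 1
            j += 1
        else:
            errors += 1
            if errors > 1:
                return False
            j += 1
    return True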
def tryAttach(t : 'Token') -> 'TitleItemToken': tt = Utils.asObjectOrNull(t, TextToken) if (tt is not None): t1 = tt if (tt.term == "ТЕМА"): tit = TitleItemToken.tryAttach(tt.next0_) if (tit is not None and tit.typ == TitleItemToken.Types.TYP): t1 = tit.end_token if (t1.next0_ is not None and t1.next0_.isChar(':')): t1 = t1.next0_ return TitleItemToken._new2501(t, t1, TitleItemToken.Types.TYPANDTHEME, tit.value) if (tt.next0_ is not None and tt.next0_.isChar(':')): t1 = tt.next0_ return TitleItemToken(tt, t1, TitleItemToken.Types.THEME) if (tt.term == "ПО" or tt.term == "НА"): if (tt.next0_ is not None and tt.next0_.isValue("ТЕМА", None)): t1 = tt.next0_ if (t1.next0_ is not None and t1.next0_.isChar(':')): t1 = t1.next0_ return TitleItemToken(tt, t1, TitleItemToken.Types.THEME) if (tt.term == "ПЕРЕВОД" or tt.term == "ПЕР"): tt2 = tt.next0_ if (tt2 is not None and tt2.isChar('.')): tt2 = tt2.next0_ if (isinstance(tt2, TextToken)): if ((tt2).term == "C" or (tt2).term == "С"): tt2 = tt2.next0_ if (isinstance(tt2, TextToken)): return TitleItemToken(t, tt2, TitleItemToken.Types.TRANSLATE) if (tt.term == "СЕКЦИЯ" or tt.term == "SECTION" or tt.term == "СЕКЦІЯ"): t1 = tt.next0_ if (t1 is not None and t1.isChar(':')): t1 = t1.next0_ br = BracketHelper.tryParse(t1, BracketParseAttr.NO, 100) if (br is not None): t1 = br.end_token elif (t1 != tt.next0_): while t1 is not None: if (t1.is_newline_after): break t1 = t1.next0_ if (t1 is None): return None if (t1 != tt.next0_): return TitleItemToken(tt, t1, TitleItemToken.Types.DUST) t1 = (None) if (tt.isValue("СПЕЦИАЛЬНОСТЬ", "СПЕЦІАЛЬНІСТЬ")): t1 = tt.next0_ elif (tt.morph.class0_.is_preposition and tt.next0_ is not None and tt.next0_.isValue("СПЕЦИАЛЬНОСТЬ", "СПЕЦІАЛЬНІСТЬ")): t1 = tt.next0_.next0_ elif (tt.isChar('/') and tt.is_newline_before): t1 = tt.next0_ if (t1 is not None): if (t1.isCharOf(":") or t1.is_hiphen): t1 = t1.next0_ spec = TitleItemToken.__tryAttachSpeciality(t1, True) if (spec is not None): spec.begin_token = t return spec sss = TitleItemToken.__tryAttachSpeciality(t, False) if (sss is not None): return sss if (isinstance(t, ReferentToken)): return None npt = NounPhraseHelper.tryParse(t, NounPhraseParseAttr.NO, 0) if (npt is not None): s = npt.getNormalCaseText(None, False, MorphGender.UNDEFINED, False) tok = TitleItemToken.M_TERMINS.tryParse(npt.end_token, TerminParseAttr.NO) if (tok is not None): ty = Utils.valToEnum(tok.termin.tag, TitleItemToken.Types) if (ty == TitleItemToken.Types.TYP): tit = TitleItemToken.tryAttach(tok.end_token.next0_) if (tit is not None and tit.typ == TitleItemToken.Types.THEME): return TitleItemToken._new2501(npt.begin_token, tit.end_token, TitleItemToken.Types.TYPANDTHEME, s) if (s == "РАБОТА" or s == "РОБОТА" or s == "ПРОЕКТ"): return None t1 = tok.end_token if (s == "ДИССЕРТАЦИЯ" or s == "ДИСЕРТАЦІЯ"): err = 0 ttt = t1.next0_ first_pass3125 = True while True: if first_pass3125: first_pass3125 = False else: ttt = ttt.next0_ if (not (ttt is not None)): break if (ttt.morph.class0_.is_preposition): continue if (ttt.isValue("СОИСКАНИЕ", "")): continue npt1 = NounPhraseHelper.tryParse(ttt, NounPhraseParseAttr.NO, 0) if (npt1 is not None and npt1.noun.isValue("СТЕПЕНЬ", "СТУПІНЬ")): ttt = npt1.end_token t1 = ttt continue rt = t1.kit.processReferent("PERSON", ttt) if (rt is not None and (isinstance(rt.referent, PersonPropertyReferent))): ppr = Utils.asObjectOrNull(rt.referent, PersonPropertyReferent) if (ppr.name == "доктор наук"): t1 = rt.end_token s = "ДОКТОРСКАЯ ДИССЕРТАЦИЯ" break elif (ppr.name == "кандидат 
наук"): t1 = rt.end_token s = "КАНДИДАТСКАЯ ДИССЕРТАЦИЯ" break elif (ppr.name == "магистр"): t1 = rt.end_token s = "МАГИСТЕРСКАЯ ДИССЕРТАЦИЯ" break if (ttt.isValue("ДОКТОР", None) or ttt.isValue("КАНДИДАТ", None) or ttt.isValue("МАГИСТР", "МАГІСТР")): t1 = ttt npt1 = NounPhraseHelper.tryParse(ttt.next0_, NounPhraseParseAttr.NO, 0) if (npt1 is not None and npt1.end_token.isValue("НАУК", None)): t1 = npt1.end_token s = ("МАГИСТЕРСКАЯ ДИССЕРТАЦИЯ" if ttt.isValue("МАГИСТР", "МАГІСТР") else ("ДОКТОРСКАЯ ДИССЕРТАЦИЯ" if ttt.isValue("ДОКТОР", None) else "КАНДИДАТСКАЯ ДИССЕРТАЦИЯ")) break err += 1 if ((err) > 3): break if (t1.next0_ is not None and t1.next0_.isChar('.')): t1 = t1.next0_ if (s.endswith("ОТЧЕТ") and t1.next0_ is not None and t1.next0_.isValue("О", None)): npt1 = NounPhraseHelper.tryParse(t1.next0_, NounPhraseParseAttr.PARSEPREPOSITION, 0) if (npt1 is not None and npt1.morph.case_.is_prepositional): t1 = npt1.end_token return TitleItemToken._new2501(npt.begin_token, t1, ty, s) tok1 = TitleItemToken.M_TERMINS.tryParse(t, TerminParseAttr.NO) if (tok1 is not None): t1 = tok1.end_token re = TitleItemToken(tok1.begin_token, t1, Utils.valToEnum(tok1.termin.tag, TitleItemToken.Types)) return re if (BracketHelper.canBeStartOfSequence(t, False, False)): tok1 = TitleItemToken.M_TERMINS.tryParse(t.next0_, TerminParseAttr.NO) if (tok1 is not None and BracketHelper.canBeEndOfSequence(tok1.end_token.next0_, False, None, False)): t1 = tok1.end_token.next0_ return TitleItemToken(tok1.begin_token, t1, Utils.valToEnum(tok1.termin.tag, TitleItemToken.Types)) return None
def __TryParse(t: 'Token', prev: 'WeaponItemToken', after_conj: bool, attach_high: bool = False) -> 'WeaponItemToken': if (t is None): return None if (BracketHelper.isBracket(t, True)): wit = WeaponItemToken.__TryParse(t.next0_, prev, after_conj, attach_high) if (wit is not None): if (wit.end_token.next0_ is None): wit.begin_token = t return wit if (BracketHelper.isBracket(wit.end_token.next0_, True)): wit.begin_token = t wit.end_token = wit.end_token.next0_ return wit tok = WeaponItemToken.M_ONTOLOGY.tryParse(t, TerminParseAttr.NO) if (tok is not None): res = WeaponItemToken(t, tok.end_token) res.typ = (Utils.valToEnum(tok.termin.tag, WeaponItemToken.Typs)) if (res.typ == WeaponItemToken.Typs.NOUN): res.value = tok.termin.canonic_text if (tok.termin.tag2 is not None): res.is_doubt = True tt = res.end_token.next0_ first_pass3156 = True while True: if first_pass3156: first_pass3156 = False else: tt = tt.next0_ if (not (tt is not None)): break if (tt.whitespaces_before_count > 2): break wit = WeaponItemToken.__TryParse(tt, None, False, False) if (wit is not None): if (wit.typ == WeaponItemToken.Typs.BRAND): res.__inner_tokens.append(wit) tt = wit.end_token res.end_token = tt continue break if (not ((isinstance(tt, TextToken)))): break mc = tt.getMorphClassInDictionary() if (mc == MorphClass.ADJECTIVE): if (res.alt_value is None): res.alt_value = res.value if (res.alt_value.endswith(res.value)): res.alt_value = res.alt_value[0:0 + len(res.alt_value) - len(res.value)] res.alt_value = "{0}{1} {2}".format( res.alt_value, (tt).term, res.value) res.end_token = tt continue break return res if (res.typ == WeaponItemToken.Typs.BRAND or res.typ == WeaponItemToken.Typs.NAME): res.value = tok.termin.canonic_text return res if (res.typ == WeaponItemToken.Typs.MODEL): res.value = tok.termin.canonic_text if (isinstance(tok.termin.tag2, list)): li = Utils.asObjectOrNull(tok.termin.tag2, list) for to in li: wit = WeaponItemToken._new2600( t, tok.end_token, Utils.valToEnum(to.tag, WeaponItemToken.Typs), to.canonic_text, tok.begin_token == tok.end_token) res.__inner_tokens.append(wit) if (to.additional_vars is not None and len(to.additional_vars) > 0): wit.alt_value = to.additional_vars[0].canonic_text res.__correctModel() return res nnn = MiscHelper.checkNumberPrefix(t) if (nnn is not None): tit = TransItemToken._attachNumber(nnn, True) if (tit is not None): res = WeaponItemToken._new2601(t, tit.end_token, WeaponItemToken.Typs.NUMBER) res.value = tit.value res.alt_value = tit.alt_value return res if (((isinstance(t, TextToken)) and t.chars.is_letter and t.chars.is_all_upper) and (t.length_char < 4)): if ((t.next0_ is not None and ((t.next0_.is_hiphen or t.next0_.isChar('.'))) and (t.next0_.whitespaces_after_count < 2)) and (isinstance(t.next0_.next0_, NumberToken))): res = WeaponItemToken._new2602(t, t.next0_, WeaponItemToken.Typs.MODEL, True) res.value = (t).term res.__correctModel() return res if ((isinstance(t.next0_, NumberToken)) and not t.is_whitespace_after): res = WeaponItemToken._new2602(t, t, WeaponItemToken.Typs.MODEL, True) res.value = (t).term res.__correctModel() return res if ((t).term == "СП" and (t.whitespaces_after_count < 3) and (isinstance(t.next0_, TextToken))): pp = WeaponItemToken.__TryParse(t.next0_, None, False, False) if (pp is not None and ((pp.typ == WeaponItemToken.Typs.MODEL or pp.typ == WeaponItemToken.Typs.BRAND))): res = WeaponItemToken._new2601(t, t, WeaponItemToken.Typs.NOUN) res.value = "ПИСТОЛЕТ" res.alt_value = "СЛУЖЕБНЫЙ ПИСТОЛЕТ" return res if (((isinstance(t, 
TextToken)) and t.chars.is_letter and not t.chars.is_all_lower) and t.length_char > 2): ok = False if (prev is not None and ((prev.typ == WeaponItemToken.Typs.NOUN or prev.typ == WeaponItemToken.Typs.MODEL or prev.typ == WeaponItemToken.Typs.BRAND))): ok = True elif (prev is None and t.previous is not None and t.previous.is_comma_and): ok = True if (ok): res = WeaponItemToken._new2602(t, t, WeaponItemToken.Typs.NAME, True) res.value = (t).term if ((t.next0_ is not None and t.next0_.is_hiphen and (isinstance(t.next0_.next0_, TextToken))) and t.next0_.next0_.chars == t.chars): res.value = "{0}-{1}".format(res.value, (t.next0_.next0_).term) res.end_token = t.next0_.next0_ if (prev is not None and prev.typ == WeaponItemToken.Typs.NOUN): res.typ = WeaponItemToken.Typs.BRAND if (res.end_token.next0_ is not None and res.end_token.next0_.is_hiphen and (isinstance(res.end_token.next0_.next0_, NumberToken))): res.typ = WeaponItemToken.Typs.MODEL res.__correctModel() elif (not res.end_token.is_whitespace_after and (isinstance(res.end_token.next0_, NumberToken))): res.typ = WeaponItemToken.Typs.MODEL res.__correctModel() return res return None
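# A minimal standalone sketch (not pullenti code) of the model-designation
# heuristic in __TryParse above: a short all-uppercase letter group glued to a
# number, optionally through "-" or ".", is taken as a MODEL candidate
# (e.g. "ТТ-33"); the real method works on tokens rather than raw strings.
import re

_MODEL_RE = re.compile(r"^[A-ZА-ЯЁ]{1,3}[-.]?\d+$")

def _looks_like_model(text: str) -> bool:
    return bool(_MODEL_RE.match(text))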
def try_parse_list(t : 'Token', max_count : int=10) -> typing.List['WeaponItemToken']: tr = WeaponItemToken.try_parse(t, None, False, False) if (tr is None): return None if (tr.typ == WeaponItemToken.Typs.CLASS or tr.typ == WeaponItemToken.Typs.DATE): return None tr0 = tr res = list() if (len(tr.__inner_tokens) > 0): res.extend(tr.__inner_tokens) if (res[0].begin_char > tr.begin_char): res[0].begin_token = tr.begin_token res.append(tr) t = tr.end_token.next0_ if (tr.typ == WeaponItemToken.Typs.NOUN): while t is not None: if (t.is_char(':') or t.is_hiphen): pass else: break t = t.next0_ and_conj = False first_pass3425 = True while True: if first_pass3425: first_pass3425 = False else: t = t.next0_ if (not (t is not None)): break if (max_count > 0 and len(res) >= max_count): break if (t.is_char(':')): continue if (tr0.typ == WeaponItemToken.Typs.NOUN): if (t.is_hiphen and t.next0_ is not None): t = t.next0_ tr = WeaponItemToken.try_parse(t, tr0, False, False) if (tr is None): if (BracketHelper.can_be_end_of_sequence(t, True, None, False) and t.next0_ is not None): if (tr0.typ == WeaponItemToken.Typs.MODEL or tr0.typ == WeaponItemToken.Typs.BRAND): tt1 = t.next0_ if (tt1 is not None and tt1.is_comma): tt1 = tt1.next0_ tr = WeaponItemToken.try_parse(tt1, tr0, False, False) if (tr is None and (isinstance(t, ReferentToken))): rt = Utils.asObjectOrNull(t, ReferentToken) if (rt.begin_token == rt.end_token and (isinstance(rt.begin_token, TextToken))): tr = WeaponItemToken.try_parse(rt.begin_token, tr0, False, False) if (tr is not None and tr.begin_token == tr.end_token): tr.begin_token = tr.end_token = t if (tr is None and t.is_char('(')): br = BracketHelper.try_parse(t, BracketParseAttr.NO, 100) if (br is not None): tt = br.end_token.next0_ if (tt is not None and tt.is_comma): tt = tt.next0_ tr = WeaponItemToken.try_parse(tt, tr0, False, False) if (tr is not None and tr.typ == WeaponItemToken.Typs.NUMBER): pass else: tr = (None) if (tr is None and t.is_hiphen): if (tr0.typ == WeaponItemToken.Typs.BRAND or tr0.typ == WeaponItemToken.Typs.MODEL): tr = WeaponItemToken.try_parse(t.next0_, tr0, False, False) if (tr is None and t.is_comma): if ((tr0.typ == WeaponItemToken.Typs.NAME or tr0.typ == WeaponItemToken.Typs.BRAND or tr0.typ == WeaponItemToken.Typs.MODEL) or tr0.typ == WeaponItemToken.Typs.CLASS or tr0.typ == WeaponItemToken.Typs.DATE): tr = WeaponItemToken.try_parse(t.next0_, tr0, True, False) if (tr is not None): if (tr.typ == WeaponItemToken.Typs.NUMBER): pass else: tr = (None) if (tr is None): break if (t.is_newline_before): if (tr.typ != WeaponItemToken.Typs.NUMBER): break if (len(tr.__inner_tokens) > 0): res.extend(tr.__inner_tokens) res.append(tr) tr0 = tr t = tr.end_token if (and_conj): break i = 0 while i < (len(res) - 1): if (res[i].typ == WeaponItemToken.Typs.MODEL and res[i + 1].typ == WeaponItemToken.Typs.MODEL): res[i].end_token = res[i + 1].end_token res[i].value = "{0}{1}{2}".format(res[i].value, ('-' if res[i].end_token.next0_ is not None and res[i].end_token.next0_.is_hiphen else ' '), res[i + 1].value) del res[i + 1] i -= 1 i += 1 return res
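# A minimal standalone sketch (not pullenti code) of the post-processing step
# at the end of try_parse_list above: two adjacent MODEL items are collapsed
# into one, their values joined the way the real code joins them with "-" or
# a space depending on the separating token.
def _merge_adjacent_models(items):
    merged = []
    for it in items:
        if merged and merged[-1]["typ"] == "MODEL" and it["typ"] == "MODEL":
            merged[-1]["value"] = "{0}-{1}".format(merged[-1]["value"], it["value"])
        else:
            merged.append(dict(it))
    return merged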
def getNameEx(begin: 'Token', end: 'Token', cla: 'MorphClass', mc: 'MorphCase', gender: 'MorphGender' = MorphGender.UNDEFINED, ignore_brackets_and_hiphens: bool = False, ignore_geo_referent: bool = False) -> str: if (end is None or begin is None): return None if (begin.end_char > end.begin_char and begin != end): return None res = io.StringIO() prefix = None t = begin first_pass2809 = True while True: if first_pass2809: first_pass2809 = False else: t = t.next0_ if (not (t is not None and t.end_char <= end.end_char)): break if (res.tell() > 1000): break if (t.is_table_control_char): continue if (ignore_brackets_and_hiphens): if (BracketHelper.isBracket(t, False)): if (t == end): break if (t.isCharOf("(<[")): br = BracketHelper.tryParse(t, BracketParseAttr.NO, 100) if (br is not None and br.end_char <= end.end_char): tmp = ProperNameHelper.getNameEx( br.begin_token.next0_, br.end_token.previous, MorphClass.UNDEFINED, MorphCase.UNDEFINED, MorphGender.UNDEFINED, ignore_brackets_and_hiphens, False) if (tmp is not None): if ((br.end_char == end.end_char and br.begin_token.next0_ == br.end_token.previous and not br.begin_token.next0_.chars.is_letter) and not ((isinstance( br.begin_token.next0_, ReferentToken)))): pass else: print(" {0}{1}{2}".format( t.getSourceText(), tmp, br.end_token.getSourceText()), end="", file=res, flush=True) t = br.end_token continue if (t.is_hiphen): if (t == end): break elif (t.is_whitespace_before or t.is_whitespace_after): continue tt = Utils.asObjectOrNull(t, TextToken) if (tt is not None): if (not ignore_brackets_and_hiphens): if ((tt.next0_ is not None and tt.next0_.is_hiphen and (isinstance(tt.next0_.next0_, TextToken))) and tt != end and tt.next0_ != end): if (prefix is None): prefix = tt.term else: prefix = "{0}-{1}".format(prefix, tt.term) t = tt.next0_ if (t == end): break else: continue s = None if (cla.value != (0) or not mc.is_undefined or gender != MorphGender.UNDEFINED): for wff in tt.morph.items: wf = Utils.asObjectOrNull(wff, MorphWordForm) if (wf is None): continue if (cla.value != (0)): if ((((wf.class0_.value) & (cla.value))) == 0): continue if (not mc.is_undefined): if (((wf.case_) & mc).is_undefined): continue if (gender != MorphGender.UNDEFINED): if ((((wf.gender) & (gender))) == (MorphGender.UNDEFINED)): continue if (s is None or wf.normal_case == tt.term): s = wf.normal_case if (s is None and gender != MorphGender.UNDEFINED): for wff in tt.morph.items: wf = Utils.asObjectOrNull(wff, MorphWordForm) if (wf is None): continue if (cla.value != (0)): if ((((wf.class0_.value) & (cla.value))) == 0): continue if (not mc.is_undefined): if (((wf.case_) & mc).is_undefined): continue if (s is None or wf.normal_case == tt.term): s = wf.normal_case if (s is None): s = tt.term if (tt.chars.is_last_lower and tt.length_char > 2): s = tt.getSourceText() for i in range(len(s) - 1, -1, -1): if (str.isupper(s[i])): s = s[0:0 + i + 1] break if (prefix is not None): delim = "-" if (ignore_brackets_and_hiphens): delim = " " s = "{0}{1}{2}".format(prefix, delim, s) prefix = (None) if (res.tell() > 0 and len(s) > 0): if (str.isalnum(s[0])): ch0 = Utils.getCharAtStringIO(res, res.tell() - 1) if (ch0 == '-'): pass else: print(' ', end="", file=res) elif (not ignore_brackets_and_hiphens and BracketHelper.canBeStartOfSequence( tt, False, False)): print(' ', end="", file=res) print(s, end="", file=res) elif (isinstance(t, NumberToken)): if (res.tell() > 0): if (not t.is_whitespace_before and Utils.getCharAtStringIO( res, res.tell() - 1) == '-'): pass else: print(' ', end="", 
file=res) nt = Utils.asObjectOrNull(t, NumberToken) if ((t.morph.class0_.is_adjective and nt.typ == NumberSpellingType.WORDS and nt.begin_token == nt.end_token) and (isinstance(nt.begin_token, TextToken))): print((nt.begin_token).term, end="", file=res) else: print(nt.value, end="", file=res) elif (isinstance(t, MetaToken)): if ((ignore_geo_referent and t != begin and t.getReferent() is not None) and t.getReferent().type_name == "GEO"): continue s = ProperNameHelper.getNameEx( (t).begin_token, (t).end_token, cla, mc, gender, ignore_brackets_and_hiphens, ignore_geo_referent) if (not Utils.isNullOrEmpty(s)): if (res.tell() > 0): if (not t.is_whitespace_before and Utils.getCharAtStringIO( res, res.tell() - 1) == '-'): pass else: print(' ', end="", file=res) print(s, end="", file=res) if (t == end): break if (res.tell() == 0): return None return Utils.toStringStringIO(res)
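# A minimal standalone sketch (not pullenti code) of the form-selection rule
# inside getNameEx above: among the morphological variants of a token, keep a
# normal-case form whose class and gender satisfy the requested constraints,
# prefer a variant whose normal form equals the surface term, and fall back to
# the raw term when nothing fits. The variant tuples are illustrative only.
def _pick_normal_form(term, variants, want_class=None, want_gender=None):
    chosen = None
    for normal, word_class, gender in variants:
        if want_class is not None and word_class != want_class:
            continue
        if want_gender is not None and gender != want_gender:
            continue
        if chosen is None or normal == term:
            chosen = normal
    return chosen if chosen is not None else term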
def try_attach(t: 'Token') -> 'ParenthesisToken': if (t is None): return None tok = ParenthesisToken.__m_termins.try_parse(t, TerminParseAttr.NO) if (tok is not None): res = ParenthesisToken(t, tok.end_token) return res if (not (isinstance(t, TextToken))): return None mc = t.get_morph_class_in_dictionary() ok = False t1 = None if (mc.is_adverb): ok = True elif (mc.is_adjective): if (t.morph.contains_attr("сравн.", None) and t.morph.contains_attr("кач.прил.", None)): ok = True if (ok and t.next0_ is not None): if (t.next0_.is_char(',')): return ParenthesisToken(t, t) t1 = t.next0_ if (t1.get_morph_class_in_dictionary() == MorphClass.VERB): if (t1.morph.contains_attr("н.вр.", None) and t1.morph.contains_attr("нес.в.", None) and t1.morph.contains_attr("дейст.з.", None)): return ParenthesisToken(t, t1) t1 = (None) if ((t.is_value("В", None) and t.next0_ is not None and t.next0_.is_value("СООТВЕТСТВИЕ", None)) and t.next0_.next0_ is not None and t.next0_.next0_.morph.class0_.is_preposition): t1 = t.next0_.next0_.next0_ elif (t.is_value("СОГЛАСНО", None)): t1 = t.next0_ elif (t.is_value("В", None) and t.next0_ is not None): if (t.next0_.is_value("СИЛА", None)): t1 = t.next0_.next0_ elif (t.next0_.morph.class0_.is_adjective or t.next0_.morph.class0_.is_pronoun): npt = NounPhraseHelper.try_parse(t.next0_, NounPhraseParseAttr.NO, 0, None) if (npt is not None): if (npt.noun.is_value("ВИД", None) or npt.noun.is_value("СЛУЧАЙ", None) or npt.noun.is_value("СФЕРА", None)): return ParenthesisToken(t, npt.end_token) if (t1 is not None): if (t1.next0_ is not None): npt1 = NounPhraseHelper.try_parse(t1, NounPhraseParseAttr.NO, 0, None) if (npt1 is not None): if (npt1.noun.is_value("НОРМА", None) or npt1.noun.is_value("ПОЛОЖЕНИЕ", None) or npt1.noun.is_value("УКАЗАНИЕ", None)): t1 = npt1.end_token.next0_ r = t1.get_referent() if (r is not None): res = ParenthesisToken._new1115(t, t1, r) if (t1.next0_ is not None and t1.next0_.is_comma): sila = False ttt = t1.next0_.next0_ first_pass3133 = True while True: if first_pass3133: first_pass3133 = False else: ttt = ttt.next0_ if (not (ttt is not None)): break if (ttt.is_value("СИЛА", None) or ttt.is_value("ДЕЙСТВИЕ", None)): sila = True continue if (ttt.is_comma): if (sila): res.end_token = ttt.previous break if (BracketHelper.can_be_start_of_sequence( ttt, False, False)): break return res npt = NounPhraseHelper.try_parse(t1, NounPhraseParseAttr.NO, 0, None) if (npt is not None): return ParenthesisToken(t, npt.end_token) tt = t if (tt.is_value("НЕ", None) and t is not None): tt = tt.next0_ if (tt.morph.class0_.is_preposition and tt is not None): tt = tt.next0_ npt1 = NounPhraseHelper.try_parse(tt, NounPhraseParseAttr.NO, 0, None) if (npt1 is not None): tt = npt1.end_token if (tt.next0_ is not None and tt.next0_.is_comma): return ParenthesisToken(t, tt.next0_) if (npt1.noun.is_value("ОЧЕРЕДЬ", None)): return ParenthesisToken(t, tt) if (t.is_value("ВЕДЬ", None)): return ParenthesisToken(t, t) return None
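# A minimal standalone sketch (not pullenti code) of the introducer patterns
# recognised by try_attach above: a few fixed prepositional openers (given
# here in the lemmatised form the method checks) signal the start of a
# parenthetical ground clause.
_INTRODUCERS = (
    ("СОГЛАСНО",),
    ("В", "СООТВЕТСТВИЕ", "С"),
    ("В", "СИЛА"),
)

def _starts_parenthesis(lemmas):
    lemmas = [w.upper() for w in lemmas]
    return any(tuple(lemmas[:len(p)]) == p for p in _INTRODUCERS)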
def __getNameWithoutBrackets(begin: 'Token', end: 'Token', normalize_first_noun_group: bool = False, normal_first_group_single: bool = False, ignore_geo_referent: bool = False) -> str: """ Get the string value between two tokens, excluding the surrounding quotes and brackets Args: begin(Token): start token end(Token): end token normalize_first_noun_group(bool): whether to normalize the first noun group (to nominative case) normal_first_group_single(bool): whether to reduce the first noun group to singular ignore_geo_referent(bool): ignore geographic entities inside the span """ res = None if (BracketHelper.canBeStartOfSequence(begin, False, False) and BracketHelper.canBeEndOfSequence(end, False, begin, False)): begin = begin.next0_ end = end.previous if (normalize_first_noun_group and not begin.morph.class0_.is_preposition): npt = NounPhraseHelper.tryParse( begin, NounPhraseParseAttr.REFERENTCANBENOUN, 0) if (npt is not None): if (npt.noun.getMorphClassInDictionary().is_undefined and len(npt.adjectives) == 0): npt = (None) if (npt is not None and npt.end_token.end_char > end.end_char): npt = (None) if (npt is not None): res = npt.getNormalCaseText(None, normal_first_group_single, MorphGender.UNDEFINED, False) te = npt.end_token.next0_ if (((te is not None and te.next0_ is not None and te.is_comma) and (isinstance(te.next0_, TextToken)) and te.next0_.end_char <= end.end_char) and te.next0_.morph.class0_.is_verb and te.next0_.morph.class0_.is_adjective): for it in te.next0_.morph.items: if (it.gender == npt.morph.gender or (((it.gender) & (npt.morph.gender))) != (MorphGender.UNDEFINED)): if (not ( (it.case_) & npt.morph.case_).is_undefined): if (it.number == npt.morph.number or (((it.number) & (npt.morph.number))) != (MorphNumber.UNDEFINED)): var = (te.next0_).term if (isinstance(it, MorphWordForm)): var = (it).normal_case bi = MorphBaseInfo._new549( MorphClass.ADJECTIVE, npt.morph.gender, npt.morph.number, npt.morph.language) var = Morphology.getWordform(var, bi) if (var is not None): res = "{0}, {1}".format(res, var) te = te.next0_.next0_ break if (te is not None and te.end_char <= end.end_char): s = ProperNameHelper.getNameEx(te, end, MorphClass.UNDEFINED, MorphCase.UNDEFINED, MorphGender.UNDEFINED, True, ignore_geo_referent) if (not Utils.isNullOrEmpty(s)): if (not str.isalnum(s[0])): res = "{0}{1}".format(res, s) else: res = "{0} {1}".format(res, s) elif ((isinstance(begin, TextToken)) and begin.chars.is_cyrillic_letter): mm = begin.getMorphClassInDictionary() if (not mm.is_undefined): res = begin.getNormalCaseText(mm, False, MorphGender.UNDEFINED, False) if (begin.end_char < end.end_char): res = "{0} {1}".format( res, ProperNameHelper.getNameEx(begin.next0_, end, MorphClass.UNDEFINED, MorphCase.UNDEFINED, MorphGender.UNDEFINED, True, False)) if (res is None): res = ProperNameHelper.getNameEx(begin, end, MorphClass.UNDEFINED, MorphCase.UNDEFINED, MorphGender.UNDEFINED, True, ignore_geo_referent) if (not Utils.isNullOrEmpty(res)): k = 0 i = len(res) - 1 while i >= 0: if (res[i] == '*' or Utils.isWhitespace(res[i])): pass else: break i -= 1 k += 1 if (k > 0): if (k == len(res)): return None res = res[0:0 + len(res) - k] return res
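The closing loop of __getNameWithoutBrackets strips trailing '*' characters and whitespace and discards the value entirely if nothing else is left. A standalone sketch of just that trimming step (the helper name and the use of str.isspace instead of Utils.isWhitespace are simplifications for illustration):

def trim_trailing_stars(res):
    # Same trimming rule as the tail of __getNameWithoutBrackets: drop trailing
    # '*' characters and whitespace; if nothing else remains, discard the value.
    k = 0
    i = len(res) - 1
    while i >= 0 and (res[i] == '*' or res[i].isspace()):
        i -= 1
        k += 1
    if k == 0:
        return res
    if k == len(res):
        return None
    return res[:len(res) - k]

# trim_trailing_stars("ИВАНОВ * ") -> "ИВАНОВ"
# trim_trailing_stars("** ") -> None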
def tryParseNumberWithPostfix(t: 'Token') -> 'NumberExToken': """ Extract standard measures, e.g.: 10 sq. m. """ if (t is None): return None t0 = t is_dollar = None if (t.length_char == 1 and t.next0_ is not None): is_dollar = NumberHelper._isMoneyChar(t) if ((is_dollar) is not None): t = t.next0_ nt = Utils.asObjectOrNull(t, NumberToken) if (nt is None): if ((not ((isinstance(t.previous, NumberToken))) and t.isChar('(') and (isinstance(t.next0_, NumberToken))) and t.next0_.next0_ is not None and t.next0_.next0_.isChar(')')): toks1 = NumberExHelper._m_postfixes.tryParse( t.next0_.next0_.next0_, TerminParseAttr.NO) if (toks1 is not None and (Utils.valToEnum(toks1.termin.tag, NumberExType)) == NumberExType.MONEY): nt0 = Utils.asObjectOrNull(t.next0_, NumberToken) res = NumberExToken._new471(t, toks1.end_token, nt0.value, nt0.typ, NumberExType.MONEY, nt0.real_value, toks1.begin_token.morph) return NumberExHelper.__correctMoney( res, toks1.begin_token) tt = Utils.asObjectOrNull(t, TextToken) if (tt is None or not tt.morph.class0_.is_adjective): return None val = tt.term i = 4 first_pass2785 = True while True: if first_pass2785: first_pass2785 = False else: i += 1 if (not (i < (len(val) - 5))): break v = val[0:0 + i] li = NumberHelper._m_nums.tryAttachStr(v, tt.morph.language) if (li is None): continue vv = val[i:] lii = NumberExHelper._m_postfixes.tryAttachStr( vv, tt.morph.language) if (lii is not None and len(lii) > 0): re = NumberExToken._new472( t, t, str((li[0].tag)), NumberSpellingType.WORDS, Utils.valToEnum(lii[0].tag, NumberExType), t.morph) NumberExHelper.__correctExtTypes(re) return re break return None if (t.next0_ is None and is_dollar is None): return None f = nt.real_value t1 = nt.next0_ if (((t1 is not None and t1.isCharOf(",."))) or (((isinstance(t1, NumberToken)) and (t1.whitespaces_before_count < 3)))): tt11 = NumberHelper.tryParseRealNumber(nt, False) if (tt11 is not None): t1 = tt11.end_token.next0_ f = tt11.real_value if (t1 is None): if (is_dollar is None): return None elif ((t1.next0_ is not None and t1.next0_.isValue("С", "З") and t1.next0_.next0_ is not None) and t1.next0_.next0_.isValue("ПОЛОВИНА", None)): f += .5 t1 = t1.next0_.next0_ if (t1 is not None and t1.is_hiphen and t1.next0_ is not None): t1 = t1.next0_ det = False altf = f if (((isinstance(t1, NumberToken)) and t1.previous is not None and t1.previous.is_hiphen) and (t1).int_value == 0 and t1.length_char == 2): t1 = t1.next0_ if ((t1 is not None and t1.next0_ is not None and t1.isChar('(')) and (((isinstance(t1.next0_, NumberToken)) or t1.next0_.isValue("НОЛЬ", None))) and t1.next0_.next0_ is not None): nt1 = Utils.asObjectOrNull(t1.next0_, NumberToken) val = 0 if (nt1 is not None): val = nt1.real_value if (math.floor(f) == math.floor(val)): ttt = t1.next0_.next0_ if (ttt.isChar(')')): t1 = ttt.next0_ det = True if ((isinstance(t1, NumberToken)) and (t1).int_value is not None and (t1).int_value == 0): t1 = t1.next0_ elif ( ((((isinstance(ttt, NumberToken)) and ((ttt).real_value < 100) and ttt.next0_ is not None) and ttt.next0_.isChar('/') and ttt.next0_.next0_ is not None) and ttt.next0_.next0_.getSourceText() == "100" and ttt.next0_.next0_.next0_ is not None) and ttt.next0_.next0_.next0_.isChar(')')): rest = NumberExHelper.__getDecimalRest100(f) if ((ttt).int_value is not None and rest == (ttt).int_value): t1 = ttt.next0_.next0_.next0_.next0_ det = True elif ((ttt.isValue("ЦЕЛЫХ", None) and (isinstance(ttt.next0_, NumberToken)) and ttt.next0_.next0_ is not None) and ttt.next0_.next0_.next0_ is not None and
ttt.next0_.next0_.next0_.isChar(')')): num2 = Utils.asObjectOrNull(ttt.next0_, NumberToken) altf = num2.real_value if (ttt.next0_.next0_.isValue("ДЕСЯТЫЙ", None)): altf /= (10) elif (ttt.next0_.next0_.isValue("СОТЫЙ", None)): altf /= (100) elif (ttt.next0_.next0_.isValue("ТЫСЯЧНЫЙ", None)): altf /= (1000) elif (ttt.next0_.next0_.isValue("ДЕСЯТИТЫСЯЧНЫЙ", None)): altf /= (10000) elif (ttt.next0_.next0_.isValue("СТОТЫСЯЧНЫЙ", None)): altf /= (100000) elif (ttt.next0_.next0_.isValue("МИЛЛИОННЫЙ", None)): altf /= (1000000) if (altf < 1): altf += val t1 = ttt.next0_.next0_.next0_.next0_ det = True else: toks1 = NumberExHelper._m_postfixes.tryParse( ttt, TerminParseAttr.NO) if (toks1 is not None): if ((Utils.valToEnum( toks1.termin.tag, NumberExType)) == NumberExType.MONEY): if (toks1.end_token.next0_ is not None and toks1.end_token.next0_.isChar(')')): res = NumberExToken._new473( t, toks1.end_token.next0_, nt.value, nt.typ, NumberExType.MONEY, f, altf, toks1.begin_token.morph) return NumberExHelper.__correctMoney( res, toks1.begin_token) res2 = NumberExHelper.tryParseNumberWithPostfix(t1.next0_) if (res2 is not None and res2.end_token.next0_ is not None and res2.end_token.next0_.isChar(')')): if (res2.int_value is not None): res2.begin_token = t res2.end_token = res2.end_token.next0_ res2.alt_real_value = res2.real_value res2.real_value = f NumberExHelper.__correctExtTypes(res2) if (res2.whitespaces_after_count < 2): toks2 = NumberExHelper._m_postfixes.tryParse( res2.end_token.next0_, TerminParseAttr.NO) if (toks2 is not None): if ((Utils.valToEnum( toks2.termin.tag, NumberExType) ) == NumberExType.MONEY): res2.end_token = toks2.end_token return res2 elif (nt1 is not None and nt1.typ == NumberSpellingType.WORDS and nt.typ == NumberSpellingType.DIGIT): altf = nt1.real_value ttt = t1.next0_.next0_ if (ttt.isChar(')')): t1 = ttt.next0_ det = True if (not det): altf = f if ((t1 is not None and t1.isChar('(') and t1.next0_ is not None) and t1.next0_.isValue("СУММА", None)): br = BracketHelper.tryParse(t1, BracketParseAttr.NO, 100) if (br is not None): t1 = br.end_token.next0_ if (is_dollar is not None): te = None if (t1 is not None): te = t1.previous else: t1 = t0 while t1 is not None: if (t1.next0_ is None): te = t1 t1 = t1.next0_ if (te is None): return None if (te.is_hiphen and te.next0_ is not None): if (te.next0_.isValue("МИЛЛИОННЫЙ", None)): f *= (1000000) altf *= (1000000) te = te.next0_ elif (te.next0_.isValue("МИЛЛИАРДНЫЙ", None)): f *= (1000000000) altf *= (1000000000) te = te.next0_ if (not te.is_whitespace_after and (isinstance(te.next0_, TextToken))): if (te.next0_.isValue("M", None)): f *= (1000000) altf *= (1000000) te = te.next0_ elif (te.next0_.isValue("BN", None)): f *= (1000000000) altf *= (1000000000) te = te.next0_ return NumberExToken._new474(t0, te, "", nt.typ, NumberExType.MONEY, f, altf, is_dollar) if (t1 is None or ((t1.is_newline_before and not det))): return None toks = NumberExHelper._m_postfixes.tryParse(t1, TerminParseAttr.NO) if ((toks is None and det and (isinstance(t1, NumberToken))) and (t1).value == "0"): toks = NumberExHelper._m_postfixes.tryParse( t1.next0_, TerminParseAttr.NO) if (toks is not None): t1 = toks.end_token if (not t1.isChar('.') and t1.next0_ is not None and t1.next0_.isChar('.')): if ((isinstance(t1, TextToken)) and t1.isValue( toks.termin.terms[0].canonical_text, None)): pass elif (not t1.chars.is_letter): pass else: t1 = t1.next0_ if (toks.termin.canonic_text == "LTL"): return None if (toks.begin_token == t1): if 
(t1.morph.class0_.is_preposition or t1.morph.class0_.is_conjunction): if (t1.is_whitespace_before and t1.is_whitespace_after): return None ty = Utils.valToEnum(toks.termin.tag, NumberExType) res = NumberExToken._new473(t, t1, nt.value, nt.typ, ty, f, altf, toks.begin_token.morph) if (ty != NumberExType.MONEY): NumberExHelper.__correctExtTypes(res) return res return NumberExHelper.__correctMoney(res, toks.begin_token) pfx = NumberExHelper.__attachSpecPostfix(t1) if (pfx is not None): pfx.begin_token = t pfx.value = nt.value pfx.typ = nt.typ pfx.real_value = f pfx.alt_real_value = altf return pfx if (t1.next0_ is not None and ((t1.morph.class0_.is_preposition or t1.morph.class0_.is_conjunction))): if (t1.isValue("НА", None)): pass else: nn = NumberExHelper.tryParseNumberWithPostfix(t1.next0_) if (nn is not None): return NumberExToken._new476(t, t, nt.value, nt.typ, nn.ex_typ, f, altf, nn.ex_typ2, nn.ex_typ_param) if (not t1.is_whitespace_after and (isinstance(t1.next0_, NumberToken)) and (isinstance(t1, TextToken))): term = (t1).term ty = NumberExType.UNDEFINED if (term == "СМХ" or term == "CMX"): ty = NumberExType.SANTIMETER elif (term == "MX" or term == "МХ"): ty = NumberExType.METER elif (term == "MMX" or term == "ММХ"): ty = NumberExType.MILLIMETER if (ty != NumberExType.UNDEFINED): return NumberExToken._new477(t, t1, nt.value, nt.typ, ty, f, altf, True) return None
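tryParseNumberWithPostfix returns a NumberExToken whose ex_typ marks the detected unit (NumberExType.MONEY and the length units handled above, among others) and whose real_value/alt_real_value carry the parsed amount. A hedged single-call sketch, assuming the starting token comes from a chain tokenized elsewhere; only the function and the members it sets above are taken from the code:

def parse_money_amount(t):
    # Return the numeric amount when the span starting at `t` is recognized
    # as a money expression (ex_typ == NumberExType.MONEY); otherwise None.
    nex = NumberExHelper.tryParseNumberWithPostfix(t)
    if nex is not None and nex.ex_typ == NumberExType.MONEY:
        return nex.real_value
    return None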
def try_attach(t: 'Token', prev: typing.List['DateItemToken'], detail_regime: bool = False) -> 'DateItemToken': if (t is None): return None t0 = t if (t0.is_char('_')): t = t.next0_ while t is not None: if (t.is_newline_before): return None if (not t.is_char('_')): break t = t.next0_ elif (BracketHelper.can_be_start_of_sequence(t0, True, False)): ok = False t = t.next0_ while t is not None: if (BracketHelper.can_be_end_of_sequence(t, True, t0, False)): ok = True break elif (not t.is_char('_')): break t = t.next0_ if (not ok): t = t0 else: t = t.next0_ while t is not None: if (not t.is_char('_')): break t = t.next0_ elif ((isinstance(t0, TextToken)) and t0.is_value("THE", None)): res0 = DateItemToken.__try_attach(t.next0_, prev, detail_regime) if (res0 is not None): res0.begin_token = t return res0 res = DateItemToken.__try_attach(t, prev, detail_regime) if (res is None): return None res.begin_token = t0 if (not res.is_whitespace_after and res.end_token.next0_ is not None and res.end_token.next0_.is_char('_')): t = res.end_token.next0_ while t is not None: if (not t.is_char('_')): break else: res.end_token = t t = t.next0_ if (res.typ == DateItemToken.DateItemType.YEAR or res.typ == DateItemToken.DateItemType.CENTURY or res.typ == DateItemToken.DateItemType.NUMBER): tok = None ii = 0 t = res.end_token.next0_ if (t is not None and t.is_value("ДО", None)): tok = DateItemToken.M_NEW_AGE.try_parse( t.next0_, TerminParseAttr.NO) ii = -1 elif (t is not None and t.is_value("ОТ", "ВІД")): tok = DateItemToken.M_NEW_AGE.try_parse( t.next0_, TerminParseAttr.NO) ii = 1 else: tok = DateItemToken.M_NEW_AGE.try_parse(t, TerminParseAttr.NO) ii = 1 if (tok is not None): res.new_age = (-1 if ii < 0 else 1) res.end_token = tok.end_token if (res.typ == DateItemToken.DateItemType.NUMBER): res.typ = DateItemToken.DateItemType.YEAR return res
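try_attach is the public wrapper: it skips leading underscores, bracketed placeholders and a leading "THE", delegates to __try_attach, and then looks for an era marker (the M_NEW_AGE terms) after a year, century or number item. A hedged sketch of how consecutive items might be collected, with the growing list passed back in as prev (the token chain is assumed to come from elsewhere):

def collect_date_items(first_token):
    # Accumulate consecutive date items; every call sees the items recognized
    # so far through the `prev` argument, as the signature above expects.
    items = []
    t = first_token
    while t is not None:
        it = DateItemToken.try_attach(t, items, False)
        if it is None:
            break
        items.append(it)
        t = it.end_token.next0_
    return items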
def __correctTailAttributes(p : 'PersonReferent', t0 : 'Token') -> 'Token': res = t0 t = t0 if (t is not None and t.isChar(',')): t = t.next0_ born = False die = False if (t is not None and ((t.isValue("РОДИТЬСЯ", "НАРОДИТИСЯ") or t.isValue("BORN", None)))): t = t.next0_ born = True elif (t is not None and ((t.isValue("УМЕРЕТЬ", "ПОМЕРТИ") or t.isValue("СКОНЧАТЬСЯ", None) or t.isValue("DIED", None)))): t = t.next0_ die = True elif ((t is not None and t.isValue("ДАТА", None) and t.next0_ is not None) and t.next0_.isValue("РОЖДЕНИЕ", "НАРОДЖЕННЯ")): t = t.next0_.next0_ born = True while t is not None: if (t.morph.class0_.is_preposition or t.is_hiphen or t.isChar(':')): t = t.next0_ else: break if (t is not None and t.getReferent() is not None): r = t.getReferent() if (r.type_name == "DATE"): t1 = t if (t.next0_ is not None and ((t.next0_.isValue("Р", None) or t.next0_.isValue("РОЖДЕНИЕ", "НАРОДЖЕННЯ")))): born = True t1 = t.next0_ if (t1.next0_ is not None and t1.next0_.isChar('.')): t1 = t1.next0_ if (born): if (p is not None): p.addSlot(PersonReferent.ATTR_BORN, r, False, 0) res = t1 t = t1 elif (die): if (p is not None): p.addSlot(PersonReferent.ATTR_DIE, r, False, 0) res = t1 t = t1 if (die and t is not None): ag = NumberHelper.tryParseAge(t.next0_) if (ag is not None): if (p is not None): p.addSlot(PersonReferent.ATTR_AGE, str(ag.value), False, 0) t = ag.end_token.next0_ res = ag.end_token if (t is None): return res if (t.isChar('(')): br = BracketHelper.tryParse(t, BracketParseAttr.NO, 100) if (br is not None): t1 = t.next0_ born = False if (t1.isValue("РОД", None)): born = True t1 = t1.next0_ if (t1 is not None and t1.isChar('.')): t1 = t1.next0_ if (isinstance(t1, ReferentToken)): r = t1.getReferent() if (r.type_name == "DATERANGE" and t1.next0_ == br.end_token): bd = Utils.asObjectOrNull(r.getSlotValue("FROM"), Referent) to = Utils.asObjectOrNull(r.getSlotValue("TO"), Referent) if (bd is not None and to is not None): if (p is not None): p.addSlot(PersonReferent.ATTR_BORN, bd, False, 0) p.addSlot(PersonReferent.ATTR_DIE, to, False, 0) res = br.end_token t = res elif (r.type_name == "DATE" and t1.next0_ == br.end_token): if (p is not None): p.addSlot(PersonReferent.ATTR_BORN, r, False, 0) res = br.end_token t = res return res
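__correctTailAttributes scans the text right after a person mention (', родился <date>', a '(1812 - 1870)' bracket, an age following a death date, etc.) and, when p is given, fills the ATTR_BORN / ATTR_DIE / ATTR_AGE slots, returning the last consumed token so the caller can extend the span. A small hedged sketch for reading those slots back; getSlotValue is assumed to be available on PersonReferent in the same way it is used on Referent above:

def describe_life_dates(p):
    # Read back the slots that __correctTailAttributes may have filled.
    return {
        "born": p.getSlotValue(PersonReferent.ATTR_BORN),
        "died": p.getSlotValue(PersonReferent.ATTR_DIE),
        "age": p.getSlotValue(PersonReferent.ATTR_AGE),
    }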
def __try_parse(t: 'Token', lev: int) -> 'BookLinkToken': if (t is None or lev > 3): return None if (t.is_char('[')): re = BookLinkToken.__try_parse(t.next0_, lev + 1) if (re is not None and re.end_token.next0_ is not None and re.end_token.next0_.is_char(']')): re.begin_token = t re.end_token = re.end_token.next0_ return re if (re is not None and re.end_token.is_char(']')): re.begin_token = t return re if (re is not None): if (re.typ == BookLinkTyp.SOSTAVITEL or re.typ == BookLinkTyp.EDITORS): return re br = BracketHelper.try_parse(t, BracketParseAttr.NO, 100) if (br is not None): if ((isinstance(br.end_token.previous, NumberToken)) and (br.length_char < 30)): return BookLinkToken._new329( t, br.end_token, BookLinkTyp.NUMBER, MiscHelper.get_text_value(br.begin_token.next0_, br.end_token.previous, GetTextAttr.NO)) t0 = t if (isinstance(t, ReferentToken)): if (isinstance(t.get_referent(), PersonReferent)): return BookLinkToken.try_parse_author( t, FioTemplateType.UNDEFINED) if (isinstance(t.get_referent(), GeoReferent)): return BookLinkToken._new326(t, t, BookLinkTyp.GEO, t.get_referent()) if (isinstance(t.get_referent(), DateReferent)): dr = Utils.asObjectOrNull(t.get_referent(), DateReferent) if (len(dr.slots) == 1 and dr.year > 0): return BookLinkToken._new329(t, t, BookLinkTyp.YEAR, str(dr.year)) if (dr.year > 0 and t.previous is not None and t.previous.is_comma): return BookLinkToken._new329(t, t, BookLinkTyp.YEAR, str(dr.year)) if (isinstance(t.get_referent(), OrganizationReferent)): org0_ = Utils.asObjectOrNull(t.get_referent(), OrganizationReferent) if (org0_.kind == OrganizationKind.PRESS): return BookLinkToken._new326(t, t, BookLinkTyp.PRESS, org0_) if (isinstance(t.get_referent(), UriReferent)): uri = Utils.asObjectOrNull(t.get_referent(), UriReferent) if ((uri.scheme == "http" or uri.scheme == "https" or uri.scheme == "ftp") or uri.scheme is None): return BookLinkToken._new326(t, t, BookLinkTyp.URL, uri) tok_ = BookLinkToken.__m_termins.try_parse(t, TerminParseAttr.NO) if (tok_ is not None): typ_ = Utils.valToEnum(tok_.termin.tag, BookLinkTyp) ok = True if (typ_ == BookLinkTyp.TYPE or typ_ == BookLinkTyp.NAMETAIL or typ_ == BookLinkTyp.ELECTRONRES): if (t.previous is not None and ((t.previous.is_char_of(".:[") or t.previous.is_hiphen))): pass else: ok = False if (ok): return BookLinkToken._new329(t, tok_.end_token, typ_, tok_.termin.canonic_text) if (typ_ == BookLinkTyp.ELECTRONRES): tt = tok_.end_token.next0_ first_pass3019 = True while True: if first_pass3019: first_pass3019 = False else: tt = tt.next0_ if (not (tt is not None)): break if ((isinstance(tt, TextToken)) and not tt.chars.is_letter): continue if (isinstance(tt.get_referent(), UriReferent)): return BookLinkToken._new326(t, tt, BookLinkTyp.ELECTRONRES, tt.get_referent()) break if (t.is_char('/')): res = BookLinkToken._new329(t, t, BookLinkTyp.DELIMETER, "/") if (t.next0_ is not None and t.next0_.is_char('/')): res.end_token = t.next0_ res.value = "//" if (not t.is_whitespace_before and not t.is_whitespace_after): coo = 3 no = True tt = t.next0_ while tt is not None and coo > 0: vvv = BookLinkToken.try_parse(tt, lev + 1) if (vvv is not None and vvv.typ != BookLinkTyp.NUMBER): no = False break tt = tt.next0_ coo -= 1 if (no): return None return res if ((isinstance(t, NumberToken)) and t.int_value is not None and t.typ == NumberSpellingType.DIGIT): res = BookLinkToken._new329(t, t, BookLinkTyp.NUMBER, str(t.value)) val = t.int_value if (val >= 1930 and (val < 2030)): res.typ = BookLinkTyp.YEAR if (t.next0_ is not None 
and t.next0_.is_char('.')): res.end_token = t.next0_ elif ((t.next0_ is not None and t.next0_.length_char == 1 and not t.next0_.chars.is_letter) and t.next0_.is_whitespace_after): res.end_token = t.next0_ elif (isinstance(t.next0_, TextToken)): term = t.next0_.term if (((term == "СТР" or term == "C" or term == "С") or term == "P" or term == "S") or term == "PAGES"): res.end_token = t.next0_ res.typ = BookLinkTyp.PAGES res.value = str(t.value) return res if (isinstance(t, TextToken)): term = t.term if ((((( ((term == "СТР" or term == "C" or term == "С") or term == "ТОМ" or term == "T") or term == "Т" or term == "P") or term == "PP" or term == "V") or term == "VOL" or term == "S") or term == "СТОР" or t.is_value("PAGE", None)) or t.is_value("СТРАНИЦА", "СТОРІНКА")): tt = t.next0_ while tt is not None: if (tt.is_char_of(".:~")): tt = tt.next0_ else: break if (isinstance(tt, NumberToken)): res = BookLinkToken._new328(t, tt, BookLinkTyp.PAGERANGE) tt0 = tt tt1 = tt tt = tt.next0_ first_pass3020 = True while True: if first_pass3020: first_pass3020 = False else: tt = tt.next0_ if (not (tt is not None)): break if (tt.is_char_of(",") or tt.is_hiphen): if (isinstance(tt.next0_, NumberToken)): tt = tt.next0_ res.end_token = tt tt1 = tt continue break res.value = MiscHelper.get_text_value( tt0, tt1, GetTextAttr.NO) return res if ((term == "M" or term == "М" or term == "СПБ") or term == "K" or term == "К"): if (t.next0_ is not None and t.next0_.is_char_of(":;")): re = BookLinkToken._new328(t, t.next0_, BookLinkTyp.GEO) return re if (t.next0_ is not None and t.next0_.is_char_of(".")): res = BookLinkToken._new328(t, t.next0_, BookLinkTyp.GEO) if (t.next0_.next0_ is not None and t.next0_.next0_.is_char_of(":;")): res.end_token = t.next0_.next0_ elif (t.next0_.next0_ is not None and (isinstance(t.next0_.next0_, NumberToken))): pass elif (t.next0_.next0_ is not None and t.next0_.next0_.is_comma and (isinstance(t.next0_.next0_.next0_, NumberToken))): pass else: return None return res if (term == "ПЕР" or term == "ПЕРЕВ" or term == "ПЕРЕВОД"): tt = t if (tt.next0_ is not None and tt.next0_.is_char('.')): tt = tt.next0_ if (tt.next0_ is not None and ((tt.next0_.is_value("C", None) or tt.next0_.is_value("С", None)))): tt = tt.next0_ if (tt.next0_ is None or tt.whitespaces_after_count > 2): return None re = BookLinkToken._new328(t, tt.next0_, BookLinkTyp.TRANSLATE) return re if (term == "ТАМ" or term == "ТАМЖЕ"): res = BookLinkToken._new328(t, t, BookLinkTyp.TAMZE) if (t.next0_ is not None and t.next0_.is_value("ЖЕ", None)): res.end_token = t.next0_ return res if (((term == "СМ" or term == "CM" or term == "НАПР") or term == "НАПРИМЕР" or term == "SEE") or term == "ПОДРОБНЕЕ" or term == "ПОДРОБНО"): res = BookLinkToken._new328(t, t, BookLinkTyp.SEE) t = t.next0_ first_pass3021 = True while True: if first_pass3021: first_pass3021 = False else: t = t.next0_ if (not (t is not None)): break if (t.is_char_of(".:") or t.is_value("ALSO", None)): res.end_token = t continue if (t.is_value("В", None) or t.is_value("IN", None)): res.end_token = t continue vvv = BookLinkToken.__try_parse(t, lev + 1) if (vvv is not None and vvv.typ == BookLinkTyp.SEE): res.end_token = vvv.end_token break break return res if (term == "БОЛЕЕ"): vvv = BookLinkToken.__try_parse(t.next0_, lev + 1) if (vvv is not None and vvv.typ == BookLinkTyp.SEE): vvv.begin_token = t return vvv no = MiscHelper.check_number_prefix(t) if (isinstance(no, NumberToken)): return BookLinkToken._new328(t, no, BookLinkTyp.N) if (((term == "B" or term == "В")) and 
(isinstance(t.next0_, NumberToken)) and (isinstance(t.next0_.next0_, TextToken))): term2 = t.next0_.next0_.term if (((term2 == "Т" or term2 == "T" or term2.startswith("ТОМ")) or term2 == "TT" or term2 == "ТТ") or term2 == "КН" or term2.startswith("КНИГ")): return BookLinkToken._new328(t, t.next0_.next0_, BookLinkTyp.VOLUME) if (t.is_char('(')): if (((isinstance(t.next0_, NumberToken)) and t.next0_.int_value is not None and t.next0_.next0_ is not None) and t.next0_.next0_.is_char(')')): num = t.next0_.int_value if (num > 1900 and num <= 2040): if (num <= datetime.datetime.now().year): return BookLinkToken._new329(t, t.next0_.next0_, BookLinkTyp.YEAR, str(num)) if (((isinstance(t.next0_, ReferentToken)) and (isinstance(t.next0_.get_referent(), DateReferent)) and t.next0_.next0_ is not None) and t.next0_.next0_.is_char(')')): num = t.next0_.get_referent().year if (num > 0): return BookLinkToken._new329(t, t.next0_.next0_, BookLinkTyp.YEAR, str(num)) return None
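__try_parse classifies one fragment of a bibliographic entry (author, year, page range, «см.», «там же», a URL, a volume marker and so on) into a BookLinkToken with the corresponding typ. A hedged sketch that scans a whole entry with the public try_parse wrapper the code above already calls recursively (the token chain is assumed to come from elsewhere):

def classify_reference_fragments(first_token):
    # Label each recognized fragment of the entry with its BookLinkTyp.
    parts = []
    t = first_token
    while t is not None:
        bl = BookLinkToken.try_parse(t, 0)
        if bl is not None:
            parts.append((bl.typ, bl.value))
            t = bl.end_token.next0_
        else:
            t = t.next0_
    return parts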