Example #1
 def __init__(self):
     self.segmenter = Segmenter()
     self.morph_vocab = MorphVocab()
     self.emb = NewsEmbedding()
     self.morph_tagger = NewsMorphTagger(self.emb)
     self.ner_tagger = NewsNERTagger(self.emb)
     self.syntax_parser = NewsSyntaxParser(self.emb)
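
These attributes are Natasha's standard pipeline components. A hedged sketch of a method such a class might expose to run them in the usual order (the analyze name is an assumption; the call sequence mirrors Examples #10 and #14 and assumes natasha's Doc is imported):

def analyze(self, text):
    # Standard Natasha order: segment -> morph -> syntax -> NER -> lemmas.
    doc = Doc(text)
    doc.segment(self.segmenter)
    doc.tag_morph(self.morph_tagger)
    doc.parse_syntax(self.syntax_parser)
    doc.tag_ner(self.ner_tagger)
    for token in doc.tokens:
        token.lemmatize(self.morph_vocab)
    return doc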
Example #2
 def get_syntax_parser(cls):
     syntax_parser = getattr(cls, "_syntax_parser", None)
     if not syntax_parser:
         embedding = cls.get_embedding()
         syntax_parser = NewsSyntaxParser(embedding)
         cls._syntax_parser = syntax_parser
     return syntax_parser
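
get_syntax_parser builds the parser once and caches it on the class, which matters because constructing NewsSyntaxParser loads model weights. A sketch of the companion get_embedding accessor this snippet implies, following the same caching pattern (the body is an assumption):

@classmethod
def get_embedding(cls):
    embedding = getattr(cls, "_embedding", None)
    if not embedding:
        embedding = NewsEmbedding()
        cls._embedding = embedding
    return embedding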
Example #3
    def __init__(self, syntax_model_name):
        self.syntax_model_name = syntax_model_name
        if syntax_model_name == 'deeppavlov':
            self.model_deeppavlov = build_model(
                configs.syntax.syntax_ru_syntagrus_bert, download=True)
        elif syntax_model_name == 'natasha':
            self.segmenter = Segmenter()
            emb = NewsEmbedding()
            self.syntax_parser = NewsSyntaxParser(emb)
        else:
            print('Choose a valid model; deeppavlov selected automatically')
            self.syntax_model_name = 'deeppavlov'
            self.model_deeppavlov = build_model(
                configs.syntax.syntax_ru_syntagrus_bert, download=True)

        self.conjunction = [
            'и', 'да', 'ни-ни', 'тоже', 'также', 'а', 'но', 'зато', 'однако',
            'же', 'или', 'либо', 'то-то', 'что', 'будто', 'чтобы', 'чтобы не',
            'как бы не', 'когда', 'как только', 'лишь только', 'едва', 'пока',
            'в то время как', 'после того как', 'потому что', 'так как', 'ибо',
            'оттого что', 'из-за того что', 'вследствии того что', 'чтобы',
            'для того чтобы', 'с тем чтобы', 'если', 'если бы', 'раз', 'коль',
            'коли', 'хотя', 'сколько ни', 'когда ни', 'что ни', 'что бы ни',
            'несмотря на то что', 'так что', 'вследствие того что', 'как',
            'будто', 'как будто', 'точно', 'словно'
        ]

        self.morph = pymorphy2.MorphAnalyzer()

        self.like_root = ['acl:relcl', 'advcl', 'root', 'parataxis', 'ccomp']

        self.can_be_root = ['nsubj', 'conj']
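
The constructor above keeps two backends behind one flag. A hedged sketch of a dispatch method such a class might expose (the parse name is an assumption; DeepPavlov's build_model returns a callable that takes a batch of sentences and yields CoNLL-U strings, while the Natasha branch reuses the segmenter and parser created above, assuming Doc is imported):

def parse(self, sentence):
    if self.syntax_model_name == 'deeppavlov':
        # Batch of one sentence in, one CoNLL-U parse out.
        return self.model_deeppavlov([sentence])[0]
    doc = Doc(sentence)
    doc.segment(self.segmenter)
    doc.parse_syntax(self.syntax_parser)
    return doc.tokens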
Example #4
def process_text_file(text_file, mongo=None):
    # nlp = spacy.load('ru_core_news_sm')
    segmenter = Segmenter()
    emb = NewsEmbedding()
    morph_tagger = NewsMorphTagger(emb)
    syntax_parser = NewsSyntaxParser(emb)

    with open(text_file, 'r', encoding='utf-8') as file:
        file_name = file.name[2:]
        line_number = 0
        for line in file:
            line_number += 1
            if line_number % 100 == 0:
                logging.info(f'Processed line {line_number}')
                if line_number >= 100000:
                    return
            sents = [sent.text for sent in sentenize(line)]
            sentence_number = 0
            for sentence in sents:
                doc = Doc(sentence)
                doc.segment(segmenter)
                doc.tag_morph(morph_tagger)
                doc.parse_syntax(syntax_parser)
                sentence_number += 1
                sentence_tokens = doc.tokens

                # sentence_tokens = [
                #     {
                #         'text': token.text,
                #         'lemma': token.lemma_,
                #         'pos': token.pos_,
                #         'tag': token.tag_,
                #         'dep': token.dep_,
                #         'shape': token.shape_,
                #         'is_alpha': token.is_alpha,
                #         'is_stop': token.is_stop
                #     } for token in sentence]
                words = markup_words(doc.syntax)
                deps = token_deps(doc.syntax.tokens)
                html = show_dep_markup(words, deps)
                save_html(
                    html,
                    f'./htmls/dependency_plot_{file_name}_{line_number}_{sentence_number}.html'
                )
                #
                # svg = displacy.render(sentence, style='dep', options={'compact': False, 'bg': '#09a3d5',
                #                                                       'color': 'white', 'font': 'Source Sans Pro'})
                # output_path = Path(f'./images/dependency_plot_{file_name}_{line_number}_{sentence_number}.svg')
                # output_path.open('w', encoding='utf-8').write(svg)
                PatternExtractor.extract_relations(
                    file_name,
                    line_number,
                    sentence_number,
                    sentence,
                    sentence_tokens,
                    # noun_phrases,
                    # mongo=mongo
                )
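
A hedged invocation sketch for process_text_file (the corpus path is illustrative; note that file.name[2:] assumes the path starts with './', and that PatternExtractor and the natasha visualization helpers must be importable):

import logging

logging.basicConfig(level=logging.INFO)
process_text_file('./corpus.txt')  # writes one dependency-tree HTML per sentence under ./htmls/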
Example #5
def calculate_skills_assessment(text, ca):
    vacancy_key_skills = list(
        map(
            lambda x: x.lower(),
            list(ca.core_vacancy.key_skills.all().values_list('title',
                                                              flat=True))))
    vacancy_additional_skills = list(
        map(
            lambda x: x.lower(),
            list(ca.core_vacancy.additional_skills.all().values_list(
                'title', flat=True))))

    segmenter = Segmenter()
    emb = NewsEmbedding()
    morph_tagger = NewsMorphTagger(emb)
    syntax_parser = NewsSyntaxParser(emb)
    morph_vocab = MorphVocab()

    text = extract_text(ca.cv_file.path)

    doc = Doc(text)

    doc.segment(segmenter)
    doc.tag_morph(morph_tagger)
    doc.parse_syntax(syntax_parser)

    cv_key_skills = []
    cv_additional_skills = []

    for token in doc.tokens:
        token.lemmatize(morph_vocab)
        print(token)
        if token.lemma in vacancy_key_skills and token.lemma not in cv_key_skills:
            cv_key_skills.append(token.lemma)
            print(token.lemma)

        if token.lemma in vacancy_additional_skills and token.lemma not in cv_additional_skills:
            cv_additional_skills.append(token.lemma)
            print(token.lemma)

    candidate_conformity = {
        "key_skills": {
            "vacancy_key_skills": vacancy_key_skills,
            "cv_key_skills": cv_key_skills,
            "conformity_percent": len(cv_key_skills) / len(vacancy_key_skills)
        },
        "additional_skills": {
            "vacancy_additional_skills":
            vacancy_additional_skills,
            "cv_additional_skills":
            cv_additional_skills,
            "conformity_percent":
            len(cv_additional_skills) / len(vacancy_additional_skills)
        }
    }

    return candidate_conformity
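
Two caveats apply here and to the near-identical Example #11 below: the incoming text argument is immediately overwritten by extract_text, so callers cannot pass pre-extracted text, and conformity_percent raises ZeroDivisionError when a vacancy defines no skills of a given kind. A minimal guarded sketch (the safe_percent name is mine, not from the source); conformity_percent would then be safe_percent(cv_key_skills, vacancy_key_skills):

def safe_percent(matched, required):
    # Share of required skills found in the CV; 0.0 instead of a
    # ZeroDivisionError when the vacancy lists no skills.
    return len(matched) / len(required) if required else 0.0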
Example #6
 def __init__(self):
     self.segmenter = Segmenter()
     self.morph_vocab = MorphVocab()
     self.emb = NewsEmbedding()
     self.morph_tagger = NewsMorphTagger(self.emb)
     self.syntax_parser = NewsSyntaxParser(self.emb)
     self.ner_tagger = NewsNERTagger(self.emb)
     self.names_extractor = NamesExtractor(self.morph_vocab)
     self.doc = []
     self.term_extractor = TermExtractor()
Example #7
    def __init__(self):
        self.replacer = ""
        self.text_gender = 'masc'
        self.setting_gender = 'auto'
        self.syntax_map = []
        self.segmenter = Segmenter()
        self.emb = NewsEmbedding()
        self.syntax_parser = NewsSyntaxParser(self.emb)
        self.morph = pymorphy2.MorphAnalyzer(lang='ru',
                                             path=resource_path('data/'))

        self.stop = False
Example #8
def preprocess_sent(incoming_sent):
    doc = Doc(incoming_sent)

    segmenter = Segmenter()

    emb = NewsEmbedding()
    morph_tagger = NewsMorphTagger(emb)
    syntax_parser = NewsSyntaxParser(emb)

    doc.segment(segmenter)

    doc.tag_morph(morph_tagger)
    doc.parse_syntax(syntax_parser)

    return doc.sents[0]
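
A quick usage sketch: the returned DocSent carries morphology and syntax annotations, so its tokens expose pos, head_id and rel:

sent = preprocess_sent('Мама мыла раму.')
for token in sent.tokens:
    # head_id and rel are filled in by parse_syntax
    print(token.text, token.pos, token.head_id, token.rel)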
Example #9
 def __init__(self, text):
     # Build the embedding and morph vocab once and share them, instead of
     # instantiating NewsEmbedding separately for each tagger.
     emb = NewsEmbedding()
     morph_vocab = MorphVocab()
     self.doc = Doc(text)
     self.doc.segment(Segmenter())
     self.doc.tag_morph(NewsMorphTagger(emb))
     for token in self.doc.tokens:
         token.lemmatize(morph_vocab)
     self.doc.parse_syntax(NewsSyntaxParser(emb))
     self.doc.tag_ner(NewsNERTagger(emb))
     for span in self.doc.spans:
         span.normalize(morph_vocab)
     self.words = tuple(filter(lambda t: t.pos not in ('X', 'PUNCT'), self.doc.tokens))
     self.tokens_nouns = tuple(filter(lambda t: t.pos in ('NOUN', 'PROPN'), self.doc.tokens))
     self.tokens_adjs = tuple(filter(lambda t: t.pos == 'ADJ', self.doc.tokens))
     self.tokens_verbs = tuple(filter(lambda t: t.pos == 'VERB', self.doc.tokens))
Example #10
from natasha import (Segmenter, NewsEmbedding, NewsMorphTagger,
                     NewsSyntaxParser, NewsNERTagger, MorphVocab, PER, ORG,
                     NamesExtractor, MoneyExtractor, Doc)

import myextractors

status = 1
res = {}

segmenter = Segmenter()

emb = NewsEmbedding()
morph_tagger = NewsMorphTagger(emb)
syntax_parser = NewsSyntaxParser(emb)
ner_tagger = NewsNERTagger(emb)
morph_vocab = MorphVocab()

names_extractor = NamesExtractor(morph_vocab)
money_extractor = MoneyExtractor(morph_vocab)

text = 'Посол Израиля на Украине Йоэль Лион признался, что пришел в шок, узнав о решении властей Львовской области объявить 2019 год годом лидера запрещенной в России Организации украинских националистов (ОУН) Степана Бандеры...'

docType = 'coast'

doc = Doc(text)
doc.segment(segmenter)
doc.tag_morph(morph_tagger)
doc.parse_syntax(syntax_parser)
doc.tag_ner(ner_tagger)

for span in doc.spans:
    span.normalize(morph_vocab)
Example #11
def ca_details(request, ca_id):

    ca = get_object_or_404(CandidateApplication, id=ca_id)

    vacancy_key_skills = list(
        map(
            lambda x: x.lower(),
            list(ca.core_vacancy.key_skills.all().values_list('title',
                                                              flat=True))))
    vacancy_additional_skills = list(
        map(
            lambda x: x.lower(),
            list(ca.core_vacancy.additional_skills.all().values_list(
                'title', flat=True))))

    segmenter = Segmenter()
    emb = NewsEmbedding()
    morph_tagger = NewsMorphTagger(emb)
    syntax_parser = NewsSyntaxParser(emb)
    morph_vocab = MorphVocab()

    text = extract_text(ca.cv_file.path)

    doc = Doc(text)

    doc.segment(segmenter)
    doc.tag_morph(morph_tagger)
    doc.parse_syntax(syntax_parser)

    cv_key_skills = []
    cv_additional_skills = []

    for token in doc.tokens:
        token.lemmatize(morph_vocab)
        print(token)
        if token.lemma in vacancy_key_skills and token.lemma not in cv_key_skills:
            cv_key_skills.append(token.lemma)
            print(token.lemma)

        if token.lemma in vacancy_additional_skills and token.lemma not in cv_additional_skills:
            cv_additional_skills.append(token.lemma)
            print(token.lemma)

    candidate_conformity = {
        "key_skills": {
            "vacancy_key_skills": vacancy_key_skills,
            "cv_key_skills": cv_key_skills,
            "conformity_percent": len(cv_key_skills) / len(vacancy_key_skills)
        },
        "additional_skills": {
            "vacancy_additional_skills":
            vacancy_additional_skills,
            "cv_additional_skills":
            cv_additional_skills,
            "conformity_percent":
            len(cv_additional_skills) / len(vacancy_additional_skills)
        }
    }

    return render(request,
                  'demo_data.html',
                  context={'data': json.dumps(candidate_conformity)})
Example #12
def syntax_parser(embedding):
    return NewsSyntaxParser(embedding)
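
This reads like a pytest fixture factory. A hedged sketch of the pairing it suggests (the fixture decorators, scope, and embedding fixture are assumptions):

import pytest
from natasha import NewsEmbedding, NewsSyntaxParser

@pytest.fixture(scope='session')
def embedding():
    # Build the heavy embedding once per test session.
    return NewsEmbedding()

@pytest.fixture(scope='session')
def syntax_parser(embedding):
    return NewsSyntaxParser(embedding)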
Example #13
                                  "navec_news_v1_1B_250K_300d_100q.tar")
    NEWS_MORPH = os.path.join(sys._MEIPASS, "slovnet_morph_news_v1.tar")
    NEWS_SYNTAX = os.path.join(sys._MEIPASS, "slovnet_syntax_news_v1.tar")
    NEWS_NER = os.path.join(sys._MEIPASS, "slovnet_ner_news_v1.tar")
    DICTS = os.path.join(sys._MEIPASS, "dicts")
else:
    NEWS_EMBEDDING = os.path.join("navec_news_v1_1B_250K_300d_100q.tar")
    NEWS_MORPH = os.path.join("slovnet_morph_news_v1.tar")
    NEWS_SYNTAX = os.path.join("slovnet_syntax_news_v1.tar")
    NEWS_NER = os.path.join("slovnet_ner_news_v1.tar")
    DICTS = "dicts"

emb = NewsEmbedding(path=NEWS_EMBEDDING)
morph_tagger = NewsMorphTagger(emb, path=NEWS_MORPH)
segmenter = Segmenter()
syntax_parser = NewsSyntaxParser(emb, path=NEWS_SYNTAX)
ner_tagger = NewsNERTagger(emb, path=NEWS_NER)
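# Note: each component above accepts an explicit path= to its navec/slovnet
# model archive, which is what lets the sys._MEIPASS branch resolve the
# models inside a PyInstaller bundle.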
NARRATOR = -1

DETPRON = {
    "Fem": {
        '3': ["ее", "её"],
        '1': [
            'мой', 'моя', 'моё', 'мое', 'мои', 'моего', 'моей', 'моих',
            'моему', 'моим', 'мою', 'моим', 'моею', 'моими', 'моем', 'моём'
        ]
    },
    "Masc": {
        '3': ['его'],
        '1': [
            'мой', 'моя', 'моё', 'мое', 'мои', 'моего', 'моей', 'моих',
Example #14
def Main(docType, text):
    status = 1
    res = {}

    segmenter = Segmenter()

    emb = NewsEmbedding()
    morph_tagger = NewsMorphTagger(emb)
    syntax_parser = NewsSyntaxParser(emb)
    ner_tagger = NewsNERTagger(emb)
    morph_vocab = MorphVocab()

    names_extractor = NamesExtractor(morph_vocab)
    money_extractor = MoneyExtractor(morph_vocab)

    doc = Doc(text)
    doc.segment(segmenter)
    doc.tag_morph(morph_tagger)
    doc.parse_syntax(syntax_parser)
    doc.tag_ner(ner_tagger)

    for span in doc.spans:
        span.normalize(morph_vocab)

    # for a court order
    if docType == 'coast':
        # full name (FIO)
        for span in doc.spans:
            if span.type == PER:
                span.extract_fact(names_extractor)
        x = [_.fact.as_dict for _ in doc.spans if _.type == PER]
        if x:
            res['ФИО'] = x
        else:
            status = 0
        # INN (taxpayer ID)
        y = myextractors.findINN(text)
        if y:
            res['ИНН'] = y
        else:
            status = 0
        # court order number
        y = myextractors.findNCOASTCASE(text)
        if y:
            res['номер судебного приказа'] = y
        else:
            status = 0
        # court order date
        y = myextractors.findDATECOAST(text)
        if y:
            res['дата судебного приказа'] = y
        else:
            status = 0
        # organizations
        y = []
        for span in doc.spans:
            if span.type == ORG:
                y.append({'name': span.text})
        if y:
            res['организации'] = y
        else:
            status = 0

    # for a letter
    if docType == 'mail':
        # full name (FIO)
        for span in doc.spans:
            if span.type == PER:
                span.extract_fact(names_extractor)
        x = [_.fact.as_dict for _ in doc.spans if _.type == PER]
        if x:
            res['ФИО'] = x
        else:
            status = 0
        # INN (taxpayer ID)
        y = myextractors.findINN(text)
        if y:
            res['ИНН'] = y
        else:
            status = 0
        # contract number
        y = myextractors.findNCONTRACT(text)
        if y:
            res['номер договора'] = y
        else:
            status = 0
        # contract date
        y = myextractors.findDATECONT(text)
        if y:
            res['дата договора'] = y
        else:
            status = 0

    # for a payment order
    if docType == 'order':
        # full name (FIO)
        for span in doc.spans:
            if span.type == PER:
                span.extract_fact(names_extractor)
        x = [_.fact.as_dict for _ in doc.spans if _.type == PER]
        if x:
            res['ФИО'] = x
        else:
            status = 0
        # INN (taxpayer ID)
        y = myextractors.findINN(text)
        if y:
            res['ИНН'] = y
        else:
            status = 0
        # organizations
        y = []
        for span in doc.spans:
            if span.type == ORG:
                y.append({'name': span.text})
        if y:
            res['организации'] = y
        else:
            status = 0
        # contract number
        y = myextractors.findNCONTRACT(text)
        if y:
            res['номер договора'] = y
        else:
            status = 0
        # contract date
        y = myextractors.findDATECONT(text)
        if y:
            res['дата договора'] = y
        else:
            status = 0
        # amount
        matches = list(money_extractor(text))
        y = [_.fact for _ in matches]
        ret = []
        for i in y:
            ret.append({'amount': i.amount, 'currency': i.currency})
        if ret:
            res['сумма'] = ret
        else:
            status = 0

    returning = {}

    if status == 1:
        returning['status'] = 'успех'
    else:
        returning['status'] = 'не успех'

    returning['entities'] = res
    return returning
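
A closing usage sketch for Main, reusing the sample text from Example #10 (the output keys stay in Russian because the function defines them that way):

result = Main('coast', text)  # text as defined in Example #10
print(result['status'])       # 'успех' ('success') only when every expected field was found
for field, value in result['entities'].items():
    print(field, value)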