Example #1
    def __test_parsing(self, ra_version):
        # Initialize text parser pipeline.
        text_parser = BaseTextParser(pipeline=[RuSentRelTextEntitiesParser(),
                                               DefaultTextTokenizer(keep_tokens=True)])

        # Iterate through the collection.
        news_read = 0

        news_it = RuAttitudesCollection.iter_news(version=ra_version,
                                                  get_news_index_func=lambda _: news_read,
                                                  return_inds_only=False)

        for news in tqdm(news_it):

            # Parse the news.
            parsed_news = NewsParser.parse(news=news, text_parser=text_parser)
            terms = parsed_news.iter_sentence_terms(sentence_index=0,
                                                    return_id=False)

            str_terms = []
            for t in terms:
                if isinstance(t, Entity):
                    str_terms.append("E")
                elif isinstance(t, Token):
                    str_terms.append(t.get_token_value())
                else:
                    str_terms.append(t)

            for t in str_terms:
                self.assertIsInstance(t, str)

            news_read += 1
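For reference, the per-term branching above can be factored into a small generator; a minimal sketch reusing the Entity and Token types from this example (the helper name is hypothetical):

def terms_to_display_strings(terms):
    # Entities collapse to a placeholder, tokens expose their raw value,
    # and plain string terms pass through unchanged.
    for t in terms:
        if isinstance(t, Entity):
            yield "E"
        elif isinstance(t, Token):
            yield t.get_token_value()
        else:
            yield t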
Example #2
def create_text_parser(exp_ctx, entities_parser, value_to_group_id_func):
    assert(isinstance(entities_parser, BasePipelineItem))
    assert(callable(value_to_group_id_func) or value_to_group_id_func is None)

    if not isinstance(exp_ctx, ExperimentSerializationContext):
        # We do not utilize the text parser in this case.
        return None

    ppl_entities_grouping = EntitiesGroupingPipelineItem(
        value_to_group_id_func=value_to_group_id_func) \
        if value_to_group_id_func is not None else None

    pipeline = [entities_parser,
                ppl_entities_grouping,
                DefaultTextTokenizer(keep_tokens=True),
                LemmasBasedFrameVariantsParser(frame_variants=exp_ctx.FrameVariantCollection,
                                               stemmer=create_stemmer())]

    return BaseTextParser(pipeline)
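A hedged usage sketch for create_text_parser; exp_ctx, news, and synonyms below stand in for real objects from the surrounding codebase (as set up in the other examples):

# Returns None unless exp_ctx is an ExperimentSerializationContext.
text_parser = create_text_parser(
    exp_ctx=exp_ctx,
    entities_parser=RuSentRelTextEntitiesParser(),
    value_to_group_id_func=synonyms.get_synonym_group_index)

if text_parser is not None:
    parsed_news = NewsParser.parse(news=news, text_parser=text_parser)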
Example #3
    def test_parsing(self):

        # Initializing logger.
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        logging.basicConfig(level=logging.DEBUG)

        # Initializing stemmer.
        stemmer = MystemWrapper()

        # Frames and frame variants.
        frames = RuSentiFramesCollection.read_collection(version=RuSentiFramesVersions.V20)
        frame_variants = FrameVariantsCollection()
        frame_variants.fill_from_iterable(variants_with_id=frames.iter_frame_id_and_variants(),
                                          overwrite_existed_variant=True,
                                          raise_error_on_existed_variant=False)

        text_parser = BaseTextParser(pipeline=[RuSentRelTextEntitiesParser(),
                                               DefaultTextTokenizer(keep_tokens=True),
                                               LemmasBasedFrameVariantsParser(frame_variants=frame_variants,
                                                                              stemmer=stemmer,
                                                                              save_lemmas=False),
                                               FrameVariantsSentimentNegation()])

        # Reading synonyms collection.
        synonyms = RuSentRelSynonymsCollectionProvider.load_collection(stemmer=stemmer)

        version = RuSentRelVersions.V11
        for doc_id in RuSentRelIOUtils.iter_collection_indices(version):

            # Reading the document.
            news = RuSentRelNews.read_document(doc_id=doc_id,
                                               synonyms=synonyms,
                                               version=version)

            # Perform text parsing.
            parsed_news = NewsParser.parse(news=news, text_parser=text_parser)
            debug_show_news_terms(parsed_news=parsed_news)
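Since this frames-and-variants setup recurs across the tests, a minimal sketch of factoring it into a reusable helper (the function name is hypothetical; imports as above):

def build_frame_variants():
    # Read the RuSentiFrames collection and fill a variants collection
    # from its (frame_id, variants) pairs.
    frames = RuSentiFramesCollection.read_collection(version=RuSentiFramesVersions.V20)
    frame_variants = FrameVariantsCollection()
    frame_variants.fill_from_iterable(
        variants_with_id=frames.iter_frame_id_and_variants(),
        overwrite_existed_variant=True,
        raise_error_on_existed_variant=False)
    return frame_variants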
Example #4
    def test_rusentrel_news_text_parsing(self):
        version = RuSentRelVersions.V11

        text_parser = BaseTextParser(pipeline=[
            RuSentRelTextEntitiesParser(),
            DefaultTextTokenizer(keep_tokens=True)
        ])

        stemmer = MystemWrapper()
        synonyms = RuSentRelSynonymsCollectionProvider.load_collection(
            stemmer=stemmer, version=version)
        news = RuSentRelNews.read_document(doc_id=1,
                                           synonyms=synonyms,
                                           version=version)

        # Parse news via external parser.
        parsed_news = NewsParser.parse(news=news, text_parser=text_parser)

        # Display result
        for parsed_text in parsed_news:
            self.__print_parsed_text(parsed_text)

        assert (isinstance(parsed_news, ParsedNews))
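A small follow-up sketch, under the same setup, that inspects a single sentence instead of printing every parsed text (iter_sentence_terms as used in Example #1):

terms = list(parsed_news.iter_sentence_terms(sentence_index=0,
                                             return_id=False))
# With keep_tokens=True, punctuation is preserved as Token instances.
print(len(terms))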
Example #5
def run_data_serialization_pipeline(sentences, terms_per_context,
                                    entities_parser, embedding_path,
                                    entity_fmt_type, stemmer):
    assert (isinstance(sentences, list))
    assert (isinstance(entities_parser, BasePipelineItem)
            or entities_parser is None)
    assert (isinstance(terms_per_context, int))
    assert (isinstance(embedding_path, str))
    assert (isinstance(entity_fmt_type, EntityFormatterTypes))

    labels_scaler = BaseLabelScaler(uint_dict=OrderedDict([(NoLabel(), 0)]),
                                    int_dict=OrderedDict([(NoLabel(), 0)]))

    label_provider = MultipleLabelProvider(label_scaler=labels_scaler)

    sentences = list(map(lambda text: BaseNewsSentence(text), sentences))

    annot_algo = PairBasedAnnotationAlgorithm(
        dist_in_terms_bound=None,
        label_provider=PairSingleLabelProvider(label_instance=NoLabel()))

    frames_collection = create_frames_collection()
    frame_variants_collection = create_and_fill_variant_collection(
        frames_collection)

    # Step 1. Annotate text.
    synonyms = RuSentRelSynonymsCollectionProvider.load_collection(
        stemmer=stemmer, version=RuSentRelVersions.V11)

    # Step 2. Parse text.
    news = News(doc_id=0, sentences=sentences)

    text_parser = BaseTextParser(pipeline=[
        TermsSplitterParser(),
        TextEntitiesParser() if entities_parser is None else entities_parser,
        EntitiesGroupingPipelineItem(synonyms.get_synonym_group_index),
        DefaultTextTokenizer(keep_tokens=True),
        FrameVariantsParser(frame_variants=frame_variants_collection),
        LemmasBasedFrameVariantsParser(
            save_lemmas=False,
            stemmer=stemmer,
            frame_variants=frame_variants_collection),
        FrameVariantsSentimentNegation()
    ])

    embedding = RusvectoresEmbedding.from_word2vec_format(
        filepath=embedding_path, binary=True)
    embedding.set_stemmer(stemmer)

    exp_ctx = RuSentRelExperimentSerializationContext(
        labels_scaler=label_provider.LabelScaler,
        stemmer=stemmer,
        embedding=embedding,
        annotator=DefaultAnnotator(annot_algo=annot_algo),
        terms_per_context=terms_per_context,
        str_entity_formatter=create_entity_formatter(entity_fmt_type),
        pos_tagger=POSMystemWrapper(MystemWrapper().MystemInstance),
        name_provider=create_infer_experiment_name_provider(),
        data_folding=NoFolding(doc_ids_to_fold=[0],
                               supported_data_types=[DataType.Test]))

    labels_fmt = StringLabelsFormatter(stol={"neu": NoLabel})

    exp_io = InferIOUtils(exp_ctx)

    # Step 3. Serialize data
    experiment = CustomExperiment(exp_io=exp_io,
                                  exp_ctx=exp_ctx,
                                  doc_ops=SingleDocOperations(
                                      exp_ctx=exp_ctx,
                                      news=news,
                                      text_parser=text_parser),
                                  labels_formatter=labels_fmt,
                                  synonyms=synonyms,
                                  neutral_labels_fmt=labels_fmt)

    NetworkInputHelper.prepare(
        exp_ctx=experiment.ExperimentContext,
        exp_io=experiment.ExperimentIO,
        doc_ops=experiment.DocumentOperations,
        opin_ops=experiment.OpinionOperations,
        terms_per_context=terms_per_context,
        balance=False,
        value_to_group_id_func=synonyms.get_synonym_group_index)

    return experiment.ExperimentIO
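A hedged invocation sketch; the sentence text is borrowed from Example #6, while the embedding path and the entity formatter member are placeholders that depend on the local setup:

# entity_fmt is assumed to be any EntityFormatterTypes member accepted
# by create_entity_formatter (member names vary across versions).
exp_io = run_data_serialization_pipeline(
    sentences=["А контроль над этими провинциями — это господство "
               "над без малого половиной сирийской территории."],
    terms_per_context=50,
    entities_parser=None,  # falls back to TextEntitiesParser()
    embedding_path="path/to/rusvectores-model.bin",  # hypothetical path
    entity_fmt_type=entity_fmt,
    stemmer=MystemWrapper())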
Example #6
    def test_parse_single_string(self):
        text = "А контроль над этими провинциями — это господство над без малого половиной сирийской территории."
        parser = BaseTextParser(pipeline=[DefaultTextTokenizer(keep_tokens=True)])
        news = News(doc_id=0, sentences=[BaseNewsSentence(text.split())])
        parsed_news = NewsParser.parse(news=news, text_parser=parser)
        debug_show_news_terms(parsed_news=parsed_news)
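A minimal follow-up sketch that prints the tokenized sentence, reusing iter_sentence_terms and the Token type from Example #1:

terms = parsed_news.iter_sentence_terms(sentence_index=0, return_id=False)
# With keep_tokens=True, punctuation such as the dash survives as a Token.
print(" ".join(t.get_token_value() if isinstance(t, Token) else str(t)
               for t in terms))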
Example #7
    def test(self):
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.INFO)
        logging.basicConfig(level=logging.DEBUG)

        text_parser = BaseTextParser(pipeline=[
            RuSentRelTextEntitiesParser(),
            DefaultTextTokenizer(keep_tokens=True),
            LemmasBasedFrameVariantsParser(
                frame_variants=self.unique_frame_variants,
                stemmer=self.stemmer,
                save_lemmas=True),
            FrameVariantsSentimentNegation()
        ])

        random.seed(10)
        for doc_id in [35, 36]:  # RuSentRelIOUtils.iter_collection_indices():

            logger.info("NewsID: {}".format(doc_id))

            news, parsed_news, opinions = init_rusentrel_doc(
                doc_id=doc_id, text_parser=text_parser, synonyms=self.synonyms)

            # Initialize service providers.
            pairs_provider = TextOpinionPairsProvider(
                value_to_group_id_func=self.synonyms.get_synonym_group_index)
            entity_service = EntityServiceProvider()

            # Setup parsed news.
            pairs_provider.init_parsed_news(parsed_news)
            entity_service.init_parsed_news(parsed_news)

            text_opinion_iter = iter_same_sentence_linked_text_opinions(
                pairs_provider=pairs_provider,
                entity_service=entity_service,
                opinions=opinions)

            for text_opinion in text_opinion_iter:

                s_index = entity_service.get_entity_position(
                    id_in_document=text_opinion.SourceId,
                    position_type=TermPositionTypes.SentenceIndex)

                s_ind = entity_service.get_entity_position(
                    id_in_document=text_opinion.SourceId,
                    position_type=TermPositionTypes.IndexInSentence)
                t_ind = entity_service.get_entity_position(
                    id_in_document=text_opinion.TargetId,
                    position_type=TermPositionTypes.IndexInSentence)

                terms = list(
                    parsed_news.iter_sentence_terms(s_index, return_id=False))
                x_feature = IndicesFeature.from_vector_to_be_fitted(
                    value_vector=np.array(terms),
                    e1_ind=s_ind,
                    e2_ind=t_ind,
                    expected_size=random.randint(50, 60),
                    filler="<PAD>")

                cropped_terms = x_feature.ValueVector
                subj_ind = s_ind - x_feature.StartIndex
                obj_ind = t_ind - x_feature.StartIndex

                logger.info(len(terms))
                logger.info("Source Index: {}".format(subj_ind))
                logger.info("Target Index: {}".format(obj_ind))
                s = " ".join(terms_to_str(cropped_terms))
                logger.info("Result: {}".format(s))

                assert (isinstance(x_feature.ValueVector[subj_ind], Entity))
                assert (isinstance(x_feature.ValueVector[obj_ind], Entity))
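A toy sketch of the cropping call above, under the assumption that from_vector_to_be_fitted keeps both entity indices inside the fitted window:

toy = IndicesFeature.from_vector_to_be_fitted(
    value_vector=np.array(["a", "b", "E1", "c", "E2", "d"]),
    e1_ind=2,
    e2_ind=4,
    expected_size=4,
    filler="<PAD>")
# Positions shift by the window start after cropping, exactly as the
# subj_ind/obj_ind arithmetic in the test does.
print(2 - toy.StartIndex, 4 - toy.StartIndex)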
Example #8
    def test_opinions_in_rusentrel(self):
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.INFO)
        logging.basicConfig(level=logging.DEBUG)

        # Initialize text parser pipeline.
        text_parser = BaseTextParser(pipeline=[
            RuSentRelTextEntitiesParser(),
            DefaultTextTokenizer(keep_tokens=True),
            LemmasBasedFrameVariantsParser(
                frame_variants=self.unique_frame_variants,
                stemmer=self.stemmer)
        ])

        # Initialize specific document
        doc_id = 47
        logger.info("NewsID: {}".format(doc_id))
        news, parsed_news, opinions = init_rusentrel_doc(
            doc_id=doc_id, text_parser=text_parser, synonyms=self.synonyms)

        # Initialize providers.
        pairs_provider = TextOpinionPairsProvider(
            value_to_group_id_func=self.synonyms.get_synonym_group_index)
        entity_service = EntityServiceProvider()

        pairs_provider.init_parsed_news(parsed_news)
        entity_service.init_parsed_news(parsed_news)

        text_opinions = iter_same_sentence_linked_text_opinions(
            opinions=opinions,
            pairs_provider=pairs_provider,
            entity_service=entity_service)

        for text_opinion in text_opinions:

            s_index = entity_service.get_entity_position(
                id_in_document=text_opinion.SourceId,
                position_type=TermPositionTypes.SentenceIndex)

            terms = list(
                parsed_news.iter_sentence_terms(s_index, return_id=False))
            str_terms_joined = " ".join(terms_to_str(terms)).strip()

            s_ind = entity_service.get_entity_position(
                id_in_document=text_opinion.SourceId,
                position_type=TermPositionTypes.IndexInSentence)
            t_ind = entity_service.get_entity_position(
                id_in_document=text_opinion.TargetId,
                position_type=TermPositionTypes.IndexInSentence)

            logger.info("text_opinion: {}->{}".format(text_opinion.SourceId,
                                                      text_opinion.TargetId))
            logger.info("[{}] {}->{}".format(s_index, s_ind, t_ind))
            logger.info("'{}' -> '{}'".format(terms[s_ind], terms[t_ind]))
            logger.info(str_terms_joined)

            logger.info(" ".join(
                self.__process(terms=terms,
                               entities_formatter=self.entities_formatter,
                               s_ind=s_ind,
                               t_ind=t_ind)))

            self.assertTrue(isinstance(terms[s_ind], Entity))
            self.assertTrue(isinstance(terms[t_ind], Entity))
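A minimal sketch of the kind of masking that __process presumably performs (this helper is hypothetical, not the test's actual implementation):

def mask_pair(terms, s_ind, t_ind):
    # Replace the opinion source/target with placeholders, mask any other
    # entity, and keep plain terms as-is.
    masked = []
    for i, term in enumerate(terms):
        if i == s_ind:
            masked.append("[SOURCE]")
        elif i == t_ind:
            masked.append("[TARGET]")
        elif isinstance(term, Entity):
            masked.append("[ENTITY]")
        else:
            masked.append(str(term))
    return masked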