def add_complex_scores_to_model(artm_model,
                                dictionary,
                                n_top_tokens,
                                p_mass_threshold,
                                common_topics,
                                subject_topics,
                                class_name,
                                _debug_print=False):
    if _debug_print:
        print('[{}] adding scores'.format(datetime.now()))
    # subject
    artm_model.scores.add(
        artm.PerplexityScore(name='perplexity_score_subject',
                             dictionary=dictionary,
                             topic_names=subject_topics))
    artm_model.scores.add(
        artm.SparsityPhiScore(name='ss_phi_score_subject',
                              class_id=class_name,
                              topic_names=subject_topics))
    artm_model.scores.add(
        artm.SparsityThetaScore(name='ss_theta_score_subject',
                                topic_names=subject_topics))
    artm_model.scores.add(
        artm.TopicKernelScore(name='topic_kernel_score_subject',
                              class_id=class_name,
                              topic_names=subject_topics,
                              probability_mass_threshold=p_mass_threshold))
    artm_model.scores.add(
        artm.TopTokensScore(name='top_tokens_score_subject',
                            class_id=class_name,
                            topic_names=subject_topics,
                            num_tokens=n_top_tokens))

    # common
    artm_model.scores.add(
        artm.PerplexityScore(name='perplexity_score_common',
                             dictionary=dictionary,
                             topic_names=common_topics))
    artm_model.scores.add(
        artm.SparsityPhiScore(name='ss_phi_score_common',
                              class_id=class_name,
                              topic_names=common_topics))
    artm_model.scores.add(
        artm.SparsityThetaScore(name='ss_theta_score_common',
                                topic_names=common_topics))
    artm_model.scores.add(
        artm.TopicKernelScore(name='topic_kernel_score_common',
                              class_id=class_name,
                              topic_names=common_topics,
                              probability_mass_threshold=p_mass_threshold))
    artm_model.scores.add(
        artm.TopTokensScore(name='top_tokens_score_common',
                            class_id=class_name,
                            topic_names=common_topics,
                            num_tokens=n_top_tokens))
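# A minimal usage sketch for the helper above. The vectorizer path, topic
# names, and modality name are illustrative assumptions, not part of the
# original snippet.
bv = artm.BatchVectorizer(data_path='data.vw', data_format='vowpal_wabbit',
                          target_folder='batches')
subject = ['subject_{}'.format(i) for i in range(20)]
common = ['common_{}'.format(i) for i in range(5)]
artm_model = artm.ARTM(topic_names=subject + common,
                       dictionary=bv.dictionary,
                       class_ids={'@default_class': 1.0})
add_complex_scores_to_model(artm_model,
                            dictionary=bv.dictionary,
                            n_top_tokens=10,
                            p_mass_threshold=0.3,
                            common_topics=common,
                            subject_topics=subject,
                            class_name='@default_class')
artm_model.fit_offline(batch_vectorizer=bv, num_collection_passes=15)
print(artm_model.score_tracker['perplexity_score_subject'].last_value)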
Example #2
def experiment(filename, tau_phi, tau_theta):
    batch_vectorizer = artm.BatchVectorizer(data_path=filename, data_format='vowpal_wabbit',
                                            target_folder='batches')

    dictionary = batch_vectorizer.dictionary

    topic_num = 30
    tokens_num = 100
    print("ARTM training")
    topic_names = ['topic_{}'.format(i) for i in range(topic_num)]
    model_artm = artm.ARTM(topic_names=topic_names, dictionary=dictionary, cache_theta=True)
    model_plsa = artm.ARTM(topic_names=topic_names, cache_theta=True,
                           scores=[artm.PerplexityScore(name='PerplexityScore', dictionary=dictionary)])
    model_lda = artm.LDA(num_topics=topic_num)

    model_artm.scores.add(artm.PerplexityScore(name='perplexity_score', dictionary=dictionary))
    model_artm.scores.add(artm.SparsityPhiScore(name='sparsity_phi_score'))
    model_artm.scores.add(artm.SparsityThetaScore(name='sparsity_theta_score'))
    model_artm.scores.add(artm.TopTokensScore(name='top_tokens_score', num_tokens=tokens_num))
    model_artm.scores.add(artm.TopicKernelScore(name='topic_kernel_score', probability_mass_threshold=0.3))
    model_artm.scores.add(artm.BackgroundTokensRatioScore(name='background_tokens_ratio_score'))
    model_artm.scores.add(artm.ClassPrecisionScore(name='class_precision_score'))
    model_artm.scores.add(artm.TopicMassPhiScore(name='topic_mass_phi_score'))

    model_plsa.scores.add(artm.PerplexityScore(name='perplexity_score', dictionary=dictionary))
    model_plsa.scores.add(artm.SparsityPhiScore(name='sparsity_phi_score'))
    model_plsa.scores.add(artm.SparsityThetaScore(name='sparsity_theta_score'))
    model_plsa.scores.add(artm.TopTokensScore(name='top_tokens_score'))
    model_plsa.scores.add(artm.TopicKernelScore(name='topic_kernel_score', probability_mass_threshold=0.3))
    model_plsa.scores.add(artm.BackgroundTokensRatioScore(name='background_tokens_ratio_score'))
    model_plsa.scores.add(artm.ClassPrecisionScore(name='class_precision_score'))
    model_plsa.scores.add(artm.TopicMassPhiScore(name='topic_mass_phi_score'))

    model_artm.regularizers.add(artm.SmoothSparsePhiRegularizer(name='sparse_phi_regularizer'))
    model_artm.regularizers.add(artm.SmoothSparseThetaRegularizer(name='sparse_theta_regularizer'))
    model_artm.regularizers.add(artm.DecorrelatorPhiRegularizer(name='decorrelator_phi_regularizer'))

    model_artm.regularizers['sparse_phi_regularizer'].tau = tau_phi
    model_artm.regularizers['sparse_theta_regularizer'].tau = tau_theta
    model_artm.regularizers['decorrelator_phi_regularizer'].tau = 1e+3

    model_plsa.initialize(dictionary=dictionary)
    model_artm.initialize(dictionary=dictionary)
    model_lda.initialize(dictionary=dictionary)

    passes = 100
    model_plsa.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=passes)
    model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=passes)
    model_lda.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=passes)

    print_measures(model_plsa, model_artm, model_lda)
Example #3
def define_model(n_topics: int, dictionary: artm.Dictionary,
                 sparse_theta: float, sparse_phi: float,
                 decorrelator_phi: float) -> artm.artm_model.ARTM:
    """
    Define the ARTM model.
    :param n_topics: number of topics.
    :param dictionary: batch vectorizer dictionary.
    :param sparse_theta: sparse theta parameter.
    :param sparse_phi: sparse phi parameter.
    :param decorrelator_phi: decorrelator phi parameter.
    :return: ARTM model.
    """
    print("Defining the model.")
    topic_names = ["topic_{}".format(i) for i in range(1, n_topics + 1)]
    model_artm = artm.ARTM(
        topic_names=topic_names,
        cache_theta=True,
        scores=[
            artm.PerplexityScore(name="PerplexityScore",
                                 dictionary=dictionary),
            artm.SparsityPhiScore(name="SparsityPhiScore"),
            artm.SparsityThetaScore(name="SparsityThetaScore"),
            artm.TopicKernelScore(name="TopicKernelScore",
                                  probability_mass_threshold=0.3),
            artm.TopTokensScore(name="TopTokensScore", num_tokens=15)
        ],
        regularizers=[
            artm.SmoothSparseThetaRegularizer(name="SparseTheta",
                                              tau=sparse_theta),
            artm.SmoothSparsePhiRegularizer(name="SparsePhi", tau=sparse_phi),
            artm.DecorrelatorPhiRegularizer(name="DecorrelatorPhi",
                                            tau=decorrelator_phi)
        ])
    return model_artm
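# Usage sketch for define_model above; the data path, topic count, and tau
# values are illustrative assumptions. Note that the model still has to be
# initialized by the caller before fitting.
batch_vectorizer = artm.BatchVectorizer(data_path='data.vw',
                                        data_format='vowpal_wabbit',
                                        target_folder='batches')
model = define_model(n_topics=20, dictionary=batch_vectorizer.dictionary,
                     sparse_theta=-0.15, sparse_phi=-0.1,
                     decorrelator_phi=1.5e+5)
model.initialize(dictionary=batch_vectorizer.dictionary)
model.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=20)
print(model.score_tracker['PerplexityScore'].last_value)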
Example #4
    def init_model(self, dictionary_path=None):
        """dictionary_path: optional, used with pretrained model"""
        self.dictionary = artm.Dictionary()
        if dictionary_path is None:
            self.dictionary.gather(data_path=self.batches_path)
            self.dictionary.filter(min_tf=10, max_df_rate=0.1)
            self.dictionary.save_text(
                f"{self.dir_path}/dicts/dict_{self.name_dataset}.txt")
        else:
            self.dictionary.load_text(dictionary_path)

        self.model = artm.ARTM(
            num_topics=self.n_topics,
            dictionary=self.dictionary,
            show_progress_bars=True,
        )

        # scores
        self.model.scores.add(
            artm.PerplexityScore(name="PerplexityScore",
                                 dictionary=self.dictionary))
        self.model.scores.add(
            artm.SparsityThetaScore(name="SparsityThetaScore"))
        self.model.scores.add(artm.SparsityPhiScore(name="SparsityPhiScore"))

        # regularizers
        self.model.regularizers.add(
            artm.SmoothSparsePhiRegularizer(name="SparsePhi", tau=-0.1))
        self.model.regularizers.add(
            artm.SmoothSparseThetaRegularizer(name="SparseTheta", tau=-0.5))
        self.model.regularizers.add(
            artm.DecorrelatorPhiRegularizer(name="DecorrelatorPhi", tau=1.5e5))
Example #5
    def set_scores(self):

        self.model.scores.add(
            artm.PerplexityScore(name='PerplexityScore',
                                 dictionary=self.dictionary))

        self.model.scores.add(
            artm.SparsityPhiScore(name='SparsityPhiScore',
                                  class_id='@default_class',
                                  topic_names=self.specific))
        self.model.scores.add(
            artm.SparsityThetaScore(name='SparsityThetaScore',
                                    topic_names=self.specific))

        # Fraction of background words in the whole collection
        self.model.scores.add(
            artm.BackgroundTokensRatioScore(name='BackgroundTokensRatioScore',
                                            class_id='@default_class'))

        # Kernel characteristics
        self.model.scores.add(
            artm.TopicKernelScore(name='TopicKernelScore',
                                  class_id='@default_class',
                                  topic_names=self.specific,
                                  probability_mass_threshold=0.5,
                                  dictionary=self.dictionary))

        # Looking at top tokens
        self.model.scores.add(
            artm.TopTokensScore(name='TopTokensScore',
                                class_id='@default_class',
                                num_tokens=100))
Example #6
def experiment_enviroment(request):
    """ """
    with warnings.catch_warnings():
        warnings.filterwarnings(action="ignore", message=W_DIFF_BATCHES_1)
        dataset = Dataset('tests/test_data/test_dataset.csv')
        dictionary = dataset.get_dictionary()

    model_artm = artm.ARTM(
        num_processors=1,
        num_topics=5,
        cache_theta=True,
        num_document_passes=1,
        dictionary=dictionary,
        scores=[artm.PerplexityScore(name='PerplexityScore')],
    )
    model_artm.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))
    model_artm.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'))
    ex_score = ScoreExample()
    tm = TopicModel(model_artm,
                    model_id='new_id',
                    custom_scores={'example_score': ex_score})
    # experiment starts without model
    experiment = Experiment(tm,
                            experiment_id="test_cube_creator",
                            save_path="tests/experiments")
    return tm, dataset, experiment, dictionary
Example #7
    def init_hierarchical_model(class_ids):
        score = [artm.PerplexityScore(name='perplexity_words', class_ids=['body']),
                 artm.PerplexityScore(name='perplexity_bigrams', class_ids=['bigrams'])]

        top_tokens = [artm.TopTokensScore(name='top_words', num_tokens=15, class_id='body'),
                      artm.TopTokensScore(name='top_bigrams', num_tokens=10, class_id='bigrams')]

        sparsity = [artm.SparsityThetaScore(name='sparsity_theta', eps=1e-6),
                    artm.SparsityPhiScore(name='sparsity_phi_words', class_id='body', eps=1e-6),
                    artm.SparsityPhiScore(name='sparsity_phi_bigrams', class_id='bigrams', eps=1e-6)]

        regularizers = [artm.DecorrelatorPhiRegularizer(tau=0, class_ids=['body'], name='decorr_words'),
                        artm.DecorrelatorPhiRegularizer(tau=0, class_ids=['bigrams'], name='decorr_bigrams'),
                        artm.DecorrelatorPhiRegularizer(tau=0, class_ids=['categories'], name='decorr_categories'),
                        artm.SmoothSparseThetaRegularizer(tau=0, name='sparsity_theta'),
                        artm.SmoothSparsePhiRegularizer(tau=0, class_ids=['body'], name='sparsity_words'),
                        artm.SmoothSparsePhiRegularizer(tau=0, class_ids=['bigrams'], name='sparsity_bigrams')]

        hmodel = artm.hARTM(class_ids=class_ids,
                            cache_theta=True,
                            reuse_theta=True,
                            scores=score + top_tokens + sparsity,
                            regularizers=regularizers,
                            theta_columns_naming='title')
        return hmodel
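# Sketch of driving the hARTM returned above: hierarchy levels are added and
# trained one by one. The modality weights, topic counts, dictionary, and
# batch vectorizer here are illustrative assumptions.
hmodel = init_hierarchical_model(class_ids={'body': 1.0, 'bigrams': 1.0,
                                            'categories': 1.0})
level0 = hmodel.add_level(num_topics=10)
level0.initialize(dictionary=dictionary)
level0.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=10)
level1 = hmodel.add_level(num_topics=50, parent_level_weight=1)
level1.initialize(dictionary=dictionary)
level1.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=10)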
Example #8
def create_and_learn_ARTM_decorPhi_modal(name="",
                                         topic_number=750,
                                         num_collection_passes=1,
                                         weights=[1., 1., 1., 1.],
                                         decorTau=1.0):

    batch_vectorizer_train = artm.BatchVectorizer(data_path='./' + name,
                                                  data_format='vowpal_wabbit',
                                                  target_folder='folder' + name)
    dictionary = artm.Dictionary()
    dictionary.gather(data_path=batch_vectorizer_train.data_path)
    topic_names = ['topic_{}'.format(i) for i in range(topic_number)]

    model = artm.ARTM(topic_names=topic_names,
                      class_ids={
                          '@text': weights[0],
                          '@first': weights[1],
                          '@second': weights[2],
                          '@third': weights[3]
                      },
                      cache_theta=True,
                      theta_columns_naming='title',
                      scores=[
                          artm.PerplexityScore(name='PerplexityScore',
                                               dictionary=dictionary)
                      ])
    model.regularizers.add(
        artm.DecorrelatorPhiRegularizer(
            name='DecorrelatorPhi_modals',
            tau=decorTau,
            class_ids=['@first', '@second', '@third']))

    model.initialize(dictionary=dictionary)

    model.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))
    model.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'))
    model.scores.add(
        artm.TopicKernelScore(name='TopicKernelScore',
                              class_id='@text',
                              probability_mass_threshold=0.3))
    model.scores.add(
        artm.TopTokensScore(name='TopTokensScore',
                            num_tokens=6,
                            class_id='@text'))
    model.scores.add(
        artm.SparsityPhiScore(name='sparsity_phi_score', class_id='@third'))

    model.num_document_passes = 1

    model.fit_offline(batch_vectorizer=batch_vectorizer_train,
                      num_collection_passes=num_collection_passes)

    theta_train = model.transform(batch_vectorizer=batch_vectorizer_train)

    return model, theta_train
Example #9
def experiment_enviroment(request):
    """ """
    with warnings.catch_warnings():
        warnings.filterwarnings(action="ignore", message=W_DIFF_BATCHES_1)
        dataset = Dataset('tests/test_data/test_dataset.csv')
        dictionary = dataset.get_dictionary()

    model_artm = artm.ARTM(
        num_processors=3,
        num_topics=5,
        cache_theta=True,
        num_document_passes=1,
        dictionary=dictionary,
        scores=[artm.PerplexityScore(name='PerplexityScore')])
    model_artm.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))
    model_artm.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'))

    ex_score = ScoreExample()
    tm = TopicModel(model_artm,
                    model_id='new_id',
                    custom_scores={'example_score': ex_score})

    experiment = Experiment(tm,
                            experiment_id="test_pipeline",
                            save_path="tests/experiments")
    cube_settings = [{
        'CubeCreator': {
            'num_iter': 10,
            'parameters': [
                {
                    'name': 'seed',
                    'values': [82019, 322],
                },
            ],
            'reg_search': 'grid',
            'separate_thread': USE_MULTIPROCESS,
        },
        'selection': [
            'model.seed = 82019 and PerplexityScore -> min COLLECT 2',
        ]
    }, {
        'RegularizersModifierCube': {
            'num_iter': 10,
            'regularizer_parameters': {
                "regularizer": artm.regularizers.SmoothSparsePhiRegularizer(),
                "tau_grid": [0.1, 0.5, 1, 5, 10]
            },
            'reg_search': 'grid',
            'use_relative_coefficients': False,
            'separate_thread': USE_MULTIPROCESS,
        },
        'selection': [
            'PerplexityScore -> max COLLECT 2',
        ]
    }]

    return tm, dataset, experiment, dictionary, cube_settings
Example #10
    def create_topic_model(self, topic_model_name: str,
                           batch_vectorizer: artm.BatchVectorizer,
                           dictionary: artm.Dictionary) -> artm.ARTM:
        topic_model = artm.ARTM(num_topics=self.number_of_topics,
                                dictionary=dictionary,
                                cache_theta=False)
        topic_model.scores.add(
            artm.PerplexityScore(name='perplexity_score',
                                 dictionary=dictionary))
        topic_model.scores.add(
            artm.SparsityPhiScore(name='sparsity_phi_score'))
        topic_model.scores.add(
            artm.SparsityThetaScore(name='sparsity_theta_score'))
        topic_model.num_document_passes = 5
        topic_model.num_processors = max(1, os.cpu_count() - 1)
        topic_model.regularizers.add(
            artm.SmoothSparsePhiRegularizer(name='sparse_phi_regularizer'))
        topic_model.regularizers.add(
            artm.SmoothSparseThetaRegularizer(name='sparse_theta_regularizer'))
        topic_model.regularizers.add(
            artm.DecorrelatorPhiRegularizer(
                name='decorrelator_phi_regularizer'))
        topic_model.regularizers['sparse_phi_regularizer'].tau = -1.0
        topic_model.regularizers['sparse_theta_regularizer'].tau = -0.5
        topic_model.regularizers['decorrelator_phi_regularizer'].tau = 1e+5
        best_score = None
        keyword_extraction_logger.info(
            'epoch  perplexity_score  sparsity_phi_score  sparsity_theta_score'
        )
        for restart_index in range(10):
            topic_model.fit_offline(batch_vectorizer=batch_vectorizer,
                                    num_collection_passes=3)
            last_perplexity = topic_model.score_tracker[
                'perplexity_score'].last_value
            # save the best model so far (by perplexity)
            if best_score is None or best_score > last_perplexity:
                best_score = last_perplexity
                self.save_topic_model(topic_model, topic_model_name)
            keyword_extraction_logger.info(
                '{0:5}  {1:16.9}  {2:18.9}  {3:20.9}'.format(
                    (restart_index + 1) * 3,
                    topic_model.score_tracker['perplexity_score'].last_value,
                    topic_model.score_tracker['sparsity_phi_score'].last_value,
                    topic_model.score_tracker[
                        'sparsity_theta_score'].last_value))
        del topic_model
        return self.load_topic_model(
            artm.ARTM(num_topics=self.number_of_topics,
                      dictionary=dictionary,
                      cache_theta=False), topic_model_name)
Example #11
    def _get_corpus_model(self,
                          corpus_vector_spaced,
                          clustering_method='artm'):
        if clustering_method == 'gensim':
            return self._get_model_LSI(corpus_vector_spaced)
        elif clustering_method == 'sklearn':
            return self._get_model_LDA(corpus_vector_spaced)
        elif clustering_method == 'artm':
            batch_vectorizer = corpus_vector_spaced['batch_vectorizer']
            dictionary = corpus_vector_spaced['dictionary']

            topic_names = [
                'topic_{}'.format(i) for i in range(self.num_of_clusters)
            ]

            model_artm = artm.ARTM(
                topic_names=topic_names,
                cache_theta=True,
                scores=[
                    artm.PerplexityScore(name='PerplexityScore',
                                         dictionary=dictionary)
                ],
                regularizers=[
                    artm.SmoothSparseThetaRegularizer(name='SparseTheta',
                                                      tau=-0.15)
                ])

            model_artm.scores.add(
                artm.SparsityPhiScore(name='SparsityPhiScore'))
            model_artm.scores.add(
                artm.SparsityThetaScore(name='SparsityThetaScore'))
            model_artm.scores.add(
                artm.TopicKernelScore(name='TopicKernelScore',
                                      probability_mass_threshold=0.3))
            model_artm.scores.add(artm.TopTokensScore(name='TopTokensScore',
                                                      num_tokens=10),
                                  overwrite=True)

            model_artm.regularizers.add(
                artm.SmoothSparsePhiRegularizer(name='SparsePhi', tau=-0.1))
            model_artm.regularizers['SparseTheta'].tau = -0.2
            model_artm.regularizers.add(
                artm.DecorrelatorPhiRegularizer(name='DecorrelatorPhi',
                                                tau=1.5e+5))

            model_artm.num_document_passes = 1

            model_artm.initialize(dictionary)
            model_artm.fit_offline(batch_vectorizer=batch_vectorizer,
                                   num_collection_passes=30)

            return model_artm.transform(batch_vectorizer=batch_vectorizer).T
Example #12
def create_thematic_model(checked_list, num_topics, num_tokens, phi_tau,
                          theta_tau, decorr_tau):
    """ Create a thematic model """
    gluing_bag_of_words(checked_list)

    batch_vectorizer = artm.BatchVectorizer(data_path=COLLECTION_PATH,
                                            data_format='vowpal_wabbit',
                                            target_folder=TARGET_FOLDER,
                                            batch_size=len(checked_list))
    dictionary = artm.Dictionary(data_path=TARGET_FOLDER)
    model = artm.ARTM(
        num_topics=num_topics,
        num_document_passes=len(checked_list),
        dictionary=dictionary,
        regularizers=[
            artm.SmoothSparsePhiRegularizer(name='sparse_phi_regularizer',
                                            tau=phi_tau),
            artm.SmoothSparseThetaRegularizer(name='sparse_theta_regularizer',
                                              tau=theta_tau),
            artm.DecorrelatorPhiRegularizer(
                name='decorrelator_phi_regularizer', tau=decorr_tau),
        ],
        scores=[
            artm.PerplexityScore(name='perplexity_score',
                                 dictionary=dictionary),
            artm.SparsityPhiScore(name='sparsity_phi_score'),
            artm.SparsityThetaScore(name='sparsity_theta_score'),
            artm.TopTokensScore(name='top_tokens_score', num_tokens=num_tokens)
        ])

    model.fit_offline(batch_vectorizer=batch_vectorizer,
                      num_collection_passes=len(checked_list))

    top_tokens = model.score_tracker['top_tokens_score']

    topic_dictionary = OrderedDict()

    for topic_name in model.topic_names:
        list_name = []
        for (token, weight) in zip(top_tokens.last_tokens[topic_name],
                                   top_tokens.last_weights[topic_name]):
            list_name.append(token + '-' + str(round(weight, 3)))
        topic_dictionary[str(topic_name)] = list_name

    perplexity = model.score_tracker['perplexity_score'].last_value
    sparsity_phi = model.score_tracker['sparsity_phi_score'].last_value
    sparsity_theta = model.score_tracker['sparsity_theta_score'].last_value
    return perplexity, sparsity_phi, sparsity_theta, topic_dictionary
Example #13
def add_scores_to_model(current_dictionary, artm_model, n_top_tokens,
                        p_mass_threshold):
    artm_model.scores.add(
        artm.PerplexityScore(name='perplexity_score',
                             use_unigram_document_model=False,
                             dictionary=current_dictionary))
    artm_model.scores.add(
        artm.SparsityPhiScore(name='sparsity_phi_score', class_id='ngramm'))
    artm_model.scores.add(artm.SparsityThetaScore(name='sparsity_theta_score'))
    artm_model.scores.add(
        artm.TopicKernelScore(name='topic_kernel_score',
                              class_id='ngramm',
                              probability_mass_threshold=p_mass_threshold))
    artm_model.scores.add(
        artm.TopTokensScore(name='top_tokens_score',
                            class_id='ngramm',
                            num_tokens=n_top_tokens))
Example #14
def create_and_learn_PLSA(name="", topic_number=750, num_collection_passes=1):

    batch_vectorizer_train = artm.BatchVectorizer(data_path='./' + name,
                                                  data_format='vowpal_wabbit',
                                                  target_folder='folder' + name)
    dictionary = artm.Dictionary()
    dictionary.gather(data_path=batch_vectorizer_train.data_path)
    topic_names = ['topic_{}'.format(i) for i in range(topic_number)]

    model_plsa = artm.ARTM(topic_names=topic_names,
                           class_ids={
                               '@text': 1.0,
                               '@first': 1.0,
                               '@second': 1.0,
                               '@third': 1.0
                           },
                           cache_theta=True,
                           theta_columns_naming='title',
                           scores=[
                               artm.PerplexityScore(name='PerplexityScore',
                                                    dictionary=dictionary)
                           ])

    model_plsa.initialize(dictionary=dictionary)

    model_plsa.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))
    model_plsa.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'))
    model_plsa.scores.add(
        artm.TopicKernelScore(name='TopicKernelScore',
                              class_id='@text',
                              probability_mass_threshold=0.3))
    model_plsa.scores.add(
        artm.TopTokensScore(name='TopTokensScore',
                            num_tokens=6,
                            class_id='@text'))

    model_plsa.num_document_passes = 1

    model_plsa.fit_offline(batch_vectorizer=batch_vectorizer_train,
                           num_collection_passes=num_collection_passes)

    theta_train = model_plsa.transform(batch_vectorizer=batch_vectorizer_train)

    return model_plsa, theta_train
Example #15
def pipeline_plsa_bigartm(lines,
                          TOPIC_NUMBER,
                          ngram_range,
                          topnwords,
                          LOGS_DATA_PATH="plsa.txt",
                          TARGET_FOLDER="plsa"):

    make_file(lines, ngram_range, LOGS_DATA_PATH)

    batch_vectorizer = artm.BatchVectorizer(data_path=LOGS_DATA_PATH,
                                            data_format='vowpal_wabbit',
                                            target_folder=TARGET_FOLDER)

    model_artm = artm.ARTM(num_topics=TOPIC_NUMBER, cache_theta=True)
    model_artm.initialize(dictionary=batch_vectorizer.dictionary)

    model_artm.regularizers.add(
        artm.SmoothSparsePhiRegularizer(name='SparsePhi', tau=0.05))
    model_artm.regularizers.add(
        artm.DecorrelatorPhiRegularizer(name='DecorrelatorPhi', tau=1.5e+5))
    model_artm.regularizers.add(
        artm.SmoothSparseThetaRegularizer(name='SparseTheta', tau=-0.01))

    model_artm.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))
    model_artm.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'))
    model_artm.scores.add(artm.TopTokensScore(name='TopTokensScore',
                                              num_tokens=topnwords),
                          overwrite=True)
    model_artm.scores.add(
        artm.PerplexityScore(name='PerplexityScore',
                             dictionary=batch_vectorizer.dictionary))

    model_artm.num_document_passes = 2
    model_artm.fit_offline(batch_vectorizer=batch_vectorizer,
                           num_collection_passes=15)

    topic_names = {}
    for topic_name in model_artm.topic_names:
        topic_names[topic_name] = model_artm.score_tracker[
            'TopTokensScore'].last_tokens[topic_name]

    #return label_after_bigarm(model_artm),  topic_names
    return "nothing, sorry", topic_names
Example #16
def create_model_with_background(dictionary, num_tokens, num_document_passes):

    sm_phi_tau = 0.0001 * 1e-4
    sp_phi_tau = -0.0001 * 1e-4

    decor_phi_tau = 1

    specific_topics = ['topic {}'.format(i) for i in range(1, 20)]
    topic_names = specific_topics + ["background"]
    scores = [
        artm.PerplexityScore(name='PerplexityScore', dictionary=dictionary),
        artm.TopTokensScore(
            name='TopTokensScore', num_tokens=10, class_id='plain_text'
        ),  # web version of Palmetto works only with <= 10 tokens
        artm.SparsityPhiScore(name='SparsityPhiScore'),
        artm.SparsityThetaScore(name='SparsityThetaScore'),
        artm.TopicKernelScore(name='TopicKernelScore',
                              probability_mass_threshold=0.3,
                              class_id='plain_text')
    ]

    model = artm.ARTM(topic_names=topic_names,
                      regularizers=[],
                      cache_theta=True,
                      scores=scores,
                      class_ids={'plain_text': 1.0})

    model.regularizers.add(
        artm.SmoothSparsePhiRegularizer(name='SparsePhi',
                                        tau=-sp_phi_tau,
                                        topic_names=specific_topics))
    model.regularizers.add(
        artm.SmoothSparsePhiRegularizer(name='SmoothPhi',
                                        tau=sm_phi_tau,
                                        topic_names=["background"]))
    # model.regularizers.add(artm.DecorrelatorPhiRegularizer(name='DecorrelatorPhi', tau=decor_phi_tau))

    model.initialize(dictionary=dictionary)
    model.num_document_passes = num_document_passes

    return model
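# Usage sketch: fit the background-aware model built above and inspect its
# scores. The dictionary and batch vectorizer are illustrative assumptions.
model = create_model_with_background(dictionary=dictionary, num_tokens=10,
                                     num_document_passes=5)
model.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=20)
print(model.score_tracker['PerplexityScore'].last_value)
print(model.score_tracker['TopTokensScore'].last_tokens['background'])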
Example #17
def create_model(dictionary, num_tokens, num_document_passes):

    tn = ['topic {}'.format(i) for i in range(1, 20)]
    scores = [
        artm.PerplexityScore(name='PerplexityScore', dictionary=dictionary),
        artm.TopTokensScore(
            name='TopTokensScore', num_tokens=10
        ),  # web version of Palmetto works only with <= 10 tokens
        artm.SparsityPhiScore(name='SparsityPhiScore'),
        artm.SparsityThetaScore(name='SparsityThetaScore')
    ]

    model = artm.ARTM(topic_names=tn,
                      regularizers=[],
                      cache_theta=True,
                      scores=scores)

    model.initialize(dictionary=dictionary)
    model.num_document_passes = num_document_passes

    return model
Example #18
def add_scores_to_model(artm_model,
                        dictionary,
                        n_top_tokens,
                        p_mass_threshold,
                        class_name,
                        _debug_print=False):
    if _debug_print:
        print('[{}] adding scores'.format(datetime.now()))
    artm_model.scores.add(
        artm.PerplexityScore(name='perplexity_score', dictionary=dictionary))
    artm_model.scores.add(
        artm.SparsityPhiScore(name='ss_phi_score', class_id=class_name))
    artm_model.scores.add(artm.SparsityThetaScore(name='ss_theta_score'))
    artm_model.scores.add(
        artm.TopicKernelScore(name='topic_kernel_score',
                              class_id=class_name,
                              probability_mass_threshold=p_mass_threshold))
    artm_model.scores.add(
        artm.TopTokensScore(name='top_tokens_score',
                            class_id=class_name,
                            num_tokens=n_top_tokens))
Example #19
def init_score_tracker(model_artm, my_dictionary, class_id='text'):
    model_artm.scores.add(artm.PerplexityScore(name='PerplexityScore',
                                               dictionary=my_dictionary),
                          overwrite=True)

    model_artm.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore',
                                                class_id=class_id),
                          overwrite=True)

    model_artm.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'),
                          overwrite=True)

    model_artm.scores.add(artm.TopTokensScore(name="top_words",
                                              num_tokens=200,
                                              class_id=class_id),
                          overwrite=True)

    model_artm.scores.add(artm.TopicKernelScore(
        name='TopicKernelScore',
        class_id=class_id,
        probability_mass_threshold=0.6),
                          overwrite=True)
    print('Scores are set!')
Example #20
def train():
    try:
        logger.info("TRAINIG BEGIN")
        batch_vectorizer_mono = artm.BatchVectorizer(
            data_path="./opt/vw/prod/dataset1.vw",
            data_format='vowpal_wabbit',
            target_folder="./opt/vw/prod/dataset_batch")
        logger.info("Batches stage completed")
        model_artm = artm.ARTM(
            num_topics=NUM_TOPICS,
            num_processors=THREADS,
            theta_columns_naming='title',
            # show_progress_bars=True,
            theta_name="prod_theta",
            class_ids={'@modal': 1},
            cache_theta=True)
        dictionary = artm.Dictionary()
        dictionary.gather(data_path=batch_vectorizer_mono.data_path)
        dictionary.filter(min_df_rate=0.01, min_tf=10, inplace=True)
        model_artm.initialize(dictionary=dictionary)
        logger.info("Dictionary stage completed")
        model_artm.scores.add(
            artm.SparsityPhiScore(name='sparsity_phi_score',
                                  class_id='@modal'))
        model_artm.scores.add(
            artm.SparsityThetaScore(name='sparsity_theta_score'))
        model_artm.scores.add(
            artm.TopTokensScore(name='top_tokens_score', class_id="@modal"))
        model_artm.scores.add(
            artm.PerplexityScore(name='perplexity_score',
                                 class_ids={'@modal': 1}))
        model_artm.regularizers.add(
            artm.DecorrelatorPhiRegularizer(name='decorrelator_phi_lab',
                                            tau=1.0e+5,
                                            class_ids=['@modal']))

        model_artm.num_document_passes = 1
        ITERATIONS = [8, 8, 8]
        logger.info("Model initialization stage completed")
        model_artm.fit_offline(batch_vectorizer=batch_vectorizer_mono,
                               num_collection_passes=ITERATIONS[0])
        logger.info("1/3 trainig stage")
        model_artm.regularizers.add(
            artm.SmoothSparseThetaRegularizer(name='sparse_theta_regularizer',
                                              tau=-1.5))
        model_artm.fit_offline(batch_vectorizer=batch_vectorizer_mono,
                               num_collection_passes=ITERATIONS[1])
        logger.info("2/3 trainig stage")
        model_artm.fit_offline(batch_vectorizer=batch_vectorizer_mono,
                               num_collection_passes=ITERATIONS[2])
        logger.info("3/3 trainig stage")
        logger.info("Training is completed")

        # logger.info("Sparsity_phi_score: {}".format(model_artm.score_tracker['sparsity_phi_score'].value))
        # logger.info("Sparsity_theta_score".format(model_artm.score_tracker['sparsity_theta_score'].value))
        logger.info("Perplexity_score".format(
            model_artm.score_tracker['perplexity_score'].value[-1]))
        # delete tm_model_sources folder

        if os.path.isdir('./opt/tm_model/tm_model_sources'):
            shutil.rmtree("./opt/tm_model/tm_model_sources")
            logger.warning("tm_model_sources folder was deleted")

        model_artm.dump_artm_model("./opt/tm_model/tm_model_sources")
        logger.info("TM model is saved")

    except Exception as error:
        logger.critical(
            "During training critical error occured | {0}".format(error))
Example #21
def test_func():
    # constants
    num_tokens = 11
    probability_mass_threshold = 0.9
    sp_reg_tau = -0.1
    decor_tau = 1.5e+5
    decor_rel_tau = 0.3
    num_collection_passes = 15
    num_document_passes = 1
    num_topics = 15
    vocab_size = 6906
    num_docs = 3430

    data_path = os.environ.get('BIGARTM_UNITTEST_DATA')
    batches_folder = tempfile.mkdtemp()

    sp_zero_eps = 0.001
    sparsity_phi_value = [
        0.034, 0.064, 0.093, 0.120, 0.145, 0.170, 0.194, 0.220, 0.246, 0.277,
        0.312, 0.351, 0.390, 0.428, 0.464
    ]

    sparsity_phi_rel_value = [
        0.442, 0.444, 0.444, 0.446, 0.448, 0.449, 0.458, 0.468, 0.476, 0.488,
        0.501, 0.522, 0.574, 0.609, 0.670
    ]

    sparsity_theta_value = [0.0] * num_collection_passes

    perp_zero_eps = 2.0
    perplexity_value = [
        6873, 2590, 2685, 2578, 2603, 2552, 2536, 2481, 2419, 2331, 2235, 2140,
        2065, 2009, 1964
    ]

    perplexity_rel_value = [
        6873, 2667, 2458, 2323, 2150, 2265, 2015, 1967, 1807, 1747, 1713, 1607,
        1632, 1542, 1469
    ]

    top_zero_eps = 0.0001
    top_tokens_num_tokens = [num_tokens * num_topics] * num_collection_passes
    top_tokens_topic_0_tokens = [
        u'party', u'state', u'campaign', u'tax', u'political', u'republican',
        u'senate', u'candidate', u'democratic', u'court', u'president'
    ]
    top_tokens_topic_0_weights = [
        0.0209, 0.0104, 0.0094, 0.0084, 0.0068, 0.0067, 0.0065, 0.0058, 0.0053,
        0.0053, 0.0051
    ]

    ker_zero_eps = 0.02
    topic_kernel_topic_0_contrast = 0.96
    topic_kernel_topic_0_purity = 0.014
    topic_kernel_topic_0_size = 18.0
    topic_kernel_average_size = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13, 0.6, 1.6, 3.53, 7.15, 12.6,
        20.4, 29.06
    ]
    topic_kernel_average_contrast = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.12, 0.31, 0.7, 0.96, 0.96, 0.96,
        0.96, 0.97
    ]
    topic_kernel_average_purity = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.01, 0.015, 0.017, 0.02,
        0.03, 0.04, 0.05
    ]

    len_last_document_ids = 10

    try:
        batch_vectorizer = artm.BatchVectorizer(data_path=data_path,
                                                data_format='bow_uci',
                                                collection_name='kos',
                                                target_folder=batches_folder)

        dictionary = artm.Dictionary()
        dictionary.gather(data_path=batch_vectorizer.data_path)

        model = artm.ARTM(
            topic_names=['topic_{}'.format(i) for i in range(num_topics)],
            dictionary=dictionary.name,
            cache_theta=True)

        model.regularizers.add(
            artm.SmoothSparsePhiRegularizer(name='SparsePhi', tau=sp_reg_tau))
        model.regularizers.add(
            artm.DecorrelatorPhiRegularizer(name='DecorrelatorPhi',
                                            tau=decor_tau))

        model.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'))
        model.scores.add(
            artm.PerplexityScore(name='PerplexityScore',
                                 use_unigram_document_model=False,
                                 dictionary=dictionary))
        model.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))
        model.scores.add(
            artm.TopTokensScore(name='TopTokensScore', num_tokens=num_tokens))
        model.scores.add(
            artm.TopicKernelScore(
                name='TopicKernelScore',
                probability_mass_threshold=probability_mass_threshold))
        model.scores.add(artm.ThetaSnippetScore(name='ThetaSnippetScore'))

        model.num_document_passes = num_document_passes
        model.fit_offline(batch_vectorizer=batch_vectorizer,
                          num_collection_passes=num_collection_passes)

        for i in range(num_collection_passes):
            assert abs(model.score_tracker['SparsityPhiScore'].value[i] -
                       sparsity_phi_value[i]) < sp_zero_eps

        for i in range(num_collection_passes):
            assert abs(model.score_tracker['SparsityThetaScore'].value[i] -
                       sparsity_theta_value[i]) < sp_zero_eps

        for i in range(num_collection_passes):
            assert abs(model.score_tracker['PerplexityScore'].value[i] -
                       perplexity_value[i]) < perp_zero_eps

        for i in range(num_collection_passes):
            assert model.score_tracker['TopTokensScore'].num_tokens[
                i] == top_tokens_num_tokens[i]

        for i in range(num_tokens):
            assert model.score_tracker['TopTokensScore'].last_tokens[
                model.topic_names[0]][i] == top_tokens_topic_0_tokens[i]
            assert abs(model.score_tracker['TopTokensScore'].last_weights[
                model.topic_names[0]][i] -
                       top_tokens_topic_0_weights[i]) < top_zero_eps

        assert len(model.score_tracker['TopicKernelScore'].last_tokens[
            model.topic_names[0]]) > 0

        assert abs(topic_kernel_topic_0_contrast -
                   model.score_tracker['TopicKernelScore'].last_contrast[
                       model.topic_names[0]]) < ker_zero_eps
        assert abs(topic_kernel_topic_0_purity -
                   model.score_tracker['TopicKernelScore'].last_purity[
                       model.topic_names[0]]) < ker_zero_eps
        assert abs(topic_kernel_topic_0_size -
                   model.score_tracker['TopicKernelScore'].last_size[
                       model.topic_names[0]]) < ker_zero_eps

        for i in range(num_collection_passes):
            assert abs(
                model.score_tracker['TopicKernelScore'].average_size[i] -
                topic_kernel_average_size[i]) < ker_zero_eps
            assert abs(
                model.score_tracker['TopicKernelScore'].average_contrast[i] -
                topic_kernel_average_contrast[i]) < ker_zero_eps
            assert abs(
                model.score_tracker['TopicKernelScore'].average_purity[i] -
                topic_kernel_average_purity[i]) < ker_zero_eps

        model.fit_online(batch_vectorizer=batch_vectorizer)

        info = model.info
        assert info is not None
        assert len(info.config.topic_name) == num_topics
        assert len(info.score) >= len(model.score_tracker)
        assert len(info.regularizer) == len(model.regularizers.data)
        assert len(info.cache_entry) > 0

        temp = model.score_tracker['ThetaSnippetScore'].last_document_ids
        assert len_last_document_ids == len(temp)
        assert len(model.score_tracker['ThetaSnippetScore'].last_snippet[
            temp[0]]) == num_topics

        phi = model.get_phi()
        assert phi.shape == (vocab_size, num_topics)
        theta = model.get_theta()
        assert theta.shape == (num_topics, num_docs)

        assert model.library_version.count('.') == 2  # major.minor.patch

        # test relative coefficients for Phi matrix regularizers
        model = artm.ARTM(num_topics=num_topics,
                          dictionary=dictionary.name,
                          cache_theta=False)

        model.regularizers.add(
            artm.DecorrelatorPhiRegularizer(name='DecorrelatorPhi',
                                            tau=decor_rel_tau))
        model.regularizers['DecorrelatorPhi'].gamma = 0.0

        model.scores.add(
            artm.PerplexityScore(name='PerplexityScore',
                                 use_unigram_document_model=False,
                                 dictionary=dictionary))
        model.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))

        model.num_document_passes = num_document_passes
        model.fit_offline(batch_vectorizer=batch_vectorizer,
                          num_collection_passes=num_collection_passes)

        for i in range(num_collection_passes):
            assert abs(model.score_tracker['SparsityPhiScore'].value[i] -
                       sparsity_phi_rel_value[i]) < sp_zero_eps

        for i in range(num_collection_passes):
            assert abs(model.score_tracker['PerplexityScore'].value[i] -
                       perplexity_rel_value[i]) < perp_zero_eps
    finally:
        shutil.rmtree(batches_folder)
Example #22
def topic_model_clf(X, y, topic_num=30):
    labels_decreasing_size_order = list(y.value_counts().index)

    (X_train, X_test, y_train, y_test) = train_test_split(X,
                                                          y,
                                                          test_size=0.2,
                                                          stratify=y,
                                                          random_state=42)

    file_train = 'temp_files/X_train.txt'
    file_test = 'temp_files/X_test.txt'

    temp_df = pd.DataFrame()
    temp_df['text'] = X_train
    temp_df['class_label'] = y_train
    write_vw(temp_df, X_train.index, file_train)

    temp_df = pd.DataFrame()
    temp_df['text'] = X_test
    write_vw(temp_df, X_test.index, file_test)

    if len(glob.glob(os.path.join('batches_train', '*.batch'))) < 1:
        batch_vectorizer_train = artm.BatchVectorizer(
            data_path=file_train,
            data_format='vowpal_wabbit',
            target_folder='batches_train',
            gather_dictionary=True)
    else:
        batch_vectorizer_train = artm.BatchVectorizer(
            data_path='batches_train',
            data_format='batches',
            gather_dictionary=True)

    if len(glob.glob(os.path.join('batches_test', '*.batch'))) < 1:
        batch_vectorizer_test = artm.BatchVectorizer(
            data_path=file_test,
            data_format='vowpal_wabbit',
            target_folder='batches_test',
            gather_dictionary=True)
    else:
        batch_vectorizer_test = artm.BatchVectorizer(data_path='batches_test',
                                                     data_format='batches',
                                                     gather_dictionary=True)

    model = artm.ARTM(num_topics=topic_num,
                      class_ids={
                          '@text': 5.0,
                          '@class_label': 100.0
                      },
                      cache_theta=True,
                      dictionary=batch_vectorizer_train.dictionary,
                      theta_columns_naming='title')

    scores = [
        artm.PerplexityScore(name='Perplexity',
                             dictionary=batch_vectorizer_train.dictionary,
                             class_ids=['@text']),
        artm.SparsityPhiScore(name='SparsityPhiText', class_id='@text'),
        artm.SparsityPhiScore(name='SparsityPhiClasses',
                              class_id='@class_label'),
        artm.SparsityThetaScore(name='SparsityTheta'),
        artm.TopicKernelScore(name='TopicKernelText',
                              probability_mass_threshold=0.1,
                              class_id='@text'),
        artm.TopTokensScore(name='TopTokensText',
                            class_id='@text',
                            num_tokens=20),
        artm.TopTokensScore(name='TopTokensClasses',
                            class_id='@class_label',
                            num_tokens=10)
    ]

    regularizers = [
        artm.DecorrelatorPhiRegularizer(name='DeccorText',
                                        class_ids=['@text'],
                                        tau=10000),
        artm.SmoothSparsePhiRegularizer(name='SmoothPhiText',
                                        class_ids=['@text'],
                                        tau=0),
        artm.SmoothSparsePhiRegularizer(name='SmoothPhiClasses',
                                        class_ids=['@class_label'],
                                        tau=-1),
        # artm.SmoothSparsePhiRegularizer(name='SmoothBackgroundPhi', tau=100, topic_names=['background_topic']),
        artm.SmoothSparseThetaRegularizer(name='SmoothTheta', tau=-1.5),
        # artm.SmoothSparseThetaRegularizer(name='SmoothBackgroundTheta', tau=100, topic_names=['background_topic'])
    ]

    for r in regularizers:
        model.regularizers.add(r)
    for s in scores:
        model.scores.add(s)

    for i in tqdm(range(35)):
        model.fit_offline(batch_vectorizer=batch_vectorizer_train,
                          num_collection_passes=1)

    p_cd = model.transform(batch_vectorizer=batch_vectorizer_test,
                           predict_class_id='@class_label')

    # TODO: optimize this step
    y_pred = p_cd.idxmax(axis=0).astype(int)[[str(x)
                                              for x in X_test.index]].values
    # y_pred = p_cd[[str(x) for x in X_test.index]].idxmax(axis=0).values

    # metrics_visualization(target_pred=y_pred, target_true=y_test,
    #                       top_tokens_class=model.score_tracker['TopTokensClasses'],
    #                       top_tokens_text=model.score_tracker['TopTokensText'],
    #                       score_tracker=model.score_tracker,
    #                       scores_names=['Perplexity', 'SparsityPhiClasses',
    #                                     'SparsityPhiText', 'SparsityTheta'])

    print('Accuracy_score: {}'.format(accuracy_score(y_test, y_pred)))
    plt.hist(y_pred, color='g', label='pred')
    plt.hist(y_test, color='b', alpha=0.7, label='true')
    plt.title('Topic Model')
    plt.show()
    # print(classification_report(y_test, y_pred, labels=labels_decreasing_size_order))

    create_confusion_matrix(y_test,
                            y_pred,
                            labels=labels_decreasing_size_order).savefig(
                                '../../reports/topic_model_conf_matrix.png')

    micro_roc_auc = roc_auc_score(label_binarize(y_test,
                                                 classes=list(range(0, 17))),
                                  p_cd.T,
                                  average='micro')
    macro_roc_auc = roc_auc_score(label_binarize(y_test,
                                                 classes=list(range(0, 17))),
                                  p_cd.T,
                                  average='macro')
    macro_f1 = f1_score(y_test, y_pred, average='macro')
    micro_f1 = f1_score(y_test, y_pred, average='micro')
    log_loss_score = log_loss(y_test, p_cd.T)

    return (micro_roc_auc, macro_roc_auc, micro_f1, macro_f1, log_loss_score,
            precision_recall_fscore_support(
                y_test, y_pred, labels=labels_decreasing_size_order))
Example #23
batch_vectorizer = artm.BatchVectorizer(data_path='lemmed.txt', data_format='vowpal_wabbit', target_folder='batches')

dictionary = batch_vectorizer.dictionary

topic_num = 10
tokens_num = 100
print("ARTM training")
topic_names = ['topic_{}'.format(i) for i in range(topic_num)]
model_artm = artm.ARTM(topic_names=topic_names, dictionary=dictionary, cache_theta=True)
model_plsa = artm.ARTM(topic_names=topic_names, cache_theta=True,
                       scores=[artm.PerplexityScore(name='PerplexityScore', dictionary=dictionary)])
model_lda = artm.LDA(num_topics=topic_num)

model_artm.scores.add(artm.PerplexityScore(name='perplexity_score', dictionary=dictionary))
model_artm.scores.add(artm.SparsityPhiScore(name='sparsity_phi_score'))
model_artm.scores.add(artm.SparsityThetaScore(name='sparsity_theta_score'))
model_artm.scores.add(artm.TopTokensScore(name='top_tokens_score', num_tokens=tokens_num))
model_artm.scores.add(artm.TopicKernelScore(name='topic_kernel_score', probability_mass_threshold=0.3))
model_artm.scores.add(artm.BackgroundTokensRatioScore(name='background_tokens_ratio_score'))
model_artm.scores.add(artm.ClassPrecisionScore(name='class_precision_score'))
model_artm.scores.add(artm.TopicMassPhiScore(name='topic_mass_phi_score'))

model_plsa.scores.add(artm.PerplexityScore(name='perplexity_score', dictionary=dictionary))
model_plsa.scores.add(artm.SparsityPhiScore(name='sparsity_phi_score'))
model_plsa.scores.add(artm.SparsityThetaScore(name='sparsity_theta_score'))
model_plsa.scores.add(artm.TopTokensScore(name='top_tokens_score'))
model_plsa.scores.add(artm.TopicKernelScore(name='topic_kernel_score', probability_mass_threshold=0.3))
model_plsa.scores.add(artm.BackgroundTokensRatioScore(name='background_tokens_ratio_score'))
model_plsa.scores.add(artm.ClassPrecisionScore(name='class_precision_score'))
model_plsa.scores.add(artm.TopicMassPhiScore(name='topic_mass_phi_score'))
Example #24
coherence_score = artm.TopTokensScore(name='TopTokensCoherenceScore',
                                      dictionary=cooc_dict,
                                      num_tokens=15)

model_artm = artm.LDA(num_topics=N_TOPICS)

model_artm._internal_model.scores.add(
    artm.TopTokensScore(name="top_words", num_tokens=10))
model_artm._internal_model.scores.add(coherence_score)
model_artm._internal_model.scores.add(
    artm.PerplexityScore(name='perplexity_score', dictionary=bv.dictionary))
model_artm._internal_model.scores.add(
    artm.SparsityPhiScore(name='sparsity_phi_score'))
model_artm._internal_model.scores.add(
    artm.SparsityThetaScore(name='sparsity_theta_score'))

model_artm.initialize(dictionary=dictionary)
print("Initializing time: {}".format(time.time() - start))

start = time.time()
model_artm.fit_offline(bv, num_collection_passes=N_PASSES)
print("Training time: {}".format(time.time() - start))

if os.path.isdir(SAVE_DIR):
    shutil.rmtree(SAVE_DIR)
model_artm._internal_model.dump_artm_model(SAVE_DIR)
# model_artm = artm.load_artm_model(SAVE_DIR)

for topic_name in model_artm._internal_model.topic_names:
    print("".center(25, "*"))
Example #25
                           artm.SmoothSparseThetaRegularizer(
                               name='SparseTheta', tau=-0.15)
                       ],
                       cache_theta=True)

if not os.path.isfile(filename + '/dictionary.dict'):
    dictionary.gather(data_path=batch_vectorizer.data_path)
    dictionary.save(dictionary_path=filename + '/dictionary.dict')

dictionary.load(dictionary_path=(filename + '/dictionary.dict'))

model_artm.initialize(dictionary=dictionary)

model_artm.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))
model_artm.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'))
model_artm.scores.add(
    artm.TopicKernelScore(name='TopicKernelScore',
                          probability_mass_threshold=0.3))

model_artm.regularizers.add(
    artm.SmoothSparsePhiRegularizer(name='SparsePhi', tau=-0.1))
model_artm.regularizers.add(
    artm.DecorrelatorPhiRegularizer(name='DecorrelatorPhi', tau=1.5e+5))
model_artm.regularizers.add(
    artm.TopicSelectionThetaRegularizer(name='TopicSelection', tau=0.25))

model_artm.regularizers['SparsePhi'].tau = -0.5
model_artm.regularizers['SparseTheta'].tau = -0.5
model_artm.regularizers['DecorrelatorPhi'].tau = 1e+5
Example #26
def calc_coeffs():
    batch_vectorizer = artm.BatchVectorizer(data_path='lemmed.txt', data_format='vowpal_wabbit',
                                            target_folder='batches')

    dictionary = batch_vectorizer.dictionary

    topic_num = 10
    topic_names = ['topic_{}'.format(i) for i in range(topic_num)]
    model_artm = artm.ARTM(topic_names=topic_names, dictionary=dictionary, cache_theta=True)

    model_artm.scores.add(artm.PerplexityScore(name='perplexity_score', dictionary=dictionary))
    model_artm.scores.add(artm.SparsityPhiScore(name='sparsity_phi_score'))
    model_artm.scores.add(artm.SparsityThetaScore(name='sparsity_theta_score'))
    model_artm.scores.add(artm.TopTokensScore(name='top_tokens_score'))
    model_artm.scores.add(artm.TopicKernelScore(name='topic_kernel_score', probability_mass_threshold=0.3))

    model_artm.regularizers.add(artm.SmoothSparsePhiRegularizer(name='sparse_phi_regularizer'))
    model_artm.regularizers.add(artm.SmoothSparseThetaRegularizer(name='sparse_theta_regularizer'))
    model_artm.regularizers.add(artm.DecorrelatorPhiRegularizer(name='decorrelator_phi_regularizer'))

    best_tau_phi = -5.0
    best_tau_theta = -5.0
    best_perplexity = float('inf')

    print("Started parameter search")

    # Coarse grid: tau values from -2.0 to 1.5 in steps of 0.5. Note that the
    # model is not re-initialized between grid points, so each fit_offline
    # call continues training from the previous state.
    for i in range(-20, 20, 5):
        for j in range(-20, 20, 5):
            model_artm.regularizers['sparse_phi_regularizer'].tau = (i / 10.0)
            model_artm.regularizers['sparse_theta_regularizer'].tau = (j / 10.0)
            model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=100)
            if model_artm.score_tracker['perplexity_score'].last_value < best_perplexity:
                best_perplexity = model_artm.score_tracker['perplexity_score'].last_value
                best_tau_phi = (i / 10.0)
                best_tau_theta = (j / 10.0)
                print(best_perplexity, " ", best_tau_phi, " ", best_tau_theta)

    print("RESULT 1 ", best_perplexity, " ", best_tau_phi, " ", best_tau_theta)

    # Refine around the best coarse values with step 0.1.
    for i in range(int(10 * best_tau_phi) - 5, int(10 * best_tau_phi) + 5, 1):
        for j in range(int(10 * best_tau_theta) - 5, int(10 * best_tau_theta) + 5, 1):
            model_artm.regularizers['sparse_phi_regularizer'].tau = (i / 10.0)
            model_artm.regularizers['sparse_theta_regularizer'].tau = (j / 10.0)
            model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=100)
            if model_artm.score_tracker['perplexity_score'].last_value < best_perplexity:
                best_perplexity = model_artm.score_tracker['perplexity_score'].last_value
                best_tau_phi = (i / 10.0)
                best_tau_theta = (j / 10.0)
                print(best_perplexity, " ", best_tau_phi, " ", best_tau_theta)

    print("RESULT 2 ", best_perplexity, " ", best_tau_phi, " ", best_tau_theta)

    # Refine once more with step 0.01.
    for i in range(int(100 * best_tau_phi) - 10, int(100 * best_tau_phi) + 10, 1):
        for j in range(int(100 * best_tau_theta) - 10, int(100 * best_tau_theta) + 10, 1):
            model_artm.regularizers['sparse_phi_regularizer'].tau = (i / 100.0)
            model_artm.regularizers['sparse_theta_regularizer'].tau = (j / 100.0)
            model_artm.fit_offline(batch_vectorizer=batch_vectorizer, num_collection_passes=100)
            if model_artm.score_tracker['perplexity_score'].last_value < best_perplexity:
                best_perplexity = model_artm.score_tracker['perplexity_score'].last_value
                best_tau_phi = (i / 100.0)
                best_tau_theta = (j / 100.0)
                print(best_perplexity, " ", best_tau_phi, " ", best_tau_theta)

    print("RESULT 3 ", best_perplexity, " ", best_tau_phi, " ", best_tau_theta)
    return {"tau_phi": best_tau_phi, "tau_theta": best_tau_theta}
Example #27
def test_func():
    # constants
    dictionary_name = 'dictionary'
    num_tokens = 11
    probability_mass_threshold = 0.9
    sp_reg_tau = -0.1
    decor_tau = 1.5e+5
    num_collection_passes = 15
    num_document_passes = 1
    num_topics = 15
    vocab_size = 6906
    num_docs = 3430

    data_path = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
    batches_folder = tempfile.mkdtemp()

    sp_zero_eps = 0.001
    sparsity_phi_value = [
        0.034, 0.064, 0.093, 0.120, 0.145, 0.170, 0.194, 0.220, 0.246, 0.277,
        0.312, 0.351, 0.390, 0.428, 0.464
    ]

    sparsity_theta_value = [0.0] * num_collection_passes

    perp_zero_eps = 2.0
    perplexity_value = [
        6873, 2590, 2685, 2578, 2603, 2552, 2536, 2481, 2419, 2331, 2235, 2140,
        2065, 2009, 1964
    ]

    top_zero_eps = 0.0001
    top_tokens_num_tokens = [num_tokens * num_topics] * num_collection_passes
    top_tokens_topic_0_tokens = [
        u'party', u'state', u'campaign', u'tax', u'political', u'republican',
        u'senate', u'candidate', u'democratic', u'court', u'president'
    ]
    top_tokens_topic_0_weights = [
        0.0209, 0.0104, 0.0094, 0.0084, 0.0068, 0.0067, 0.0065, 0.0058, 0.0053,
        0.0053, 0.0051
    ]

    ker_zero_eps = 0.01
    topic_kernel_topic_0_contrast = 0.96
    topic_kernel_topic_0_purity = 0.014
    topic_kernel_topic_0_size = 18.0
    topic_kernel_average_size = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13, 0.53, 1.6, 3.33, 7.13, 12.067,
        19.53, 27.8
    ]
    topic_kernel_average_contrast = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.12, 0.25, 0.7, 0.96, 0.96, 0.96,
        0.96, 0.97
    ]
    topic_kernel_average_purity = [
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.01, 0.015, 0.017, 0.02,
        0.03, 0.04, 0.05
    ]

    len_last_document_ids = 10

    try:
        batch_vectorizer = artm.BatchVectorizer(data_path=data_path,
                                                data_format='bow_uci',
                                                collection_name='kos',
                                                target_folder=batches_folder)

        model = artm.ARTM(
            topic_names=['topic_{}'.format(i) for i in xrange(num_topics)],
            cache_theta=True)

        model.gather_dictionary(dictionary_name, batch_vectorizer.data_path)
        model.initialize(dictionary_name=dictionary_name)

        model.regularizers.add(
            artm.SmoothSparsePhiRegularizer(name='SparsePhi', tau=sp_reg_tau))
        model.regularizers.add(
            artm.DecorrelatorPhiRegularizer(name='DecorrelatorPhi',
                                            tau=decor_tau))

        model.scores.add(artm.SparsityThetaScore(name='SparsityThetaScore'))
        model.scores.add(
            artm.PerplexityScore(name='PerplexityScore',
                                 use_unigram_document_model=False,
                                 dictionary_name=dictionary_name))
        model.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))
        model.scores.add(
            artm.TopTokensScore(name='TopTokensScore', num_tokens=num_tokens))
        model.scores.add(
            artm.TopicKernelScore(
                name='TopicKernelScore',
                probability_mass_threshold=probability_mass_threshold))
        model.scores.add(artm.ThetaSnippetScore(name='ThetaSnippetScore'))

        model.num_document_passes = num_document_passes
        model.fit_offline(batch_vectorizer=batch_vectorizer,
                          num_collection_passes=num_collection_passes)

        for i in xrange(num_collection_passes):
            assert abs(model.score_tracker['SparsityPhiScore'].value[i] -
                       sparsity_phi_value[i]) < sp_zero_eps

        for i in xrange(num_collection_passes):
            assert abs(model.score_tracker['SparsityThetaScore'].value[i] -
                       sparsity_theta_value[i]) < sp_zero_eps

        for i in xrange(num_collection_passes):
            assert abs(model.score_tracker['PerplexityScore'].value[i] -
                       perplexity_value[i]) < perp_zero_eps

        for i in xrange(num_collection_passes):
            assert model.score_tracker['TopTokensScore'].num_tokens[
                i] == top_tokens_num_tokens[i]

        for i in xrange(num_tokens):
            assert model.score_tracker['TopTokensScore'].last_tokens[
                model.topic_names[0]][i] == top_tokens_topic_0_tokens[i]
            assert abs(model.score_tracker['TopTokensScore'].last_weights[
                model.topic_names[0]][i] -
                       top_tokens_topic_0_weights[i]) < top_zero_eps

        assert len(model.score_tracker['TopicKernelScore'].last_tokens[
            model.topic_names[0]]) > 0

        assert abs(topic_kernel_topic_0_contrast -
                   model.score_tracker['TopicKernelScore'].last_contrast[
                       model.topic_names[0]]) < ker_zero_eps
        assert abs(topic_kernel_topic_0_purity -
                   model.score_tracker['TopicKernelScore'].last_purity[
                       model.topic_names[0]]) < ker_zero_eps
        assert abs(topic_kernel_topic_0_size -
                   model.score_tracker['TopicKernelScore'].last_size[
                       model.topic_names[0]]) < ker_zero_eps

        for i in xrange(num_collection_passes):
            assert abs(
                model.score_tracker['TopicKernelScore'].average_size[i] -
                topic_kernel_average_size[i]) < ker_zero_eps
            assert abs(
                model.score_tracker['TopicKernelScore'].average_contrast[i] -
                topic_kernel_average_contrast[i]) < ker_zero_eps
            assert abs(
                model.score_tracker['TopicKernelScore'].average_purity[i] -
                topic_kernel_average_purity[i]) < ker_zero_eps

        model.fit_online(batch_vectorizer=batch_vectorizer)

        info = model.info
        assert info is not None
        assert len(info.config.topic_name) == num_topics
        assert len(info.score) == len(model.score_tracker)
        assert len(info.regularizer) == len(model.regularizers.data)
        assert len(info.cache_entry) > 0

        temp = model.score_tracker['ThetaSnippetScore'].last_document_ids
        assert len_last_document_ids == len(temp)
        assert len(model.score_tracker['ThetaSnippetScore'].last_snippet[
            temp[0]]) == num_topics

        phi = model.get_phi()
        assert phi.shape == (vocab_size, num_topics)
        theta = model.get_theta()
        assert theta.shape == (num_topics, num_docs)
    finally:
        shutil.rmtree(batches_folder)
Example #28
path = 'C:\\NIVC\\Nivc_BigARTM_corpus\\unary_comm'
subd = "golosislamacom"
batch_vectorizer = artm.BatchVectorizer(data_path=os.path.join(path, subd,
                                                               "batches_pos"),
                                        data_format='batches')

modelPLSA = artm.ARTM(topic_names=['topic_{}'.format(i) for i in xrange(100)],
                      scores=[
                          artm.PerplexityScore(
                              name='PerplexityScore',
                              use_unigram_document_model=False,
                              dictionary=batch_vectorizer.dictionary,
                              class_ids=["text"]),
                          artm.SparsityPhiScore(name='SparsityPhiScore',
                                                class_id="text"),
                          artm.SparsityThetaScore(name='SparsityThetaScore'),
                          artm.TopicKernelScore(name='TopicKernelScore',
                                                probability_mass_threshold=0.3,
                                                class_id="text"),
                          artm.TopTokensScore(name='TopTokensScore',
                                              num_tokens=100,
                                              class_id="text")
                      ],
                      cache_theta=True)

modelPLSA.initialize(dictionary=batch_vectorizer.dictionary)

modelPLSA.num_document_passes = 5

modelPLSA.fit_offline(batch_vectorizer=batch_vectorizer,
                      num_collection_passes=30)
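After fitting, the configured scores can be read back through score_tracker; a minimal sketch with the names used above:

print(modelPLSA.score_tracker['PerplexityScore'].last_value)
print(modelPLSA.score_tracker['SparsityPhiScore'].last_value)
top_tokens = modelPLSA.score_tracker['TopTokensScore'].last_tokens
for topic_name in modelPLSA.topic_names[:5]:
    print("{}: {}".format(topic_name, top_tokens[topic_name][:10]))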
Example #29
def test_func():
    # constants
    num_tokens = 15
    alpha = 0.01
    beta = 0.02
    num_collection_passes = 15
    num_document_passes = 1
    num_topics = 15
    vocab_size = 6906
    num_docs = 3430
    zero_eps = 0.001

    data_path = os.environ.get('BIGARTM_UNITTEST_DATA')
    batches_folder = tempfile.mkdtemp()

    try:
        batch_vectorizer = artm.BatchVectorizer(data_path=data_path,
                                                data_format='bow_uci',
                                                collection_name='kos',
                                                target_folder=batches_folder)

        dictionary = artm.Dictionary()
        dictionary.gather(data_path=batch_vectorizer.data_path)

        model_artm = artm.ARTM(num_topics=num_topics,
                               dictionary=dictionary,
                               cache_theta=True,
                               reuse_theta=True)

        model_artm.regularizers.add(
            artm.SmoothSparsePhiRegularizer(name='SparsePhi', tau=beta))
        model_artm.regularizers.add(
            artm.SmoothSparseThetaRegularizer(name='SparseTheta', tau=alpha))

        model_artm.scores.add(
            artm.SparsityThetaScore(name='SparsityThetaScore'))
        model_artm.scores.add(
            artm.PerplexityScore(name='PerplexityScore',
                                 dictionary=dictionary))
        model_artm.scores.add(artm.SparsityPhiScore(name='SparsityPhiScore'))
        model_artm.scores.add(
            artm.TopTokensScore(name='TopTokensScore', num_tokens=num_tokens))

        model_lda = artm.LDA(num_topics=num_topics,
                             alpha=alpha,
                             beta=beta,
                             dictionary=dictionary,
                             cache_theta=True)
        model_lda.initialize(dictionary=dictionary)

        model_artm.num_document_passes = num_document_passes
        model_lda.num_document_passes = num_document_passes

        model_artm.fit_offline(batch_vectorizer=batch_vectorizer,
                               num_collection_passes=num_collection_passes)
        model_lda.fit_offline(batch_vectorizer=batch_vectorizer,
                              num_collection_passes=num_collection_passes)

        for i in range(num_collection_passes):
            assert abs(model_artm.score_tracker['SparsityPhiScore'].value[i] -
                       model_lda.sparsity_phi_value[i]) < zero_eps

        for i in range(num_collection_passes):
            assert abs(
                model_artm.score_tracker['SparsityThetaScore'].value[i] -
                model_lda.sparsity_theta_value[i]) < zero_eps

        for i in range(num_collection_passes):
            assert abs(model_artm.score_tracker['PerplexityScore'].value[i] -
                       model_lda.perplexity_value[i]) < zero_eps

        lda_tt = model_lda.get_top_tokens(num_tokens=num_tokens)
        assert len(lda_tt) == num_topics

        for i in range(num_topics):
            for j in range(num_tokens):
                assert model_artm.score_tracker['TopTokensScore'].last_tokens[
                    model_artm.topic_names[i]][j] == lda_tt[i][j]

        lda_tt = model_lda.get_top_tokens(num_tokens=num_tokens,
                                          with_weights=True)
        for i in range(num_tokens):
            assert abs(model_artm.score_tracker['TopTokensScore'].last_weights[
                model_artm.topic_names[0]][i] - lda_tt[0][i][1]) < zero_eps

        model_lda.fit_online(batch_vectorizer=batch_vectorizer)

        phi = model_lda.phi_
        assert phi.shape == (vocab_size, num_topics)
        theta = model_lda.get_theta()
        assert theta.shape == (num_topics, num_docs)

        assert model_lda.library_version.count('.') == 2  # major.minor.patch

        model_lda = artm.LDA(num_topics=num_topics,
                             alpha=alpha,
                             beta=([0.1] * num_topics),
                             dictionary=dictionary,
                             cache_theta=True)
        assert model_lda._internal_model.regularizers.size() == num_topics + 1
    finally:
        shutil.rmtree(batches_folder)
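The final assertion reflects how artm.LDA maps its hyperparameters onto regularizers: a per-topic beta list yields one SmoothSparsePhi regularizer per topic plus a single SmoothSparseTheta regularizer for alpha, hence num_topics + 1. With a scalar beta, two regularizers should presumably suffice; a hedged sketch (dictionary as gathered in the test):

model_scalar = artm.LDA(num_topics=15, alpha=0.01, beta=0.02,
                        dictionary=dictionary, cache_theta=True)
# expected: one SmoothSparsePhi + one SmoothSparseTheta regularizer
print(model_scalar._internal_model.regularizers.size())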
Example #30
def test_func():
    data_path = os.environ.get('BIGARTM_UNITTEST_DATA')
    batches_folder = tempfile.mkdtemp()
    dump_folder = tempfile.mkdtemp()

    try:
        batch_vectorizer = artm.BatchVectorizer(data_path=data_path,
                                                data_format='bow_uci',
                                                collection_name='kos',
                                                target_folder=batches_folder)

        model_1 = artm.ARTM(num_processors=7,
                            cache_theta=True,
                            num_document_passes=5,
                            reuse_theta=True,
                            seed=10,
                            num_topics=15,
                            class_ids={'@default_class': 1.0},
                            theta_name='THETA',
                            dictionary=batch_vectorizer.dictionary)

        model_2 = artm.ARTM(num_processors=7,
                            cache_theta=False,
                            num_document_passes=5,
                            reuse_theta=False,
                            seed=10,
                            num_topics=15,
                            class_ids={'@default_class': 1.0},
                            dictionary=batch_vectorizer.dictionary)

        for model in [model_1, model_2]:
            model.scores.add(
                artm.PerplexityScore(name='perp',
                                     dictionary=batch_vectorizer.dictionary))
            model.scores.add(artm.SparsityThetaScore(name='sp_theta', eps=0.1))
            model.scores.add(artm.TopTokensScore(name='top_tok',
                                                 num_tokens=10))
            model.scores.add(
                artm.SparsityPhiScore(name='sp_nwt',
                                      model_name=model.model_nwt))
            model.scores.add(
                artm.TopicKernelScore(name='kernel',
                                      topic_names=model.topic_names[0:5],
                                      probability_mass_threshold=0.4))

            topic_pairs = {}
            for topic_name_1 in model.topic_names:
                for topic_name_2 in model.topic_names:
                    if topic_name_1 not in topic_pairs:
                        topic_pairs[topic_name_1] = {}
                    topic_pairs[topic_name_1][
                        topic_name_2] = numpy.random.randint(0, 3)

            model.regularizers.add(
                artm.DecorrelatorPhiRegularizer(name='decor',
                                                tau=100000.0,
                                                topic_pairs=topic_pairs))
            model.regularizers.add(
                artm.SmoothSparsePhiRegularizer(
                    name='smsp_phi',
                    tau=-0.5,
                    gamma=0.3,
                    dictionary=batch_vectorizer.dictionary))
            model.regularizers.add(
                artm.SmoothSparseThetaRegularizer(name='smsp_theta',
                                                  tau=0.1,
                                                  doc_topic_coef=[2.0] *
                                                  model.num_topics))
            model.regularizers.add(
                artm.SmoothPtdwRegularizer(name='sm_ptdw', tau=0.1))

            # learn first model and dump it on disc
            model.fit_offline(batch_vectorizer, num_collection_passes=10)
            model.fit_online(batch_vectorizer, update_every=1)

            model.dump_artm_model(os.path.join(dump_folder, 'target'))

            params = {}
            with open(os.path.join(dump_folder, 'target', 'parameters.json'),
                      'r') as fin:
                params = json.load(fin)
            _assert_json_params(params)

            # create second model from the dump and check the results are equal
            model_new = artm.load_artm_model(
                os.path.join(dump_folder, 'target'))

            _assert_params_equality(model, model_new)
            _assert_scores_equality(model, model_new)
            _assert_regularizers_equality(model, model_new)
            _assert_score_values_equality(model, model_new)
            _assert_matrices_equality(model, model_new)

            # continue learning of both models
            model.fit_offline(batch_vectorizer, num_collection_passes=3)
            model.fit_online(batch_vectorizer, update_every=1)

            model_new.fit_offline(batch_vectorizer, num_collection_passes=3)
            model_new.fit_online(batch_vectorizer, update_every=1)

            # check new results are also equal
            _assert_params_equality(model, model_new)
            _assert_scores_equality(model, model_new)
            _assert_regularizers_equality(model, model_new)
            _assert_score_values_equality(model, model_new)
            _assert_matrices_equality(model, model_new)

            shutil.rmtree(os.path.join(dump_folder, 'target'))
    finally:
        shutil.rmtree(batches_folder)
        shutil.rmtree(dump_folder)
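The _assert_* helpers live elsewhere in the test module; a minimal sketch of what the matrix check might look like (an assumption, not the actual helper):

def _assert_matrices_equality(model, model_new):
    # the reloaded model should reproduce the learned p(w|t) matrix exactly
    assert numpy.allclose(model.get_phi().values, model_new.get_phi().values)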