def test_label_finder():
    finder = BigramLabelFinder(measure='pmi', pos=None)
    labels = finder.find(load_nips(years=[2009]), top_n=5)
    assert_equal(labels,
                 [(u'monte', u'carlo'),
                  (u'high', u'dimensional'),
                  (u'does', u'not'),  # not so good
                  (u'experimental', u'results'),
                  (u'nonparametric', u'bayesian')])
def main():
    # n_features, n_topics, n_top_words, n_cand_labels, n_labels,
    # label_min_df and tag_constraints are assumed to be defined at module
    # level as script-wide configuration.
    with open('machine_learning_tweets.json', 'r') as f:
        tweets = json.load(f)['tweets']

    clean_tweets = prepare_tweets(tweets)

    # Hold out 10% of the tweets; model the rest.
    test_sample = random.sample(clean_tweets, int(0.1 * len(clean_tweets)))
    data_sample = list(set(clean_tweets) - set(test_sample))

    tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2,
                                    max_features=n_features,
                                    stop_words='english')
    tf = tf_vectorizer.fit_transform(data_sample)

    lda = LatentDirichletAllocation(n_topics=n_topics, max_iter=5,
                                    learning_method='online',
                                    learning_offset=50.,
                                    random_state=0)
    lda.fit(tf)

    docs = [nltk.word_tokenize(doc) for doc in data_sample]

    finder = BigramLabelFinder('pmi', min_freq=label_min_df,
                               pos=tag_constraints)
    cand_labels = finder.find(docs, top_n=n_cand_labels)

    pmi_cal = PMICalculator(
        doc2word_vectorizer=CountVectorizer(max_df=0.95, min_df=5,
                                            max_features=n_features,
                                            stop_words='english'),
        doc2label_vectorizer=LabelCountVectorizer())
    pmi_w2l = pmi_cal.from_texts(docs, cand_labels)

    tf_feature_names = tf_vectorizer.get_feature_names()
    print_top_words(lda, tf_feature_names, n_top_words)

    ranker = LabelRanker(apply_intra_topic_coverage=False)
    ranked_labels = ranker.top_k_labels(topic_models=lda.components_,
                                        pmi_w2l=pmi_w2l,
                                        index2label=pmi_cal.index2label_,
                                        label_models=None,
                                        k=n_labels)

    print('Labels')
    print()
    for i, labels in enumerate(ranked_labels):
        print(u"Topic {}: {}\n".format(
            i, ', '.join(map(lambda l: ' '.join(l), labels))))
def test_label_finder_with_pos():
    tagger = CorpusPOSTagger()
    finder = BigramLabelFinder(measure='pmi',
                               pos=[('NN', 'NN'), ('JJ', 'NN')])
    docs = load_nips(years=[2009])
    docs = tagger.transform(docs)

    labels = finder.find(docs, top_n=5, strip_tags=False)
    assert_equal(labels,
                 [((u'monte', 'NN'), (u'carlo', 'NN')),
                  ((u'nonparametric', 'JJ'), (u'bayesian', 'NN')),
                  ((u'active', 'JJ'), (u'learning', 'NN')),
                  ((u'machine', 'NN'), (u'learning', 'NN')),
                  ((u'semi-supervised', 'JJ'), (u'learning', 'NN'))])

    labels = finder.find(docs, top_n=5)
    assert_equal(labels,
                 [(u'monte', u'carlo'),
                  (u'nonparametric', u'bayesian'),
                  (u'active', u'learning'),
                  (u'machine', u'learning'),
                  (u'semi-supervised', u'learning')])
def get_topic_labels(corpus_path, n_topics,
                     n_top_words,
                     preprocessing_steps,
                     n_cand_labels, label_min_df,
                     label_tags, n_labels,
                     lda_random_state,
                     lda_n_iter):
    """
    See `create_parser` for a description of the arguments.
    """
    print("Loading docs...")
    docs = load_line_corpus(corpus_path)

    if 'wordlen' in preprocessing_steps:
        print("Word length filtering...")
        wl_filter = CorpusWordLengthFilter(minlen=3)
        docs = wl_filter.transform(docs)

    if 'stem' in preprocessing_steps:
        print("Stemming...")
        stemmer = CorpusStemmer()
        docs = stemmer.transform(docs)

    if 'tag' in preprocessing_steps:
        print("POS tagging...")
        tagger = CorpusPOSTagger()
        tagged_docs = tagger.transform(docs)

    tag_constraints = []
    if label_tags != ['None']:
        for tags in label_tags:
            tag_constraints.append(tuple(map(lambda t: t.strip(),
                                             tags.split(','))))

    if len(tag_constraints) == 0:
        tag_constraints = None  # no POS constraint

    print("Tag constraints: {}".format(tag_constraints))

    print("Generating candidate bigram labels (with POS filtering)...")
    finder = BigramLabelFinder('pmi', min_freq=label_min_df,
                               pos=tag_constraints)
    if tag_constraints:
        assert 'tag' in preprocessing_steps, \
            'If a tag constraint is applied, POS tagging ("tag") must be performed'
        cand_labels = finder.find(tagged_docs, top_n=n_cand_labels)
    else:  # if no constraint, use the untagged docs
        cand_labels = finder.find(docs, top_n=n_cand_labels)

    print("Collected {} candidate labels".format(len(cand_labels)))

    print("Calculating the PMI scores...")
    pmi_cal = PMICalculator(
        doc2word_vectorizer=WordCountVectorizer(
            min_df=5,
            stop_words=load_lemur_stopwords()),
        doc2label_vectorizer=LabelCountVectorizer())
    pmi_w2l = pmi_cal.from_texts(docs, cand_labels)

    print("Topic modeling using LDA...")
    model = lda.LDA(n_topics=n_topics, n_iter=lda_n_iter,
                    random_state=lda_random_state)
    model.fit(pmi_cal.d2w_)

    print("\nTopical words:")
    print("-" * 20)
    for i, topic_dist in enumerate(model.topic_word_):
        top_word_ids = np.argsort(topic_dist)[:-n_top_words:-1]
        topic_words = [pmi_cal.index2word_[id_] for id_ in top_word_ids]
        print('Topic {}: {}'.format(i, ' '.join(topic_words)))

    ranker = LabelRanker(apply_intra_topic_coverage=False)

    return ranker.top_k_labels(topic_models=model.topic_word_,
                               pmi_w2l=pmi_w2l,
                               index2label=pmi_cal.index2label_,
                               label_models=None,
                               k=n_labels)
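# ---------------------------------------------------------------------------
# A minimal usage sketch for `get_topic_labels`. The corpus path and all
# parameter values below are illustrative assumptions for demonstration;
# they are not defaults taken from this repository.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    ranked_labels = get_topic_labels(
        corpus_path='data/nips.dat',            # assumed line-per-doc corpus
        n_topics=10,
        n_top_words=15,
        preprocessing_steps=['wordlen', 'stem', 'tag'],
        n_cand_labels=100,
        label_min_df=5,
        label_tags=['NN,NN', 'JJ,NN'],          # bigram POS patterns
        n_labels=8,
        lda_random_state=12345,
        lda_n_iter=400)

    print("\nTopical labels:")
    print("-" * 20)
    for i, labels in enumerate(ranked_labels):
        print(u"Topic {}: {}".format(
            i, ', '.join(map(lambda l: ' '.join(l), labels))))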