# Assumed test-side imports: nose-style asserts and a CURDIR helper pointing at the
# test file's directory; adjust the `data` import to your package layout.
import os

from nose.tools import assert_equal, assert_true

CURDIR = os.path.dirname(os.path.abspath(__file__))


def test_load_line_corpus():
    docs = data.load_line_corpus(CURDIR + '/../datasets/nips-2008.dat',
                                 tokenize=True)
    assert_equal(len(docs), 250)
    assert_true(isinstance(docs[0], list))

    docs = data.load_line_corpus(CURDIR + '/../datasets/nips-2008.dat',
                                 tokenize=False)
    assert_equal(len(docs), 250)
    assert_true(isinstance(docs[0], str))  # `basestring` is Python 2 only; use `str` on Python 3
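# For reference, a minimal sketch of what `load_line_corpus` is assumed to do, inferred
# from the test above: each line of the file is one document, returned either as a raw
# string or tokenized into a list of words. This is an illustration under those
# assumptions, not the project's actual implementation.
import nltk


def load_line_corpus_sketch(path, tokenize=True):
    docs = []
    with open(path) as f:
        for line in f:
            if tokenize:
                # one document per line, split into word tokens
                docs.append(nltk.word_tokenize(line.strip().lower()))
            else:
                docs.append(line.strip().lower())
    return docs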
# Relies on names imported elsewhere in the project: load_line_corpus, the corpus
# processors, BigramLabelFinder, PMICalculator, LabelRanker, the `lda` package,
# and numpy as np.
def get_topic_labels(
    corpus_path,
    n_topics,
    n_top_words,
    preprocessing_steps,
    n_cand_labels,
    label_min_df,
    label_tags,
    n_labels,
    lda_random_state,
    lda_n_iter,
):
    """
    Refer to `create_parser` for descriptions of the arguments.
    """
    print("Loading docs...")
    docs = load_line_corpus(corpus_path)

    if "wordlen" in preprocessing_steps:
        print("Word length filtering...")
        wl_filter = CorpusWordLengthFilter(minlen=3)
        docs = wl_filter.transform(docs)

    if "stem" in preprocessing_steps:
        print("Stemming...")
        stemmer = CorpusStemmer()
        docs = stemmer.transform(docs)

    if "tag" in preprocessing_steps:
        print("POS tagging...")
        tagger = CorpusPOSTagger()
        tagged_docs = tagger.transform(docs)

    tag_constraints = []
    if label_tags != ["None"]:
        for tags in label_tags:
            tag_constraints.append(tuple(t.strip() for t in tags.split(",")))

    if len(tag_constraints) == 0:
        tag_constraints = None

    print("Tag constraints: {}".format(tag_constraints))

    print("Generating candidate bigram labels (with POS filtering)...")
    finder = BigramLabelFinder("pmi", min_freq=label_min_df, pos=tag_constraints)
    if tag_constraints:
        assert "tag" in preprocessing_steps, \
            "If a tag constraint is applied, POS tagging ('tag') must be performed"
        cand_labels = finder.find(tagged_docs, top_n=n_cand_labels)
    else:  # if there is no constraint, use the untagged docs
        cand_labels = finder.find(docs, top_n=n_cand_labels)

    print("Collected {} candidate labels".format(len(cand_labels)))

    print("Calculating the PMI scores...")
    pmi_cal = PMICalculator(
        doc2word_vectorizer=WordCountVectorizer(
            min_df=5, stop_words=load_lemur_stopwords()),
        doc2label_vectorizer=LabelCountVectorizer(),
    )
    pmi_w2l = pmi_cal.from_texts(docs, cand_labels)

    print("Topic modeling using LDA...")
    model = lda.LDA(n_topics=n_topics, n_iter=lda_n_iter,
                    random_state=lda_random_state)
    model.fit(pmi_cal.d2w_)

    print("\nTopical words:")
    print("-" * 20)
    for i, topic_dist in enumerate(model.topic_word_):
        # take the n_top_words highest-probability words for this topic
        # (the original slice [:-n_top_words:-1] dropped one word)
        top_word_ids = np.argsort(topic_dist)[:-(n_top_words + 1):-1]
        topic_words = [pmi_cal.index2word_[id_] for id_ in top_word_ids]
        print("Topic {}: {}".format(i, " ".join(topic_words)))

    ranker = LabelRanker(apply_intra_topic_coverage=False)

    return ranker.top_k_labels(
        topic_models=model.topic_word_,
        pmi_w2l=pmi_w2l,
        index2label=pmi_cal.index2label_,
        label_models=None,
        k=n_labels,
    )
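# A hypothetical invocation of `get_topic_labels`; the argument values below are
# chosen for illustration only (the real defaults live in `create_parser`). The
# return value is assumed to be one list of ranked bigram-label tuples per topic,
# per `LabelRanker.top_k_labels`.
labels = get_topic_labels(
    corpus_path="datasets/nips-2008.dat",
    n_topics=4,
    n_top_words=15,
    preprocessing_steps=["wordlen", "stem", "tag"],
    n_cand_labels=100,
    label_min_df=5,
    label_tags=["NN,NN", "JJ,NN"],
    n_labels=8,
    lda_random_state=12345,
    lda_n_iter=400,
)
print("\nTopical labels:")
print("-" * 20)
for i, topic_labels in enumerate(labels):
    print("Topic {}: {}".format(
        i, ", ".join(" ".join(label) for label in topic_labels)))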
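# The docstring of `get_topic_labels` refers to `create_parser`, which is not shown
# in this excerpt. Below is a hedged argparse sketch of what such a parser could look
# like, mirroring the function's parameters; the flag names and defaults here are
# assumptions, not the project's actual CLI.
import argparse


def create_parser_sketch():
    parser = argparse.ArgumentParser(
        description="Generate human-readable labels for LDA topics.")
    parser.add_argument("--corpus_path", required=True,
                        help="path to the line corpus, one document per line")
    parser.add_argument("--n_topics", type=int, default=10)
    parser.add_argument("--n_top_words", type=int, default=15)
    parser.add_argument("--preprocessing_steps", nargs="+",
                        default=["wordlen", "stem", "tag"],
                        choices=["wordlen", "stem", "tag"])
    parser.add_argument("--n_cand_labels", type=int, default=100)
    parser.add_argument("--label_min_df", type=int, default=5)
    parser.add_argument("--label_tags", nargs="+", default=["NN,NN", "JJ,NN"],
                        help="allowed POS tag pairs for bigram labels, "
                             "e.g. 'NN,NN'; pass 'None' to disable the constraint")
    parser.add_argument("--n_labels", type=int, default=8)
    parser.add_argument("--lda_random_state", type=int, default=12345)
    parser.add_argument("--lda_n_iter", type=int, default=400)
    return parser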