import codecs
import itertools

import lda
import numpy as np
from numpy.testing import assert_equal, assert_array_almost_equal
from sklearn.feature_extraction.text import CountVectorizer

# load_line_corpus, transform, ALL_PIPELINE_NAMES and
# doc_topic_strengths_over_periods are project-local helpers; their import
# paths are not shown in this file.


def test_doc_topic_strengths_over_periods():
    # 2 topics, 2 periods (2 and 3 documents respectively)
    doc_topic_matrix = np.asarray([[0.1, 0.9],
                                   [0.2, 0.8],
                                   [0.8, 0.2],
                                   [0.7, 0.3],
                                   [0.3, 0.7]])
    period2docs = {'p1': [0, 1],
                   'p2': [2, 3, 4]}
    actual = doc_topic_strengths_over_periods(doc_topic_matrix, period2docs)

    expected = {'p1': np.asarray([0.15, 0.85]),
                'p2': np.asarray([0.6, 0.4])}

    assert_equal(len(actual), 2)
    assert_array_almost_equal(actual['p1'], expected['p1'])
    assert_array_almost_equal(actual['p2'], expected['p2'])
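

# ``doc_topic_strengths_over_periods`` is defined elsewhere in the project.
# A minimal sketch of the behaviour the test above expects, assuming the
# function averages the per-document topic distributions within each period;
# the ``_sketch`` name is hypothetical, for illustration only:
def doc_topic_strengths_over_periods_sketch(doc_topic_matrix, period2docs):
    # Row-wise mean of the topic distributions of the documents in each
    # period, keyed by period label.
    return {period: doc_topic_matrix[doc_ids].mean(axis=0)
            for period, doc_ids in period2docs.items()}
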
def main():
    # parameters
    collection_name = "nips"
    years = range(2008, 2015)  # 2008 through 2014 inclusive
    n_topics = 6
    n_top_words = 15
    
    # load corpus
    corpus_paths = ["data/{}-{}.dat".format(collection_name, y)
                    for y in years]
    all_corpus = []
    year2corpus = {}
    for year, path in zip(years, corpus_paths):
        corpus = list(load_line_corpus(path))
        all_corpus.append(corpus)
        year2corpus[year] = corpus

    all_corpus = list(itertools.chain.from_iterable(all_corpus))

    def preprocessor(doc):
        return ' '.join(transform(doc, ALL_PIPELINE_NAMES))

    def tokenizer(doc):
        return doc.split()
    
    with codecs.open('data/lemur-stopwords.txt',
                     'r', encoding='utf8') as f:
        stop_words = [line.strip() for line in f]

    vectorizer = CountVectorizer(preprocessor=preprocessor,
                                 tokenizer=tokenizer,
                                 stop_words=stop_words,
                                 min_df=5)

    X = vectorizer.fit_transform(all_corpus)

    id2word = {id_: word
               for word, id_ in vectorizer.vocabulary_.items()}
    
    # build the model
    model = lda.LDA(n_topics=n_topics, n_iter=700,
                    # alpha=1.0, eta=1.0,
                    random_state=1)
    model.fit(X)
    
    # print topics
    for i, topic_dist in enumerate(model.topic_word_):
        # argsort is ascending, so take the n_top_words largest word ids,
        # highest-probability first
        top_word_ids = np.argsort(topic_dist)[:-(n_top_words + 1):-1]
        topic_words = [id2word[id_] for id_ in top_word_ids]
        print('Topic {}: {}'.format(i, ' '.join(topic_words)))
        
    # map each year to the contiguous block of row indices its documents
    # occupy in the stacked corpus / doc-topic matrix
    year2docs = {}
    start_document_index = 0

    for year in years:
        corpus_size = len(year2corpus[year])
        end_document_index = start_document_index + corpus_size
        year2docs[year] = np.arange(start_document_index, end_document_index)
        start_document_index = end_document_index

    tbl = doc_topic_strengths_over_periods(model.doc_topic_, year2docs)
    print(tbl)
    print(np.array(list(tbl.values())))
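

# Standard entry-point guard so the module can be imported (e.g. by a test
# runner) without training the model as a side effect.
if __name__ == '__main__':
    main()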