# Assumed imports (numpy, pickle and the lda2vec Corpus helper; exact paths
# may differ in this repository):
import pickle
import numpy as np
from lda2vec import Corpus


def process_data(tokens, vocab, model):
    """Preprocess the data by counting word occurrences and filtering on
    those counts. The most frequent words are subsampled, and the
    vocabulary is cleaned against the word2vec model's vocabulary.

    :param tokens: spaCy tokens
    :param vocab: spaCy vocabulary
    :param model: word2vec model name
    :return:
    """
    corpus = Corpus()
    # Make a ranked list of rare vs frequent words
    corpus.update_word_count(tokens)
    corpus.finalize()
    # The tokenization uses spaCy indices, and so may have gaps
    # between indices for words that aren't present in our dataset.
    # This builds a new compact index
    compact = corpus.to_compact(tokens)
    # Remove extremely rare words
    pruned = corpus.filter_count(compact, min_count=15)
    # Convert the compactified arrays into bag of words arrays
    bow = corpus.compact_to_bow(pruned)
    # Words tend to have power law frequency, so selectively
    # downsample the most prevalent words
    clean = corpus.subsample_frequent(pruned)
    # Now flatten a 2D array of document per row and word position
    # per column to a 1D array of words. This will also remove skips
    # and OoV words
    doc_ids = np.arange(pruned.shape[0])
    flattened, (doc_ids, ) = corpus.compact_to_flat(pruned, doc_ids)
    assert flattened.min() >= 0
    # Fill in the pretrained word vectors
    # n_dim = 300
    fn_wordvc = model
    print("starts to compact word vectors")
    vectors, s, f = corpus.compact_word_vectors(vocab, filename=fn_wordvc)
    print("done with compact word vectors")
    # Save all of the preprocessed files
    print("now saving files")
    pickle.dump(vocab, open('vocab_' + id + '.pkl', 'w'))
    pickle.dump(corpus, open('corpus_' + id + '.pkl', 'w'))
    np.save('flattened_' + id, flattened)
    np.save('doc_ids_' + id, doc_ids)
    np.save('pruned_' + id, pruned)
    # np.save('bow_' + id, bow)  # does not seem to be necessary for lda2vec_run.py
    np.save('vectors_' + id, vectors)
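# A minimal usage sketch for process_data. It is only illustrative: the
# document list and output identifier are placeholders, and tokenization
# reuses the preprocess.tokenize call shown in the featurizer class below.
# Note that process_data expects a module-level `id` string used to name
# its output files.
from lda2vec import preprocess

id = 'demo'  # illustrative identifier appended to the output filenames
texts = [u"first example document", u"second example document"]
tokens, vocab = preprocess.tokenize(texts, 1000, merge=False, n_threads=4)
process_data(tokens, vocab, 'GoogleNews-vectors-negative300.bin')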
corpus.finalize()
# The tokenization uses spaCy indices, and so may have gaps
# between indices for words that aren't present in our dataset.
# This builds a new compact index
compact = corpus.to_compact(tokens)
# Remove extremely rare words
pruned = corpus.filter_count(compact, min_count=30)
# Convert the compactified arrays into bag of words arrays
bow = corpus.compact_to_bow(pruned)
# Words tend to have power law frequency, so selectively
# downsample the most prevalent words
clean = corpus.subsample_frequent(pruned)
# Now flatten a 2D array of document per row and word position
# per column to a 1D array of words. This will also remove skips
# and OoV words
doc_ids = np.arange(pruned.shape[0])
flattened, (doc_ids, ) = corpus.compact_to_flat(pruned, doc_ids)
assert flattened.min() >= 0
# Fill in the pretrained word vectors
n_dim = 300
fn_wordvc = 'GoogleNews-vectors-negative300.bin'
vectors, s, f = corpus.compact_word_vectors(vocab, filename=fn_wordvc)
# Save all of the preprocessed files
pickle.dump(vocab, open('vocab.pkl', 'w'))
pickle.dump(corpus, open('corpus.pkl', 'w'))
np.save("flattened", flattened)
np.save("doc_ids", doc_ids)
np.save("pruned", pruned)
np.save("bow", bow)
np.save("vectors", vectors)
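# The artifacts saved above can be reloaded later for training. A minimal
# sketch, assuming the files were written by the snippet above and sit in
# the current working directory:
import pickle
import numpy as np

vocab = pickle.load(open('vocab.pkl'))
corpus = pickle.load(open('corpus.pkl'))
flattened = np.load('flattened.npy')
doc_ids = np.load('doc_ids.npy')
vectors = np.load('vectors.npy')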
# Imports assumed from the lda2vec / chainer / gensim stack used below
# (exact module paths may differ in this repository):
import time
import logging
from collections import defaultdict

import numpy as np
import chainer
from chainer import cuda
from chainer import optimizers as O
from gensim.models import KeyedVectors

from lda2vec import preprocess, Corpus, utils
from lda2vec import prepare_topics, print_top_words_per_topic, topic_coherence
from lda2vec_model import LDA2Vec


class Lda2VecFeaturizer:

    def __init__(self,
                 clambda=200,
                 n_topics=10,
                 batchsize=4096,
                 power=0.75,
                 words_pretrained=True,
                 temperature=1,
                 max_length=1000,
                 min_count=0,
                 word2vec_path=None):
        # 'Strength' of the Dirichlet prior; 200.0 seems to work well
        self.clambda = clambda
        # Number of topics to fit
        self.n_topics = n_topics  # int(os.getenv('n_topics', 10))
        self.batchsize = batchsize
        # Power for negative sampling
        self.power = power  # float(os.getenv('power', 0.75))
        # Initialize with pretrained word vectors
        self.words_pretrained = words_pretrained  # bool(int(os.getenv('pretrained', True)))
        self.temp = temperature
        self.max_length = max_length
        self.min_count = min_count
        self.word2vec_model = KeyedVectors.load_word2vec_format(word2vec_path, binary=True)

    def preprocess(self, docs=None):
        """Uses spaCy to quickly tokenize text and return an array of indices.

        This method stores a global NLP directory in memory, and takes up to
        a minute to run for the first time. Later calls will have the
        tokenizer in memory."""
        assert isinstance(docs, list), "input a list of documents"
        assert all(isinstance(doc, unicode) for doc in docs), "expected unicode, got string"
        self.corpus = Corpus()
        tokens, self.vocab = preprocess.tokenize(docs, self.max_length,
                                                 merge=False, n_threads=4)
        # Make a ranked list of rare vs frequent words
        self.corpus.update_word_count(tokens)
        self.corpus.finalize()
        # The tokenization uses spaCy indices, and so may have gaps
        # between indices for words that aren't present in our dataset.
        # This builds a new compact index
        compact = self.corpus.to_compact(tokens)
        # Remove extremely rare words
        pruned = self.corpus.filter_count(compact, min_count=0)
        # Convert the compactified arrays into bag of words arrays
        bow = self.corpus.compact_to_bow(pruned)
        # Words tend to have power law frequency, so selectively
        # downsample the most prevalent words
        clean = self.corpus.subsample_frequent(pruned)
        # Now flatten a 2D array of document per row and word position
        # per column to a 1D array of words.
        # This will also remove skips and OoV words
        self.doc_ids = np.arange(pruned.shape[0])
        self.flattened, (self.doc_ids,) = self.corpus.compact_to_flat(pruned, self.doc_ids)
        self.vectors, s, f = self.corpus.compact_word_vectors(self.vocab,
                                                              model=self.word2vec_model)
        # vectors = np.delete(vectors, 77743, 0)

        # Model Parameters
        # Number of documents
        self.n_docs = len(docs)  # doc_ids.max() + 1
        # Number of unique words in the vocabulary
        self.n_vocab = self.flattened.max() + 1
        doc_idx, lengths = np.unique(self.doc_ids, return_counts=True)
        self.doc_lengths = np.zeros(self.doc_ids.max() + 1, dtype='int32')
        self.doc_lengths[doc_idx] = lengths
        # Count all token frequencies
        tok_idx, freq = np.unique(self.flattened, return_counts=True)
        self.term_frequency = np.zeros(self.n_vocab, dtype='int32')
        self.term_frequency[tok_idx] = freq
        self.fraction = self.batchsize * 1.0 / self.flattened.shape[0]
        # Get the string representation for every compact key
        self.words = self.corpus.word_list(self.vocab)[:self.n_vocab]

    def train(self, docs=None, epochs=200, update_words=False, update_topics=True):
        """Takes the training documents as a list, preprocesses them, and
        returns a dictionary `data` containing the topic distribution,
        vocabulary, document lengths and topic-word distribution."""
        texts = docs
        docs = []
        for text in texts:
            docs.append(unicode(" ".join(word for word in text.split()
                                         if word in self.word2vec_model.vocab)))
        logging.info("preprocessing...")
        self.preprocess(docs)
        logging.info('preprocessed!')
        self.train_model = LDA2Vec(n_documents=self.n_docs,
                                   n_document_topics=self.n_topics,
                                   n_units=300,
                                   n_vocab=self.n_vocab,
                                   counts=self.term_frequency,
                                   n_samples=15,
                                   power=self.power,
                                   temperature=self.temp)
        if self.words_pretrained:
            self.train_model.sampler.W.data = self.vectors[:self.n_vocab, :]
        optimizer = O.Adam()
        optimizer.setup(self.train_model)
        clip = chainer.optimizer.GradientClipping(5.0)
        optimizer.add_hook(clip)
        j = 0
        msgs = defaultdict(list)
        for epoch in range(epochs):
            print "epoch : ", epoch
            data = prepare_topics(cuda.to_cpu(self.train_model.mixture.weights.W.data).copy(),
                                  cuda.to_cpu(self.train_model.mixture.factors.W.data).copy(),
                                  cuda.to_cpu(self.train_model.sampler.W.data).copy(),
                                  self.words)
            top_words = print_top_words_per_topic(data)
            if j % 100 == 0 and j > 100:
                coherence = topic_coherence(top_words)
                for j in range(self.n_topics):
                    print j, coherence[(j, 'cv')]
                kw = dict(top_words=top_words, coherence=coherence, epoch=epoch)
                # progress[str(epoch)] = pickle.dumps(kw)
            data['doc_lengths'] = self.doc_lengths
            data['term_frequency'] = self.term_frequency
            # np.savez('topics.pyldavis', **data)
            for d, f in utils.chunks(self.batchsize, self.doc_ids, self.flattened):
                t0 = time.time()
                optimizer.zero_grads()
                l = self.train_model.fit_partial(d.copy(), f.copy(),
                                                 update_words=update_words,
                                                 update_topics=update_topics)
                prior = self.train_model.prior()
                loss = prior * self.fraction
                loss.backward()
                optimizer.update()
                msg = ("J:{j:05d} E:{epoch:05d} L:{loss:1.3e} "
                       "P:{prior:1.3e} R:{rate:1.3e}")
                prior.to_cpu()
                loss.to_cpu()
                t1 = time.time()
                dt = t1 - t0
                rate = self.batchsize / dt
                msgs["E"].append(epoch)
                msgs["L"].append(float(l))
                j += 1
                logs = dict(loss=float(l), epoch=epoch, j=j,
                            prior=float(prior.data), rate=rate)
                print msg.format(**logs)
            print "\n ================================= \n"
            # serializers.save_hdf5("lda2vec.hdf5", self.model)
            msgs["loss_per_epoch"].append(float(l))
        return data, msgs

    def initialize_infer(self,
                         clambda=200,
                         batchsize=4096,
                         power=0.75,
                         words_pretrained=True,
                         temperature=1,
                         max_length=1000,
                         min_count=0):
        """Initializes parameters for testing, if needed. Usually not called."""
        # 'Strength' of the Dirichlet prior; 200.0 seems to work well
        self.clambda = clambda
        # Minibatch size
        self.batchsize = batchsize
        # Power for negative sampling
        self.power = power  # float(os.getenv('power', 0.75))
        # Initialize with pretrained word vectors
        self.words_pretrained = words_pretrained  # bool(int(os.getenv('pretrained', True)))
        self.temp = temperature
        self.max_length = max_length
        self.min_count = min_count
        logging.info('Test parameters initialized!')

    def infer(self, docs=None, epochs=200, update_words=False,
              update_topics=False, topic_vectors=None):
        """Infers the features of new documents by running the lda2vec
        algorithm again, updating only the topic distributions."""
        texts = docs
        docs = []
        for text in texts:
            docs.append(unicode(" ".join(word for word in text.split()
                                         if word in self.word2vec_model.vocab)))
        logging.info("preprocessing")
        self.preprocess(docs)
        logging.info('preprocessed!')
        self.infer_model = LDA2Vec(n_documents=self.n_docs,
                                   n_document_topics=self.n_topics,
                                   n_units=300,
                                   n_vocab=self.n_vocab,
                                   counts=self.term_frequency,
                                   n_samples=15,
                                   power=self.power,
                                   temperature=self.temp)
        if self.words_pretrained:
            self.infer_model.sampler.W.data = self.vectors[:self.n_vocab, :]
        self.infer_model.mixture.factors.W.data = self.train_model.mixture.factors.W.data
        if topic_vectors is not None:
            assert topic_vectors.shape == self.infer_model.mixture.factors.W.data.shape, \
                "topic vectors shape doesn't match"
            self.infer_model.mixture.factors.W.data = topic_vectors
        optimizer = O.Adam()
        optimizer.setup(self.infer_model)
        clip = chainer.optimizer.GradientClipping(5.0)
        optimizer.add_hook(clip)
        j = 0
        msgs = defaultdict(list)
        for epoch in range(epochs):
            print "epoch : ", epoch
            data = prepare_topics(cuda.to_cpu(self.infer_model.mixture.weights.W.data).copy(),
                                  cuda.to_cpu(self.infer_model.mixture.factors.W.data).copy(),
                                  cuda.to_cpu(self.infer_model.sampler.W.data).copy(),
                                  self.words)
            top_words = print_top_words_per_topic(data)
            if j % 100 == 0 and j > 100:
                coherence = topic_coherence(top_words)
                for j in range(self.n_topics):
                    print j, coherence[(j, 'cv')]
                kw = dict(top_words=top_words, coherence=coherence, epoch=epoch)
                # progress[str(epoch)] = pickle.dumps(kw)
            data['doc_lengths'] = self.doc_lengths
            data['term_frequency'] = self.term_frequency
            # np.savez('topics.pyldavis', **data)
            for d, f in utils.chunks(self.batchsize, self.doc_ids, self.flattened):
                t0 = time.time()
                optimizer.zero_grads()
                l = self.infer_model.fit_partial(d.copy(), f.copy(),
                                                 update_words=update_words,
                                                 update_topics=update_topics)
                prior = self.infer_model.prior()
                loss = prior * self.fraction
                loss.backward()
                optimizer.update()
                msg = ("J:{j:05d} E:{epoch:05d} L:{loss:1.3e} "
                       "P:{prior:1.3e} R:{rate:1.3e}")
                prior.to_cpu()
                loss.to_cpu()
                t1 = time.time()
                dt = t1 - t0
                rate = self.batchsize / dt
                msgs["E"].append(epoch)
                msgs["L"].append(float(l))
                j += 1
                logs = dict(loss=float(l), epoch=epoch, j=j,
                            prior=float(prior.data), rate=rate)
                print msg.format(**logs)
            print "\n ================================= \n"
            # serializers.save_hdf5("lda2vec.hdf5", self.model)
            msgs["loss_per_epoch"].append(float(l))
        return data, msgs
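# A minimal usage sketch for the featurizer. The word2vec path and document
# lists are placeholders; the constructor and method signatures are taken
# from the class defined above. train() must be called before infer(), since
# infer() reuses the trained topic factors.
featurizer = Lda2VecFeaturizer(n_topics=10,
                               word2vec_path='GoogleNews-vectors-negative300.bin')
train_docs = ["some training text", "another training document"]
data, msgs = featurizer.train(train_docs, epochs=200)
new_docs = ["an unseen document to featurize"]
infer_data, infer_msgs = featurizer.infer(new_docs, epochs=50)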
# between indices for words that aren't present in our dataset.
# This builds a new compact index
compact = corpus.to_compact(tokens)
# Remove extremely rare words
pruned = corpus.filter_count(compact, min_count=30)
# Convert the compactified arrays into bag of words arrays
bow = corpus.compact_to_bow(pruned)
# Words tend to have power law frequency, so selectively
# downsample the most prevalent words
clean = corpus.subsample_frequent(pruned)
# Now flatten a 2D array of document per row and word position
# per column to a 1D array of words. This will also remove skips
# and OoV words
doc_ids = np.arange(pruned.shape[0])
flattened, (doc_ids,) = corpus.compact_to_flat(pruned, doc_ids)
assert flattened.min() >= 0
# Fill in the pretrained word vectors
n_dim = 300
vectors = np.random.randn(flattened.max() + 1, n_dim) * 0.1331
fn_wordvc = 'GoogleNews-vectors-negative300.bin'
vectors, s, f = corpus.compact_word_vectors(vocab, filename=fn_wordvc,
                                            use_spacy=False)
# Save all of the preprocessed files
pickle.dump(vocab, open('vocab.pkl', 'w'))
pickle.dump(corpus, open('corpus.pkl', 'w'))
np.save("flattened", flattened)
np.save("doc_ids", doc_ids)
np.save("pruned", pruned)
np.save("bow", bow)
np.save("vectors", vectors)
# between indices for words that aren't present in our dataset.
# This builds a new compact index
compact = corpus.to_compact(tokens)
# Remove extremely rare words
pruned = corpus.filter_count(compact, min_count=50)
# Words tend to have power law frequency, so selectively
# downsample the most prevalent words
clean = corpus.subsample_frequent(pruned)
# Now flatten a 2D array of document per row and word position
# per column to a 1D array of words. This will also remove skips
# and OoV words
doc_ids = np.arange(pruned.shape[0])
flattened, (doc_ids,) = corpus.compact_to_flat(pruned, doc_ids)
# Optionally, we can initialize our word vectors from a pretrained
# model. This helps when our corpus is small and we'd like to bootstrap
word_vectors = corpus.compact_word_vectors(vocab)
# Model Parameters
# Number of documents
n_docs = len(texts)
# Number of unique words in the vocabulary
n_words = flattened.max() + 1
# Number of dimensions in a single word vector
# (if using pretrained vectors, should match that dimensionality)
n_hidden = 300
# Number of topics to fit
n_topics = 20
# Get the count for each key
counts = corpus.keys_counts[:n_words]
# Get the string representation for every compact key
words = corpus.word_list(vocab)[:n_words]
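# A minimal sketch of how these parameters could be fed to the LDA2Vec model
# used elsewhere in this section. The keyword names mirror the constructor
# calls in the featurizer class above; n_samples, power, and temperature are
# assumed values, not taken from this snippet.
model = LDA2Vec(n_documents=n_docs,
                n_document_topics=n_topics,
                n_units=n_hidden,
                n_vocab=n_words,
                counts=counts,
                n_samples=15,
                power=0.75,
                temperature=1.0)
# Optionally seed the word embeddings with the pretrained vectors
model.sampler.W.data = word_vectors[:n_words, :]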