Example #1
import os
import pickle

import numpy

from lda2vec import Corpus
from lda2vec import preprocess


def text_prep(text_doc, count):
    text_doc = [digit_removal(d) for d in text_doc]
    text_doc = [stop_word_removal(d) for d in text_doc]
    maxlength_doc = 80000
    text_doc = [unicode(clean(d)) for d in text_doc]
    print(text_doc)
    tokens, vocab = preprocess.tokenize(text_doc,
                                        maxlength_doc,
                                        merge=False,
                                        n_threads=4)
    print(tokens, vocab)
    corpus = Corpus()
    corpus.update_word_count(tokens)
    corpus.finalize()
    compact = corpus.to_compact(tokens)
    pruned = corpus.filter_count(compact, min_count=50)
    bow = corpus.compact_to_bow(pruned)
    clean_data = corpus.subsample_frequent(pruned)
    doc_ids = numpy.arange(pruned.shape[0])
    flattened, (doc_ids, ) = corpus.compact_to_flat(pruned, doc_ids)
    assert flattened.min() >= 0
    pickle.dump(
        vocab, open(os.path.join(result_folder,
                                 str(count) + 'vocab.pkl'), 'w'))
    pickle.dump(
        corpus,
        open(os.path.join(result_folder,
                          str(count) + 'corpus.pkl'), 'w'))
    numpy.save("flattened", flattened)
    numpy.save("doc_ids", doc_ids)
    numpy.save("pruned", pruned)
    numpy.save("bow", bow)
Example #2
    def preprocess(self, docs=None):
        """ Uses spaCy to quickly tokenize text and return an array
        of indices.

        This method stores a global NLP directory in memory, and takes
        up to a minute to run for the first time. Later calls will have
        the tokenizer in memory."""

        assert isinstance(docs, list), "expected a list of documents"
        assert all(isinstance(doc, unicode) for doc in docs), \
            "expected unicode, got string"

        self.corpus = Corpus()

        tokens, self.vocab = preprocess.tokenize(docs, self.max_length,
                                                 merge=False, n_threads=4)
        
        # Make a ranked list of rare vs frequent words
        self.corpus.update_word_count(tokens)
        self.corpus.finalize()
        # The tokenization uses spaCy indices, and so may have gaps
        # between indices for words that aren't present in our dataset.
        # This builds a new compact index
        compact = self.corpus.to_compact(tokens)
        # Remove extremely rare words
        pruned = self.corpus.filter_count(compact, min_count=0)
        # Convert the compactified arrays into bag of words arrays
        bow = self.corpus.compact_to_bow(pruned)
        # Words tend to have power law frequency, so selectively
        # downsample the most prevalent words
        clean = self.corpus.subsample_frequent(pruned)
        # Now flatten a 2D array of document per row and word position
        # per column to a 1D array of words. This will also remove skips
        # and OoV words
        self.doc_ids = np.arange(pruned.shape[0])
        self.flattened, (self.doc_ids,) = self.corpus.compact_to_flat(pruned, self.doc_ids)

        self.vectors, s, f = self.corpus.compact_word_vectors(
            self.vocab, model=self.word2vec_model)
        # vectors = np.delete(vectors,77743,0)
        # Model Parameters
        # Number of documents
        self.n_docs = len(docs)  # doc_ids.max() + 1
        # Number of unique words in the vocabulary
        self.n_vocab = self.flattened.max() + 1

        doc_idx, lengths = np.unique(self.doc_ids, return_counts=True)
        self.doc_lengths = np.zeros(self.doc_ids.max() + 1, dtype='int32')
        self.doc_lengths[doc_idx] = lengths
        # Count all token frequencies
        tok_idx, freq = np.unique(self.flattened, return_counts=True)
        self.term_frequency = np.zeros(self.n_vocab, dtype='int32')
        self.term_frequency[tok_idx] = freq

        self.fraction = self.batchsize * 1.0 / self.flattened.shape[0]

        # Get the string representation for every compact key
        self.words = self.corpus.word_list(self.vocab)[:self.n_vocab]
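
The doc_lengths and term_frequency bookkeeping at the end of Example #2 relies only on numpy, so it can be checked in isolation; the toy arrays below are made up for illustration:

import numpy as np

# Toy flattened corpus: one word id per position, with aligned document ids
flattened = np.array([0, 2, 2, 1, 3, 0, 0])
doc_ids = np.array([0, 0, 0, 1, 1, 2, 2])

doc_idx, lengths = np.unique(doc_ids, return_counts=True)
doc_lengths = np.zeros(doc_ids.max() + 1, dtype='int32')
doc_lengths[doc_idx] = lengths        # -> [3, 2, 2]

tok_idx, freq = np.unique(flattened, return_counts=True)
term_frequency = np.zeros(flattened.max() + 1, dtype='int32')
term_frequency[tok_idx] = freq        # -> [3, 1, 2, 1]
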
Example #3
def make_corpus(tokens, min_count=50):
    """ Creates LDA2vec corpus
    :param text:
    :return:
    """
    corpus = Corpus()
    corpus.update_word_count(tokens)
    corpus.finalize()

    compact = corpus.to_compact(tokens)

    pruned = corpus.filter_count(compact, min_count=min_count)
    clean = corpus.subsample_frequent(pruned)
    doc_ids = np.arange(pruned.shape[0])
    flattened, (doc_ids, ) = corpus.compact_to_flat(pruned, doc_ids)
    return corpus, flattened, doc_ids, clean
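
One possible way to call make_corpus, assuming the same preprocess.tokenize step used in the other examples on this page (the two documents are placeholders, and min_count is lowered so the toy vocabulary is not pruned away):

from lda2vec import preprocess

docs = [u'first toy document about topics and words',
        u'second toy document about words and topics']
tokens, vocab = preprocess.tokenize(docs, 10000, merge=False, n_threads=4)
corpus, flattened, doc_ids, clean = make_corpus(tokens, min_count=1)
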
Example #4
def process_data(tokens, vocab, model):
    """
    preprocessing of the data by counting word occurrences and filtering according to these.
    The most frequent words are subsampled, and cleans the vocabulary words according to the
    word2vec models vocabulary

    :param tokens: spacy tokens
    :param vocab: spacy vocabulary
    :param model: word2vec model name
    :return:
    """
    corpus = Corpus()
    # Make a ranked list of rare vs frequent words
    corpus.update_word_count(tokens)
    corpus.finalize()
    # The tokenization uses spaCy indices, and so may have gaps
    # between indices for words that aren't present in our dataset.
    # This builds a new compact index
    compact = corpus.to_compact(tokens)
    # Remove extremely rare words
    pruned = corpus.filter_count(compact, min_count=15)
    # Convert the compactified arrays into bag of words arrays
    bow = corpus.compact_to_bow(pruned)
    # Words tend to have power law frequency, so selectively
    # downsample the most prevalent words
    clean = corpus.subsample_frequent(pruned)
    # Now flatten a 2D array of document per row and word position
    # per column to a 1D array of words. This will also remove skips
    # and OoV words
    doc_ids = np.arange(pruned.shape[0])
    flattened, (doc_ids, ) = corpus.compact_to_flat(pruned, doc_ids)
    assert flattened.min() >= 0
    # Fill in the pretrained word vectors
    #n_dim = 300
    fn_wordvc = model
    print("starts to compact word vectors")
    vectors, s, f = corpus.compact_word_vectors(vocab, filename=fn_wordvc)
    print("done with compact word vectors")
    # Save all of the preprocessed files
    print("now saving files")
    # NOTE: `id` is assumed to be a run-identifier string defined elsewhere in
    # the original project (it shadows the built-in id()).
    pickle.dump(vocab, open('vocab_' + id + '.pkl', 'w'))
    pickle.dump(corpus, open('corpus_' + id + '.pkl', 'w'))
    np.save('flattened_' + id, flattened)
    np.save('doc_ids_' + id, doc_ids)
    np.save('pruned_' + id, pruned)
    # np.save('bow_' + id, bow)  # does not seem to be necessary for lda2vec_run.py
    np.save('vectors_' + id, vectors)
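
A training script might read the artifacts written by process_data back in with the matching numpy and pickle calls; run_id below is a placeholder for whatever string was bound to id when saving:

import pickle
import numpy as np

run_id = 'my_run'  # placeholder for the id string used by process_data
vocab = pickle.load(open('vocab_' + run_id + '.pkl', 'r'))
corpus = pickle.load(open('corpus_' + run_id + '.pkl', 'r'))
flattened = np.load('flattened_' + run_id + '.npy')
doc_ids = np.load('doc_ids_' + run_id + '.npy')
pruned = np.load('pruned_' + run_id + '.npy')
vectors = np.load('vectors_' + run_id + '.npy')
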
Example #5
bad = set(["ax>", '`@("', '---', '===', '^^^'])


def clean(line):
    return ' '.join(w for w in line.split() if not any(t in w for t in bad))
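
# Quick sanity check of clean() on a made-up line: any whitespace-separated
# token containing one of the markers in `bad` is dropped entirely.
assert clean('hello ax>foo === world') == 'hello world'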


# Preprocess data
max_length = 10000  # Limit of 10k words per document
# `texts` is the list of raw document strings loaded earlier in the script.
# Convert to unicode (spaCy only works with unicode)
texts = [unicode(clean(d)) for d in texts]
tokens, vocab = preprocess.tokenize(texts,
                                    max_length,
                                    merge=False,
                                    n_threads=4)
corpus = Corpus()
# Make a ranked list of rare vs frequent words
corpus.update_word_count(tokens)
corpus.finalize()
# The tokenization uses spaCy indices, and so may have gaps
# between indices for words that aren't present in our dataset.
# This builds a new compact index
compact = corpus.to_compact(tokens)
# Remove extremely rare words
pruned = corpus.filter_count(compact, min_count=30)
# Convert the compactified arrays into bag of words arrays
bow = corpus.compact_to_bow(pruned)
# Words tend to have power law frequency, so selectively
# downsample the most prevalent words
clean = corpus.subsample_frequent(pruned)
# Now flatten a 2D array of document per row and word position