def scores(self, docId):
    """
    Return the score from the given document to every other document in the
    index. Documents not listed are assumed to have no similarity detected by
    shared terms.

    :param docId: ID of doc to compare other docs to.
    :returns: A FreqDist mapping document IDs to similarity scores. Larger
        scores are better.
    """
    if not self._idf:
        self._computeIdfs()

    # Track the scores
    docScores = FreqDist()

    for termid, freq in self.termFrequencies[docId].iteritems():
        # Find the frequency with which this term appears in other documents.
        inverseDocumentFrequency = self._idf[termid]

        for otherDocId in self.termsToDocuments[termid]:
            if otherDocId == docId:
                # Skip this document
                continue

            # Find the term frequency of the term in the other document
            # (not in the query document itself).
            otherFreq = self.termFrequencies[otherDocId][termid]

            # Score proportional to product of frequencies times the inverse
            # of the document frequency.
            docScores.inc(otherDocId, freq * otherFreq * inverseDocumentFrequency)

    return docScores
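Every snippet in this collection targets the pre-3.0 NLTK API, where counts are accumulated with FreqDist.inc() / ConditionalFreqDist[...].inc() and iterated with iteritems(). In NLTK 3 those methods were removed and FreqDist became a collections.Counter subclass, so a minimal sketch of the modern equivalents (Python 3, made-up token stream) looks like this; the remaining snippets are left in their original NLTK 2 / Python 2 form.

# Minimal sketch of NLTK 3 equivalents for the .inc() calls used throughout
# these snippets; the token stream here is a made-up example.
from nltk.probability import FreqDist, ConditionalFreqDist

tokens = ["the", "cat", "sat", "on", "the", "mat"]

fd = FreqDist()
cfd = ConditionalFreqDist()
for word in tokens:
    fd[word.lower()] += 1            # was: fd.inc(word.lower())
    cfd['pos'][word.lower()] += 1    # was: cfd['pos'].inc(word.lower())

# Weighted increments, fd.inc(word, count), become ordinary additions:
fd['dog'] += 5

# N(), max() and freq() work as before:
print(fd.N(), fd.max(), fd.freq('the'))
# iteritems() is gone; use items() or most_common():
for word, count in fd.most_common(3):
    print(word, count)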
def store_freqdists(self): """ Build NLTK frequency distributions based on feature counts and store them to Redis. """ #TODO: this step and the above may possibly be combined word_fd = FreqDist() label_word_freqdist = ConditionalFreqDist() pos_words = self.r.zrange('positive_wordcounts', 0, -1, withscores=True, desc=True) neg_words = self.r.zrange('negative_wordcounts', 0, -1, withscores=True, desc=True) assert pos_words and neg_words, 'Requires wordcounts to be stored in redis.' #build a condtional freqdist with the feature counts per label for word, count in pos_words: word_fd.inc(word, count) label_word_freqdist['positive'].inc(word, count) for word,count in neg_words: word_fd.inc(word, count) label_word_freqdist['negative'].inc(word, count) self.pickle_store('word_fd', word_fd) self.pickle_store('label_fd', label_word_freqdist)
def choose_tag(self, tokens, index, history):
    tags = FreqDist()
    for tagger in self._taggers:
        tags.inc(tagger.choose_tag(tokens, index, history))
    return tags.max()
class IncrementalNaiveBayes(object): """Builds the NB model externally, allowing incremental training. The source for this class is taken from the NLTK NaiveBayesClassifier class. Specifically, the train() method is split up so that training can be done incrementally instead of forcing it to be done all at once. """ def __init__(self): self.label_freqdist = FreqDist() self.feature_freqdist = defaultdict(FreqDist) self.feature_values = defaultdict(set) self.fnames = set() def train(self, labeled_featuresets): """Incrementally train the NB classifier. :param labeled_featuresets: A list of classified featuresets, i.e., a list of tuples ``(featureset, label)``. """ # Count up how many times each feature value occurred, given # the label and featurename. for featureset, label in labeled_featuresets: self.label_freqdist.inc(label) for fname, fval in featureset.items(): # Increment freq(fval|label, fname) self.feature_freqdist[label, fname].inc(fval) # Record that fname can take the value fval. self.feature_values[fname].add(fval) # Keep a list of all feature names. self.fnames.add(fname) def get_model(self, estimator=ELEProbDist): """Potentially unsafe to call more than a single time (but maybe OK)""" # Make a copy of the model to generate the classifier label_freqdist = self.label_freqdist feature_freqdist = copy.copy(self.feature_freqdist) feature_values = copy.copy(self.feature_values) fnames = self.fnames # If a feature didn't have a value given for an instance, then # we assume that it gets the implicit value 'None.' This loop # counts up the number of 'missing' feature values for each # (label,fname) pair, and increments the count of the fval # 'None' by that amount. for label in label_freqdist: num_samples = label_freqdist[label] for fname in fnames: count = feature_freqdist[label, fname].N() feature_freqdist[label, fname].inc(None, num_samples - count) feature_values[fname].add(None) # Create the P(label) distribution label_probdist = estimator(label_freqdist) # Create the P(fval|label, fname) distribution feature_probdist = {} for ((label, fname), freqdist) in feature_freqdist.items(): probdist = estimator(freqdist, bins=len(feature_values[fname])) feature_probdist[label, fname] = probdist return NaiveBayesClassifier(label_probdist, feature_probdist)
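A short usage sketch for IncrementalNaiveBayes, assuming the pre-3.0 NLTK environment the class was written for (FreqDist.inc(), ELEProbDist and NaiveBayesClassifier imported in the same module); the feature sets and labels below are made up.

# Hypothetical incremental training in two batches, then one model snapshot.
inb = IncrementalNaiveBayes()

batch1 = [({'contains(good)': True}, 'pos'),
          ({'contains(bad)': True}, 'neg')]
batch2 = [({'contains(great)': True}, 'pos'),
          ({'contains(awful)': True}, 'neg')]

inb.train(batch1)
inb.train(batch2)                      # counts keep accumulating

classifier = inb.get_model()           # build the classifier once, at the end
print classifier.classify({'contains(good)': True})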
def create_word_bigram_scores(): posdata = tp.seg_fil_txt("/home/hadoop/goodnew.txt") negdata = tp.seg_fil_txt("/home/hadoop/badnew.txt") posWords = list(itertools.chain(*posdata)) negWords = list(itertools.chain(*negdata)) bigram_finderr = BigramCollocationFinder.from_words(posWords) bigram_finder = BigramCollocationFinder.from_words(negWords) posBigrams = bigram_finderr.nbest(BigramAssocMeasures.chi_sq,350000) negBigrams = bigram_finder.nbest(BigramAssocMeasures.chi_sq,350000) pos = posWords + posBigrams neg = negWords + negBigrams word_fd = FreqDist() cond_word_fd = ConditionalFreqDist() for word in pos: word_fd.inc(word) cond_word_fd['pos'].inc(word) for word in neg: word_fd.inc(word) cond_word_fd['neg'].inc(word) pos_word_count = cond_word_fd['pos'].N() neg_word_count = cond_word_fd['neg'].N() total_word_count = pos_word_count + neg_word_count word_scores = {} for word, freq in word_fd.iteritems(): pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count) neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count) word_scores[word] = pos_score + neg_score return word_scores
def __setTermsCHISQUARE__(self,size): word_fd = FreqDist() label_word_fd = ConditionalFreqDist() for word in self.reader.words(categories=['pos']): word_fd.inc(word.lower()) label_word_fd['pos'].inc(word.lower()) for word in self.reader.words(categories=['neg']): word_fd.inc(word.lower()) label_word_fd['neg'].inc(word.lower()) pos_word_count = label_word_fd['pos'].N() neg_word_count = label_word_fd['neg'].N() total_word_count = pos_word_count + neg_word_count wordScores = {} for word, freq in word_fd.iteritems(): pos_score = BigramAssocMeasures.chi_sq(label_word_fd['pos'][word], (freq, pos_word_count), total_word_count) neg_score = BigramAssocMeasures.chi_sq(label_word_fd['neg'][word], (freq, neg_word_count), total_word_count) wordScores[word] = pos_score + neg_score termScore = sorted(wordScores.items(),key=lambda(w,s):s,reverse=True)[:size] self.terms = [w for (w,s) in termScore];
def freq_dst(self,posCorpus,negCorpus): #Creates frequency distribution for words in corpus posFreqDist = FreqDist() for word in posCorpus.words(): posFreqDist.inc(word) negFreqDist = FreqDist() for word in negCorpus.words(): negFreqDist.inc(word) #Frequency Distributions with Laplace Smoothing global posLapFreq posLapFreq = nltk.probability.LaplaceProbDist(posFreqDist) global negLapFreq negLapFreq = nltk.probability.LaplaceProbDist(negFreqDist) #GetBigrams posBigrams = nltk.bigrams(posCorpus.words()) negBigrams = nltk.bigrams(negCorpus.words()) #Get no. of words per corpus posWordLen = len(posCorpus.words()) negWordLen = len(negCorpus.words()) #FreqDist for Bigrams global posBiFreq posBiFreq = nltk.probability.LaplaceProbDist(nltk.FreqDist(posBigrams)) global negBiFreq negBiFreq = nltk.probability.LaplaceProbDist(nltk.FreqDist(negBigrams))
def get_bestwords(contents, labels, limit=10000, n=None, cache=True):
    if cache:
        if n:
            cache_path = 'cache/%s_%s.pkl' % (limit, n)
            if os.path.exists(cache_path):
                bestwords = pickle.load(open(cache_path, 'r'))
                print 'Loaded from cache'
                print 'bestwords count = %d' % (len(bestwords))
                return bestwords

    word_fd = FreqDist()
    label_word_fd = ConditionalFreqDist()

    pos_contents = contents[labels == 1]
    neg_contents = contents[labels != 1]  # was labels != 0, which also matched the positive rows

    pos_words = set()
    neg_words = set()

    for pos_content in pos_contents:
        pos_words = pos_words.union(word_tokenize(pos_content))

    for neg_content in neg_contents:
        neg_words = neg_words.union(word_tokenize(neg_content))

    for word in pos_words:
        word_fd.inc(word.lower())
        label_word_fd['pos'].inc(word.lower())

    for word in neg_words:
        word_fd.inc(word.lower())
        label_word_fd['neg'].inc(word.lower())

    pos_word_count = label_word_fd['pos'].N()
    neg_word_count = label_word_fd['neg'].N()
    total_word_count = pos_word_count + neg_word_count

    word_scores = {}
    for word, freq in word_fd.iteritems():
        pos_score = BigramAssocMeasures.chi_sq(label_word_fd['pos'][word],
            (freq, pos_word_count), total_word_count)
        neg_score = BigramAssocMeasures.chi_sq(label_word_fd['neg'][word],
            (freq, neg_word_count), total_word_count)
        word_scores[word] = pos_score + neg_score

    best = sorted(word_scores.iteritems(), key=lambda (w, s): s, reverse=True)[:limit]
    bestwords = set([w for w, s in best])

    print 'all words count = %d' % (len(word_scores))
    print 'bestwords count = %d' % (len(bestwords))

    if cache:
        if n:
            cache_path = 'cache/%s_%s.pkl' % (limit, n)
            f = open(cache_path, 'w')
            pickle.dump(bestwords, f)
            print 'Dumped to cache'

    return bestwords
def text_to_dict(docs, metric):
    """ Create dictionaries of term frequencies based on documents

    Metric must be either :attr:`FrequencyMetrics.TF` or :attr:`FrequencyMetrics.TF_IDF`.
    """
    doc_freqs = FreqDist()  # Distribution over how many documents each word appears in.
    tf_dists = []  # List of TF distributions per document

    # Create freq_dist for each document
    for doc in docs:
        doc = preprocess.preprocess_text(doc)
        fd = FreqDist()
        for word in doc:
            fd.inc(word)
        doc_freqs.update(fd.samples())
        tf_dists.append(fd)

    num_docs = len(docs)
    # Build dictionaries
    dicts = []
    for i, fd in enumerate(tf_dists):
        if i % 100 == 0:
            print ' dict', str(i) + '/' + str(len(tf_dists))
        d = {}
        if metric == FrequencyMetrics.TF:
            for word in fd.samples():
                d[word] = fd.freq(word)
        elif metric == FrequencyMetrics.TF_IDF:
            for word in fd.samples():
                d[word] = fd.freq(word) * math.log(float(num_docs) / doc_freqs[word])
        else:
            # 'metric' is the argument actually passed in; the original raised on
            # an undefined 'feature_type' name.
            raise ValueError("No such frequency metric: %s" % metric)
        dicts.append(d)
    return dicts
def best_word_feats(self, words): word_fd = FreqDist() label_word_fd = ConditionalFreqDist() for word in movie_reviews.words(categories=['pos']): word_fd.inc(word.lower()) label_word_fd['pos'].inc(word.lower()) for word in movie_reviews.words(categories=['neg']): word_fd.inc(word.lower()) label_word_fd['neg'].inc(word.lower()) # n_ii = label_word_fd[label][word] # n_ix = word_fd[word] # n_xi = label_word_fd[label].N() # n_xx = label_word_fd.N() pos_word_count = label_word_fd['pos'].N() neg_word_count = label_word_fd['neg'].N() total_word_count = pos_word_count + neg_word_count word_scores = {} for word, freq in word_fd.iteritems(): pos_score = BigramAssocMeasures.chi_sq(label_word_fd['pos'][word], (freq, pos_word_count), total_word_count) neg_score = BigramAssocMeasures.chi_sq(label_word_fd['neg'][word], (freq, neg_word_count), total_word_count) word_scores[word] = pos_score + neg_score best = sorted(word_scores.iteritems(), key=lambda (w,s): s, reverse=True)[:10000] bestwords = set([w for w, s in best]) return dict([(word, True) for word in words if word in bestwords])
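The same chi-square "best words" computation recurs throughout this collection; for reference, a sketch of how it ports to NLTK 3 (no inc(), no iteritems()), assuming the movie_reviews corpus is downloaded.

# Chi-square informative-word selection, NLTK 3 style.
from nltk.corpus import movie_reviews
from nltk.metrics import BigramAssocMeasures
from nltk.probability import FreqDist, ConditionalFreqDist

word_fd = FreqDist()
label_word_fd = ConditionalFreqDist()

for word in movie_reviews.words(categories=['pos']):
    word_fd[word.lower()] += 1
    label_word_fd['pos'][word.lower()] += 1
for word in movie_reviews.words(categories=['neg']):
    word_fd[word.lower()] += 1
    label_word_fd['neg'][word.lower()] += 1

pos_word_count = label_word_fd['pos'].N()
neg_word_count = label_word_fd['neg'].N()
total_word_count = pos_word_count + neg_word_count

word_scores = {}
for word, freq in word_fd.items():
    pos_score = BigramAssocMeasures.chi_sq(label_word_fd['pos'][word],
                                           (freq, pos_word_count), total_word_count)
    neg_score = BigramAssocMeasures.chi_sq(label_word_fd['neg'][word],
                                           (freq, neg_word_count), total_word_count)
    word_scores[word] = pos_score + neg_score

bestwords = {w for w, s in sorted(word_scores.items(),
                                  key=lambda ws: ws[1], reverse=True)[:10000]}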
def bysegment(db): dist = FreqDist() total = 0 while db.hasNext(): fragments = db.nextPwd() pwd = fragments[0].password for f in fragments: # iterate through fragments total += 1 if total % 100000 == 0: print "{} segments processed...".format(total) if f.is_gap(): dist.inc("gap") else: raw_word = pwd[f.s_index:f.e_index] if raw_word.isupper(): dist.inc('upper') elif raw_word.istitle(): dist.inc('capitalized') elif raw_word.islower(): dist.inc('lower') else: dist.inc('mangled') for k, v in dist.items(): print "{}\t{}".format(k, v)
def getWordFrequencies(self, sentences):
    freq_dist = FreqDist()
    for sentence in sentences:
        for token in nltk.word_tokenize(sentence):
            if token not in string.punctuation:
                freq_dist.inc(token)
    return freq_dist
def simhash(raw_text): """Compute the simhash value for a string.""" fdist = FreqDist() for word in regexp_tokenize(raw_text, pattern=r'\w+([.,]\w+)*|\S+'): fdist.inc(word.lower()) v = [0] * 128 for word in fdist: projection = bitarray() projection.fromstring(hashlib.md5(word).digest()) #print "\tw:%s, %d" % (word, fdist[word]) #print "\t\t 128 bit hash: " + str(b) for i in xrange(128): if projection[i]: v[i] += fdist.get(word) else: v[i] -= fdist.get(word) hash_val = bitarray(128) hash_val.setall(False) for i in xrange(128): if v[i] > 0: hash_val[i] = True return hash_val
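Simhash fingerprints are only useful when compared by Hamming distance; as a companion, a sketch using plain integers instead of bitarray (same md5/128-bit idea as above, but an illustrative variant rather than the original function).

# Integer-based simhash plus Hamming distance.
import hashlib
from collections import Counter

def simhash_int(text):
    freqs = Counter(text.lower().split())
    v = [0] * 128
    for word, freq in freqs.items():
        h = int(hashlib.md5(word.encode('utf-8')).hexdigest(), 16)
        for i in range(128):
            if (h >> i) & 1:
                v[i] += freq
            else:
                v[i] -= freq
    fingerprint = 0
    for i in range(128):
        if v[i] > 0:
            fingerprint |= (1 << i)
    return fingerprint

def hamming_distance(a, b):
    return bin(a ^ b).count('1')

print(hamming_distance(simhash_int("the quick brown fox"),
                       simhash_int("the quick brown dog")))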
def most_informative_words(corpus, categories=['dem', 'rep'], count=2500): fd = FreqDist() cond_fd = ConditionalFreqDist() word_counts = {} for cat in categories: for word in corpus.words(categories=[cat]): word = word.lower().strip(".!?:,/ ") if not word.isalpha() or word in stopset: continue fd.inc(word) cond_fd[cat].inc(word) word_counts[cat] = cond_fd[cat].N() total_word_count = sum(word_counts.values()) word_scores = collections.defaultdict(int) for word, freq in fd.iteritems(): for cat in categories: cat_word_score = BigramAssocMeasures.chi_sq( cond_fd[cat][word], (freq, word_counts[cat]), total_word_count) word_scores[word] += cat_word_score informative_words = sorted(word_scores.iteritems(), key=lambda (w, s): s, reverse=True)[:count] return set([w for w, s in informative_words])
def create_bigram_scores():
    posdata = tp.seg_fil_senti_excel("D:/code/sentiment_test/pos_review.xlsx", 1, 1)
    negdata = tp.seg_fil_senti_excel("D:/code/sentiment_test/neg_review.xlsx", 1, 1)

    posWords = list(itertools.chain(*posdata))
    negWords = list(itertools.chain(*negdata))

    # Use separate finders so the positive finder is not overwritten before
    # nbest() is called on it.
    pos_bigram_finder = BigramCollocationFinder.from_words(posWords)
    neg_bigram_finder = BigramCollocationFinder.from_words(negWords)
    posBigrams = pos_bigram_finder.nbest(BigramAssocMeasures.chi_sq, 8000)
    negBigrams = neg_bigram_finder.nbest(BigramAssocMeasures.chi_sq, 8000)

    pos = posBigrams
    neg = negBigrams

    word_fd = FreqDist()
    cond_word_fd = ConditionalFreqDist()
    for word in pos:
        word_fd.inc(word)
        cond_word_fd['pos'].inc(word)
    for word in neg:
        word_fd.inc(word)
        cond_word_fd['neg'].inc(word)

    pos_word_count = cond_word_fd['pos'].N()
    neg_word_count = cond_word_fd['neg'].N()
    total_word_count = pos_word_count + neg_word_count

    word_scores = {}
    for word, freq in word_fd.iteritems():
        pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count)
        neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count)
        word_scores[word] = pos_score + neg_score

    return word_scores
def text_to_vector(docs, metric):
    """ Create frequency based feature-vector from text

    Metric must be either :attr:`FrequencyMetrics.TF` or :attr:`FrequencyMetrics.TF_IDF`.
    """
    doc_freqs = FreqDist()  # Distribution over how many documents each word appears in.
    tf_dists = []  # List of TF distributions per document

    # Create freq_dist for each document
    for doc in docs:
        doc = preprocess.preprocess_text(doc)
        fd = FreqDist()
        for word in doc:
            fd.inc(word)
        doc_freqs.update(fd.samples())
        tf_dists.append(fd)

    all_tokens = doc_freqs.keys()
    num_docs = len(docs)
    num_features = len(all_tokens)

    # Build feature x document matrix
    matrix = np.zeros((num_features, num_docs))
    for i, fd in enumerate(tf_dists):
        if metric == FrequencyMetrics.TF:
            v = [fd.freq(word) for word in all_tokens]
        elif metric == FrequencyMetrics.TF_IDF:
            v = [fd.freq(word) * math.log(float(num_docs) / doc_freqs[word]) for word in all_tokens]
        else:
            # 'metric' is the argument actually passed in; the original raised on
            # an undefined 'feature_type' name.
            raise ValueError("No such frequency metric: %s" % metric)
        matrix[:, i] = v
    return matrix
def build_freqdists(self, wordcount_range=150000): """ Build word and label freq dists from the stored words with 'wordcount_range' words and store the resulting FreqDists in Redis. This cannot be cached as we have to continously update these values from incremented word counts. """ word_freqdist = FreqDist() label_word_freqdist = ConditionalFreqDist() pos_words = self.r.zrange('positive_wordcounts', 0, wordcount_range, withscores=True, desc=True) neg_words = self.r.zrange('negative_wordcounts', 0, wordcount_range, withscores=True, desc=True) assert pos_words and neg_words, 'Requires wordcounts to be stored in redis.' for word,count in pos_words: word_freqdist.inc(word, count=count) label_word_freqdist['pos'].inc(word, count=count) for word,count in neg_words: word_freqdist.inc(word, count=count) label_word_freqdist['neg'].inc(word, count=count) #storing for use later, these values are always computed self.r.set('word_fd', pickle.dumps(word_freqdist)) self.r.set('label_fd', pickle.dumps(label_word_freqdist))
def high_information_words(labelled_words, score_fn=BigramAssocMeasures.chi_sq, min_score=5): word_fd = FreqDist() label_word_fd = ConditionalFreqDist() for label, words in labelled_words: for word in words: word_fd.inc(word) label_word_fd[label].inc(word) n_xx = label_word_fd.N() high_info_words = set() for label in label_word_fd.conditions(): n_xi = label_word_fd[label].N() word_scores = collections.defaultdict(int) for word, n_ii in label_word_fd[label].iteritems(): n_ix = word_fd[word] score = score_fn(n_ii, (n_ix, n_xi), n_xx) word_scores[word] = score bestwords = [word for word, score in word_scores.iteritems() if score >= min_score] high_info_words |= set(bestwords) return high_info_words
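A usage sketch for high_information_words(), assuming the same pre-3.0 NLTK environment as the function and a downloaded movie_reviews corpus; bag_of_informative_words is a hypothetical helper showing how the resulting set is typically used as a feature filter.

# Hypothetical call: keep only features that carry information about the label.
from nltk.corpus import movie_reviews

labelled_words = [('pos', movie_reviews.words(categories=['pos'])),
                  ('neg', movie_reviews.words(categories=['neg']))]
informative = high_information_words(labelled_words, min_score=5)

def bag_of_informative_words(words):
    return dict((word, True) for word in words if word in informative)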
def train_emission_number_distribution(self, inputs): """ Trains the distribution over the number of notes emitted from a chord class. It's not conditioned on the chord class, so the only training data needed is a segmented MIDI corpus. @type inputs: list of lists @param inputs: training data. The same format as is produced by L{jazzparser.taggers.segmidi.midi.midi_to_emission_stream} """ self.add_history( "Training emission number probabilities using %d MIDI segments"\ % len(inputs)) emission_number_counts = FreqDist() for sequence in inputs: for segment in sequence: notes = len(segment) # There should very rarely be more than the max num of notes if notes <= self.max_notes: emission_number_counts.inc(notes) # Apply simple laplace smoothing for notes in range(self.max_notes): emission_number_counts.inc(notes) # Make a prob dist out of this emission_number_dist = prob_dist_to_dictionary_prob_dist(\ mle_estimator(emission_number_counts, None)) self.emission_number_dist = emission_number_dist
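The add-one loop above is ordinary Laplace smoothing; assuming the note counts range over 0..max_notes, roughly the same distribution can be produced with NLTK's built-in estimator (illustrative values below).

# Sketch: Laplace-smoothed distribution over emitted-note counts,
# assuming max_notes + 1 possible outcomes (0 .. max_notes).
from nltk.probability import FreqDist, LaplaceProbDist

emission_number_counts = FreqDist()
for notes in [3, 4, 3, 2, 5, 3]:       # made-up segment sizes
    emission_number_counts[notes] += 1

dist = LaplaceProbDist(emission_number_counts, bins=6)  # bins = max_notes + 1
print(dist.prob(3), dist.prob(0))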
def create_word_scores(self): [posWords, negWords] = self.getAllWords() posWords = list(itertools.chain(*posWords)) negWords = list(itertools.chain(*negWords)) word_fd = FreqDist() cond_word_fd = ConditionalFreqDist() for word in posWords: word_fd.inc(word) cond_word_fd['pos'].inc(word) for word in negWords: word_fd.inc(word) cond_word_fd['neg'].inc(word) pos_word_count = cond_word_fd['pos'].N() neg_word_count = cond_word_fd['neg'].N() total_word_count = pos_word_count + neg_word_count log("Total number of words: %d" % total_word_count) word_scores = {} for word, freq in word_fd.iteritems(): pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count) neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count) word_scores[word] = pos_score + neg_score return word_scores
def setup():
    global bestwords
    word_fd = FreqDist()
    label_word_fd = ConditionalFreqDist()

    # Normalise identically in both distributions so the chi-square lookups
    # below find the same keys in word_fd and label_word_fd.
    for word in movie_reviews.words(categories=['pos']):
        word = word.strip('\'"?,.').lower()
        word_fd.inc(word)
        label_word_fd['pos'].inc(word)

    for word in movie_reviews.words(categories=['neg']):
        word = word.strip('\'"?,.').lower()
        word_fd.inc(word)
        label_word_fd['neg'].inc(word)

    pos_word_count = label_word_fd['pos'].N()
    neg_word_count = label_word_fd['neg'].N()
    total_word_count = pos_word_count + neg_word_count

    word_scores = {}
    for word, freq in word_fd.iteritems():
        pos_score = BigramAssocMeasures.chi_sq(label_word_fd['pos'][word], (freq, pos_word_count), total_word_count)
        neg_score = BigramAssocMeasures.chi_sq(label_word_fd['neg'][word], (freq, neg_word_count), total_word_count)
        word_scores[word] = pos_score + neg_score

    best = sorted(word_scores.iteritems(), key=lambda (w, s): s, reverse=True)[:10000]
    bestwords = set([w for w, s in best])

    return train(best_bigram_word_features)
def train_supervised(self, labelled_sequences, **kwargs): """ Supervised training maximising the joint probability of the symbol and state sequences. This is done via collecting frequencies of transitions between states, symbol observations while within each state and which states start a sentence. These frequency distributions are then normalised into probability estimates, which can be smoothed if desired. :return: the trained model :rtype: HiddenMarkovModelTagger :param labelled_sequences: the training data, a set of labelled sequences of observations :type labelled_sequences: list :param kwargs: may include an 'estimator' parameter, a function taking a FreqDist and a number of bins and returning a CProbDistI; otherwise a MLE estimate is used """ # default to the MLE estimate estimator = kwargs.get('estimator') if estimator is None: estimator = lambda fdist, bins: MLEProbDist(fdist) # count occurrences of starting states, transitions out of each state # and output symbols observed in each state known_symbols = set(self._symbols) known_states = set(self._states) starting = FreqDist() transitions = ConditionalFreqDist() outputs = ConditionalFreqDist() for sequence in labelled_sequences: lasts = None for token in sequence: state = token[_TAG] symbol = token[_TEXT] if lasts is None: starting.inc(state) else: transitions[lasts].inc(state) outputs[state].inc(symbol) lasts = state # update the state and symbol lists if state not in known_states: self._states.append(state) known_states.add(state) if symbol not in known_symbols: self._symbols.append(symbol) known_symbols.add(symbol) # create probability distributions (with smoothing) N = len(self._states) pi = estimator(starting, N) A = ConditionalProbDist(transitions, estimator, N) B = ConditionalProbDist(outputs, estimator, len(self._symbols)) return HiddenMarkovModelTagger(self._symbols, self._states, A, B, pi)
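This method is essentially the body of NLTK's HiddenMarkovModelTrainer.train_supervised(); in practice the trainer is used through its public API, for example as below (assuming the treebank sample is downloaded; evaluate() is named accuracy() in the newest NLTK releases).

# Supervised HMM tagger training via the public NLTK trainer API.
from nltk.corpus import treebank
from nltk.probability import LidstoneProbDist
from nltk.tag.hmm import HiddenMarkovModelTrainer

train_sents = treebank.tagged_sents()[:3000]
test_sents = treebank.tagged_sents()[3000:3100]

trainer = HiddenMarkovModelTrainer()
hmm_tagger = trainer.train_supervised(
    train_sents,
    estimator=lambda fd, bins: LidstoneProbDist(fd, 0.1, bins))
print(hmm_tagger.evaluate(test_sents))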
class VocabBuilder: """ Creates a vocabulary after scanning a corpus. """ def __init__(self, lang="english", min_length=3, cut_first=100): """ Set the minimum length of words and which stopword list (by language) to use. """ self._counts = FreqDist() self._stop = set(stopwords.words(lang)) self._min_length = min_length self._cut_first = cut_first print("Using stopwords: %s ... " % " ".join(list(self._stop)[:10])) def scan(self, words): """ Add a list of words as observed. """ for ii in [x.lower() for x in words if x.lower() not in self._stop \ and len(x) >= self._min_length]: self._counts.inc(ii) def vocab(self, size=5000): """ Return a list of the top words sorted by frequency. """ if len(self._counts) > self._cut_first + size: return self._counts.keys()[self._cut_first:(size + self._cut_first)] else: return self._counts.keys()[:size]
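Note that FreqDist.keys() is frequency-sorted only in NLTK 2, so vocab() above does not survive an NLTK 3 upgrade unchanged; a hypothetical stand-alone equivalent using collections.Counter.most_common follows.

# Sketch: frequency-sorted vocabulary with the first `cut_first` words dropped,
# mirroring VocabBuilder.vocab() without relying on FreqDist.keys() ordering.
from collections import Counter

def build_vocab(tokens, stop, min_length=3, cut_first=100, size=5000):
    counts = Counter(t.lower() for t in tokens
                     if t.lower() not in stop and len(t) >= min_length)
    ranked = [word for word, _ in counts.most_common()]
    if len(ranked) > cut_first + size:
        return ranked[cut_first:cut_first + size]
    return ranked[:size]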
def __init__(self): ## Best words feature extraction word_fd = FreqDist() label_word_fd = ConditionalFreqDist() for word in movie_reviews.words(categories=['pos']): word_fd.inc(word.lower()) label_word_fd['pos'].inc(word.lower()) for word in movie_reviews.words(categories=['neg']): word_fd.inc(word.lower()) label_word_fd['neg'].inc(word.lower()) pos_word_count = label_word_fd['pos'].N() neg_word_count = label_word_fd['neg'].N() total_word_count = pos_word_count + neg_word_count word_scores = {} for word, freq in word_fd.iteritems(): pos_score = BigramAssocMeasures.chi_sq(label_word_fd['pos'][word], (freq, pos_word_count), total_word_count) neg_score = BigramAssocMeasures.chi_sq(label_word_fd['neg'][word], (freq, neg_word_count), total_word_count) word_scores[word] = pos_score + neg_score best = sorted(word_scores.iteritems(), key=lambda (w,s): s, reverse=True)[:10000] self.bestwords = set([w for w, s in best]) self.train_classifier()
def get_best_words(words_list, num_best_words): from nltk.probability import FreqDist, ConditionalFreqDist from nltk.metrics import BigramAssocMeasures word_fd = FreqDist() label_word_fd = ConditionalFreqDist() for pair in words_list: line,sent = pair for word in nltk.word_tokenize(line): word_fd.inc(word.lower()) label_word_fd[sent].inc(word.lower()) pos_word_count = label_word_fd['pos'].N() neg_word_count = label_word_fd['neg'].N() total_word_count = pos_word_count + neg_word_count word_scores = {} for word, freq in word_fd.iteritems(): pos_score = BigramAssocMeasures.chi_sq(label_word_fd['pos'][word],(freq, pos_word_count),total_word_count) neg_score = BigramAssocMeasures.chi_sq(label_word_fd['neg'][word],(freq, neg_word_count),total_word_count) word_scores[word] = pos_score + neg_score best = sorted(word_scores.iteritems(), key=lambda (w,s): s, reverse=True)[:num_best_words] bestwords = set([w for w, s in best]) return bestwords
def clean_train_data_and_find_best_features(self): #Top n best unigram features are selected freq_dist_obj = FreqDist() cond_freq_dist_obj = ConditionalFreqDist() self.book_category_set = set() for instance in self.book_instances: try: raw_data = instance and instance.strip() and instance.strip().split("\t") if not raw_data or len(raw_data) != 4 : continue bookid = raw_data[0] self.book_category_set.add(bookid) features = [] features.extend(self.clean_book_title(raw_data[2])) features.extend(self.clean_author_name(raw_data[3])) features.extend(self.bookid_to_toc_dict.get(raw_data[1], [])) for feat in features: freq_dist_obj.inc(feat) cond_freq_dist_obj[bookid].inc(feat) except: self.logging.info("Exception while running this instance %s \n" % instance) total_word_count = 0 for bookid in self.book_category_set: total_word_count += cond_freq_dist_obj[bookid].N() word_score_dict = {} for word, freq in freq_dist_obj.iteritems(): score = 0 if word and word.lower() in self.stopwords_set:continue for bookid in self.book_category_set: score += BigramAssocMeasures.chi_sq(cond_freq_dist_obj[bookid][word], (freq, cond_freq_dist_obj[bookid].N()), total_word_count) word_score_dict[word] = score self.select_top_n_best_features(word_score_dict)
def mostCommWords(self, tag, pos_tag_pattern): """ This is a help method for mostCommNouns and mostCommVerbs. Argument: tag -- a hashtag that we want to compute the most commonly hashtag with pos_tag_pattern -- the regular expression that used to match the POS tags return: a list of the top 20 nouns associated with the input hashtag """ words={} topTwenty=[] j = 0 for line in self.lines: hasTag = False for t in self.tokenizer(line, hashtag_pattern): if t == tag: hasTag = True break if hasTag: counts = FreqDist() tokens = self.tokenizer(line, word_pattern) pos = nltk.pos_tag(tokens) for p in pos: if re.match(pos_tag_pattern,p[1]): counts.inc(p[0]) for n in counts.keys(): if words.has_key(n): words[n] = words[n]+counts[n] else: words[n] = counts[n] words_sorted_by_counts = sorted(words.items(), key=lambda x: x[1], reverse=True) for i in range(0,20): topTwenty.append(words_sorted_by_counts[i][0]) return topTwenty
def create_word_scores(): posdata = tp.seg_fil_senti_excel("D:/code/sentiment_test/pos_review.xlsx", "1", "1") negdata = tp.seg_fil_senti_excel("D:/code/sentiment_test/neg_review.xlsx", "1", "1") posWords = list(itertools.chain(*posdata)) negWords = list(itertools.chain(*negdata)) word_fd = FreqDist() cond_word_fd = ConditionalFreqDist() for word in posWords: word_fd.inc(word) cond_word_fd['pos'].inc(word) for word in negWords: word_fd.inc(word) cond_word_fd['neg'].inc(word) pos_word_count = cond_word_fd['pos'].N() neg_word_count = cond_word_fd['neg'].N() total_word_count = pos_word_count + neg_word_count word_scores = {} for word, freq in word_fd.iteritems(): pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count) neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count) word_scores[word] = pos_score + neg_score return word_scores
def train(labeled_featuresets, estimator=ELEProbDist): label_freqdist = FreqDist() feature_freqdist = defaultdict(FreqDist) feature_values = defaultdict(set) fnames = set() for featureset, label in labeled_featuresets: label_freqdist.inc(label) for fname, fval in featureset.items(): feature_freqdist[label, fname].inc(fval) feature_values[fname].add(fval) fnames.add(fname) for label in label_freqdist: num_samples = label_freqdist[label] for fname in fnames: count = feature_freqdist[label, fname].N() feature_freqdist[label, fname].inc(None, num_samples-count) feature_values[fname].add(None) label_probdist = estimator(label_freqdist) feature_probdist = {} for ((label, fname), freqdist) in feature_freqdist.items(): probdist = estimator(freqdist, bins=len(feature_values[fname])) feature_probdist[label,fname] = probdist return NaiveBayesClassifier(label_probdist, feature_probdist)
def classify(self, feats):
    counts = FreqDist()
    for classifier in self._classifiers:
        counts.inc(classifier.classify(feats))
    return counts.max()
def conditionalProportion(self, tag): """ Compute top 20 rank of the hashtags that are commonly associated with input hashtags Argument: tag -- a hashtag that we want to compute the most commonly hashtag with return: a list of the top 20 verbs associated with the input hashtag """ tags = {} total = {} topTwenty = [] for line in self.lines: tokens = self.tokenizer(line, hashtag_pattern) counts = FreqDist() for t in tokens: counts.inc(t) if total.has_key(t) and t != tag: total[t] = total[t] + 1 else: total[t] = 1 if counts[tag] > 0: for t in counts.keys(): if t != tag: if tags.has_key(t): tags[t] = tags[t] + counts[t] else: tags[t] = counts[t] prop = {} for t in total.keys(): if tags.has_key(t): prop[t] = float(tags[t]) / total[t] else: prop[t] = 0.0 prop_sorted_by_counts = sorted(prop.items(), key=lambda x: x[1], reverse=True) for i in range(0, 20): topTwenty.append(prop_sorted_by_counts[i][0]) return topTwenty
def count_words(reader, group, directorSet): fdist = FreqDist() dirRegexp = DirectionCorpus.constructItemRegexp(directorSet, Maps, mapversions='[01]') Instructions = [ item for item in reader.items(group) if re.match(dirRegexp, item) ] Instructions.sort() # Delete duplicate director routes, keep last Remove = [] for i in range(1, len(Instructions)): if Instructions[i][:-5] == Instructions[i - 1][:-5]: Remove.append(Instructions[i - 1]) for i in Remove: Instructions.remove(i) for item in Instructions: for token in reader.read(item)['WORDS']: if token['TEXT'].isalnum(): fdist.inc(token['TEXT']) elif token['TEXT'] == '.': fdist.inc('<SENTENCES>') return fdist, len(Instructions)
def create_words_bigrams_scores():
    posNegDir = 'D:/ReviewHelpfulnessPrediction\FeatureExtractionModule\SentimentFeature\MachineLearningFeature\SentimentReviewSet'
    posdata = tp.seg_fil_senti_excel(posNegDir + '/pos_review.xlsx', 1, 1)
    negdata = tp.seg_fil_senti_excel(posNegDir + '/neg_review.xlsx', 1, 1)

    posWords = list(itertools.chain(*posdata))
    negWords = list(itertools.chain(*negdata))

    # Use separate finders so the positive finder is not overwritten before
    # nbest() is called on it.
    pos_bigram_finder = BigramCollocationFinder.from_words(posWords)
    neg_bigram_finder = BigramCollocationFinder.from_words(negWords)
    posBigrams = pos_bigram_finder.nbest(BigramAssocMeasures.chi_sq, 5000)
    negBigrams = neg_bigram_finder.nbest(BigramAssocMeasures.chi_sq, 5000)

    pos = posWords + posBigrams
    neg = negWords + negBigrams

    word_fd = FreqDist()
    cond_word_fd = ConditionalFreqDist()
    for word in pos:
        word_fd.inc(word)
        cond_word_fd['pos'].inc(word)
    for word in neg:
        word_fd.inc(word)
        cond_word_fd['neg'].inc(word)

    pos_word_count = cond_word_fd['pos'].N()
    neg_word_count = cond_word_fd['neg'].N()
    total_word_count = pos_word_count + neg_word_count

    word_scores = {}
    for word, freq in word_fd.iteritems():
        pos_score = BigramAssocMeasures.chi_sq(cond_word_fd['pos'][word], (freq, pos_word_count), total_word_count)
        neg_score = BigramAssocMeasures.chi_sq(cond_word_fd['neg'][word], (freq, neg_word_count), total_word_count)
        word_scores[word] = pos_score + neg_score

    return word_scores
def mostCommHashtag(self, tag): """ Compute top 20 hashtags that appear the most commonly in the same tweet with input hashtag Argument: tag -- a hashtag that we want to compute the most commonly hashtag with return: a list of the top 20 hashtags associated with the input hashtag """ tags = {} topTwenty = [] for line in self.lines: tokens = self.tokenizer(line, hashtag_pattern) counts = FreqDist() for t in tokens: counts.inc(t) if counts[tag] > 0: for t in counts.keys(): if t != tag: if tags.has_key(t): tags[t] = tags[t] + counts[t] else: tags[t] = counts[t] tags_sorted_by_counts = sorted(tags.items(), key=lambda x: x[1], reverse=True) for i in range(0, 20): topTwenty.append(tags_sorted_by_counts[i][0]) return topTwenty
def compute_word_scores(self): #Core module which assigns scores to features and top features are selected based on this score. freq_dist_obj = FreqDist() cond_freq_dist_obj = ConditionalFreqDist() #Iterating over pos reviews, to calcutate scores for pos feats for review in self.pos_reviews_list: review_words = self.apply_preprocessing(review) for word in review_words: freq_dist_obj.inc(word) cond_freq_dist_obj['pos'].inc(word) #Iterating over neg reviews, to calculate scores for neg feats for review in self.neg_reviews_list: review_words = self.apply_preprocessing(review) for word in review_words: freq_dist_obj.inc(word) cond_freq_dist_obj['neg'].inc(word) pos_word_count = cond_freq_dist_obj['pos'].N() neg_word_count = cond_freq_dist_obj['neg'].N() total_word_count = pos_word_count + neg_word_count word_score_dict = {} #Finding the scores using chi square for word, freq in freq_dist_obj.iteritems(): pos_score = BigramAssocMeasures.chi_sq(cond_freq_dist_obj['pos'][word], (freq, pos_word_count), total_word_count) neg_score = BigramAssocMeasures.chi_sq(cond_freq_dist_obj['neg'][word], (freq, neg_word_count), total_word_count) word_score_dict[word] = pos_score + neg_score #self.best = sorted(word_score_dict.iteritems(), key=lambda (w,s): s, reverse=True) self.best = sorted(word_score_dict.iteritems(), key=operator.itemgetter(1), reverse=True)
def binary_stump(feature_name, feature_value, labeled_featuresets): label = FreqDist(label for (featureset, label) in labeled_featuresets).max() # Find the best label for each value. pos_fdist = FreqDist() neg_fdist = FreqDist() for featureset, label in labeled_featuresets: if featureset.get(feature_name) == feature_value: pos_fdist.inc(label) else: neg_fdist.inc(label) decisions = {} default = label # But hopefully we have observations! if pos_fdist.N() > 0: decisions = { feature_value: DecisionTreeClassifier(pos_fdist.max()) } if neg_fdist.N() > 0: default = DecisionTreeClassifier(neg_fdist.max()) return DecisionTreeClassifier(label, feature_name, decisions, default)
def from_words(cls, words): """Construct a TrigramCollocationFinder for all trigrams in the given sequence. """ wfd = FreqDist() wildfd = FreqDist() bfd = FreqDist() tfd = FreqDist() for w1, w2, w3 in ingrams(words, 3, pad_right=True): wfd.inc(w1) if w2 is None: continue bfd.inc((w1, w2)) if w3 is None: continue wildfd.inc((w1, w3)) tfd.inc((w1, w2, w3)) return cls(wfd, bfd, wildfd, tfd)
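This constructor is NLTK 2 source (ingrams was later renamed ngrams); in application code the finder is normally used through its public API, for example as below (assuming the genesis corpus is downloaded).

# Finding trigram collocations with the public API.
from nltk.corpus import genesis
from nltk.collocations import TrigramAssocMeasures, TrigramCollocationFinder

words = genesis.words('english-web.txt')
finder = TrigramCollocationFinder.from_words(words)
finder.apply_freq_filter(3)                    # drop rare trigrams
print(finder.nbest(TrigramAssocMeasures.pmi, 10))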
if args.metrics:
    tags_actual = FreqDist()
    tags_found = FreqDist()     # tags produced by the tagger (referenced below)
    unknown_words = set()       # words the tagger could not tag (referenced below)
    tag_refs = []
    tag_test = []
    tag_word_refs = collections.defaultdict(set)
    tag_word_test = collections.defaultdict(set)
    tagged_sents = corpus.tagged_sents(**kwargs)
    taglen = 7

    if args.fraction != 1.0:
        cutoff = int(math.ceil(len(tagged_sents) * args.fraction))
        tagged_sents = tagged_sents[:cutoff]

    for tagged_sent in tagged_sents:
        for word, tag in tagged_sent:
            tags_actual.inc(tag)
            tag_refs.append(tag)
            tag_word_refs[tag].add(word)

            if len(tag) > taglen:
                taglen = len(tag)

        for word, tag in tagger.tag(nltk.tag.untag(tagged_sent)):
            tags_found.inc(tag)
            tag_test.append(tag)
            tag_word_test[tag].add(word)

            if tag == '-NONE-':
                unknown_words.add(word)

    print 'Accuracy: %f' % nltk.metrics.accuracy(tag_refs, tag_test)
def from_words(cls, words): """Construct a QuadgramCollocationFinder for all quadgrams in the given sequence. """ wfd = FreqDist() bfd = FreqDist() wild2fd = FreqDist() tfd = FreqDist() wild3fd = FreqDist() qfd = FreqDist() wild4fd = FreqDist() pfd = FreqDist() for w1, w2, w3, w4, w5 in ingrams(words, 5, pad_right=True): wfd.inc(w1) if w2 is None: continue bfd.inc((w1, w2)) if w3 is None: continue wild2fd.inc((w1, w3)) tfd.inc((w1, w2, w3)) if w4 is None: continue wild2fd.inc((w1, w4)) wild3fd.inc((w1, w2, w4)) wild3fd.inc((w1, w3, w4)) qfd.inc((w1, w2, w3, w4)) if w5 is None: continue wild2fd.inc((w1, w5)) wild3fd.inc((w1, w2, w5)) wild3fd.inc((w1, w3, w5)) wild3fd.inc((w1, w4, w5)) wild4fd.inc((w1, w3, w4, w5)) wild4fd.inc((w1, w2, w4, w5)) wild4fd.inc((w1, w2, w3, w5)) pfd.inc((w1, w2, w3, w4, w5)) return cls(wfd, bfd, wild2fd, tfd, wild3fd, qfd, wild4fd, pfd)
def topwords(): """ inspired by http://www.huffingtonpost.com/brian-honigman/the-100-most-popular-hash_b_2463195.html http://editd.com/features/monitor/ used these resources for understanding nltk usage http://www.laurentluce.com/posts/twitter-sentiment-analysis-using-python-and-nltk/ http://text-processing.com/demo/sentiment/ http://ravikiranj.net/drupal/201205/code/machine-learning/how-build-twitter-sentiment-analyzer http://streamhacker.com/2010/05/24/text-classification-sentiment-analysis-stopwords-collocations/ http://fashionweekdates.com/world-fashion-week-dates-schedule.html """ ## place tweets into morning and afternoon bins ru = db.GqlQuery("SELECT * FROM Tweets where iso!=:1", 'en').fetch(limit=1000) en = db.GqlQuery("SELECT * FROM Tweets where iso=:1", 'en').fetch(limit=1000) #this is used because nltk.corpus.stopwords.words('english') doesnt work in GAE # from https://github.com/arc12/Text-Mining-Weak-Signals/wiki/Standard-set-of-english-stopwords stop = "a, about, above, across, after, again, against, all, almost, alone, along, already, also, although, always, am, among, an, and, another, any, anybody, anyone, anything, anywhere, are, area, areas, aren't, around, as, ask, asked, asking, asks, at, away, b, back, backed, backing, backs, be, became, because, become, becomes, been, before, began, behind, being, beings, below, best, better, between, big, both, but, by, c, came, can, cannot, can't, case, cases, certain, certainly, clear, clearly, come, could, couldn't, d, did, didn't, differ, different, differently, do, does, doesn't, doing, done, don't, down, downed, downing, downs, during, e, each, early, either, end, ended, ending, ends, enough, even, evenly, ever, every, everybody, everyone, everything, everywhere, f, face, faces, fact, facts, far, felt, few, find, finds, first, for, four, from, full, fully, further, furthered, furthering, furthers, g, gave, general, generally, get, gets, give, given, gives, go, going, good, goods, got, great, greater, greatest, group, grouped, grouping, groups, h, had, hadn't, has, hasn't, have, haven't, having, he, he'd, he'll, her, here, here's, hers, herself, he's, high, higher, highest, him, himself, his, how, however, how's, i, i'd, if, i'll, i'm, important, in, interest, interested, interesting, interests, into, is, isn't, it, its, it's, itself, i've, j, just, k, keep, keeps, kind, knew, know, known, knows, l, large, largely, last, later, latest, least, less, let, lets, let's, like, likely, long, longer, longest, m, made, make, making, man, many, may, me, member, members, men, might, more, most, mostly, mr, mrs, much, must, mustn't, my, myself, n, necessary, need, needed, needing, needs, never, new, newer, newest, next, no, nobody, non, noone, nor, not, nothing, now, nowhere, number, numbers, o, of, off, often, old, older, oldest, on, once, one, only, open, opened, opening, opens, or, order, ordered, ordering, orders, other, others, ought, our, ours, ourselves, out, over, own, p, part, parted, parting, parts, per, perhaps, place, places, point, pointed, pointing, points, possible, present, presented, presenting, presents, problem, problems, put, puts, q, quite, r, rather, really, right, room, rooms, s, said, same, saw, say, says, second, seconds, see, seem, seemed, seeming, seems, sees, several, shall, shan't, she, she'd, she'll, she's, should, shouldn't, show, showed, showing, shows, side, sides, since, small, smaller, smallest, so, some, somebody, someone, something, somewhere, state, states, still, such, sure, t, take, taken, than, 
that, that's, the, their, theirs, them, themselves, then, there, therefore, there's, these, they, they'd, they'll, they're, they've, thing, things, think, thinks, this, those, though, thought, thoughts, three, through, thus, to, today, together, too, took, toward, turn, turned, turning, turns, two, u, under, until, up, upon, us, use, used, uses, v, very, w, want, wanted, wanting, wants, was, wasn't, way, ways, we, we'd, well, we'll, wells, went, were, we're, weren't, we've, what, what's, when, when's, where, where's, whether, which, while, who, whole, whom, who's, whose, why, why's, will, with, within, without, won't, work, worked, working, works, would, wouldn't, x, y, year, years, yes, yet, you, you'd, you'll, young, younger, youngest, your, you're, yours, yourself, yourselves, you've, z" stopwordsenglish = re.findall(r'\w+', stop, flags = re.UNICODE | re.LOCALE) stopwordstwitter = ['http', '#', '@', '!', ':', ';', '&', '\'', '-', 't', 'co', 'rt'] stopwords_list = stopwordsenglish + stopwordstwitter freq1 = FreqDist() freq2 = FreqDist() for t in ru: #We only want to work with lowercase for the comparisons sentence = t.tweet.lower() #remove punctuation and split into seperate words words = re.findall(r'\w+', sentence, flags=re.UNICODE | re.LOCALE) #corpus = nltk.word_tokenize(words) for a in words: if a not in stopwords_list: freq1.inc(a) for t in en: #We only want to work with lowercase for the comparisons sentence = t.tweet.lower() #remove punctuation and split into seperate words words = re.findall(r'\w+', sentence, flags=re.UNICODE | re.LOCALE) #corpus = nltk.word_tokenize(t.tweet) for a in words: if a not in stopwords_list: freq2.inc(a) #display results #bins = freq1.B() # Returns: The total number of sample bins with counts > 0 f1 = freq1.items()[:90] # Returns: List of all items in tuple format f2 = freq2.items()[:90] context = {'one': f1, 'two': f2, 'stop': stopwords_list } return render_template('topwords.html', **context)
class ActiveNaiveBayesWeaselClassifier(ActiveWeaselClassifier): """ An implementation of the interface for weasel classifiers which uses active learning to minimize the size of the training examples set. The underlaying model is the nltk.classify.naivebayes.NaiveBayesClassifier class. """ def buildInitClassifier(self,examples): """ Trains the underlying classifier with the given examples. @param examples: An object of type [(Dict,String)], where the Dict is a feature set representation of a sentence, and the String is its label. """ print 'Initial examples: '+str(len(examples)) self._load_counters(examples) self._loadClassifier() def addExample(self,example): """ Add the given example to the underlying classifier variables, allowing its incremental training. Not implemented at this level. @param example: An object of type (Dict,String), where the Dict is a feature set representation of a sentence, and the String is its label. """ self._updateCounters(example) self._loadClassifier() def addExamples(self,examples): """ Add the given example to the underlying classifier variables, allowing its incremental training. Not implemented at this level. @param example: An object of type (Dict,String), where the Dict is a feature set representation of a sentence, and the String is its label. """ for example in examples: self._updateCounters(example) self._loadClassifier() def _load_counters(self,labeled_featuresets): """ This method is exactly the train method of the NaiveBayesClassifier, except that it does not create a classifier, and keeps the counter variables, so as to update them later """ self._label_freqdist = FreqDist() self._feature_freqdist = defaultdict(FreqDist) self._feature_values = defaultdict(set) self._fnames = set() # Count up how many times each feature value occured, given # the label and featurename. for featureset, label in labeled_featuresets: self._label_freqdist.inc(label) for fname, fval in featureset.items(): # Increment freq(fval|label, fname) self._feature_freqdist[label, fname].inc(fval) # Record that fname can take the value fval. self._feature_values[fname].add(fval) # Keep a list of all feature names. self._fnames.add(fname) # If a feature didn't have a value given for an instance, then # we assume that it gets the implicit value 'None.' This loop # counts up the number of 'missing' feature values for each # (label,fname) pair, and increments the count of the fval # 'None' by that amount. for label in self._label_freqdist: num_samples = self._label_freqdist[label] for fname in self._fnames: count = self._feature_freqdist[label, fname].N() self._feature_freqdist[label, fname].inc(None, num_samples-count) self._feature_values[fname].add(None) def _updateCounters(self,labeled_featureset): fset = labeled_featureset[0] label = labeled_featureset[1] self._label_freqdist.inc(label) for fname, fval in fset.items(): # Increment freq(fval|label, fname) self._feature_freqdist[label, fname].inc(fval) # Record that fname can take the value fval. self._feature_values[fname].add(fval) # Keep a list of all feature names. 
self._fnames.add(fname) def _loadClassifier(self): # Choose estimator estimator = ELEProbDist # Create the P(label) distribution label_probdist = estimator(self._label_freqdist) # Create the P(fval|label, fname) distribution feature_probdist = {} for ((label, fname), freqdist) in self._feature_freqdist.items(): probdist = estimator(freqdist, bins=len(self._feature_values[fname])) feature_probdist[label,fname] = probdist self._classifier = NaiveBayesClassifier(label_probdist, feature_probdist)
import nltk
from nltk.probability import FreqDist, ConditionalFreqDist, MLEProbDist

#def default_tag(tagged_sents):
tag_fd = FreqDist()
postags = [3, 4, 5, 3, 2, 5, 6, 8, 65, 5, 3, 3, 5, 6, 7, 3]
for postag in postags:
    tag_fd.inc(postag)
print tag_fd
# There is no concrete ProbDist class in nltk.probability; use an estimator
# such as MLEProbDist to turn the counts into a probability distribution.
print MLEProbDist(tag_fd)
class TnT(TaggerI): ''' TnT - Statistical POS tagger IMPORTANT NOTES: * DOES NOT AUTOMATICALLY DEAL WITH UNSEEN WORDS - It is possible to provide an untrained POS tagger to create tags for unknown words, see __init__ function * SHOULD BE USED WITH SENTENCE-DELIMITED INPUT - Due to the nature of this tagger, it works best when trained over sentence delimited input. - However it still produces good results if the training data and testing data are separated on all punctuation eg: [,.?!] - Input for training is expected to be a list of sentences where each sentence is a list of (word, tag) tuples - Input for tag function is a single sentence Input for tagdata function is a list of sentences Output is of a similar form * Function provided to process text that is unsegmented - Please see basic_sent_chop() TnT uses a second order Markov model to produce tags for a sequence of input, specifically: argmax [Proj(P(t_i|t_i-1,t_i-2)P(w_i|t_i))] P(t_T+1 | t_T) IE: the maximum projection of a set of probabilities The set of possible tags for a given word is derived from the training data. It is the set of all tags that exact word has been assigned. To speed up and get more precision, we can use log addition to instead multiplication, specifically: argmax [Sigma(log(P(t_i|t_i-1,t_i-2))+log(P(w_i|t_i)))] + log(P(t_T+1|t_T)) The probability of a tag for a given word is the linear interpolation of 3 markov models; a zero-order, first-order, and a second order model. P(t_i| t_i-1, t_i-2) = l1*P(t_i) + l2*P(t_i| t_i-1) + l3*P(t_i| t_i-1, t_i-2) A beam search is used to limit the memory usage of the algorithm. The degree of the beam can be changed using N in the initialization. N represents the maximum number of possible solutions to maintain while tagging. It is possible to differentiate the tags which are assigned to capitalized words. However this does not result in a significant gain in the accuracy of the results. ''' def __init__(self, unk=None, Trained=False, N=1000, C=False): ''' Construct a TnT statistical tagger. Tagger must be trained before being used to tag input. :param unk: instance of a POS tagger, conforms to TaggerI :type unk:(TaggerI) :param Trained: Indication that the POS tagger is trained or not :type Trained: boolean :param N: Beam search degree (see above) :type N:(int) :param C: Capitalization flag :type C: boolean Initializer, creates frequency distributions to be used for tagging _lx values represent the portion of the tri/bi/uni taggers to be used to calculate the probability N value is the number of possible solutions to maintain while tagging. A good value for this is 1000 C is a boolean value which specifies to use or not use the Capitalization of the word as additional information for tagging. NOTE: using capitalization may not increase the accuracy of the tagger ''' self._uni = FreqDist() self._bi = ConditionalFreqDist() self._tri = ConditionalFreqDist() self._wd = ConditionalFreqDist() self._eos = ConditionalFreqDist() self._l1 = 0.0 self._l2 = 0.0 self._l3 = 0.0 self._N = N self._C = C self._T = Trained self._unk = unk # statistical tools (ignore or delete me) self.unknown = 0 self.known = 0 def train(self, data): ''' Uses a set of tagged data to train the tagger. If an unknown word tagger is specified, it is trained on the same data. 
:param data: List of lists of (word, tag) tuples :type data: tuple(str) ''' # Ensure that local C flag is initialized before use C = False if self._unk is not None and self._T == False: self._unk.train(data) for sent in data: history = [('BOS', False), ('BOS', False)] for w, t in sent: # if capitalization is requested, # and the word begins with a capital # set local flag C to True if self._C and w[0].isupper(): C = True self._wd[w].inc(t) self._uni.inc((t, C)) self._bi[history[1]].inc((t, C)) self._tri[tuple(history)].inc((t, C)) history.append((t, C)) history.pop(0) # set local flag C to false for the next word C = False self._eos[t].inc('EOS') # compute lambda values from the trained frequency distributions self._compute_lambda() #(debugging -- ignore or delete me) #print "lambdas" #print i, self._l1, i, self._l2, i, self._l3 def _compute_lambda(self): ''' creates lambda values based upon training data NOTE: no need to explicitly reference C, it is contained within the tag variable :: tag == (tag,C) for each tag trigram (t1, t2, t3) depending on the maximum value of - f(t1,t2,t3)-1 / f(t1,t2)-1 - f(t2,t3)-1 / f(t2)-1 - f(t3)-1 / N-1 increment l3,l2, or l1 by f(t1,t2,t3) ISSUES -- Resolutions: if 2 values are equal, increment both lambda values by (f(t1,t2,t3) / 2) ''' # temporary lambda variables tl1 = 0.0 tl2 = 0.0 tl3 = 0.0 # for each t1,t2 in system for history in self._tri.conditions(): (h1, h2) = history # for each t3 given t1,t2 in system # (NOTE: tag actually represents (tag,C)) # However no effect within this function for tag in self._tri[history].samples(): # if there has only been 1 occurrence of this tag in the data # then ignore this trigram. if self._uni[tag] == 1: continue # safe_div provides a safe floating point division # it returns -1 if the denominator is 0 c3 = self._safe_div((self._tri[history][tag] - 1), (self._tri[history].N() - 1)) c2 = self._safe_div((self._bi[h2][tag] - 1), (self._bi[h2].N() - 1)) c1 = self._safe_div((self._uni[tag] - 1), (self._uni.N() - 1)) # if c1 is the maximum value: if (c1 > c3) and (c1 > c2): tl1 += self._tri[history][tag] # if c2 is the maximum value elif (c2 > c3) and (c2 > c1): tl2 += self._tri[history][tag] # if c3 is the maximum value elif (c3 > c2) and (c3 > c1): tl3 += self._tri[history][tag] # if c3, and c2 are equal and larger than c1 elif (c3 == c2) and (c3 > c1): tl2 += float(self._tri[history][tag]) / 2.0 tl3 += float(self._tri[history][tag]) / 2.0 # if c1, and c2 are equal and larger than c3 # this might be a dumb thing to do....(not sure yet) elif (c2 == c1) and (c1 > c3): tl1 += float(self._tri[history][tag]) / 2.0 tl2 += float(self._tri[history][tag]) / 2.0 # otherwise there might be a problem # eg: all values = 0 else: #print "Problem", c1, c2 ,c3 pass # Lambda normalisation: # ensures that l1+l2+l3 = 1 self._l1 = tl1 / (tl1 + tl2 + tl3) self._l2 = tl2 / (tl1 + tl2 + tl3) self._l3 = tl3 / (tl1 + tl2 + tl3) def _safe_div(self, v1, v2): ''' Safe floating point division function, does not allow division by 0 returns -1 if the denominator is 0 ''' if v2 == 0: return -1 else: return float(v1) / float(v2) def tagdata(self, data): ''' Tags each sentence in a list of sentences :param data:list of list of words :type data: [[string,],] :return: list of list of (word, tag) tuples Invokes tag(sent) function for each sentence compiles the results into a list of tagged sentences each tagged sentence is a list of (word, tag) tuples ''' res = [] for sent in data: res1 = self.tag(sent) res.append(res1) return res def tag(self, data): 
''' Tags a single sentence :param data: list of words :type data: [string,] :return: [(word, tag),] Calls recursive function '_tagword' to produce a list of tags Associates the sequence of returned tags with the correct words in the input sequence returns a list of (word, tag) tuples ''' current_state = [(['BOS', 'BOS'], 0.0)] sent = list(data) tags = self._tagword(sent, current_state) res = [] for i in range(len(sent)): # unpack and discard the C flags (t, C) = tags[i + 2] res.append((sent[i], t)) return res def _tagword(self, sent, current_states): ''' :param sent : List of words remaining in the sentence :type sent : [word,] :param current_states : List of possible tag combinations for the sentence so far, and the log probability associated with each tag combination :type current_states : [([tag, ], logprob), ] Tags the first word in the sentence and recursively tags the reminder of sentence Uses formula specified above to calculate the probability of a particular tag ''' # if this word marks the end of the sentance, # return the most probable tag if sent == []: (h, logp) = current_states[0] return h # otherwise there are more words to be tagged word = sent[0] sent = sent[1:] new_states = [] # if the Capitalisation is requested, # initalise the flag for this word C = False if self._C and word[0].isupper(): C = True # if word is known # compute the set of possible tags # and their associated log probabilities if word in self._wd.conditions(): self.known += 1 for (history, curr_sent_logprob) in current_states: logprobs = [] for t in self._wd[word].samples(): p_uni = self._uni.freq((t, C)) p_bi = self._bi[history[-1]].freq((t, C)) p_tri = self._tri[tuple(history[-2:])].freq((t, C)) p_wd = float(self._wd[word][t]) / float(self._uni[(t, C)]) p = self._l1 * p_uni + self._l2 * p_bi + self._l3 * p_tri p2 = log(p, 2) + log(p_wd, 2) logprobs.append(((t, C), p2)) # compute the result of appending each tag to this history for (tag, logprob) in logprobs: new_states.append( (history + [tag], curr_sent_logprob + logprob)) # otherwise a new word, set of possible tags is unknown else: self.unknown += 1 # since a set of possible tags, # and the probability of each specific tag # can not be returned from most classifiers: # specify that any unknown words are tagged with certainty p = 1 # if no unknown word tagger has been specified # then use the tag 'Unk' if self._unk is None: tag = ('Unk', C) # otherwise apply the unknown word tagger else: [(_w, t)] = list(self._unk.tag([word])) tag = (t, C) for (history, logprob) in current_states: history.append(tag) new_states = current_states # now have computed a set of possible new_states # sort states by log prob # set is now ordered greatest to least log probability new_states.sort(reverse=True, key=itemgetter(1)) # del everything after N (threshold) # this is the beam search cut if len(new_states) > self._N: new_states = new_states[:self._N] # compute the tags for the rest of the sentence # return the best list of tags for the sentence return self._tagword(sent, new_states)
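The class above is NLTK's TnT implementation (pre-3.0, hence the inc() calls); the public usage pattern is unchanged in current NLTK, for example as below (assuming the treebank sample is downloaded).

# Training and applying the TnT tagger shipped with NLTK.
from nltk.corpus import treebank
from nltk.tag import tnt

train_sents = treebank.tagged_sents()[:3000]

tnt_tagger = tnt.TnT()
tnt_tagger.train(train_sents)
print(tnt_tagger.tag(['The', 'cat', 'sat', 'on', 'the', 'mat']))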
def extract_entity_names(t): entity_names = [] if hasattr(t, 'node') and t.node: if t.node == 'NE': entity_names.append(' '.join([child[0] for child in t])) else: for child in t: entity_names.extend(extract_entity_names(child)) return entity_names #corpus_root = "samples" corpus_root = "abstracts" text = PlaintextCorpusReader(corpus_root, '.*') fd = FreqDist() for fid in text.fileids(): try: tagged_words = nltk.pos_tag(text.words(fid)) chunked_words = nltk.ne_chunk(tagged_words, binary=True) for name in extract_entity_names(chunked_words): fd.inc(name) except Exception as e: print e print fd.keys()[:50]
def default_tag(tagged_sents): tag_fd = FreqDist() for sent in tagged_sents: for word, postag in sent: tag_fd.inc(postag) return str(tag_fd.max())
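# ---------------------------------------------------------------------------
# Usage sketch (assumption, not part of the original snippet): the most
# frequent tag found by default_tag is typically used to seed an
# nltk.DefaultTagger at the bottom of a backoff chain.
# ---------------------------------------------------------------------------
import nltk
from nltk.corpus import brown

most_common = default_tag(brown.tagged_sents(categories='news'))  # usually 'NN'
backoff = nltk.DefaultTagger(most_common)
print backoff.tag(['the', 'quick', 'brown', 'fox'])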
from nltk.token import *
from nltk.tokenizer import WhitespaceTokenizer
from nltk.probability import FreqDist
from nltk.draw.plot import Plot

# What is the distribution of word lengths in the corpus?
freq_dist = FreqDist()
for token in corpus['SUBTOKENS']:
    freq_dist.inc(len(token['TEXT']))

# Plot the results
wordlens = freq_dist.samples()
# Sort the list of observed word lengths
wordlens.sort()
# Build (length, frequency) pairs for each word length;
# to inspect them, run the command `print points`
points = [(l, freq_dist.freq(l)) for l in wordlens]
Plot(points)
print points

# What is the distribution of lengths of words that end in a vowel?
VOWELS = ('a', 'e', 'i', 'o', 'u')
freq_dist = FreqDist()
for token in corpus['SUBTOKENS']:
    if token['TEXT'][-1].lower() in VOWELS:
        freq_dist.inc(len(token['TEXT']))
return dict([(word, True) for word in words if word in bestwords]) eng_all = codecs.open('wordlist.txt', 'r', 'utf8') ws = set([w.rstrip().lower() for w in eng_all.readlines()]) eng_all.close() sw = set(stopwords.words('english')) word_data = defaultdict(Counter) data = defaultdict(Counter) s_data = defaultdict(list) print "initializing classifier (this takes awhile)" word_fd = FreqDist() label_word_fd = ConditionalFreqDist() for word in movie_reviews.words(categories=['pos']): word_fd.inc(word.lower()) label_word_fd['pos'].inc(word.lower()) for word in movie_reviews.words(categories=['neg']): word_fd.inc(word.lower()) label_word_fd['neg'].inc(word.lower()) pos_word_count = label_word_fd['pos'].N() neg_word_count = label_word_fd['neg'].N() total_word_count = pos_word_count + neg_word_count word_scores = {} for word, freq in word_fd.iteritems(): pos_score = BigramAssocMeasures.chi_sq(label_word_fd['pos'][word], (freq, pos_word_count), total_word_count) neg_score = BigramAssocMeasures.chi_sq(label_word_fd['neg'][word],
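                                           (freq, neg_word_count),
                                           total_word_count)
    # NOTE: the lines above and below are a reconstruction -- the original
    # snippet is cut off mid-call; they assume the neg_score call mirrors the
    # pos_score call, as in the standard chi-square feature-selection recipe.
    word_scores[word] = pos_score + neg_score

# Keep the highest-scoring words as the vocabulary checked by the feature
# extractor at the top of the snippet (the 10000 cutoff is an assumed value).
best = sorted(word_scores.iteritems(), key=lambda ws: ws[1], reverse=True)[:10000]
bestwords = set([w for w, s in best])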
def word_rarity_freq(self): "Returns the frequency distribution of groups of word rarities" rarity_dist = FreqDist() for common in self.counts: if common > 500000000: rarity_dist.inc(0) elif common > 450000000: rarity_dist.inc(1) elif common > 400000000: rarity_dist.inc(2) elif common > 350000000: rarity_dist.inc(3) elif common > 300000000: rarity_dist.inc(4) elif common > 250000000: rarity_dist.inc(5) elif common > 200000000: rarity_dist.inc(6) elif common > 150000000: rarity_dist.inc(7) elif common > 100000000: rarity_dist.inc(8) elif common > 80000000: rarity_dist.inc(9) elif common > 65000000: rarity_dist.inc(10) elif common > 50000000: rarity_dist.inc(11) elif common > 30000000: rarity_dist.inc(12) elif common > 10000000: rarity_dist.inc(13) elif common > 8000000: rarity_dist.inc(14) elif common > 5500000: rarity_dist.inc(15) elif common > 3000000: rarity_dist.inc(16) elif common > 1000000: rarity_dist.inc(17) elif common > 500000: rarity_dist.inc(18) else: rarity_dist.inc(19) return rarity_dist
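# ---------------------------------------------------------------------------
# Equivalent sketch (assumption, not the author's code): the elif ladder above
# only maps a raw occurrence count onto one of 20 rarity buckets. The same
# bucketing can be written with bisect; THRESHOLDS are the cut points from the
# method above, listed in ascending order.
# ---------------------------------------------------------------------------
from bisect import bisect_left
from nltk.probability import FreqDist

THRESHOLDS = [500000, 1000000, 3000000, 5500000, 8000000, 10000000,
              30000000, 50000000, 65000000, 80000000, 100000000, 150000000,
              200000000, 250000000, 300000000, 350000000, 400000000,
              450000000, 500000000]

def word_rarity_freq(counts):
    """Bucket 0 holds the most common words, bucket 19 the rarest."""
    rarity_dist = FreqDist()
    for common in counts:
        # 19 minus the number of thresholds strictly below `common`
        # reproduces the `common > threshold` tests of the original method.
        rarity_dist.inc(19 - bisect_left(THRESHOLDS, common))
    return rarity_dist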
num_train = int(0.8 * len(tweets)) #fvecs = [(tweet_features.make_tweet_dict(t),s) for (t,s) in tweets] fvecs = [(tweet_features.get_tweet_features(t, set()), s) for (t, s) in tweets] v_train = fvecs[0:num_train] #v_train = fvecs v_test = fvecs[num_train:len(tweets)] #extract best word features word_fd = FreqDist() label_word_fd = ConditionalFreqDist() for (feats, label) in fvecs: for key in feats: if feats[key]: word_fd.inc(key) label_word_fd[label].inc(key) pos_word_count = label_word_fd['positive'].N() print pos_word_count neg_word_count = label_word_fd['negative'].N() print neg_word_count total_word_count = pos_word_count + neg_word_count feature_scores = {} for feature, freq in word_fd.iteritems(): pos_score = BigramAssocMeasures.chi_sq(label_word_fd['positive'][feature], (freq, pos_word_count), total_word_count) neg_score = BigramAssocMeasures.chi_sq(label_word_fd['negative'][feature],
chunked_sents = chunked_sents[:cutoff] print chunker.evaluate(chunked_sents), '\n' if args.trace: print 'analyzing chunker coverage of %s with %s\n' % ( args.corpus, chunker.__class__.__name__) iobs_found = FreqDist() sents = corpus.sents() if args.fraction != 1.0: cutoff = int(math.ceil(len(sents) * args.fraction)) sents = sents[:cutoff] for sent in sents: tree = chunker.parse(tagger.tag(sent)) for child in tree.subtrees(lambda t: t.node != 'S'): iobs_found.inc(child.node) iobs = iobs_found.samples() justify = max(7, *[len(iob) for iob in iobs]) print 'IOB'.center(justify) + ' Found ' print '=' * justify + ' =========' for iob in sorted(iobs): print ' '.join([iob.ljust(justify), str(iobs_found[iob]).rjust(9)]) print '=' * justify + ' ========='
class TextFeatures:
    parts_of_speech = [
        "NN", "NNS", "NNP", "NNPS", "DT", "RB", "IN", "PRP", "CC", "CD",
        "VB", "VBD", "VBN", "VBG", "JJ", "EX", "FW"
    ]
    most_common_words = [
        "the", "of", "and", "to", "a", "in", "for", "is", "on", "that", "by",
        "this", "with", "i", "you", "it", "not", "or", "be", "are", "from",
        "at", "as", "your", "all", "have", "new", "more", "an", "was", "we",
        "will", "home", "can", "us", "about", "if", "page", "my", "has",
        "search", "free"
    ]
    punctuation = [".", ",", "!", "?", ";", ":"]

    def __init__(self, text, session):
        self.session = session
        self.tokens = nltk.word_tokenize(text)
        self.text = text
        self.fdist = FreqDist()
        for token in self.tokens:
            self.fdist.inc(token.lower())
        self.tagged = nltk.pos_tag(self.tokens)
        self.counts = self.__get_word_commonality_counts(self.text.split())
        self.word_lengths = [len(word) for word in self.tokens]
        self.sentences = nltk.sent_tokenize(self.text)
        self.sentence_lengths = [len(sen.split()) for sen in self.sentences]

    def __get_word_commonality_counts(self, words):
        results = [
            self.session.query(WordCount).filter_by(word=w).first()
            for w in words
        ]
        results = [w.count for w in results if w is not None]
        if len(results) == 0:
            return [0]
        return results

    def _word_freq_to_vector(self):
        dist = self.word_freq()
        return [dist.freq(word) for word in TextFeatures.most_common_words]

    def _punctuation_freq_vector(self):
        dist = self.word_freq()
        return [dist.freq(mark) for mark in TextFeatures.punctuation]

    def _word_length_freq_to_vector(self):
        dist = self.word_length_freq()
        return [dist.freq(length) for length in range(1, 12)]

    def _POS_freq_to_vector(self):
        dist = self.POS_freq()
        return [dist.freq(pos) for pos in TextFeatures.parts_of_speech]

    def _POS_cond_freq_to_vector(self):
        dist = self.POS_cond_freq()
        freq_vector = []
        for pos0 in TextFeatures.parts_of_speech:
            for pos1 in TextFeatures.parts_of_speech:
                freq_vector.append(dist[pos0].freq(pos1))
        return freq_vector

    def _word_rarity_freq_to_vector(self):
        dist = self.word_rarity_freq()
        return [dist.freq(i) for i in range(20)]

    def to_vector(self):
        return ([
            self.avg_word_length(),
            self.std_dev_word_length(),
            float(self.max_word_length()),
            float(self.max_sentence_length()),
            float(self.min_sentence_length()),
            self.avg_sentence_length(),
            self.std_sentence_length(),
            float(self.avg_word_commonality()),
            float(self.std_word_commonality()),
            self.unique_word_freq()
        ] + self._word_rarity_freq_to_vector() + self._word_freq_to_vector() +
            self._punctuation_freq_vector() + self._word_length_freq_to_vector() +
            self._POS_freq_to_vector()
            #self._POS_cond_freq_to_vector()
        )

    def word_freq(self):
        return self.fdist

    def word_length_freq(self):
        return FreqDist(len(word) for word in self.tokens)

    def POS_freq(self):
        "Returns the frequency distribution of parts of speech"
        pos_dist = FreqDist()
        for pos_pair in self.tagged:
            pos_dist.inc(pos_pair[1])
        return pos_dist

    def POS_cond_freq(self):
        "Returns the conditional frequency distribution of parts of speech"
        cond_dist = ConditionalFreqDist()
        pos = [word_pos[1] for word_pos in self.tagged]
        [cond_dist[pair[0]].inc(pair[1]) for pair in pairwise(pos)]
        return cond_dist

    def word_rarity_freq(self):
        "Returns the frequency distribution of groups of word rarities"
        rarity_dist = FreqDist()
        for common in self.counts:
            if common > 500000000:
                rarity_dist.inc(0)
            elif common > 450000000:
                rarity_dist.inc(1)
            elif common > 400000000:
                rarity_dist.inc(2)
            elif common > 350000000:
                rarity_dist.inc(3)
            elif common > 300000000:
                rarity_dist.inc(4)
            elif common > 250000000:
                rarity_dist.inc(5)
            elif common > 200000000:
rarity_dist.inc(6) elif common > 150000000: rarity_dist.inc(7) elif common > 100000000: rarity_dist.inc(8) elif common > 80000000: rarity_dist.inc(9) elif common > 65000000: rarity_dist.inc(10) elif common > 50000000: rarity_dist.inc(11) elif common > 30000000: rarity_dist.inc(12) elif common > 10000000: rarity_dist.inc(13) elif common > 8000000: rarity_dist.inc(14) elif common > 5500000: rarity_dist.inc(15) elif common > 3000000: rarity_dist.inc(16) elif common > 1000000: rarity_dist.inc(17) elif common > 500000: rarity_dist.inc(18) else: rarity_dist.inc(19) return rarity_dist def avg_word_length(self): return numpy.average(self.word_lengths) def std_dev_word_length(self): return numpy.std(self.word_lengths) def max_word_length(self): return max(self.word_lengths) def unique_word_freq(self): return float(self.fdist.B()) / self.fdist.N() def max_sentence_length(self): return max(self.sentence_lengths) def min_sentence_length(self): return min(self.sentence_lengths) def avg_sentence_length(self): return numpy.average(self.sentence_lengths) def std_sentence_length(self): return numpy.std(self.sentence_lengths) def avg_word_commonality(self): return numpy.average(self.counts) def std_word_commonality(self): return numpy.std(self.counts)
from nltk.token import *
from nltk.tokenizer import WhitespaceTokenizer
from nltk.probability import FreqDist
from nltk.draw.plot import Plot

freq_dist = FreqDist()
corpus = Token(TEXT=open('dados/may2001_pdf.torto').read())
print corpus
WhitespaceTokenizer().tokenize(corpus)
print corpus
for token in corpus['SUBTOKENS']:
    freq_dist.inc(token['TEXT'])

# How many times does the word "form" appear in the corpus?
freq_dist.count('form')
# What is the frequency of the word "form"?
freq_dist.freq('form')
# How many word tokens were counted?
freq_dist.N()
# What word types were encountered?
freq_dist.samples()
# What is the most common word?
freq_dist.max()
def POS_freq(self): "Returns the frequency distribution of parts of speech" pos_dist = FreqDist() for pos_pair in self.tagged: pos_dist.inc(pos_pair[1]) return pos_dist
from nltk.token import * from nltk.tokenizer import WhitespaceTokenizer from nltk.probability import FreqDist from nltk.draw.plot import Plot freq_dist = FreqDist() corpus = Token(TEXT=open('dados/may2001_pdf.torto').read()) print corpus WhitespaceTokenizer().tokenize(corpus) print corpus for token in corpus['SUBTOKENS']: freq_dist.inc(token['TEXT']) # How many times did "the" occur? freq_dist.count('the') # What was the frequency of the word "the"? freq_dist.freq('the') # How many word tokens were counted? freq_dist.N() # What word types were encountered? freq_dist.samples() # What was the most common word? freq_dist.max() # What is the distribution of word lengths in a corpus? freq_dist = FreqDist()
def extract_features(sentence, target): features = FreqDist() # convert to 0-indexed sentence = [t.zero_indexed() for t in sentence] # features for each word in the sentence for token in sentence: features.inc(u"sTP:%s" % INTRA_TOKEN_JOIN((token.form, token.cpostag))) features.inc(u"sLP:%s" % INTRA_TOKEN_JOIN((token.lemma, token.cpostag))) # features for each word in the target for token in target: features.inc(u"tTP:%s" % INTRA_TOKEN_JOIN((token.form, token.cpostag))) features.inc(u"tLP:%s" % INTRA_TOKEN_JOIN((token.lemma, token.cpostag))) # syntactic features head = get_heuristic_head(target) children = get_children(sentence, head) subcat = [child.deprel.upper() for child in children] # unordered set of arc labels of children features.inc(u"d:%s" % TOKEN_JOIN(sorted(set(subcat)))) # ordered list of arc labels of children if head.cpostag == "V": # TODO(smt): why exclude "sub"? subcat = [deprel for deprel in subcat if deprel != "SUB" and deprel != "P" and deprel != "CC"] features.inc(u"sC:%s" % TOKEN_JOIN(subcat)) if head.head < len(sentence): parent = sentence[head.head] features.inc(u"pP:%s" % parent.cpostag.upper()) else: features.inc(u"pP:NULL") features.inc(u"pL:%s" % head.deprel.upper()) return features
def choose_tag(self, tokens, index, history): word = tokens[index] fd = FreqDist() for synset in wordnet.synsets(word): fd.inc(synset.pos) return self.wordnet_tag_map.get(fd.max())
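# ---------------------------------------------------------------------------
# Standalone sketch of the same idea (assumption -- the snippet does not show
# the enclosing tagger class or its wordnet_tag_map): tag a word with the
# Penn Treebank tag of its most frequent WordNet POS.
# ---------------------------------------------------------------------------
from nltk.corpus import wordnet
from nltk.probability import FreqDist

WORDNET_TAG_MAP = {'n': 'NN', 'v': 'VB', 'a': 'JJ', 's': 'JJ', 'r': 'RB'}

def wordnet_tag(word):
    fd = FreqDist()
    for synset in wordnet.synsets(word):
        fd.inc(synset.pos)      # old NLTK API: pos is an attribute
    if fd.N() == 0:
        return None             # unknown word: defer to a backoff tagger
    return WORDNET_TAG_MAP.get(fd.max())

print wordnet_tag('book')       # most synsets of 'book' are nouns, so 'NN'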
""" Throwaway script that processes the olac classification results on June 29th to see how many iso codes were identified for each record and for how many records were that number of iso codes identified. """ from operator import itemgetter from nltk.probability import FreqDist fd = FreqDist() results_file = open('olac_iso_identification_results').readlines() # this took like 15 minutes to get. # not too bad, considering it's like all of olac. num_records = len(results_file)+0.0 for line in results_file: record = line.strip().split('\t') iso_list = record[-1].split() fd.inc(len(iso_list)) print "num\tfreq\tpercentage of records" for num, freq in sorted(fd.items(), key=itemgetter(1), reverse=True): print str(num)+'\t'+str(freq)+'\t'+str(freq/num_records) print '' print 'Number of records: '+str(num_records)
if args.trace: print 'loading %s' % args.corpus ############## ## counting ## ############## wc = 0 tag_counts = FreqDist() iob_counts = FreqDist() tag_iob_counts = ConditionalFreqDist() word_set = set() for obj in chunked_corpus.chunked_words(): if isinstance(obj, Tree): iob_counts.inc(obj.node) for word, tag in obj.leaves(): wc += 1 word_set.add(word) tag_counts.inc(tag) tag_iob_counts[tag].inc(obj.node) else: word, tag = obj wc += 1 word_set.add(word) tag_counts.inc(tag) ############ ## output ## ############
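# ---------------------------------------------------------------------------
# The script is cut off at the output banner. A hedged sketch of the kind of
# summary such a counting script typically prints (not the original code):
# ---------------------------------------------------------------------------
print '%d words, %d unique words' % (wc, len(word_set))
print '%d part-of-speech tags, %d IOB chunk types' % (len(tag_counts), len(iob_counts))

for tag in sorted(tag_counts.samples()):
    print tag.ljust(8), str(tag_counts[tag]).rjust(8)

for iob in sorted(iob_counts.samples()):
    print iob.ljust(8), str(iob_counts[iob]).rjust(8)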