def train():
  """Train a Naive Bayes sentiment classifier on labelled tweet files.

  Reads positive/negative tweets from fixed paths, splits each class
  80/20 into train/test, builds negation-aware unigram features,
  trains and evaluates the classifier, then returns the fitted
  analyzer.
  """
  positive_tweets = read_tweets('/root/295/new/positive.txt', 'positive')
  negative_tweets = read_tweets('/root/295/new/negative.txt', 'negative')
  print(len(positive_tweets))
  print(len(negative_tweets))

  # 80/20 split per class.  Floor division keeps the cutoffs integral on
  # both Python 2 and Python 3.
  pos_cut = len(positive_tweets) * 80 // 100
  neg_cut = len(negative_tweets) * 80 // 100
  pos_train = positive_tweets[:pos_cut]
  neg_train = negative_tweets[:neg_cut]
  # BUG FIX: the original sliced neg_test with the *positive* cutoff and
  # started both test slices at cutoff+1, silently dropping one tweet per
  # class.  Each class now uses its own cutoff with no gap.
  pos_test = positive_tweets[pos_cut:]
  neg_test = negative_tweets[neg_cut:]

  training_data = pos_train + neg_train
  test_data = pos_test + neg_test

  sentim_analyzer = SentimentAnalyzer()
  # Vocabulary with negation marking (e.g. "good_NEG" after "not").
  all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_data])
  # Keep only unigrams seen at least 4 times.
  unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
  print(len(unigram_feats))
  sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
  training_set = sentim_analyzer.apply_features(training_data)
  test_set = sentim_analyzer.apply_features(test_data)
  trainer = NaiveBayesClassifier.train
  classifier = sentim_analyzer.train(trainer, training_set)
  for key, value in sorted(sentim_analyzer.evaluate(test_set).items()):
    print('{0}: {1}'.format(key, value))
  # NOTE(review): `tokenize_sentance` (sic) is defined elsewhere -- the
  # misspelling is preserved because it is an external name.
  print(sentim_analyzer.classify(tokenize_sentance('I hate driving car at night')))

  return sentim_analyzer
Beispiel #2
0
    def sentiment_analysis(self, testing_data, training_data=None):
        """Train and evaluate a Naive Bayes sentiment model.

        Falls back to ``self.training_data`` when *training_data* is not
        supplied, then prints every evaluation metric for *testing_data*.
        """
        if training_data is None:
            training_data = self.training_data

        # Build a negation-aware vocabulary from the training documents.
        analyzer = SentimentAnalyzer()
        negated_vocabulary = analyzer.all_words(
            [mark_negation(data) for data in training_data])

        # Unigram features, keeping only words seen at least four times.
        unigram_features = analyzer.unigram_word_feats(
            negated_vocabulary, min_freq=4)
        analyzer.add_feat_extractor(
            extract_unigram_feats, unigrams=unigram_features)

        train_set = analyzer.apply_features(training_data)
        test_set = analyzer.apply_features(testing_data)

        # Train the model, then report every metric in sorted order.
        analyzer.train(NaiveBayesClassifier.train, train_set)
        for key, value in sorted(analyzer.evaluate(test_set).items()):
            print("{0}: {1}".format(key, value))
Beispiel #3
0
def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None):
    """
    Train and test a classifier on instances of the Subjective Dataset by Pang and
    Lee. The dataset is made of 5000 subjective and 5000 objective sentences.
    All tokens (words and punctuation marks) are separated by a whitespace, so
    we use the basic WhitespaceTokenizer to parse the data.

    :param trainer: `train` method of a classifier.
    :param save_analyzer: if `True`, store the SentimentAnalyzer in a pickle file.
    :param n_instances: the number of total sentences that have to be used for
        training and testing. Sentences will be equally split between positive
        and negative.
    :param output: the output file where results have to be reported.
    :return: the trained SentimentAnalyzer.
    """
    from nltk.sentiment import SentimentAnalyzer
    from nltk.corpus import subjectivity

    if n_instances is not None:
        # Half the budget per category; // keeps it an int on Py2 and Py3.
        n_instances = n_instances // 2

    subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]]
    obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]]

    # We separately split subjective and objective instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_subj_docs, test_subj_docs = split_train_test(subj_docs)
    train_obj_docs, test_obj_docs = split_train_test(obj_docs)

    training_docs = train_subj_docs + train_obj_docs
    testing_docs = test_subj_docs + test_obj_docs

    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])

    # Add simple unigram word features handling negation
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

    # Apply features to obtain a feature-value representation of our datasets
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)

    classifier = sentim_analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print('Your classifier does not provide a show_most_informative_features() method.')
    results = sentim_analyzer.evaluate(test_set)

    # Idiom fix: truthiness test instead of comparing to True with ==.
    if save_analyzer:
        save_file(sentim_analyzer, 'sa_subjectivity.pickle')

    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(output, Dataset='subjectivity', Classifier=type(classifier).__name__,
                        Tokenizer='WhitespaceTokenizer', Feats=extr,
                        Instances=n_instances, Results=results)

    return sentim_analyzer
Beispiel #4
0
def demo_movie_reviews(trainer, n_instances=None, output=None):
    """
    Train a classifier on the Movie Reviews corpus and evaluate it.

    The corpus has been preprocessed using the default sentence tokenizer
    and WordPunctTokenizer. Features are the most frequent unigrams.

    :param trainer: `train` method of a classifier.
    :param n_instances: total number of reviews used for training and
        testing, split equally between positive and negative.
    :param output: optional file where results are reported.
    """
    from nltk.corpus import movie_reviews
    from nltk.sentiment import SentimentAnalyzer

    if n_instances is not None:
        n_instances = int(n_instances / 2)

    # Pair every review's token list with its corpus category.
    pos_docs = [(list(movie_reviews.words(fid)), 'pos')
                for fid in movie_reviews.fileids('pos')[:n_instances]]
    neg_docs = [(list(movie_reviews.words(fid)), 'neg')
                for fid in movie_reviews.fileids('neg')[:n_instances]]

    # Split each polarity separately so train and test stay balanced.
    pos_train, pos_test = split_train_test(pos_docs)
    neg_train, neg_test = split_train_test(neg_docs)

    training_docs = pos_train + neg_train
    testing_docs = pos_test + neg_test

    analyzer = SentimentAnalyzer()
    vocabulary = analyzer.all_words(training_docs)

    # Register unigram features for every word seen at least four times.
    frequent_unigrams = analyzer.unigram_word_feats(vocabulary, min_freq=4)
    analyzer.add_feat_extractor(extract_unigram_feats, unigrams=frequent_unigrams)

    # Feature-value representations of both datasets.
    training_set = analyzer.apply_features(training_docs)
    test_set = analyzer.apply_features(testing_docs)

    classifier = analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print('Your classifier does not provide a show_most_informative_features() method.')
    results = analyzer.evaluate(test_set)

    if output:
        extractor_names = [f.__name__ for f in analyzer.feat_extractors]
        output_markdown(output, Dataset='Movie_reviews',
                        Classifier=type(classifier).__name__,
                        Tokenizer='WordPunctTokenizer',
                        Feats=extractor_names, Results=results,
                        Instances=n_instances)
Beispiel #5
0
def demo_tweets(trainer, n_instances=None, output=None):
    """
    Train and test Naive Bayes classifier on 10000 tweets, tokenized using
    TweetTokenizer.
    Features are composed of:
        - 1000 most frequent unigrams
        - 100 top bigrams (using BigramAssocMeasures.pmi)

    :param trainer: `train` method of a classifier.
    :param n_instances: the number of total tweets that have to be used for
        training and testing. Tweets will be equally split between positive and
        negative.
    :param output: the output file where results have to be reported.
    """
    from nltk.tokenize import TweetTokenizer
    from nltk.sentiment import SentimentAnalyzer
    from nltk.corpus import twitter_samples, stopwords

    # Different customizations for the TweetTokenizer
    tokenizer = TweetTokenizer(preserve_case=False)
    # tokenizer = TweetTokenizer(preserve_case=True, strip_handles=True)
    # tokenizer = TweetTokenizer(reduce_len=True, strip_handles=True)

    if n_instances is not None:
        # Half per polarity; // keeps the limit an int on Py2 and Py3.
        n_instances = n_instances // 2

    fields = ['id', 'text']
    positive_json = twitter_samples.abspath("positive_tweets.json")
    positive_csv = 'positive_tweets.csv'
    json2csv_preprocess(positive_json, positive_csv, fields, limit=n_instances)

    negative_json = twitter_samples.abspath("negative_tweets.json")
    negative_csv = 'negative_tweets.csv'
    json2csv_preprocess(negative_json, negative_csv, fields, limit=n_instances)

    neg_docs = parse_tweets_set(negative_csv, label='neg', word_tokenizer=tokenizer)
    pos_docs = parse_tweets_set(positive_csv, label='pos', word_tokenizer=tokenizer)

    # We separately split subjective and objective instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)

    training_tweets = train_pos_docs + train_neg_docs
    testing_tweets = test_pos_docs + test_neg_docs

    sentim_analyzer = SentimentAnalyzer()
    # stopwords = stopwords.words('english')
    # all_words = [word for word in sentim_analyzer.all_words(training_tweets) if word.lower() not in stopwords]
    # list(...) replaces a pointless identity comprehension (same contents).
    all_words = list(sentim_analyzer.all_words(training_tweets))

    # Add simple unigram word features
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

    # Add bigram collocation features
    bigram_collocs_feats = sentim_analyzer.bigram_collocation_feats([tweet[0] for tweet in training_tweets],
        top_n=100, min_freq=12)
    sentim_analyzer.add_feat_extractor(extract_bigram_feats, bigrams=bigram_collocs_feats)

    training_set = sentim_analyzer.apply_features(training_tweets)
    test_set = sentim_analyzer.apply_features(testing_tweets)

    classifier = sentim_analyzer.train(trainer, training_set)
    # classifier = sentim_analyzer.train(trainer, training_set, max_iter=4)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print('Your classifier does not provide a show_most_informative_features() method.')
    results = sentim_analyzer.evaluate(test_set)

    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(output, Dataset='labeled_tweets', Classifier=type(classifier).__name__,
                        Tokenizer=tokenizer.__class__.__name__, Feats=extr,
                        Results=results, Instances=n_instances)
train_docs = train[:3359]
test_docs = train[3359:]


# Sweep over vocabulary sizes with a fixed minimum frequency, training a
# MaxEnt classifier for each setting and writing the metrics to disk.
top_ns = [10, 20, 50, 100, 200, 300]
min_freqq = 4
for top_nn in top_ns:

    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in train_docs])
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, top_n=top_nn, min_freq=min_freqq)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    training_set = sentim_analyzer.apply_features(train_docs)
    testing_set = sentim_analyzer.apply_features(test_docs)

    trainer = MaxentClassifier.train
    classifierme = sentim_analyzer.train(trainer, training_set)

    # BUG FIX: use a context manager so the file is closed even if
    # evaluation raises, and terminate each metric with '\n' -- the
    # original f.write concatenated every result onto a single line.
    out_path = ('results/maxent/noemoticons/maxent_top_n_' + str(top_nn)
                + '_min_freq_' + str(min_freqq) + '-noemoticons.txt')
    with open(out_path, 'w') as f:
        for key, value in sorted(sentim_analyzer.evaluate(testing_set, classifier=classifierme).items()):
            print('{0}: {1}'.format(key, value))
            f.write('{0}: {1}\n'.format(key, value))

#f = open('maxent_trained_with_80_percent_2.pickle', 'wb')
#pickle.dump(classifierme, f)
#f.close()


Beispiel #7
0
# Now aggregate the training and test sets

training = training_subjective + training_objective
test = test_subjective + test_objective

## Apply sentiment analysis to data to extract new "features"

# Initialize sentiment analyzer object

sentiment_analyzer = SentimentAnalyzer()

# Mark all negative words in training data, using existing list of negative words

all_negative_words = sentiment_analyzer.all_words([mark_negation(data) for data in training])

# Keep unigrams that occur at least four times.
# (A dead `len(unigram_features)` expression statement was removed here.)
unigram_features = sentiment_analyzer.unigram_word_feats(all_negative_words, min_freq=4)
sentiment_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_features)

training_final = sentiment_analyzer.apply_features(training)
test_final = sentiment_analyzer.apply_features(test)

## Train model and test

model = NaiveBayesClassifier.train
classifier = sentiment_analyzer.train(model, training_final)

for key, value in sorted(sentiment_analyzer.evaluate(test_final).items()):
    print("{0}: {1}".format(key, value))
Beispiel #8
0
def demo_movie_reviews(trainer, n_instances=None, output=None):
    """
    Train and evaluate a classifier on the Movie Reviews dataset.

    The corpus is preprocessed with the default sentence tokenizer and
    WordPunctTokenizer; features are the most frequent unigrams.

    :param trainer: `train` method of a classifier.
    :param n_instances: total number of reviews to use, split equally
        between positive and negative.
    :param output: optional output file for reporting results.
    """
    from nltk.corpus import movie_reviews
    from nltk.sentiment import SentimentAnalyzer

    if n_instances is not None:
        n_instances = int(n_instances / 2)

    # Collect (token_list, label) pairs, one per review file.
    labelled = {}
    for label in ('pos', 'neg'):
        labelled[label] = [
            (list(movie_reviews.words(fid)), label)
            for fid in movie_reviews.fileids(label)[:n_instances]
        ]

    # Split each polarity on its own to keep the class balance uniform
    # across the train and test partitions.
    pos_train, pos_test = split_train_test(labelled['pos'])
    neg_train, neg_test = split_train_test(labelled['neg'])

    training_docs = pos_train + neg_train
    testing_docs = pos_test + neg_test

    analyzer = SentimentAnalyzer()
    vocabulary = analyzer.all_words(training_docs)

    # Unigram features for every word appearing at least four times.
    analyzer.add_feat_extractor(
        extract_unigram_feats,
        unigrams=analyzer.unigram_word_feats(vocabulary, min_freq=4),
    )

    # Convert both splits into feature-value representations.
    training_set = analyzer.apply_features(training_docs)
    test_set = analyzer.apply_features(testing_docs)

    classifier = analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print(
            'Your classifier does not provide a show_most_informative_features() method.'
        )
    results = analyzer.evaluate(test_set)

    if output:
        output_markdown(
            output,
            Dataset='Movie_reviews',
            Classifier=type(classifier).__name__,
            Tokenizer='WordPunctTokenizer',
            Feats=[f.__name__ for f in analyzer.feat_extractors],
            Results=results,
            Instances=n_instances,
        )
0
def demo_tweets(trainer, n_instances=None, output=None):
    """
    Train and test Naive Bayes classifier on 10000 tweets, tokenized using
    TweetTokenizer.
    Features are composed of:
        - 1000 most frequent unigrams
        - 100 top bigrams (using BigramAssocMeasures.pmi)

    :param trainer: `train` method of a classifier.
    :param n_instances: the number of total tweets that have to be used for
        training and testing. Tweets will be equally split between positive and
        negative.
    :param output: the output file where results have to be reported.
    """
    from nltk.tokenize import TweetTokenizer
    from nltk.sentiment import SentimentAnalyzer
    from nltk.corpus import twitter_samples, stopwords

    # Different customizations for the TweetTokenizer
    tokenizer = TweetTokenizer(preserve_case=False)
    # tokenizer = TweetTokenizer(preserve_case=True, strip_handles=True)
    # tokenizer = TweetTokenizer(reduce_len=True, strip_handles=True)

    if n_instances is not None:
        # Half per polarity; // keeps the limit an int on Py2 and Py3.
        n_instances = n_instances // 2

    fields = ['id', 'text']
    positive_json = twitter_samples.abspath("positive_tweets.json")
    positive_csv = 'positive_tweets.csv'
    json2csv_preprocess(positive_json, positive_csv, fields, limit=n_instances)

    negative_json = twitter_samples.abspath("negative_tweets.json")
    negative_csv = 'negative_tweets.csv'
    json2csv_preprocess(negative_json, negative_csv, fields, limit=n_instances)

    neg_docs = parse_tweets_set(negative_csv,
                                label='neg',
                                word_tokenizer=tokenizer)
    pos_docs = parse_tweets_set(positive_csv,
                                label='pos',
                                word_tokenizer=tokenizer)

    # We separately split subjective and objective instances to keep a balanced
    # uniform class distribution in both train and test sets.
    train_pos_docs, test_pos_docs = split_train_test(pos_docs)
    train_neg_docs, test_neg_docs = split_train_test(neg_docs)

    training_tweets = train_pos_docs + train_neg_docs
    testing_tweets = test_pos_docs + test_neg_docs

    sentim_analyzer = SentimentAnalyzer()
    # stopwords = stopwords.words('english')
    # all_words = [word for word in sentim_analyzer.all_words(training_tweets) if word.lower() not in stopwords]
    # list(...) replaces a pointless identity comprehension (same contents).
    all_words = list(sentim_analyzer.all_words(training_tweets))

    # Add simple unigram word features
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats,
                                       unigrams=unigram_feats)

    # Add bigram collocation features
    bigram_collocs_feats = sentim_analyzer.bigram_collocation_feats(
        [tweet[0] for tweet in training_tweets], top_n=100, min_freq=12)
    sentim_analyzer.add_feat_extractor(extract_bigram_feats,
                                       bigrams=bigram_collocs_feats)

    training_set = sentim_analyzer.apply_features(training_tweets)
    test_set = sentim_analyzer.apply_features(testing_tweets)

    classifier = sentim_analyzer.train(trainer, training_set)
    # classifier = sentim_analyzer.train(trainer, training_set, max_iter=4)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        print(
            'Your classifier does not provide a show_most_informative_features() method.'
        )
    results = sentim_analyzer.evaluate(test_set)

    if output:
        extr = [f.__name__ for f in sentim_analyzer.feat_extractors]
        output_markdown(
            output,
            Dataset='labeled_tweets',
            Classifier=type(classifier).__name__,
            Tokenizer=tokenizer.__class__.__name__,
            Feats=extr,
            Results=results,
            Instances=n_instances,
        )
Beispiel #10
0
    all_words_neg = sentim_analyzer.all_words(
        [mark_negation(doc) for doc in training_docs])

    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg,
                                                       min_freq=4)
    len(unigram_feats)

    sentim_analyzer.add_feat_extractor(extract_unigram_feats,
                                       unigrams=unigram_feats)

    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)

    trainer = NaiveBayesClassifier.train
    classifier = sentim_analyzer.train(trainer, training_set)

    for key, value in sorted(sentim_analyzer.evaluate(test_set).items()):
        print('{0}: {1}'.format(key, value))

    sid = SentimentIntensityAnalyzer()

    auth = OAuthHandler(ckey, csecret)
    auth.set_access_token(atoken, asecret)
    twitterStream = Stream(auth, listener())
    #twitterStream.filter(track=["googl", "google", "goog"])
    twitterStream.filter(track=["msft", "microsoft", "windows"])
    #ply.show()

except Exception as e:
    print(e)
                                   labeled=False)

# Build the test set
_test_X = analyzer.apply_features([
    mark_negation(word_tokenize(unidecode(clean_text(instance))))
    for instance in test_X
],
                                  labeled=False)

# Compare the rule-based VADER classifier against the trained Naive Bayes
# model.  NOTE: this section uses Python 2 print statements; it will not
# run under Python 3 without modification.
print "Vader Classifier:"

# Raw VADER scores for the first training instance, then predicted vs.
# actual labels for the first five instances (inline comments show the
# pair observed on one run: prediction, truth).
print vader.polarity_scores(train_X[0])
print vader_polarity(train_X[0]), train_y[0]  # 0 1
print vader_polarity(train_X[1]), train_y[1]  # 0 0
print vader_polarity(train_X[2]), train_y[2]  # 1 1
print vader_polarity(train_X[3]), train_y[3]  # 0 1
print vader_polarity(train_X[4]), train_y[4]  # 0 0

# VADER metrics over the whole test set.
pred_y = [vader_polarity(text) for text in test_X]
print "Vader Accuracy:", accuracy_score(test_y, pred_y)  # 0.6892
print "Vader Precision:", precision_score(test_y, pred_y, average='binary')
print "Vader Recall:", recall_score(test_y, pred_y, average='binary')

# Train Naive Bayes on the feature sets built earlier and evaluate.
trainer = NaiveBayesClassifier.train
classifier = analyzer.train(trainer, zip(_train_X, train_y[:TRAINING_COUNT]))
score = analyzer.evaluate(zip(_test_X, test_y))
print score
print "NB Accuracy: ", score['Accuracy']  # 0.8064 for TRAINING_COUNT=5000
# NOTE(review): classify() is called on the class rather than an instance,
# and is handed the whole feature list -- looks suspect; confirm intent.
classifyed = NaiveBayesClassifier.classify(_test_X)
print classifyed
class SuicideClassifier(object):
    """Naive Bayes classifier separating 'suicidal' from 'alright' phrases.

    Trains on two phrase files at construction time, holding out the last
    200 phrases of each class for evaluation, and keeps a rolling window
    of recent sentiment scores.
    """

    def __init__(self, sentiment_only, num_phrases_to_track=20):
        # neg_phrases = filter_negative_phrases(load_csv_sentences('thoughtsandfeelings.csv'))
        # pos_phrases = filter_positive_phrases(load_csv_sentences('spiritualforums.csv'))
        # rolling [timestamp, score] pairs used by update_sentiments()
        self.recent_sentiment_scores = []

        # BUG FIX: the original leaked both file handles; context managers
        # guarantee they are closed.
        with open("ALL_neg_phrases_filtered.txt", "r") as neg_file:
            neg_phrases = neg_file.readlines()
        with open("webtext_phrases_with_lots_of_words.txt", "r") as pos_file:
            pos_phrases = pos_file.readlines()

        # Tokenize and label; truncate the positive class to match the
        # negative class size so training stays balanced.
        neg_docs = [(phrase.split(), 'suicidal') for phrase in neg_phrases]
        pos_docs = [(phrase.split(), 'alright')
                    for phrase in pos_phrases[:len(neg_phrases)]]

        print(len(neg_docs))
        print(len(pos_docs))
        # Hold out the last 200 phrases of each class for testing.
        negcutoff = -200
        poscutoff = -200

        train_pos_docs = pos_docs[:poscutoff]
        test_pos_docs = pos_docs[poscutoff:]
        train_neg_docs = neg_docs[:negcutoff]
        test_neg_docs = neg_docs[negcutoff:]
        training_docs = train_pos_docs + train_neg_docs
        testing_docs = test_pos_docs + test_neg_docs

        self.sentim_analyzer = SentimentAnalyzer()

        if not sentiment_only:
            # Unigram features over the full training vocabulary.
            all_words = self.sentim_analyzer.all_words([doc for doc in training_docs])
            unigram_feats = self.sentim_analyzer.unigram_word_feats(all_words, min_freq=1)
            self.sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

        # VADER-based sentiment feature is always included.
        self.sentim_analyzer.add_feat_extractor(vader_sentiment_feat)

        # bigram_feats = self.sentim_analyzer.bigram_collocation_feats(all_words, min_freq=1)
        # self.sentim_analyzer.add_feat_extractor(extract_bigram_feats, bigrams=bigram_feats)

        training_set = self.sentim_analyzer.apply_features(training_docs)
        test_set = self.sentim_analyzer.apply_features(testing_docs)
        trainer = NaiveBayesClassifier.train
        self.classifier = self.sentim_analyzer.train(trainer, training_set)
        for key, value in sorted(self.sentim_analyzer.evaluate(test_set).items()):
            print('{0}: {1}'.format(key, value))
        self.classifier.show_most_informative_features(20)

    def test(self, phrase):
        """Classify a whitespace-tokenized *phrase* string."""
        return self.sentim_analyzer.classify(phrase.split())

    def update_sentiments(self, value):
        """Record *value*, drop entries older than 60s, return the mean."""
        now = datetime.datetime.now()
        self.recent_sentiment_scores.append([now, value])
        self.recent_sentiment_scores = [
            x for x in self.recent_sentiment_scores
            if x[0] > now - datetime.timedelta(seconds=60)
        ]
        print(sum([x[1] for x in self.recent_sentiment_scores]) / len(self.recent_sentiment_scores))
        return sum([x[1] for x in self.recent_sentiment_scores]) / len(self.recent_sentiment_scores)
# Compile the (externally defined) Keras model for binary classification.
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# Fit for 10 epochs, reserving 10% of the training data for validation.
# verbose=False silences the per-epoch progress output.
history = model.fit(X_train, y_train,
                    epochs=10,
                    verbose=False,
                    batch_size=10,
                    validation_split=0.1)


class SentimentIntensityAnalyzerWrapper(ClassifierI):
    """Expose a Keras binary model through NLTK's ClassifierI interface."""

    def __init__(self, keras_model):
        self.sid = SentimentIntensityAnalyzer()
        self.keras_model = keras_model

    def classify_many(self, featuresets):
        """Return one predicted label per feature vector in *featuresets*."""
        predict = self.keras_model.predict_classes
        return [predict(numpy.array([features]))[0][0]
                for features in featuresets]


# Evaluate the Keras model (via the ClassifierI wrapper) on the test set.
siaw = SentimentIntensityAnalyzerWrapper(model)
sentim_analyzer = SentimentAnalyzer()

# list(zip(...)) replaces the redundant identity comprehension
# [(x, y) for (x, y) in zip(...)] -- same pairs, built in C.
for key, value in sorted(sentim_analyzer.evaluate(list(zip(X_test, y_test)), classifier=siaw).items()):
    print('{0}: {1}'.format(key, value))

plot_history(history)
Beispiel #14
0
def train_classifier(classifier, num_of_tweets, gram, lang, lemmas):
    """5-fold cross-validate *classifier* on a balanced tweet corpus.

    :param classifier: classifier class exposing a ``train`` method.
    :param num_of_tweets: number of tweets (neg/pos interleaved) to use.
    :param gram: 'unigram' or 'bigram' -- which feature scheme to build.
    :param lang: 'rus' or 'ger' -- selects the train/test CSV files.
    :param lemmas: lemmatisation flag forwarded to tokenize_set().
    """
    # NOTE(review): a single SentimentAnalyzer is reused across all five
    # folds, so feature extractors added per fold accumulate -- confirm
    # this is intended before relying on per-fold feature isolation.
    sentim_analyzer = SentimentAnalyzer()

    print("num_of_tweets, gram, lang, lemmas_bool:")
    print(num_of_tweets, gram, lang, lemmas)

    training = []
    testing = []
    if lang == "rus":
        training = get_train_test("train.csv")
        testing = get_train_test("test.csv")
    if lang == "ger":
        training = get_train_test("train_de.csv")
        testing = get_train_test("test_de.csv")

    data = training + testing

    def removeStopWords(item):
        # Strip language-specific stop words from the token field in place.
        item[0] = delete_stop_words(lang, item[0])
        return item

    # Partition by label, then interleave neg/pos so that every prefix of
    # data_even is class-balanced.
    data_neg = [i for i in data if i[1] == 'neg']
    data_pos = [i for i in data if i[1] == 'pos']

    data_even = []
    for i in range(len(data_neg)):
        data_even.append(data_neg[i])
        data_even.append(data_pos[i])

    training_data = data_even[:num_of_tweets]

    # Metric accumulators summed over the five folds.
    metric_names = ("Accuracy", "Precision [pos]", "Recall [pos]",
                    "F-measure [pos]", "Precision [neg]", "Recall [neg]",
                    "F-measure [neg]")
    totals = {name: 0 for name in metric_names}
    vocab = 0
    unigram = 0
    bigram = 0

    fold_size = len(training_data) // 5
    for i in range(5):
        # Fold i is the test slice; everything else is training.
        test = training_data[fold_size * i:fold_size * (i + 1)]
        train = (training_data[:fold_size * i]
                 + training_data[fold_size * (i + 1):len(training_data)])

        train = list(map(removeStopWords, train))
        test = list(map(removeStopWords, test))

        shuffle(train)
        shuffle(test)

        print("len(train+test):")
        print(len(train) + len(test))

        print("train: pos, neg:")
        print(count_tags(train))

        print("test: pos, neg:")
        print(count_tags(test))

        vocabulary = sentim_analyzer.all_words(tokenize_set(train, lemmas, lang))
        print("vocab len:")
        print(len(vocabulary))
        vocab += len(vocabulary)

        if gram == "unigram":
            unigram_features = sentim_analyzer.unigram_word_feats(vocabulary)
            print("unigram feats len:")
            print(len(unigram_features))
            unigram += len(unigram_features)
            sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_features)
        if gram == "bigram":
            bigram_features = sentim_analyzer.bigram_collocation_feats(tokenize_set(train, lemmas, lang))
            print("bigram feats len:")
            print(len(bigram_features))
            bigram += len(bigram_features)
            sentim_analyzer.add_feat_extractor(extract_bigram_feats, bigrams=bigram_features)

        _train_X = sentim_analyzer.apply_features(tokenize_set(train, lemmas, lang), labeled=False)
        _train_Y = get_y(train)

        _test_X = sentim_analyzer.apply_features(tokenize_set(test, lemmas, lang), labeled=False)
        _test_Y = get_y(test)
        sentim_analyzer.train(classifier.train, list(zip(_train_X, _train_Y)))
        # Renamed from `dict`, which shadowed the builtin.
        fold_metrics = sentim_analyzer.evaluate(list(zip(_test_X, _test_Y)))
        print(fold_metrics)
        for name in metric_names:
            totals[name] += fold_metrics.get(name)

    # Report the per-fold averages (same print order as the metrics above
    # were originally reported in).
    print("Accuracy:")
    print(totals.get('Accuracy') / 5)
    print("Precision [pos]:")
    print(totals.get('Precision [pos]') / 5)
    print("Precision [neg]:")
    print(totals.get('Precision [neg]') / 5)
    print("F-measure [pos]:")
    print(totals.get('F-measure [pos]') / 5)
    print("F-measure [neg]:")
    print(totals.get('F-measure [neg]') / 5)
    print("Recall [pos]:")
    print(totals.get('Recall [pos]') / 5)
    print("Recall [neg]:")
    print(totals.get('Recall [neg]') / 5)
    print("vocab length: ")
    print(vocab / 5)
    if gram == "bigram":
        print("bigram features:")
        print(bigram / 5)
    if gram == "unigram":
        print("unigram features:")
        print(unigram / 5)
Beispiel #15
0
#print(vocabulary)

# Unigram features over the corpus vocabulary (words seen >= 10 times).
# (Typo fixed in the printed message: "Unigran" -> "Unigram"; a stray
# no-op '''Document missing''' string literal was removed.)
print("Computing Unigram Features ...")
unigram_features = analyzer.unigram_word_feats(vocabulary, min_freq=10)
print("Unigram Features: ", len(unigram_features))  # 8237
#print(unigram_features)

analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_features)

# Build the training set: clean, tokenize and negation-mark each instance.
_train_X = analyzer.apply_features([
    mark_negation(word_tokenize(unidecode(clean_text(instance))))
    for instance in train_X[:TRAINING_COUNT]
],
                                   labeled=False)

# Build the test set the same way.
_test_X = analyzer.apply_features([
    mark_negation(word_tokenize(unidecode(clean_text(instance))))
    for instance in test_X
],
                                  labeled=False)

trainer = NaiveBayesClassifier.train
classifier = analyzer.train(trainer, zip(_train_X, train_y[:TRAINING_COUNT]))
print(_train_X)
print(_test_X)
score = analyzer.evaluate(zip(_test_X, test_y), classifier, accuracy=True)
print("Accuracy: ", score['Accuracy'])  # 0.8064 for TRAINING_COUNT=5000
def train_sentiment_analyzer_subjectivity(n_instances=None):
    """Train a subjectivity (subj/obj) NaiveBayes sentiment analyzer.

    Uses NLTK's subjectivity corpus, marks negation scopes, filters stop
    words, punctuation, and numeric tokens, trains on unigram features,
    evaluates the classifier, and pickles the trained SentimentAnalyzer.

    :param n_instances: total number of sentences to use, split evenly
        between the 'subj' and 'obj' categories; ``None`` means all.
    """
    if n_instances is not None:
        n_instances = int(n_instances / 2)

    # NLTK's integrated subjectivity dataset for the subj/obj training.
    subj_docs = [
        (sent, 'subj')
        for sent in subjectivity.sents(categories='subj')[:n_instances]
    ]
    obj_docs = [(sent, 'obj')
                for sent in subjectivity.sents(categories='obj')[:n_instances]]

    # We separately split subjective and objective instances to keep a
    # balanced, uniform class distribution in both train and test sets.
    train_subj_docs, test_subj_docs = split_train_test(subj_docs)
    train_obj_docs, test_obj_docs = split_train_test(obj_docs)

    training_docs = train_subj_docs + train_obj_docs
    testing_docs = test_subj_docs + test_obj_docs

    sentim_analyzer = SentimentAnalyzer()

    all_words = sentim_analyzer.all_words(
        [mark_negation(doc) for doc in training_docs])

    # Token blacklist: English stop words plus ASCII punctuation and a few
    # multi-character punctuation tokens the tokenizer produces.  A set makes
    # the per-word membership test O(1) instead of O(n).
    stopwords_english = set(stopwords.words('english'))
    stopwords_english.update(string.punctuation)
    stopwords_english.update(["''", "``", "—", "…", "...", "--", ".."])

    # BUG FIX: the previous check `word not in string.digits` only caught
    # contiguous substrings of "0123456789" (e.g. "12" but not "21" or
    # "100"); use isdigit() to drop every purely numeric token.
    all_words_clean = [
        word for word in all_words
        if word not in stopwords_english and not word.isdigit()
    ]

    # Add simple unigram word features
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_clean,
                                                       min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats,
                                       unigrams=unigram_feats)

    # Apply features to obtain a feature-value representation of our datasets
    training_set = sentim_analyzer.apply_features(training_docs)
    testing_set = sentim_analyzer.apply_features(testing_docs)

    trainer = NaiveBayesClassifier.train
    classifier = sentim_analyzer.train(trainer, training_set)
    try:
        classifier.show_most_informative_features()
    except AttributeError:
        message = "Your classifier does not provide a show_most_informative_features() method."
        print(message)
        read_write.log_message(message)
    # BUG FIX: the evaluation used to run only inside the AttributeError
    # handler (accidental indentation); evaluate unconditionally, mirroring
    # NLTK's own demo_subjectivity().
    sentim_analyzer.evaluate(testing_set)

    classifier_accuracy_percent = (classify.accuracy(classifier,
                                                     testing_set)) * 100
    message_acc = 'Accuracy of classifier = ' + str(
        classifier_accuracy_percent) + '%'
    print(message_acc)
    read_write.log_message("[INFO]" + LOG_NAME + message_acc)

    save_file(sentim_analyzer, 'files/sa_subjectivity.pickle')
    message = "sa_subjectivity.pickle file saved."
    print(message)
    read_write.log_message(message)
def main():
    """Train and evaluate a sentiment classifier according to the corpus,
    feature extractor, and classifier selected in the `constants` module.

    NOTE(review): depends on module-level helpers (get_movie_corpus,
    get_political_debates, the extract_* functions) and the global `tagger`
    defined elsewhere in this file.
    """
    global tagger

    # Select the corpus (plain `if`s; the enum comparisons keep the two
    # branches mutually exclusive in practice).
    if constants.corpus == constants.Corpus.movie_review:
        neg_docs, pos_docs = get_movie_corpus()
    if constants.corpus == constants.Corpus.pol_debates:
        neg_docs, pos_docs = get_political_debates()

    # Optionally suffix words inside negation scopes with _NEG.
    if constants.mark_negation:
        neg_docs = [nltk.sentiment.util.mark_negation(doc) for doc in neg_docs]
        pos_docs = [nltk.sentiment.util.mark_negation(doc) for doc in pos_docs]

    # Split between the training set and the testing set (3/4 train, 1/4 test,
    # computed separately per class to keep the split balanced).
    num_train_neg = int(3 / 4 * len(neg_docs))
    num_test_neg = len(neg_docs) - num_train_neg
    num_train_pos = int(3 / 4 * len(pos_docs))
    num_test_pos = len(pos_docs) - num_train_pos

    # NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
    # modern scikit-learn exposes train_test_split via sklearn.model_selection.
    train_neg, test_neg = sklearn.cross_validation.train_test_split(
        neg_docs, train_size=num_train_neg, test_size=num_test_neg)
    train_pos, test_pos = sklearn.cross_validation.train_test_split(
        pos_docs, train_size=num_train_pos, test_size=num_test_pos)

    # Make the final train set and test set
    train_docs = train_pos + train_neg
    test_docs = test_pos + test_neg

    # Set up the Sentiment Analyzer
    analyzer = SentimentAnalyzer()

    # Register the feature extractor selected in constants.
    if constants.feature_extractor == constants.FeatureExtractor.bag_of_words:
        analyzer.add_feat_extractor(extract_bag_of_words_feats)
    if constants.feature_extractor == constants.FeatureExtractor.freq_dist:
        analyzer.add_feat_extractor(extract_freq_dist)
    elif constants.feature_extractor == constants.FeatureExtractor.unigram:
        all_words = analyzer.all_words(train_docs, labeled=True)
        unigram_features = analyzer.unigram_word_feats(all_words,
                                                       min_freq=1000)
        print("Length of unigram features: %d" % len(unigram_features))
        analyzer.add_feat_extractor(nltk.sentiment.util.extract_unigram_feats,
                                    unigrams=unigram_features)
    elif constants.feature_extractor == constants.FeatureExtractor.bigram_bag_of_words:
        analyzer.add_feat_extractor(extract_sig_bigram_feats)
    elif constants.feature_extractor == constants.FeatureExtractor.adjective_bag_of_words:
        tagger = nltk.tag.HunposTagger(constants.hunpos_english_model)
        analyzer.add_feat_extractor(adjective_bag_of_words)
    elif constants.feature_extractor == constants.FeatureExtractor.pos_bag_of_words:
        tagger = nltk.tag.HunposTagger(constants.hunpos_english_model)
        # NOTE(review): this branch registers adjective_bag_of_words, the same
        # extractor as the adjective branch above — looks like a copy-paste
        # slip; confirm whether a dedicated pos_bag_of_words extractor was
        # intended.
        analyzer.add_feat_extractor(adjective_bag_of_words)

    # Featurize both splits as lists so they can be iterated repeatedly.
    train_feat = list(analyzer.apply_features(train_docs, labeled=True))
    test_feat = list(analyzer.apply_features(test_docs, labeled=True))

    print('train on %d instances, test on %d instances' %
          (len(train_feat), len(test_feat)))

    # Train and evaluate the classifier selected in constants.
    if constants.classifier == constants.Classifier.naive_bays:
        classifier = NaiveBayesClassifier.train(train_feat)
        analyzer.evaluate(test_feat,
                          classifier,
                          accuracy=True,
                          f_measure=True,
                          precision=True,
                          recall=True,
                          verbose=True)
        classifier.show_most_informative_features()
    # elif constants.classifier == constants.Classifier.maxent:
    #     classifier = MaxentClassifier.train(train_feat)
    #     analyzer.evaluate(test_feat, classifier, accuracy=True, f_measure=True, precision=True, recall=True,
    #                       verbose=True)
    #     classifier.show_most_informative_features()
    elif constants.classifier == constants.Classifier.decision_tree:
        classifier = SklearnClassifier(
            DecisionTreeClassifier()).train(train_feat)
        analyzer.evaluate(test_feat,
                          classifier,
                          accuracy=True,
                          f_measure=True,
                          precision=True,
                          recall=True,
                          verbose=True)
    elif constants.classifier == constants.Classifier.linear_svm:
        classifier = SklearnClassifier(LinearSVC()).train(train_feat)
        analyzer.evaluate(test_feat,
                          classifier,
                          accuracy=True,
                          f_measure=True,
                          precision=True,
                          recall=True,
                          verbose=True)
    elif constants.classifier == constants.Classifier.random_forest:
        classifier = SklearnClassifier(
            RandomForestClassifier()).train(train_feat)
        analyzer.evaluate(test_feat,
                          classifier,
                          accuracy=True,
                          f_measure=True,
                          precision=True,
                          recall=True,
                          verbose=True)
    elif constants.classifier == constants.Classifier.logistic:
        classifier = SklearnClassifier(LogisticRegression()).train(train_feat)
        analyzer.evaluate(test_feat,
                          classifier,
                          accuracy=True,
                          f_measure=True,
                          precision=True,
                          recall=True,
                          verbose=True)
Beispiel #18
0
        'Average Recall': recall_avg
    })
    print(results)


# ---------------------------------------------------------------------------
# Script driver: balance every split via random oversampling, preprocess the
# data into token features, then train and evaluate an NLTK NaiveBayes model.
# `randomOversampler`, `preprocessing`, `vader`, and the data_* splits are
# defined earlier in this file.
# ---------------------------------------------------------------------------
data_dev = randomOversampler(data_dev)
data_train = randomOversampler(data_train)
data_devtest = randomOversampler(data_devtest)

# Convert each balanced split into the analyzer's input representation.
train_tokens = preprocessing(data_train)
dev_tokens = preprocessing(data_dev)
devtest_tokens = preprocessing(data_devtest)
test_tokens = preprocessing(data_test)

# Train on train+dev+devtest combined; hold out only the test split.
training_features = train_tokens + dev_tokens + devtest_tokens
test_final = test_tokens  # + devtest_tokens

sentiment_analyzer = SentimentAnalyzer()
trainer = NaiveBayesClassifier.train
classifier = sentiment_analyzer.train(trainer=trainer,
                                      training_set=training_features)
# Evaluating model on training data.
#sentiment_analyzer.evaluate(training_features, classifier)

sentiment_analyzer.evaluate(test_final, classifier)

#vader(data_dev)
#vader(data_train)
#vader(data_devtest)
#vader(data_test)
Beispiel #19
0
import preprocessor as tweet_preprocessor
from nltk.corpus import twitter_samples
from nltk.sentiment import SentimentAnalyzer

# Load NLTK's bundled sample tweets (positive and negative polarity sets).
positive_tweets = twitter_samples.strings("positive_tweets.json")
negative_tweets = twitter_samples.strings("negative_tweets.json")

# Reset the tweet preprocessor to its default settings: strip URLs,
# mentions, hashtags, reserved words, numbers, emoji, and smileys.
tweet_preprocessor.set_options(tweet_preprocessor.OPT.URL,
                               tweet_preprocessor.OPT.MENTION,
                               tweet_preprocessor.OPT.HASHTAG,
                               tweet_preprocessor.OPT.RESERVED,
                               tweet_preprocessor.OPT.NUMBER,
                               tweet_preprocessor.OPT.EMOJI,
                               tweet_preprocessor.OPT.SMILEY,
                               )

# HTML-unescape, then strip the noisy tokens from every tweet.
# NOTE(review): `html` and SentimentIntensityAnalyzerWrapper must be
# imported/defined elsewhere in this file — confirm.
cleaned_postive_tweets = [tweet_preprocessor.clean(html.unescape(x)) for x in positive_tweets]
cleaned_negative_tweets = [tweet_preprocessor.clean(html.unescape(x)) for x in negative_tweets]

# Hold out the tweets from index 4000 onward of each polarity as labeled
# test data (1 = positive, 0 = negative).
test_samples = [(t, 1) for t in cleaned_postive_tweets[4000:]] + [(t, 0) for t in cleaned_negative_tweets[4000:]]

siaw = SentimentIntensityAnalyzerWrapper()
sentim_analyzer = SentimentAnalyzer()

# Evaluate the rule-based VADER wrapper directly on the labeled samples and
# print every metric it reports.
for key, value in sorted(sentim_analyzer.evaluate(test_samples, classifier=siaw).items()):
    print('{0}: {1}'.format(key, value))

print("\n~x~\n")
Beispiel #20
0
def demo_subjectivity(trainer,
                      save_analyzer=False,
                      n_instances=None,
                      output=None):
    """
    Train and evaluate a classifier on Pang and Lee's Subjectivity Dataset
    (5000 subjective and 5000 objective sentences).  Tokens are separated by
    whitespace, so the basic WhitespaceTokenizer suffices for parsing.

    :param trainer: `train` method of a classifier.
    :param save_analyzer: if `True`, pickle the trained SentimentAnalyzer.
    :param n_instances: total number of sentences used for training and
        testing, split evenly between the two categories.
    :param output: optional output file where results are reported.
    """
    from nltk.sentiment import SentimentAnalyzer
    from nltk.corpus import subjectivity

    if n_instances is not None:
        n_instances = int(n_instances / 2)

    labeled_subj = [
        (sent, 'subj')
        for sent in subjectivity.sents(categories='subj')[:n_instances]
    ]
    labeled_obj = [
        (sent, 'obj')
        for sent in subjectivity.sents(categories='obj')[:n_instances]
    ]

    # Split each category separately so train and test keep a balanced,
    # uniform class distribution.
    subj_train, subj_test = split_train_test(labeled_subj)
    obj_train, obj_test = split_train_test(labeled_obj)

    train_docs = subj_train + obj_train
    test_docs = subj_test + obj_test

    analyzer = SentimentAnalyzer()
    negated_words = analyzer.all_words(
        [mark_negation(doc) for doc in train_docs])

    # Negation-aware unigram features, keeping words seen at least 4 times.
    feats = analyzer.unigram_word_feats(negated_words, min_freq=4)
    analyzer.add_feat_extractor(extract_unigram_feats, unigrams=feats)

    # Feature-value representations of both splits.
    train_set = analyzer.apply_features(train_docs)
    eval_set = analyzer.apply_features(test_docs)

    model = analyzer.train(trainer, train_set)
    try:
        model.show_most_informative_features()
    except AttributeError:
        print(
            'Your classifier does not provide a show_most_informative_features() method.'
        )
    results = analyzer.evaluate(eval_set)

    if save_analyzer == True:
        save_file(analyzer, 'sa_subjectivity.pickle')

    if output:
        extr = [f.__name__ for f in analyzer.feat_extractors]
        output_markdown(output,
                        Dataset='subjectivity',
                        Classifier=type(model).__name__,
                        Tokenizer='WhitespaceTokenizer',
                        Feats=extr,
                        Instances=n_instances,
                        Results=results)

    return analyzer
Beispiel #21
0
# 80/20 split of the objective sentences (the subjective split is above).
train_obj_docs = obj_docs[:80]
test_obj_docs = obj_docs[80:100]
training_docs = train_subj_docs+train_obj_docs
testing_docs = test_subj_docs+test_obj_docs
sentim_analyzer = SentimentAnalyzer()
# Mark negation scopes, then collect every token of the training docs.
all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=1)
len(unigram_feats)  # NOTE(review): no-op expression — likely a REPL leftover.

sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
training_set = sentim_analyzer.apply_features(training_docs)
test_set = sentim_analyzer.apply_features(testing_docs)
trainer = NaiveBayesClassifier.train
classifier = sentim_analyzer.train(trainer, training_set)

# Print every evaluation metric (accuracy, precision, recall, F-measure).
for key,value in sorted(sentim_analyzer.evaluate(test_set).items()):
    print('{0}: {1}'.format(key, value))

    from nltk.sentiment.vader import SentimentIntensityAnalyzer
sentences = ["VADER is smart, handsome, and funny.", # positive sentence example
    "VADER is smart, handsome, and funny!", # punctuation emphasis handled correctly (sentiment intensity adjusted)
    "VADER is very smart, handsome, and funny.",  # booster words handled correctly (sentiment intensity adjusted)
    "VADER is VERY SMART, handsome, and FUNNY.",  # emphasis for ALLCAPS handled
    "VADER is VERY SMART, handsome, and FUNNY!!!",# combination of signals - VADER appropriately adjusts intensity
    "VADER is VERY SMART, really handsome, and INCREDIBLY FUNNY!!!",# booster words & punctuation make this close to ceiling for score
    "The book was good.",         # positive sentence
    "The book was kind of good.", # qualified positive sentence is handled correctly (intensity adjusted)
    "The plot was good, but the characters are uncompelling and the dialog is not great.", # mixed negation sentence
    "A really bad, horrible book.",       # negative sentence with booster words
    "At least it isn't a horrible book.", # negated negative sentence with contraction
    ":) and :D",     # emoticons handled
class QuoteFinder:
    def __init__(self):
        """Load all precomputed JSON artifacts and search structures.

        NOTE(review): relies on helpers defined elsewhere in this file
        (read_file, compute_doc_norms, quote_pruner) and on data files
        produced offline under jsons/.
        """
        self.sentim_analyzer = SentimentAnalyzer()
        self.genre_dict = read_file("jsons/movie_genre_quote_dict_2.json")
        context_file = "jsons/final_context.json"
        movie_file = "jsons/final_movies.json"
        quote_file = "jsons/final_quotes.json"
        year_rating_file = "jsons/final_year_rating.json"

        # Parallel lists: context[i], movies[i], quotes[i] describe quote i.
        self.context = read_file(context_file)
        self.movies = read_file(movie_file)
        self.quotes = read_file(quote_file)
        self.year_rating_dict = read_file(year_rating_file)

        # Reincode to unicode
        for i in range(len(self.context)):
            self.context[i] = self.context[i].encode("utf-8").decode("utf-8")
            self.movies[i] = self.movies[i].encode("utf-8").decode("utf-8")
            self.quotes[i] = self.quotes[i].encode("utf-8").decode("utf-8")

        self.context, self.quotes, self.movies = quote_pruner(self.context, self.quotes, self.movies)

        # Inverted index (term -> [(quote_id, tf), ...]) and idf weights.
        self.inverted_index = read_file("jsons/f_inverted_index.json")
        self.idf = read_file("jsons/f_idf.json")

        # Initialize query tokenizer
        self.tokenizer = TreebankWordTokenizer()
        # Compute document norms
        self.norms = compute_doc_norms(self.inverted_index, self.idf, len(self.context))

        word_co_filename = "jsons/word_co.json"
        word_count_filename = "jsons/word_count_dict.json"
        pmi_dict_filename = "jsons/pmi_dict.json"
        # Read files
        self.word_co = read_file(word_co_filename)
        self.word_count_dict = read_file(word_count_filename)
        self.pmi_dict = read_file(pmi_dict_filename)

    def find_basic_cooccurence(self, word_list):
        """Build the initial word co-occurrence map from contexts and quotes.

        Arguments
        =========

        word_list: the list of words which are in our movie space

        Returns
        =======

        word_co, word_count_dict : a dict-based co-occurrence "matrix" and a
        per-word occurrence counter
        """
        # Get English stop words
        stop_words = stopwords.words('english')

        # Keep only stripped quotes that do not duplicate a context document.
        extra_quotes = []
        for quote in self.quotes:
            stripped = punct_strip(quote)
            if stripped not in self.context:
                extra_quotes.append(stripped)
        documents = self.context + extra_quotes

        # Count, per document, how often each non-stop-word token co-occurs
        # with tokens from the movie-space vocabulary.
        word_co = defaultdict(list)
        word_count_dict = defaultdict(int)
        for document in documents:
            tokens = self.tokenizer.tokenize(document)
            for pos, token in enumerate(tokens):
                if token in stop_words:
                    continue
                word_count_dict[token] += 1
                for other_pos, other in enumerate(tokens):
                    if other_pos != pos and other in word_list:
                        word_co[token] = update_word_counts(word_co[token], other)
        return word_co, word_count_dict

    def update_cooccurence(self, word_co_old, word_count_dict_old, word_list, docs):
        """Fold a new batch of documents into the co-occurrence statistics.

        Arguments
        =========

        word_co_old: existing co-occurrence dict/matrix

        word_count_dict_old: existing per-word occurrence counter

        word_list: the list of words which are in our movie space

        docs: a list of new documents to incorporate

        Returns
        =======

        word_co, word_count_dict : updated co-occurrence dict and counter
        """
        # Get English stop words
        stop_words = stopwords.words('english')

        # Seed fresh defaultdicts with the previous state so missing keys
        # still get sensible defaults.
        word_co = defaultdict(list)
        word_co.update(word_co_old)
        word_count_dict = defaultdict(int)
        word_count_dict.update(word_count_dict_old)

        # Same per-document counting scheme as find_basic_cooccurence.
        for document in docs:
            tokens = self.tokenizer.tokenize(punct_strip(document))
            for pos, token in enumerate(tokens):
                if token in stop_words:
                    continue
                word_count_dict[token] += 1
                for other_pos, other in enumerate(tokens):
                    if other_pos != pos and other in word_list:
                        word_co[token] = update_word_counts(word_co[token], other)
        return word_co, word_count_dict

    def query_vectorize(self, q, sw=False):
        """Turn a raw query string into a tf-idf vector and its L2 norm.

        q: the query text; sw: when True, drop English stop words.
        Returns (query_tfidf, query_norm).
        """
        # Lowercase, normalize the encoding, and strip punctuation.
        normalized = punct_strip(q.lower().encode("utf-8").decode("utf-8"))
        terms = self.tokenizer.tokenize(normalized)

        stop_words = stopwords.words('english')  # Get English stop words
        if (sw):
            terms = [t for t in terms if t not in stop_words]

        # Raw term counts, then scaled by idf; unknown terms get weight 0.
        tfidf = defaultdict(int)
        for term in terms:
            tfidf[term] += 1
        for term in tfidf:
            if term in self.idf:
                tfidf[term] *= self.idf[term]
            else:
                tfidf[term] = 0

        # L2 norm of the weighted vector.
        norm = 0
        for term in tfidf:
            norm += math.pow(tfidf[term], 2)
        norm = math.sqrt(norm)

        return tfidf, norm

    def pseudo_rocchio(self, query_tfidf, query_norm, relevant, sw=False, a=.3, b=.4, clip=True):
        """Pseudo-relevance-feedback Rocchio update of the query vector.

        Arguments:
            query_tfidf: dict tf-idf vector of the query.
                WARNING: mutated in place (normalized and scaled by `a`).

            query_norm: L2 norm of query_tfidf

            relevant: a list of (score, index) tuples for the documents
                assumed relevant

            sw: unused in this method; kept for signature compatibility

            a, b: floats, weights for the original query and the relevant
                documents respectively

            clip: boolean, whether or not to clip all returned negative
                values to 0

        Returns:
            q_mod: a dict representing the modified query vector; with
            clip=True it contains no negative weights.
        """

        # Keep only the document indices of the relevant (score, index) pairs.
        relevant_id = []
        for s, i in relevant:
            relevant_id.append(i)

        # Degenerate query: fall back to a random quote.
        if query_norm == 0:
            return self.find_random()

        # Calculate alpha*query_vec
        # NOTE: query_vec aliases query_tfidf, so the caller's dict is
        # normalized and scaled in place here.
        query_vec = query_tfidf
        for word in query_vec:
            query_vec[word] /= query_norm
            query_vec[word] *= a

        # Get words in relevant docs
        relevant_words = []
        relevant_context = []
        for i in relevant_id:
            relevant_context.append(self.context[i])
        for context in relevant_context:
            context_tkns = self.tokenizer.tokenize(context)
            for tkn in context_tkns:
                if tkn not in relevant_words:
                    relevant_words.append(tkn)

        # Collect relevant doc vector sums (norm-scaled term frequencies).
        relevant_docs = defaultdict(int)
        for word in relevant_words:
            if word in self.inverted_index:
                for quote_id, tf in self.inverted_index[word]:
                    if quote_id in relevant_id:
                        relevant_docs[word] += (tf / self.norms[quote_id])

        # Calculate beta term
        beta_term = b * (1.0 / len(relevant))
        for key in relevant_docs:
            relevant_docs[key] *= beta_term

        # Sum query and relevant
        q_mod = {k: query_vec.get(k, 0) + relevant_docs.get(k, 0.0) for k in set(query_vec) | set(relevant_docs)}

        # negative checks for terms, if clip
        if (clip):
            for key in q_mod:
                if q_mod[key] < 0:
                    q_mod[key] = 0
            return q_mod
        else:
            return q_mod

    def find_random(self):
        r = random.randint(0, len(self.quotes))
        return [[self.quotes[r], self.movies[r], self.context[r]]]

    def find_similar(self, query):
        query_words = self.tokenizer.tokenize(query)
        query_tfidf = defaultdict(int)
        for word in query_words:
            query_tfidf[word] += 1
        for word in query_tfidf:
            if word in self.idf:
                query_tfidf[word] *= self.idf[word]
            else:
                query_tfidf[word] = 0
        query_norm = 0
        for word in query_tfidf:
            query_norm += math.pow(query_tfidf[word], 2)
        query_norm = math.sqrt(query_norm)

        if query_norm == 0:
            return self.find_random()

        scores = [0 for _ in self.quotes]
        for word in query_tfidf:
            if word in self.inverted_index:
                for quote_id, tf in self.inverted_index[word]:
                    scores[quote_id] += query_tfidf[word] * tf * self.idf[word]

        results = []
        for i, s in enumerate(scores):
            if self.norms[i] != 0:
                results.append((s / (self.norms[i] * query_norm), i))

        top_res_num = 5
        results.sort(reverse=True)
        return [[self.quotes[i], self.movies[i], self.context[i]] for _, i in results[:top_res_num]]

    def find_final(self, q, rocchio=True, pseudo_rocchio_num=5, sw=False, pmi_num=8, ml=False):
        """
        Arguments:
            q: a string representing the query

            rocchio: whether to apply pseudo relevance feedback with Rocchio

            pseudo_rocchio_num: number of top documents treated as relevant
                for the Rocchio update

            sw: whether to remove stop words from the query

            pmi_num: number of PMI-related terms used to expand the query

            ml: whether to re-weight scores with genre predictions (find_ml)

        Returns:
            result_quotes: a list of top [quote, movie, context] results
        """

        # Vectorize query
        query_tfidf, query_norm = self.query_vectorize(q, sw)

        # Degenerate query: no informative terms -> return a random quote.
        if query_norm == 0:
            return self.find_random()

        # Expand query using PMI
        # http://www.jofcis.com/publishedpapers/2011_7_1_17_24.pdf
        pmi_expansion = defaultdict(float)
        pmi_norm = 1
        for word in query_tfidf:  # Sum PMI lists
            if word in self.pmi_dict.keys():
                pmi_list = self.pmi_dict[word][:pmi_num]
                pmi_score_list = []
                # Use a distinct loop variable; the original shadowed `word`.
                for exp_word, score in pmi_list:
                    pmi_expansion[exp_word] += score
                    pmi_score_list.append(score)
                temp_norm = 0
                for s in pmi_score_list:
                    temp_norm += math.pow(s, 2)
                # BUG FIX: the norm of the PMI scores was computed from
                # query_norm instead of the accumulated temp_norm.
                temp_norm = math.sqrt(temp_norm)
                pmi_norm *= temp_norm
        query_tfidf.update(pmi_expansion)
        query_norm = query_norm * 2 * pmi_num * pmi_norm

        # Recompute the exact query norm after expansion (overwrites the
        # rough estimate above).
        query_norm = 0
        for word in query_tfidf:
            query_norm += math.pow(query_tfidf[word], 2)
        query_norm = math.sqrt(query_norm)

        # Get scores
        scores = [0 for _ in self.quotes]
        for word in query_tfidf:
            if word in self.inverted_index:
                for quote_id, tf in self.inverted_index[word]:
                    scores[quote_id] += query_tfidf[word] * tf * self.idf[word]

        results = []
        for i, s in enumerate(scores):
            if self.norms[i] != 0:
                results.append((s / (self.norms[i] * query_norm), i))

        # Weight scores with year and rating
        for i in range(len(results)):
            score = results[i][0]
            index = results[i][1]
            # BUG FIX: look the movie up by the quote index stored in the
            # result tuple, not by the position in the results list.
            year = self.year_rating_dict[self.movies[index]][0]
            rating = self.year_rating_dict[self.movies[index]][1]
            results[i] = (year_rating_weight(float(year), float(rating), score), index)

        # sort results
        results.sort(reverse=True)

        if rocchio:
            # Do pseudo-relevance feedback with Rocchio
            mod_query = self.pseudo_rocchio(query_tfidf, query_norm, results[:pseudo_rocchio_num], sw)
            mod_query_norm = 0
            for word in mod_query:
                mod_query_norm += math.pow(mod_query[word], 2)
            mod_query_norm = math.sqrt(mod_query_norm)

            # Re-find scores and reweight with year and rating
            scores = [0 for _ in self.quotes]
            for word in mod_query:
                if word in self.inverted_index:
                    for quote_id, tf in self.inverted_index[word]:
                        scores[quote_id] += mod_query[word] * tf * self.idf[word]

            results = []
            for i, s in enumerate(scores):
                if self.norms[i] != 0:
                    results.append((s / (self.norms[i] * mod_query_norm), i))

            d_score_updates = {}
            if ml is True:
                d_score_updates = self.find_ml(q)

            # Weight scores with year and rating
            for i in range(len(results)):
                score = results[i][0]
                index = results[i][1]
                # BUG FIX: same index-vs-position fix as above.
                year = self.year_rating_dict[self.movies[index]][0]
                rating = self.year_rating_dict[self.movies[index]][1]
                results[i] = (year_rating_weight(float(year), float(rating), score), index)
                if ml is True and index in d_score_updates:
                    results[i] = (results[i][0]*0.9 + d_score_updates[index], results[i][1])

        # Sort and collect the top unique quotes.
        top_res_num = 5
        results.sort(reverse=True)
        used_quotes = []
        return_res = []
        # BUG FIX: the original while-loop never advanced its counter after
        # accepting a quote and could index past the end of `results`; walk
        # the ranked list once, skipping duplicate quotes.
        for score, i in results:
            if len(return_res) == top_res_num:
                break
            if self.quotes[i] not in used_quotes:
                used_quotes.append(self.quotes[i])
                return_res.append((score, i))

        result_quotes = [[self.quotes[i], self.movies[i], self.context[i]] for _, i in
                         return_res]
        return result_quotes

    def sentiment_analysis(self, td):
        """Estimate the genre affinity of a tokenized query.

        td: a tokenized document (list of tokens).  Trains a NaiveBayes
        genre labeler over pickled training docs and returns a list of
        (genre, precision) pairs from evaluating `td` under each genre label.
        """
        # NOTE(review): this pickle is loaded but immediately recomputed
        # below from training_docs; kept for parity with the original flow.
        with open('jsons/all_words_neg.pickle', 'rb') as f:
            all_words_neg = pickle.load(f)

        with open('jsons/training_docs.pickle', 'rb') as f:
            training_docs = pickle.load(f)

        genres = ['action', 'crime', 'comedy', 'drama']
        # Pair the query with each candidate genre for evaluation.
        testing_docs = [(td, genre) for genre in genres]

        # BUG FIX: the original computed all_words_neg twice back-to-back;
        # compute it once.
        all_words_neg = self.sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
        unigram_feats = self.sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
        self.sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
        training_set = self.sentim_analyzer.apply_features(training_docs)
        test_set = self.sentim_analyzer.apply_features(testing_docs)

        trainer = NaiveBayesClassifier.train

        classifier = self.sentim_analyzer.train(trainer, training_set)

        genre_accuracy = []

        # Collect the per-genre precision values from the evaluation metrics.
        for key, value in sorted(self.sentim_analyzer.evaluate(test_set).items()):
            if key == 'Precision [action]':
                genre_accuracy.append(('action', value))
            if key == 'Precision [comedy]':
                genre_accuracy.append(('comedy', value))
            if key == 'Precision [drama]':
                genre_accuracy.append(('drama', value))
            if key == 'Precision [crime]':
                genre_accuracy.append(('crime', value))

        return genre_accuracy

    # Takes in a query
    # Outputs a dictionary of movie indices movies to weights where weight is to be added to all quote scores of movies
    def find_ml(self, td):
        """Map each movie index to a score bonus derived from predicted genres.

        td: the raw query string; it is tokenized and passed through
        sentiment_analysis(), whose per-genre precision values weight every
        movie whose genre metadata matches.

        Returns: dict of movie index -> additive score bonus.
        """
        f_tokenizer = TreebankWordTokenizer()
        query_words = f_tokenizer.tokenize(td)
        genres = self.sentiment_analysis(query_words)

        # Keep only genres that received a defined precision value.
        weighted_genres = []
        genre_weights = {}
        for genre_name, weight in genres:
            if weight is not None:
                weighted_genres.append(genre_name)
                genre_weights[genre_name] = weight

        d_score_updates = {}
        # enumerate() replaces self.movies.index(movie), which was O(n) per
        # movie and wrong for duplicate titles.
        for movie_idx, movie in enumerate(self.movies):
            g = self.genre_dict[movie][0]
            total_genre_score = 0
            if u'Comedy' in g and 'comedy' in weighted_genres:
                total_genre_score += genre_weights['comedy']
            if u'Action' in g and 'action' in weighted_genres:
                total_genre_score += genre_weights['action']
            if u'Crime' in g and 'crime' in weighted_genres:
                total_genre_score += genre_weights['crime']
            # BUG FIX: the original tested for the misspelled genre 'drana',
            # so the drama weight was never applied.
            if u'Drama' in g and 'drama' in weighted_genres:
                total_genre_score += genre_weights['drama']
            d_score_updates[movie_idx] = total_genre_score * .1

        return d_score_updates