def train():
    positive_tweets = read_tweets('positive.txt', 'positive')
    negative_tweets = read_tweets('negative.txt', 'negative')
    print(len(positive_tweets))
    print(len(negative_tweets))
    # Train on the full labelled set; the 80/20 test split below is disabled.
    pos_train = positive_tweets[:len(positive_tweets)]
    neg_train = negative_tweets[:len(negative_tweets)]
    # pos_test = positive_tweets[len(positive_tweets) * 80 // 100 + 1:]
    # neg_test = negative_tweets[len(negative_tweets) * 80 // 100 + 1:]
    training_data = pos_train + neg_train
    # test_data = pos_test + neg_test
    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_data])
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    print(len(unigram_feats))
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    training_set = sentim_analyzer.apply_features(training_data)
    # test_set = sentim_analyzer.apply_features(test_data)
    trainer = NaiveBayesClassifier.train
    sentim_analyzer.train(trainer, training_set)
    # for key, value in sorted(sentim_analyzer.evaluate(test_set).items()):
    #     print('{0}: {1}'.format(key, value))
    # print(sentim_analyzer.classify(tokenize_sentence('I hate driving car at night')))
    return sentim_analyzer
def main(): x, y = load_datasets(["../datasets/sentiment_uci/yelp_labelled.txt"]) stopwords = set() with open('../stopwords.txt', 'r') as f: for w in f: stopwords.add(w.strip()) tok = TweetTokenizer() x = [remove_stopwords(tok.tokenize(s.lower()), stopwords) for s in x] x = np.array(x) accumulate = dict() folds = 10 for train_idx, test_idx in StratifiedKFold(y=y, n_folds=folds, shuffle=True): train_x, train_y = x[train_idx], y[train_idx] test_x, test_y = x[test_idx], y[test_idx] # train_x = [remove_stopwords(tok.tokenize(s), stopwords) for s in train_x] # test_x = [remove_stopwords(tok.tokenize(s), stopwords) for s in test_x] train_docs = [(sent, label) for sent, label in zip(train_x, train_y)] test_docs = [(sent, label) for sent, label in zip(test_x, test_y)] cls = SentimentAnalyzer() # train words_with_neg = cls.all_words([mark_negation(a) for a in train_x]) unigram_feats = cls.unigram_word_feats(words_with_neg) # bigram_feats = cls.bigram_collocation_feats(train_x) cls.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats, handle_negation=True) # cls.add_feat_extractor(extract_bigram_feats, bigrams=bigram_feats) training_set = cls.apply_features(train_docs, labeled=True) cls.train(MaxentClassifier.train, training_set, max_iter=10, trace=0) # test & evaluate test_set = cls.apply_features(test_docs) for key, value in sorted(cls.evaluate(test_set).items()): print('\t{0}: {1}'.format(key, value)) accumulate.setdefault(key, 0.0) accumulate[key] += value if value is not None else 0.0 print("Averages") for key, value in sorted(accumulate.items()): print('\tAverage {0}: {1}'.format(key, value / folds))
def __init__(self):
    # Each document is represented by a tuple (sentence, label).
    n_instances = 100
    subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]]
    obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]]
    # Split subj and obj instances separately to keep a balanced, uniform class
    # distribution in both train and test sets.
    train_subj_docs = subj_docs[:80]
    test_subj_docs = subj_docs[80:100]
    train_obj_docs = obj_docs[:80]
    test_obj_docs = obj_docs[80:100]
    training_docs = train_subj_docs + train_obj_docs
    testing_docs = test_subj_docs + test_obj_docs
    # Train the classifier.
    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
    # Use simple unigram word features, handling negation.
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    # Apply features to obtain a feature-value representation of our datasets.
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)
    self.trainer = NaiveBayesClassifier.train
    self.classifier = sentim_analyzer.train(self.trainer, training_set)
    for key, value in sorted(sentim_analyzer.evaluate(test_set).items()):
        print('{0}: {1}'.format(key, value))
    self.sid = SentimentIntensityAnalyzer()
def trainSubjectivity(): # Subjective vs. objective sentence classifier. Borrows from NLTK Documentation. # Plan on using it in larger machine learning sentiment model as pre-processing # Must differentiate between objective and subjective subjDocs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')] objDocs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')] nSubj = len(subjDocs) nObj = len(objDocs) # 90% Training, 10% Test subjTrain = int(.9 * nSubj) objTrain = int(.9 * nObj) trainSubj = subjDocs[:subjTrain] testSubj = subjDocs[subjTrain:nSubj] trainObj = objDocs[:objTrain] testObj = objDocs[objTrain:nObj] trainDocs = trainSubj + trainObj testDocs = testSubj + testObj # Create sentiment class, mark negation, create features (unigram) sentiment = SentimentAnalyzer() markNegation = sentiment.all_words([mark_negation(doc) for doc in trainDocs]) unigramFeats = sentiment.unigram_word_feats(markNegation, min_freq=4) sentiment.add_feat_extractor(extract_unigram_feats, unigrams=unigramFeats) training = sentiment.apply_features(trainDocs) testing = sentiment.apply_features(testDocs) # Train classifier trainer = NaiveBayesClassifier.train subjectivityClassifier = sentiment.train(trainer, training) joblib.dump(subjectivityClassifier, 'subjectivity.pkl') for key, value in sorted(sentiment.evaluate(testing).items()): print('{0}: {1}'.format(key, value)) '''
def sentiment_classifier(df): df = df.copy() # prepping data df = df[['txgot_binary', 'Convo_1']].dropna() text_process_col = pre.process_corpus(np.asarray(df['Convo_1']), []) txgot_col = np.asarray(df['txgot_binary']) # turns into list of tuples (convo, label) docs = list(zip(text_process_col, txgot_col)) shuffle(docs) training_docs = docs[:int(len(docs) * 2 / 3)] test_docs = docs[int(len(docs) * 2 / 3):] # sentiment analyzer sentim_analyzer = SentimentAnalyzer() # simple unigram word features, handling negation all_words_neg = sentim_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # train classifier training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(test_docs) trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) # show results for key, value in sentim_analyzer.evaluate(test_set).items(): print('{}: {}'.format(key, value))
def train():
    subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]]
    obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]]
    train_subj_docs = subj_docs[:80]
    test_subj_docs = subj_docs[80:100]
    train_obj_docs = obj_docs[:80]
    test_obj_docs = obj_docs[80:100]
    training_docs = train_subj_docs + train_obj_docs
    testing_docs = test_subj_docs + test_obj_docs
    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)
    trainer = NaiveBayesClassifier.train
    classifier = sentim_analyzer.train(trainer, training_set)
    for key, value in sorted(sentim_analyzer.evaluate(test_set).items()):
        print('{0}: {1}'.format(key, value))
def train():
    positive_tweets = read_tweets('/root/295/new/positive.txt', 'positive')
    negative_tweets = read_tweets('/root/295/new/negative.txt', 'negative')
    print(len(positive_tweets))
    print(len(negative_tweets))
    # pos_train = positive_tweets[:2000]
    # neg_train = negative_tweets[:2000]
    # pos_test = positive_tweets[2001:3000]
    # neg_test = negative_tweets[2001:3000]
    # 80/20 train/test split (integer division keeps the slice indices ints).
    pos_train = positive_tweets[:len(positive_tweets) * 80 // 100]
    neg_train = negative_tweets[:len(negative_tweets) * 80 // 100]
    pos_test = positive_tweets[len(positive_tweets) * 80 // 100 + 1:]
    neg_test = negative_tweets[len(negative_tweets) * 80 // 100 + 1:]
    training_data = pos_train + neg_train
    test_data = pos_test + neg_test
    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_data])
    # print(all_words_neg)
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    # print(unigram_feats)
    print(len(unigram_feats))
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    training_set = sentim_analyzer.apply_features(training_data)
    test_set = sentim_analyzer.apply_features(test_data)
    print(test_set)
    trainer = NaiveBayesClassifier.train
    classifier = sentim_analyzer.train(trainer, training_set)
    for key, value in sorted(sentim_analyzer.evaluate(test_set).items()):
        print('{0}: {1}'.format(key, value))
    print(sentim_analyzer.classify(tokenize_sentance('I hate driving car at night')))
    return sentim_analyzer
def nb_cv(cleaned_df): # Get Features training_set = get_nb_features(cleaned_df) # Get 10-Fold. Important: Shuffle=True cv = KFold(n_splits=10, random_state=0, shuffle=True) # Model sentiment_analyzer = SentimentAnalyzer() trainer = NaiveBayesClassifier.train # Store Result Accuracy = [] # For each fold, train model, evaluate for train_index, test_index in cv.split(training_set): classifier = sentiment_analyzer.train( trainer, np.array(training_set)[train_index].tolist()) truth_list = np.array(training_set)[test_index].tolist() performance = sentiment_analyzer.evaluate(truth_list, classifier) Accuracy.append(performance['Accuracy']) '''## Can add all other measures here. Sample Result as below: {'Accuracy': 0.525, 'Precision [negative]': 0.28337874659400547, 'Recall [negative]': 0.7272727272727273, 'F-measure [negative]': 0.407843137254902, 'Precision [neutral]': 0.5011933174224343, 'Recall [neutral]': 0.30837004405286345, 'F-measure [neutral]': 0.38181818181818183, 'Precision [positive]': 0.7461629279811098, 'Recall [positive]': 0.611810261374637, 'F-measure [positive]': 0.672340425531915} ''' return np.mean(np.asarray(Accuracy))
def sentiment_analysis(self, testing_data, training_data=None):
    if training_data is None:
        training_data = self.training_data
    ## Apply sentiment analysis to the data to extract new "features"
    # Initialize sentiment analyzer object
    sentiment_analyzer = SentimentAnalyzer()
    # Mark negated words in the training data and collect the vocabulary
    all_negative_words = sentiment_analyzer.all_words([mark_negation(data) for data in training_data])
    unigram_features = sentiment_analyzer.unigram_word_feats(all_negative_words, min_freq=4)
    sentiment_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_features)
    training_final = sentiment_analyzer.apply_features(training_data)
    testing_final = sentiment_analyzer.apply_features(testing_data)
    ## Train and test the model
    model = NaiveBayesClassifier.train
    classifier = sentiment_analyzer.train(model, training_final)
    for key, value in sorted(sentiment_analyzer.evaluate(testing_final).items()):
        print("{0}: {1}".format(key, value))
def runSentanal(train, test):
    sentanal = SentimentAnalyzer()
    all_words_neg = sentanal.all_words([mark_negation(doc) for doc in train])
    unigramFeats = sentanal.unigram_word_feats(all_words_neg, min_freq=4)
    sentanal.add_feat_extractor(extract_unigram_feats, unigrams=unigramFeats, handle_negation=True)
    # bigramFeats = sentanal.
    # sentanal.add_feat_extractor(extract_bigram_feats, bigrams=bigramFeats)
    trainList = sentanal.apply_features(train)
    testList = sentanal.apply_features(test)
    trainer = NaiveBayesClassifier.train
    classifier = sentanal.train(trainer, trainList)
    classifier.show_most_informative_features()
    # creates array for storing values
    values = []
    # display results
    for key, value in sorted(sentanal.evaluate(testList).items()):
        print('{0}: {1}'.format(key, value))
        values.append(value)
    # write results to csv
    with open(OUTPUT_CSV, mode='a') as csvFile:
        writer = csv.writer(csvFile, delimiter=',')
        writer.writerow(values)
def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None): """ Train and test a classifier on instances of the Subjective Dataset by Pang and Lee. The dataset is made of 5000 subjective and 5000 objective sentences. All tokens (words and punctuation marks) are separated by a whitespace, so we use the basic WhitespaceTokenizer to parse the data. :param trainer: `train` method of a classifier. :param save_analyzer: if `True`, store the SentimentAnalyzer in a pickle file. :param n_instances: the number of total sentences that have to be used for training and testing. Sentences will be equally split between positive and negative. :param output: the output file where results have to be reported. """ from nltk.sentiment import SentimentAnalyzer from nltk.corpus import subjectivity if n_instances is not None: n_instances = int(n_instances/2) subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]] # We separately split subjective and objective instances to keep a balanced # uniform class distribution in both train and test sets. train_subj_docs, test_subj_docs = split_train_test(subj_docs) train_obj_docs, test_obj_docs = split_train_test(obj_docs) training_docs = train_subj_docs+train_obj_docs testing_docs = test_subj_docs+test_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs]) # Add simple unigram word features handling negation unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # Apply features to obtain a feature-value representation of our datasets training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) classifier = sentim_analyzer.train(trainer, training_set) try: classifier.show_most_informative_features() except AttributeError: print('Your classifier does not provide a show_most_informative_features() method.') results = sentim_analyzer.evaluate(test_set) if save_analyzer == True: save_file(sentim_analyzer, 'sa_subjectivity.pickle') if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown(output, Dataset='subjectivity', Classifier=type(classifier).__name__, Tokenizer='WhitespaceTokenizer', Feats=extr, Instances=n_instances, Results=results) return sentim_analyzer
def train_lr(training_set):
    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_set])
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    training_set = sentim_analyzer.apply_features(training_set)
    trainer = logreg.train
    classifier = sentim_analyzer.train(trainer, training_set)
    return [sentim_analyzer, classifier]
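A minimal usage sketch for train_lr above, not part of the original snippet: it assumes logreg is an sklearn-backed NLTK wrapper such as SklearnClassifier(LogisticRegression()), and that documents are (token_list, label) tuples like the movie_reviews documents used elsewhere in this file.

# Hedged usage sketch; train_lr and logreg are assumed to be defined as above.
from nltk.corpus import movie_reviews

docs = ([(list(movie_reviews.words(f)), 'pos') for f in movie_reviews.fileids('pos')[:100]]
        + [(list(movie_reviews.words(f)), 'neg') for f in movie_reviews.fileids('neg')[:100]])
analyzer, classifier = train_lr(docs)
# The analyzer keeps the unigram feature extractor, so it can label new token lists.
print(analyzer.classify("a gripping , beautifully acted film".split()))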
def demo_movie_reviews(trainer, n_instances=None, output=None): """ Train classifier on all instances of the Movie Reviews dataset. The corpus has been preprocessed using the default sentence tokenizer and WordPunctTokenizer. Features are composed of: - most frequent unigrams :param trainer: `train` method of a classifier. :param n_instances: the number of total reviews that have to be used for training and testing. Reviews will be equally split between positive and negative. :param output: the output file where results have to be reported. """ from nltk.corpus import movie_reviews from nltk.sentiment import SentimentAnalyzer if n_instances is not None: n_instances = int(n_instances/2) pos_docs = [(list(movie_reviews.words(pos_id)), 'pos') for pos_id in movie_reviews.fileids('pos')[:n_instances]] neg_docs = [(list(movie_reviews.words(neg_id)), 'neg') for neg_id in movie_reviews.fileids('neg')[:n_instances]] # We separately split positive and negative instances to keep a balanced # uniform class distribution in both train and test sets. train_pos_docs, test_pos_docs = split_train_test(pos_docs) train_neg_docs, test_neg_docs = split_train_test(neg_docs) training_docs = train_pos_docs+train_neg_docs testing_docs = test_pos_docs+test_neg_docs sentim_analyzer = SentimentAnalyzer() all_words = sentim_analyzer.all_words(training_docs) # Add simple unigram word features unigram_feats = sentim_analyzer.unigram_word_feats(all_words, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # Apply features to obtain a feature-value representation of our datasets training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) classifier = sentim_analyzer.train(trainer, training_set) try: classifier.show_most_informative_features() except AttributeError: print('Your classifier does not provide a show_most_informative_features() method.') results = sentim_analyzer.evaluate(test_set) if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown(output, Dataset='Movie_reviews', Classifier=type(classifier).__name__, Tokenizer='WordPunctTokenizer', Feats=extr, Results=results, Instances=n_instances)
def getTrainer(): if (LOAD == False): dic = getData("/home/cioni/git/sentimentw/inputFolder/positive.csv", "/home/cioni/git/sentimentw/inputFolder/negative.csv", 5000) train = dic snt = SentimentAnalyzer() wrds = snt.all_words(dic, True) feat = snt.unigram_word_feats(wrds, min_freq=3) snt.add_feat_extractor(nltk.sentiment.util.extract_unigram_feats, unigrams=feat) train = snt.apply_features(train) trainer = NaiveBayesClassifier.train classifier = snt.apply_features(train, True) snt.train(trainer, train) clFile = open("classifierSmall2.pickle", "wb+") pickle.dump(snt, clFile) return snt else: load_cls = open("classifier.pickle", "rb") snt = pickle.load(load_cls) return snt
def sentiment_analysis(data): from nltk.classify import NaiveBayesClassifier from nltk.corpus import subjectivity from nltk.sentiment import SentimentAnalyzer from nltk.sentiment.util import * n_instances = 100 subj_docs = [ (sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances] ] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]] train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:100] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:100] training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) for key, value in sorted(sentim_analyzer.evaluate(test_set).items()): print('{0}: {1}'.format(key, value)) from nltk.sentiment.vader import SentimentIntensityAnalyzer from nltk import tokenize sid = SentimentIntensityAnalyzer() for line in data: ss = sid.polarity_scores(line['line_text']) line['compound'] = ss['compound'] line['neg'] = ss['neg'] line['pos'] = ss['pos'] line['neu'] = ss['neu']
def train_sentiment():
    instances = 8000
    subj = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:instances]]
    obj = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:instances]]
    train_subj = subj
    train_obj = obj
    train_set = train_subj + train_obj
    sentiment = SentimentAnalyzer()
    all_neg = sentiment.all_words([mark_negation(doc) for doc in train_set])
    uni_g = sentiment.unigram_word_feats(all_neg, min_freq=4)
    sentiment.add_feat_extractor(extract_unigram_feats, unigrams=uni_g)
    trained_set = sentiment.apply_features(train_set)
    nb = NaiveBayesClassifier.train
    classifier = sentiment.train(nb, trained_set)
    return classifier
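A hedged usage sketch for train_sentiment, not from the source: the subjectivity corpus ships 5000 sentences per class, so the [:8000] slices simply take everything, and the returned NaiveBayesClassifier can report its strongest features.

# Usage sketch only; training on all 10,000 subjectivity sentences takes a while.
subj_obj_classifier = train_sentiment()
subj_obj_classifier.show_most_informative_features(10)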
def run_sa_mov(train, test):
    a = SentimentAnalyzer()
    tr = NaiveBayesClassifier.train
    all_words = a.all_words(train)
    # Add simple unigram word features
    unigram_feats = a.unigram_word_feats(all_words, min_freq=4)
    a.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    # Apply features to obtain a feature-value representation of our datasets
    tr_set = a.apply_features(train)
    test_set = a.apply_features(test)
    # Training
    clf = a.train(tr, tr_set)
    res = a.evaluate(test_set)
    print(res)
def NB(df_train, df_dev): # Feature extraction # n=1200 df_train['clean_text'] = df_train['clean_text'].apply( lambda x: stem_stop(x)) df_dev['clean_text'] = df_dev['clean_text'].apply(lambda x: stem_stop(x)) df_pos_train = df_train[df_train['tweet_sentiment'] == 'positive'] # df_pos_train= df_pos_train.sample(n=n, random_state=1) pos_tweets = df_pos_train['clean_text'].tolist() df_neg_train = df_train[df_train['tweet_sentiment'] == 'negative'] # df_neg_train= df_neg_train.sample(n=n, random_state=1) neg_tweets = df_neg_train['clean_text'].tolist() df_neutral_train = df_train[df_train['tweet_sentiment'] == 'neutral'] # df_neutral_train= df_neutral_train.sample(n=n, random_state=1) neutral_tweets = df_neutral_train['clean_text'].tolist() positive_featuresets = [(features(tweet), 'positive') for tweet in pos_tweets] negative_featuresets = [(features(tweet), 'negative') for tweet in neg_tweets] neutral_featuresets = [(features(tweet), 'neutral') for tweet in neutral_tweets] training_features = positive_featuresets + negative_featuresets + neutral_featuresets ngram_vectorizer = CountVectorizer(analyzer='word', binary=True, lowercase=False, ngram_range=(1, 2)) # train the model sentiment_analyzer = SentimentAnalyzer() trainer = NaiveBayesClassifier.train classifier = sentiment_analyzer.train(trainer, training_features) truth_list = list(df_dev[['clean_text', 'tweet_sentiment']].itertuples(index=False, name=None)) # test the model for i, (text, expected) in enumerate(truth_list): text_feats = features(text) truth_list[i] = (text_feats, expected) re = sentiment_analyzer.evaluate(truth_list, classifier) print(re) return classifier
def subjectivity_classifier(): from nltk.classify import NaiveBayesClassifier from nltk.corpus import subjectivity from nltk.sentiment import SentimentAnalyzer from nltk.sentiment.util import * """ Initializes and trains categorical subjectivity analyzer """ N_INSTANCES = 100 subj_docs = [ (sent, 'subj') for sent in subjectivity.sents(categories='subj')[:N_INSTANCES] ] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:N_INSTANCES]] train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:] training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs sent_analyzer = SentimentAnalyzer() all_words_neg = sent_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = sent_analyzer.unigram_word_feats(all_words_neg, min_freq=4) print(f"unigram feats: {len(unigram_feats)}") sent_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sent_analyzer.apply_features(training_docs) test_set = sent_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = sent_analyzer.train(trainer, training_set) for k, v in sorted(sent_analyzer.evaluate(test_set).items()): print(f"{k}: {v}") return sent_analyzer
def get_nltk_NB(NEG_DATA, POS_DATA, num_train):
    train_neg, test_neg = get_nltk_train_test(NEG_DATA, 'neg', num_train)
    train_pos, test_pos = get_nltk_train_test(POS_DATA, 'pos', num_train)
    training_docs = train_neg + train_pos
    testing_docs = test_neg + test_pos
    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)
    trainer = NaiveBayesClassifier.train
    classifier = sentim_analyzer.train(trainer, training_set)
    # results = []
    for key, value in sorted(sentim_analyzer.evaluate(test_set).items()):
        print('{0}: {1}'.format(key, value))
def analyze_sentiment(paragraph): n_instances = 100 subj_docs = [ (sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances] ] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]] train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:100] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:100] training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) sid = SentimentIntensityAnalyzer() total_sum = 0 count = 0.0 sentences = sent_tokenize(paragraph) for sentence in sentences: total_sum += sid.polarity_scores(sentence)["compound"] count += 1 return total_sum * 10 / count
def get_objectivity_analyzer():
    n_instances = 100
    subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]]
    obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]]
    train_subj_docs = subj_docs
    train_obj_docs = obj_docs
    training_docs = train_subj_docs + train_obj_docs
    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    training_set = sentim_analyzer.apply_features(training_docs)
    trainer = NaiveBayesClassifier.train
    sentiment_classifier = sentim_analyzer.train(trainer, training_set)
    return sentim_analyzer
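A hedged usage sketch, not in the original: because get_objectivity_analyzer returns the SentimentAnalyzer itself, its stored feature extractors can be reused to label new tokenized sentences as 'subj' or 'obj'.

# Usage sketch; word_tokenize is an assumed tokenizer choice.
from nltk import word_tokenize

objectivity = get_objectivity_analyzer()
print(objectivity.classify(word_tokenize("I think this phone is absolutely wonderful")))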
def run_sa_twitt(train, test):
    a = SentimentAnalyzer()
    tr = NaiveBayesClassifier.train
    all_words = [word for word in a.all_words(train)]
    # Add simple unigram word features
    unigram_feats = a.unigram_word_feats(all_words, top_n=1000)
    a.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    # Add bigram collocation features
    bigram_collocs_feats = a.bigram_collocation_feats(
        [tweet[0] for tweet in train], top_n=100, min_freq=12)
    a.add_feat_extractor(extract_bigram_feats, bigrams=bigram_collocs_feats)
    tr_set = a.apply_features(train)
    test_set = a.apply_features(test)
    # Training
    clf = a.train(tr, tr_set)
    res = a.evaluate(test_set)
    print(res)
class SentimentAnalyzerTry(object): def __init__(self): self.n_instances = 1000 self.n_training = int(self.n_instances * 0.8) self.n_testing = int(self.n_instances * 0.2) self.sentim_analyzer = SentimentAnalyzer() def prepare_training_and_test_data(self): """ Each document is represented by a tuple (sentence, label). The sentence is tokenized, so it is represented by a list of strings. E.g: (['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one', 'thing', 'is', 'a', 'small', 'gem', '.'], 'subj') """ subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:self.n_instances]] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:self.n_instances]] # We separately split subjective and objective instances to keep a balanced uniform class distribution in both train and test sets. training_end = self.n_training testing_start = training_end testing_end = testing_start + self.n_testing train_subj_docs = subj_docs[:training_end] test_subj_docs = subj_docs[testing_start:testing_end] train_obj_docs = obj_docs[:training_end] test_obj_docs = obj_docs[testing_start:testing_end] self.training_docs = train_subj_docs + train_obj_docs self.testing_docs = test_subj_docs + test_obj_docs def extract_training_test_features(self): # We use simple unigram word features, handling negation. self.all_words_neg = self.mark_negative_sentence(self.training_docs) self.unigram_feats = self.sentim_analyzer.unigram_word_feats(self.all_words_neg, min_freq=4) self.sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=self.unigram_feats) # We apply features to obtain a feature-value representation of our datasets. self.training_set = self.sentim_analyzer.apply_features(self.training_docs) self.test_set = self.sentim_analyzer.apply_features(self.testing_docs) def mark_negative_sentence(self, docs): all_words_neg = self.sentim_analyzer.all_words([mark_negation(doc) for doc in docs]) return all_words_neg def train_sentiment_analyzer(self, evaluate=True): self.prepare_training_and_test_data() self.extract_training_test_features() # We can now train our classifier on the training set, and subsequently output the evaluation results self.trainer = NaiveBayesClassifier.train self.classifier = self.sentim_analyzer.train(self.trainer, self.training_set) if evaluate: self.evaluate_classifier() def evaluate_classifier(self): for key, value in sorted(self.sentim_analyzer.evaluate(self.test_set).items()): print('{0}: {1}'.format(key, value)) def classify_text(self, text): self.sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=self.unigram_feats) return self.classifier.classify(self.sentim_analyzer.extract_features(tokenize.word_tokenize(text)))
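A hedged usage sketch for SentimentAnalyzerTry, not part of the original class: train on the subjectivity corpus, print the evaluation metrics, then label a new sentence.

# Usage sketch only; relies on the module's own imports (nltk tokenize, subjectivity corpus).
sat = SentimentAnalyzerTry()
sat.train_sentiment_analyzer(evaluate=True)
print(sat.classify_text("The movie felt honest and deeply personal"))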
train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:100] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:100] training_docs = train_subj_docs+train_obj_docs testing_docs = test_subj_docs+test_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=1) len(unigram_feats) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) for key,value in sorted(sentim_analyzer.evaluate(test_set).items()): print('{0}: {1}'.format(key, value)) from nltk.sentiment.vader import SentimentIntensityAnalyzer sentences = ["VADER is smart, handsome, and funny.", # positive sentence example "VADER is smart, handsome, and funny!", # punctuation emphasis handled correctly (sentiment intensity adjusted) "VADER is very smart, handsome, and funny.", # booster words handled correctly (sentiment intensity adjusted) "VADER is VERY SMART, handsome, and FUNNY.", # emphasis for ALLCAPS handled "VADER is VERY SMART, handsome, and FUNNY!!!",# combination of signals - VADER appropriately adjusts intensity "VADER is VERY SMART, really handsome, and INCREDIBLY FUNNY!!!",# booster words & punctuation make this close to ceiling for score "The book was good.", # positive sentence "The book was kind of good.", # qualified positive sentence is handled correctly (intensity adjusted) "The plot was good, but the characters are uncompelling and the dialog is not great.", # mixed negation sentence "A really bad, horrible book.", # negative sentence with booster words
def get_tweets(self, query, count=10): tweets = [] try: #get the tweets from twitter fetched_tweets = self.api.search(q=query, count=count) n_instances = 100 subj_docs = [ (sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances] ] obj_docs = [ (sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances] ] train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:100] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:100] training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs emotion_analyzer = SentimentAnalyzer() #get the negative words for feature extraction all_radical_slurs = emotion_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = emotion_analyzer.unigram_word_feats( all_radical_slurs, min_freq=4) emotion_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = emotion_analyzer.apply_features(training_docs) test_set = emotion_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = emotion_analyzer.train(trainer, training_set) #test sentences sentences = [ "Ravi is the worst boy in class", "The story is full of mean bitchy characters", "I had a good day!", "The day was okay", "The day was very bad", "Harry potter is a good book", "New Tata electric car is a piece of shit", "It has been a long time since I had a good food", "Stop acting as a asshole" ] sid = SentimentIntensityAnalyzer() for sentence in sentences: print(sentence) ss = sid.polarity_scores(sentence) for k in sorted(ss): print('{0}: {1}, '.format(k, ss[k]), end='') print() for tweet in fetched_tweets: print(tweet.text) ss = sid.polarity_scores(tweet.text) for k in sorted(ss): print('{0}: {1}, '.format(k, ss[k]), end='') print() return tweets except tweepy.TweepError as e: print("Error : " + str(e))
def trainAllClassifiers(): #Get all subjective and objective sentences. #Note: The "encode/decode" statement is used to parse the unicode representation of the text to an #Ascii representation. The "apply_features()" method throws an error if this isn't done. This is most #likely because python 3 uses unicode characters to perform operations on string, while python 2 doesn't. print("Splitting positive and negative documents...") positive_docs = [ ([string.encode('ascii', 'ignore').decode('ascii') for string in sent], 'pos') for sent in movie_reviews.sents(categories='pos') ] negative_docs = [ ([string.encode('ascii', 'ignore').decode('ascii') for string in sent], 'neg') for sent in movie_reviews.sents(categories='neg') ] #obj_docs = [(sent.encode('ascii', 'ignore').decode('ascii'), 'obj') for sent in subjectivity.sents(categories='obj')] #Randomly split data sets into train and test sets. train_pos, test_pos = train_test_split(positive_docs, test_size=1000, train_size=4000) train_neg, test_neg = train_test_split(negative_docs, test_size=1000, train_size=4000) #Aggregate train and test data sets. train = train_pos + train_neg test = test_pos + test_neg #Create a sentiment analyzer to analyze the text documents. This analyzer #provides an abstraction for managing a classifier, and feature extractor. #It also provides convinence data metrics on classifier performance. sentim_analyzer = SentimentAnalyzer() #Mark negations in the tokenized training text, and count all negative words. #all_words() returns all tokens from the document, which is used to create a set #of features with a feature extractor. print("Creating feature set...") all_words_with_neg_tags = sentim_analyzer.all_words( [mark_negation(doc) for doc in train]) #Create the unigram features, only taking features that occur more than 4 time. unigram_features = sentim_analyzer.unigram_word_feats( all_words_with_neg_tags, min_freq=2) #Save the unigram feature list to a file so it can be used later. #These features need to be applied to the email set. f = open("./bow_features.pkl", "w") pickle.dump(unigram_features, f) f.close() #Create a feature extractor based on the unigram word features created. #The unigram feature extractor is found in the sentiment utils package. sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_features) #Create feature-value representations of the data. train_set = sentim_analyzer.apply_features(train) test_set = sentim_analyzer.apply_features(test) #Collect some memory. positive_docs = None negative_docs = None gc.collect() #Note, training may take a long time. #Create a trainer and train the sentiment analyzer on the training set. print("Beginning the classifier training...") #SVM startTime = time.time() print("Linear Support Vector Machine.") clf = SklearnClassifier(LinearSVC()) trainer = clf.train classifier = sentim_analyzer.train(trainer, train_set) endTime = time.time() timeDiff = endTime - startTime saveModel(classifier, "lsvm") saveMetricsToFile("lsvm", sentim_analyzer, test_set, timeDiff / 60.0) print "Total time to train: " + str(timeDiff / 60.0) + " minutes." #Naive Bayes startTime = time.time() print("Naive Bayes.") trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, train_set) endTime = time.time() timeDiff = endTime - startTime saveModel(classifier, "nb") saveMetricsToFile("nb", sentim_analyzer, test_set, timeDiff / 60.0) print "Total time to train: " + str(timeDiff / 60.0) + " minutes." #Stochastic Gradient Descent. 
(Performed first since it takes the least amount of time.) startTime = time.time() print("Stochastic Gradient Descent.") clf = SklearnClassifier(SGDClassifier()) trainer = clf.train classifier = sentim_analyzer.train(trainer, train_set) endTime = time.time() timeDiff = endTime - startTime saveModel(classifier, "sgd") saveMetricsToFile("sgd", sentim_analyzer, test_set, timeDiff / 60.0) print "Total time to train: " + str(timeDiff / 60.0) + " minutes." #SVM startTime = time.time() print("RBF Support Vector Machine.") clf = SklearnClassifier(svm.SVC(kernel='rbf')) trainer = clf.train classifier = sentim_analyzer.train(trainer, train_set) endTime = time.time() timeDiff = endTime - startTime saveModel(classifier, "svm") saveMetricsToFile("svm", sentim_analyzer, test_set, timeDiff / 60.0) print "Total time to train: " + str(timeDiff / 60.0) + " minutes." #Multinomial Naive Bayes. startTime = time.time() print("Multinomial Naive Bayes.") clf = SklearnClassifier(MultinomialNB()) trainer = clf.train classifier = sentim_analyzer.train(trainer, train_set) endTime = time.time() timeDiff = endTime - startTime saveModel(classifier, "mnb") saveMetricsToFile("mnb", sentim_analyzer, test_set, timeDiff / 60.0) print "Total time to train: " + str(timeDiff / 60.0) + " minutes." #Logistic Regression. startTime = time.time() print("Logistic Regression.") clf = SklearnClassifier(LogisticRegression()) trainer = clf.train classifier = sentim_analyzer.train(trainer, train_set) endTime = time.time() timeDiff = endTime - startTime saveModel(classifier, "lr") saveMetricsToFile("lr", sentim_analyzer, test_set, timeDiff / 60.0) print "Total time to train: " + str(timeDiff / 60.0) + " minutes." #Descision tree startTime = time.time() print("Decision Tree.") clf = SklearnClassifier(DecisionTreeClassifier()) trainer = clf.train classifier = sentim_analyzer.train(trainer, train_set) endTime = time.time() timeDiff = endTime - startTime saveModel(classifier, "dt") saveMetricsToFile("dt", sentim_analyzer, test_set, timeDiff / 60.0) print "Total time to train: " + str(timeDiff / 60.0) + " minutes." #Random Forrest. startTime = time.time() print("Random Forrest.") clf = SklearnClassifier(RandomForestClassifier()) trainer = clf.train classifier = sentim_analyzer.train(trainer, train_set) endTime = time.time() timeDiff = endTime - startTime saveModel(classifier, "rf") saveMetricsToFile("rf", sentim_analyzer, test_set, timeDiff / 60.0) print "Total time to train: " + str(timeDiff / 60.0) + " minutes." #Adaboost startTime = time.time() print("Ada Boost") clf = SklearnClassifier(AdaBoostClassifier()) trainer = clf.train classifier = sentim_analyzer.train(trainer, train_set) endTime = time.time() timeDiff = endTime - startTime saveModel(classifier, "ab") saveMetricsToFile("ab", sentim_analyzer, test_set, timeDiff / 60.0) print "Total time to train: " + str(timeDiff / 60.0) + " minutes."
def demo_tweets(trainer, n_instances=None, output=None): """ Train and test Naive Bayes classifier on 10000 tweets, tokenized using TweetTokenizer. Features are composed of: - 1000 most frequent unigrams - 100 top bigrams (using BigramAssocMeasures.pmi) :param trainer: `train` method of a classifier. :param n_instances: the number of total tweets that have to be used for training and testing. Tweets will be equally split between positive and negative. :param output: the output file where results have to be reported. """ from nltk.tokenize import TweetTokenizer from nltk.sentiment import SentimentAnalyzer from nltk.corpus import twitter_samples, stopwords # Different customizations for the TweetTokenizer tokenizer = TweetTokenizer(preserve_case=False) # tokenizer = TweetTokenizer(preserve_case=True, strip_handles=True) # tokenizer = TweetTokenizer(reduce_len=True, strip_handles=True) if n_instances is not None: n_instances = int(n_instances/2) fields = ['id', 'text'] positive_json = twitter_samples.abspath("positive_tweets.json") positive_csv = 'positive_tweets.csv' json2csv_preprocess(positive_json, positive_csv, fields, limit=n_instances) negative_json = twitter_samples.abspath("negative_tweets.json") negative_csv = 'negative_tweets.csv' json2csv_preprocess(negative_json, negative_csv, fields, limit=n_instances) neg_docs = parse_tweets_set(negative_csv, label='neg', word_tokenizer=tokenizer) pos_docs = parse_tweets_set(positive_csv, label='pos', word_tokenizer=tokenizer) # We separately split subjective and objective instances to keep a balanced # uniform class distribution in both train and test sets. train_pos_docs, test_pos_docs = split_train_test(pos_docs) train_neg_docs, test_neg_docs = split_train_test(neg_docs) training_tweets = train_pos_docs+train_neg_docs testing_tweets = test_pos_docs+test_neg_docs sentim_analyzer = SentimentAnalyzer() # stopwords = stopwords.words('english') # all_words = [word for word in sentim_analyzer.all_words(training_tweets) if word.lower() not in stopwords] all_words = [word for word in sentim_analyzer.all_words(training_tweets)] # Add simple unigram word features unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # Add bigram collocation features bigram_collocs_feats = sentim_analyzer.bigram_collocation_feats([tweet[0] for tweet in training_tweets], top_n=100, min_freq=12) sentim_analyzer.add_feat_extractor(extract_bigram_feats, bigrams=bigram_collocs_feats) training_set = sentim_analyzer.apply_features(training_tweets) test_set = sentim_analyzer.apply_features(testing_tweets) classifier = sentim_analyzer.train(trainer, training_set) # classifier = sentim_analyzer.train(trainer, training_set, max_iter=4) try: classifier.show_most_informative_features() except AttributeError: print('Your classifier does not provide a show_most_informative_features() method.') results = sentim_analyzer.evaluate(test_set) if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown(output, Dataset='labeled_tweets', Classifier=type(classifier).__name__, Tokenizer=tokenizer.__class__.__name__, Feats=extr, Results=results, Instances=n_instances)
# Now aggregate the training and test sets
training = training_subjective + training_objective
test = test_subjective + test_objective

## Apply sentiment analysis to the data to extract new "features"
# Initialize sentiment analyzer object
sentiment_analyzer = SentimentAnalyzer()
# Mark negated words in the training data and collect the vocabulary
all_negative_words = sentiment_analyzer.all_words([mark_negation(data) for data in training])
unigram_features = sentiment_analyzer.unigram_word_feats(all_negative_words, min_freq=4)
sentiment_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_features)
training_final = sentiment_analyzer.apply_features(training)
test_final = sentiment_analyzer.apply_features(test)

## Train and test the model
model = NaiveBayesClassifier.train
classifier = sentiment_analyzer.train(model, training_final)
for key, value in sorted(sentiment_analyzer.evaluate(test_final).items()):
    print("{0}: {1}".format(key, value))
return dict(('contains(%s)' % w, True) for w in words) positive_featuresets = [(features(tweet), 'positive') for tweet in pos_tweets] negative_featuresets = [(features(tweet), 'negative') for tweet in neg_tweets] neutral_featuresets = [(features(tweet), 'neutral') for tweet in neutral_tweets] training_features = positive_featuresets + negative_featuresets + neutral_featuresets # %% len(training_features) # %% sentiment_analyzer = SentimentAnalyzer() trainer = NaiveBayesClassifier.train classifier = sentiment_analyzer.train(trainer, training_features) # %% # Create evaluation data #df_dev = pd.DataFrame(df_dev,columns=['id','label','text']) truth_list = list(df_dev[['text', 'label']].itertuples(index=False, name=None)) len(truth_list) # %% # sanity check to make sure we manipulated the dataframe properly truth_list[100] # %% # The evaluation method needs the feature extractor that was run to train the classifier # Specifically, it wants a list of tuples (features,truth), where features is a dict
training_docs = train_objective + train_subjective testing_docs = test_objective + test_subjective analyzer = SentimentAnalyzer() negative_words = analyzer.all_words( [mark_negation(doc) for doc in training_docs]) features = analyzer.unigram_word_feats(negative_words, min_freq=4) analyzer.add_feat_extractor(extract_unigram_feats, unigrams=features) training_set = analyzer.apply_features(training_docs) test_set = analyzer.apply_features(testing_docs) ## Training the classifier trainer = NaiveBayesClassifier.train classifier = analyzer.train(trainer, training_set) for key, value in sorted(analyzer.evaluate(test_set).items()): print('{0}: {1}'.format(key, value)) print("NLP classifier ready") def sentiment(text): """ text: str return: 'positive'| 'negative' | 'neutral' classifies a text into 'positive', 'negative' or 'neutral' averages the classification of all internal sentences """ lines_list = tokenize.sent_tokenize(text)
class SuicideClassifier(object): def __init__(self, sentiment_only, num_phrases_to_track=20): # neg_phrases = filter_negative_phrases(load_csv_sentences('thoughtsandfeelings.csv')) # pos_phrases = filter_positive_phrases(load_csv_sentences('spiritualforums.csv')) # file_pos = open("pos_phrases.txt", 'w') # file_neg = open("neg_phrases.txt", 'w') # for item in pos_phrases: # print>>file_pos, item # for item in neg_phrases: # print>>file_neg, item self.recent_sentiment_scores = [] neg_file = open("ALL_neg_phrases_filtered.txt", "r") pos_file = open("webtext_phrases_with_lots_of_words.txt", "r") neg_phrases = neg_file.readlines() pos_phrases = pos_file.readlines() neg_docs = [] pos_docs = [] for phrase in neg_phrases: neg_docs.append((phrase.split(), 'suicidal')) for phrase in pos_phrases[:len(neg_phrases)]: pos_docs.append((phrase.split(), 'alright')) print len(neg_docs) print len(pos_docs) # negcutoff = len(neg_docs) * 3 / 4 # poscutoff = len(pos_docs) * 3 / 4 negcutoff = -200 poscutoff = -200 train_pos_docs = pos_docs[:poscutoff] test_pos_docs = pos_docs[poscutoff:] train_neg_docs = neg_docs[:negcutoff] test_neg_docs = neg_docs[negcutoff:] training_docs = train_pos_docs + train_neg_docs testing_docs = test_pos_docs + test_neg_docs self.sentim_analyzer = SentimentAnalyzer() if not sentiment_only: all_words = self.sentim_analyzer.all_words([doc for doc in training_docs]) unigram_feats = self.sentim_analyzer.unigram_word_feats(all_words, min_freq=1) self.sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) self.sentim_analyzer.add_feat_extractor(vader_sentiment_feat) # bigram_feats = self.sentim_analyzer.bigram_collocation_feats(all_words, min_freq=1) # self.sentim_analyzer.add_feat_extractor(extract_bigram_feats, bigrams=bigram_feats) training_set = self.sentim_analyzer.apply_features(training_docs) test_set = self.sentim_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train self.classifier = self.sentim_analyzer.train(trainer, training_set) for key, value in sorted(self.sentim_analyzer.evaluate(test_set).items()): print('{0}: {1}'.format(key, value)) self.classifier.show_most_informative_features(20) def test(self, phrase): return self.sentim_analyzer.classify(phrase.split()) def update_sentiments(self, value): now = datetime.datetime.now() self.recent_sentiment_scores.append([now, value]) self.recent_sentiment_scores = [x for x in self.recent_sentiment_scores if x[ 0] > now - datetime.timedelta(seconds=60)] print sum([x[1] for x in self.recent_sentiment_scores]) / len(self.recent_sentiment_scores) return sum([x[1] for x in self.recent_sentiment_scores]) / len(self.recent_sentiment_scores)
def perform_ml(total_terms, training_data, testing_data, type): #print("total_terms="+xstr(total_terms)) #print("traning_tweets="+xstr(training_data)) #print("testing_tweets="+xstr(testing_data)) sentim_analyzer = SentimentAnalyzer() all_words = sentim_analyzer.all_words([terms for terms in total_terms]) # use unigram feats from class specific unigram lists unigram_feats = [] if (type == "nlp_terms"): unigram_feats = sentim_analyzer.unigram_word_feats(all_words, min_freq=4) else: unigram_feats = harmful_search_unigrams + other_search_unigrams + physical_search_unigrams + sexual_search_unigrams #print("unigram_feats="+xstr(unigram_feats)) print(str(len(unigram_feats))) # use bigram feats from class specific bigram lists bigram_feats = [] if (type == "nlp_terms"): bigram_measures = nltk.collocations.BigramAssocMeasures() bi_finder = BigramCollocationFinder.from_words(all_words) bi_finder.apply_freq_filter(3) bigram_feats = bi_finder.nbest(bigram_measures.pmi, -1) #bigram_feats = bi_finder.nbest(bigram_measures.pmi, 100) #bigram_feats = bi_finder.nbest(bigram_measures.chi_sq, -1) #bigram_feats = bi_finder.nbest(bigram_measures.likelihood_ratio, 100) else: bigram_feats = harmful_search_bigrams + other_search_bigrams + physical_search_bigrams + sexual_search_bigrams #print("bigram_feats="+xstr(bigram_feats)) print(str(len(bigram_feats))) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) sentim_analyzer.add_feat_extractor(extract_bigram_feats, bigrams=bigram_feats) training_set = sentim_analyzer.apply_features(training_data) test_set = sentim_analyzer.apply_features(testing_data) #print("training_set="+xstr(training_set)) print(str(len(training_set))) #print("test_set="+xstr(test_set)) print(str(len(test_set))) test_data_only = [] test_labels_only = [] for test_data_row in test_set: test_data_only.append(test_data_row[0]) test_labels_only.append(test_data_row[1]) trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) for key, value in sorted(sentim_analyzer.evaluate(test_set).items()): print('{0}: {1}'.format(key, value)) nltk_pred_labels = classifier.classify_many(test_data_only) cm = nltk.ConfusionMatrix(test_labels_only, nltk_pred_labels) print(cm.pretty_format(sort_by_count=True, show_percents=False, truncate=9)) informative_features = classifier.show_most_informative_features(25) print("Most Informative Features=" + xstr(informative_features)) return
unigram_features = analyzer.unigram_word_feats(vocabulary, min_freq=10) print("Unigram Features: ", len(unigram_features)) # 8237 analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_features) # Build the training set _train_X = analyzer.apply_features([mark_negation(word_tokenize(unidecode(clean_text(instance)))) for instance in train_X[:TRAINING_COUNT]], labeled=False) # Build the test set _test_X = analyzer.apply_features([mark_negation(word_tokenize(unidecode(clean_text(instance)))) for instance in test_X], labeled=False) trainer = NaiveBayesClassifier.train classifier = analyzer.train(trainer, zip(_train_X, train_y[:TRAINING_COUNT])) score = analyzer.evaluate(zip(_test_X, test_y)) print("Accuracy of SentimentAnalyzer: ", score['Accuracy']) # 0.8064 for TRAINING_COUNT=5000 vader = SentimentIntensityAnalyzer() def vader_polarity(text): """ Transform the output to a binary 0/1 result """ score = vader.polarity_scores(text) return 1 if score['pos'] > score['neg'] else 0 print(vader_polarity(train_X[0]), train_y[0]) # 0 1 print(vader_polarity(train_X[1]), train_y[1]) # 0 0
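A hedged follow-up sketch, an assumption rather than part of the source: score the rule-based vader_polarity baseline on the same held-out split, where test_X holds raw strings and test_y holds 0/1 labels.

# Sketch: accuracy of the VADER baseline on the held-out split defined above.
vader_preds = [vader_polarity(text) for text in test_X]
vader_accuracy = sum(int(p == y) for p, y in zip(vader_preds, test_y)) / len(test_y)
print("VADER accuracy:", vader_accuracy)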
def plot(): global df global author1 global author2 global startDate global endDate global author1_wpm global author2_wpm global more_words global author1_messages global author2_messages global more_messages #restricting range of data mask = (df['Date'] >= startDate) & (df['Date'] <= endDate) df = df.loc[mask] author1_df = df.loc[(df['Author'] == author1)] author2_df = df.loc[(df['Author'] == author2)] author1_hour = author1_df['Hour'].value_counts() author2_hour = author2_df['Hour'].value_counts() def time_of_day_data(): hours_dictionary = {} hours_dictionary['hourlist'] = ['Author 1', 'Author 2'] for i in range(0, 24): t_list = [0, 0] j = str(i) if i < 10: j = '0' + j if i == 0: j = '00' if j in author1_hour.index.tolist(): t_list[0] = author1_hour.loc[j].item() if j in author2_hour.index.tolist(): t_list[1] = author2_hour.loc[j].item() hours_dictionary[j] = t_list for x in hours_dictionary: if x == 'hourlist': counter = 0 elif int(hours_dictionary[x][0]) > counter: counter = int(hours_dictionary[x][0]) elif int(hours_dictionary[x][1]) > counter: counter = int(hours_dictionary[x][1]) return hours_dictionary, counter def roundup(x): return int(x) if x % 100 == 0 else int(x + 100 - x % 100) ### start of FIRST: time of day ### def plot_time_of_day(): plt.style.use('fivethirtyeight') plt.style.use('bmh') plt.rcParams["font.family"] = "Gabriola" plt.rcParams.update({'font.size': 16}) tod_data, maxcount = time_of_day_data() time_of_day_df = pd.DataFrame(tod_data) maxcount = roundup(maxcount) + 200 a = roundup(maxcount / 4) b = roundup(maxcount / 2) c = roundup(3 * maxcount / 4) # No. of variable categories = list(time_of_day_df)[1:] N = len(categories) # What will be the angle of each axis in the plot? (we divide the plot / number of variable) angles = [n / float(N) * 2 * pi for n in range(N)] angles += angles[:1] # Initialise the spider plot ax = plt.subplot(111, polar=True, label='time of day') # If you want the first axis to be on top: ax.set_theta_offset(pi / 2) ax.set_theta_direction(-1) # Draw one axe per variable + add labels labels yet plt.xticks(angles[:-1], categories, fontsize=16) # Draw ylabels ax.set_rlabel_position(0) plt.yticks([a, b, c], [str(a), str(b), str(c)], color="grey", size=12) plt.ylim(0, maxcount) # Ind1 values = time_of_day_df.loc[0].drop( 'hourlist').values.flatten().tolist() values += values[:1] ax.plot(angles, values, linewidth=1, linestyle='solid', label=author1_name, color=author1_colour) ax.fill(angles, values, author1_colour, alpha=0.1) # Ind2 values = time_of_day_df.loc[1].drop( 'hourlist').values.flatten().tolist() values += values[:1] ax.plot(angles, values, linewidth=1, linestyle='solid', label=author2_name, color=author2_colour) ax.fill(angles, values, author2_colour, alpha=0.1) # Add legend plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1)) plt.savefig(os.path.join("uploads", 'timeofday.png'), bbox_inches='tight') plot_time_of_day() ### end of FIRST: time of day ### author1_day = author1_df['Day_of_week'].value_counts() author2_day = author2_df['Day_of_week'].value_counts() days_in_order = [ 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday' ] def day_of_week_data(): day_dictionary = {} day_dictionary['Day'] = ['Author 1', 'Author 2'] for dayname in days_in_order: t_list = [0, 0] if dayname in author1_day.index.tolist(): t_list[0] = author1_day.loc[dayname].item() if dayname in author2_day.index.tolist(): t_list[1] = author2_day.loc[dayname].item() day_dictionary[dayname] = t_list for x in day_dictionary: if x == 'Day': 
counter = 0 else: temp = max(int(day_dictionary[x][0]), int(day_dictionary[x][1])) if temp > counter: counter = temp return day_dictionary, counter ### start of SECOND: Day of week ### def plot_day_of_week(): plt.style.use('fivethirtyeight') plt.style.use('bmh') plt.rcParams["font.family"] = "Gabriola" plt.rcParams.update({'font.size': 16}) dow_data, maxcount = day_of_week_data() day_of_week_df = pd.DataFrame(dow_data) maxcount = roundup(maxcount) + 200 a = roundup(maxcount / 4) b = roundup(maxcount / 2) c = roundup(3 * maxcount / 4) # number of variable categories = list(day_of_week_df)[1:] N = len(categories) # What will be the angle of each axis in the plot? (we divide the plot / number of variable) angles = [n / float(N) * 2 * pi for n in range(N)] angles += angles[:1] # Initialise the spider plot ax = plt.subplot(111, polar=True, label='day of week') # If you want the first axis to be on top: ax.set_theta_offset(pi / 2) ax.set_theta_direction(-1) # Draw one axe per variable + add labels labels yet plt.xticks(angles[:-1], categories, fontsize=16) for label, i in zip(ax.get_xticklabels(), range(0, len(angles))): angle_rad = angles[i] if angle_rad == 0: ha = 'center' elif angle_rad <= pi / 2: ha = 'left' elif pi / 2 < angle_rad <= pi: ha = 'left' elif pi < angle_rad <= (3 * pi / 2): ha = 'right' else: ha = 'right' label.set_horizontalalignment(ha) # Draw ylabels ax.set_rlabel_position(0) plt.yticks([a, b, c], [str(a), str(b), str(c)], color="grey", size=12) plt.ylim(0, maxcount) # Ind1 values = day_of_week_df.loc[0].drop('Day').values.flatten().tolist() values += values[:1] ax.plot(angles, values, linewidth=1, linestyle='solid', label=author1_name, color=author1_colour) ax.fill(angles, values, author1_colour, alpha=0.1) # Ind2 values = day_of_week_df.loc[1].drop('Day').values.flatten().tolist() values += values[:1] ax.plot(angles, values, linewidth=1, linestyle='solid', label=author2_name, color=author2_colour) ax.fill(angles, values, author2_colour, alpha=0.1) # Add legend plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1)) plt.savefig(os.path.join("uploads", 'dayofweek.png'), bbox_inches='tight') plot_day_of_week() ### end of SECOND: Day of week ### def timeline_data(): timeline_dictionary = {} timeline_dictionary['date'] = ['Author1', 'Author2'] for i in range(len(df)): t_list = [0, 0] day, author = df.iloc[i, 0], df.iloc[i, 2] if day not in timeline_dictionary: timeline_dictionary[day] = t_list t_list = timeline_dictionary[day] if author == author1: t_list[0] += 1 if author == author2: t_list[1] += 1 timeline_dictionary[day] = t_list return timeline_dictionary timeline_df = pd.DataFrame(timeline_data()) timeline_df = timeline_df.T new_header = timeline_df.iloc[0] timeline_df = timeline_df[1:] timeline_df.columns = new_header ### start of THIRD: timeline ### def plot_timeline(): plt.style.use('fivethirtyeight') plt.rcParams["font.family"] = "Gabriola" plt.rcParams.update({'font.size': 24}) plt.figure(figsize=(20, 8)) plt.xlabel('Timeline', fontsize=30) ax1 = timeline_df.Author1.plot(color=author1_colour) ax2 = timeline_df.Author2.plot(color=author2_colour) ax1.xaxis.set_label_position('top') ax1.legend([author1_name, author2_name], loc='upper right') plt.savefig(os.path.join("uploads", 'timeline.png'), bbox_inches='tight') plot_timeline() ### end of THIRD: timeline ### def top_words(df): top_N = 40 stopwords = nltk.corpus.stopwords.words('english') # RegEx for stopwords RE_stopwords = r'\b(?:{})\b'.format('|'.join(stopwords)) #RE_stopwords.extend(['from', 'subject', 're', 'edu', 
'use']) # replace '|'-->' ' and drop all stopwords words = (df.Message\ .str.lower()\ .replace([RE_stopwords], [''], regex=True)\ .str.cat(sep=' ')\ .split()) words = [word for word in words if len(word) > 3] # generate DF out of Counter rslt = pd.DataFrame(Counter(words).most_common(top_N), columns=['Word', 'Frequency']).set_index('Word') return rslt def hex_to_rgb(hex): hex = hex.lstrip('#') hlen = len(hex) return tuple( int(hex[i:i + hlen // 3], 16) for i in range(0, hlen, hlen // 3)) def rgb_to_hsl(r, g, b): r = float(r) g = float(g) b = float(b) high = max(r, g, b) low = min(r, g, b) h, s, l = ((high + low) / 2, ) * 3 if high == low: h = 0.0 s = 0.0 else: d = high - low s = d / (2 - high - low) if l > 0.5 else d / (high + low) h = { r: (g - b) / d + (6 if g < b else 0), g: (b - r) / d + 2, b: (r - g) / d + 4, }[high] h /= 6 return h, s, l a1_rgb = hex_to_rgb(author1_colour) a2_rgb = hex_to_rgb(author2_colour) a1_hlsva = rgb_to_hsl(a1_rgb[0] / 255, a1_rgb[1] / 255, a1_rgb[2] / 255) a2_hlsva = rgb_to_hsl(a2_rgb[0] / 255, a2_rgb[1] / 255, a2_rgb[2] / 255) a1_hlsva0 = round(a1_hlsva[0] * 355) a1_hlsva1 = round(a1_hlsva[1] * 100) a1_hlsva2 = round(a1_hlsva[2] * 100) a2_hlsva0 = round(a2_hlsva[0] * 355) a2_hlsva1 = round(a2_hlsva[1] * 100) a2_hlsva2 = round(a2_hlsva[2] * 100) ############################ df_1 = top_words(author1_df) df_1.columns d = dict(zip(df_1.index, df_1.Frequency)) plt.style.use('fivethirtyeight') plt.rcParams["font.family"] = "Gabriola" plt.rcParams.update({'font.size': 16}) fileloc = os.path.join("static", shape + '.jpg') mask = np.array(Image.open(fileloc)) wordcloud = WordCloud(background_color='#F0F0F0', mask=mask, width=mask.shape[1], height=mask.shape[0]) wordcloud.generate_from_frequencies(frequencies=d) plt.figure() def a1_color_func(word, font_size, position, orientation, random_state=None, **kwargs): return "hsl({0}, {1}%%, %d%%)".format( str(a1_hlsva0), str(a1_hlsva1)) % random.randint(60, 90) plt.imshow(wordcloud.recolor(color_func=a1_color_func), interpolation="bilinear") plt.axis("off") wordcloud.to_file(os.path.join("uploads", 'author1cloud.png')) df_2 = top_words(author2_df) df_2.columns d = dict(zip(df_2.index, df_2.Frequency)) plt.style.use('fivethirtyeight') plt.rcParams["font.family"] = "Gabriola" plt.rcParams.update({'font.size': 16}) wordcloud = WordCloud(background_color='#F0F0F0', mask=mask, width=mask.shape[1], height=mask.shape[0]) wordcloud.generate_from_frequencies(frequencies=d) plt.figure() def a2_color_func(word, font_size, position, orientation, random_state=None, **kwargs): return "hsl({0}, {1}%%, %d%%)".format( str(a2_hlsva0), str(a2_hlsva1)) % random.randint(60, 90) plt.imshow(wordcloud.recolor(color_func=a2_color_func), interpolation="bilinear") plt.axis("off") wordcloud.to_file(os.path.join("uploads", 'author2cloud.png')) n_instances = 100 subj_docs = [ (sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances] ] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]] train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:100] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:100] training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) len(unigram_feats) sentim_analyzer.add_feat_extractor(extract_unigram_feats, 
unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) ''' for key,value in sorted(sentim_analyzer.evaluate(test_set).items()): print('{0}: {1}'.format(key, value)) ''' df['Message'].unique()[10:20] def sentiment(message): sid = SentimentIntensityAnalyzer() ss = sid.polarity_scores(message) return ss['compound'] df["Sentiment"] = df.apply(lambda row: sentiment(row['Message']), axis=1) def sentiment_data(): sentiment_dictionary = {} sentiment_dictionary['date'] = ['Author1', 'Author2'] for i in range(len(df)): t_list = [[0, 0.0], [0, 0.0]] month, author, sentiment = str(df.iloc[i, 0]), df.iloc[i, 2], df.iloc[i, 6] if sentiment != 0.0: month = month.split('-')[0] + '-' + month.split('-')[1] if month not in sentiment_dictionary: sentiment_dictionary[month] = t_list t_list = sentiment_dictionary[month] if author == author1: t_list[0][0] += 1 t_list[0][1] += sentiment if author == author2: t_list[1][0] += 1 t_list[1][1] += sentiment sentiment_dictionary[month] = t_list for x in sentiment_dictionary: if x != 'date': t_list = sentiment_dictionary[x] if t_list[0][0] != 0: t_list[0] = float(t_list[0][1]) / float(t_list[0][0]) else: t_list[0] = 0 if t_list[1][0] != 0: t_list[1] = float(t_list[1][1]) / float(t_list[1][0]) else: t_list[1] = 0 sentiment_dictionary[x] = t_list return sentiment_dictionary sentiment_df = pd.DataFrame(sentiment_data()) sentiment_df = sentiment_df.T new_header = sentiment_df.iloc[0] sentiment_df = sentiment_df[1:] sentiment_df.columns = new_header def plot_sentiment(): plt.style.use('fivethirtyeight') plt.rcParams["font.family"] = "Gabriola" plt.rcParams.update({'font.size': 24}) plt.figure(figsize=(20, 8)) plt.xlabel('Sentiment Analysis', fontsize=30) ax1 = sentiment_df.Author1.plot(color=author1_colour) ax2 = sentiment_df.Author2.plot(color=author2_colour) ax1.xaxis.set_label_position('top') h1, l1 = ax1.get_legend_handles_labels() ax1.legend([author1_name, author2_name], loc='upper right') plt.savefig(os.path.join("uploads", 'sentiment.png'), bbox_inches='tight') plot_sentiment() #number of words def no_of_words(message): return len(message.split()) df["WordCount"] = df.apply(lambda row: no_of_words(row['Message']), axis=1) author1_df = df.loc[(df['Author'] == author1)] author2_df = df.loc[(df['Author'] == author2)] author1_wpm = author1_name + "'s average word per message is {:0.2f}".format( author1_df["WordCount"].mean()) author2_wpm = author2_name + "'s average word per message is {:0.2f}".format( author2_df["WordCount"].mean()) def who_sent_more_words(): if author1_df["WordCount"].sum() > author2_df["WordCount"].sum(): num = author1_df["WordCount"].sum() / author2_df["WordCount"].sum() num = num * 100 - 100 return (author1_name + " sent {:0.0f}% more words than ".format(num) + author2_name) elif author2_df["WordCount"].sum() > author1_df["WordCount"].sum(): num = author2_df["WordCount"].sum() / author1_df["WordCount"].sum() num = num * 100 - 100 return (author2_name + " sent {:0.0f}% more words than ".format(num) + author1_name) else: return ("You both sent the same number of words somehow!") more_words = who_sent_more_words() days = "Number of days of texting: " + str(len(df["Date"].unique())) author1_messages = author1_name + " sent " + str(len( author1_df.index)) + " messages" author2_messages = author2_name + " sent " + str(len( author2_df.index)) + " messages" def who_sent_more(): if 
len(author1_df.index) > len(author2_df.index): num = len(author1_df.index) / len(author2_df.index) return (author1_name + " sent {:0.2f} times more messages than ".format(num) + author2_name) elif len(author2_df.index) > len(author1_df.index): num = len(author2_df.index) / len(author1_df.index) return (author2_name + " sent {:0.2f} times more messages than ".format(num) + author1_name) else: return ("You both sent the same number of messages somehow!") more_messages = who_sent_more()
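# --- Hedged sketch: minimal radar chart like plot_time_of_day() above -----
# plot_time_of_day() and plot_day_of_week() draw polar ("spider") charts of
# per-hour and per-day message counts. This standalone toy version shows the
# same layout for a single author with made-up hourly counts.
from math import pi
import matplotlib.pyplot as plt

hours = ['%02d' % h for h in range(24)]
counts = [3, 1, 0, 0, 0, 0, 2, 5, 9, 12, 8, 7, 10, 11, 9, 6, 8, 14, 20, 18, 15, 9, 6, 4]

N = len(hours)
angles = [n / float(N) * 2 * pi for n in range(N)]
angles += angles[:1]            # close the polygon
values = counts + counts[:1]

ax = plt.subplot(111, polar=True)
ax.set_theta_offset(pi / 2)     # put hour 00 at the top
ax.set_theta_direction(-1)      # go clockwise
plt.xticks(angles[:-1], hours, fontsize=8)
ax.plot(angles, values, linewidth=1, linestyle='solid')
ax.fill(angles, values, alpha=0.1)
plt.savefig('radar_demo.png', bbox_inches='tight')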
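# --- Hedged sketch: per-date message counts with pandas ------------------
# timeline_data() above builds the date/author count table with a manual
# dictionary loop. The same table can be produced with a groupby/unstack;
# the 'Date' and 'Author' column names are assumptions based on the code
# above, and the rows here are made up.
import pandas as pd

demo = pd.DataFrame({
    'Date':   ['2021-01-01', '2021-01-01', '2021-01-02', '2021-01-02', '2021-01-02'],
    'Author': ['Alice', 'Bob', 'Alice', 'Alice', 'Bob'],
})
timeline = (demo.groupby(['Date', 'Author'])
                .size()                     # messages per (date, author)
                .unstack(fill_value=0))     # one column per author
print(timeline)   # one row per date, one column of counts per author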
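# --- Hedged sketch: HSL conversion via the standard library --------------
# rgb_to_hsl() above hand-rolls the conversion and later scales hue by 355;
# the conventional hue range is 0-360 degrees. The standard-library colorsys
# module (note its h, l, s return order) can serve as a cross-check for a
# hex colour such as the author colours used above.
import colorsys

def hex_to_hsl_degrees(hex_colour):
    """Return (hue 0-360, saturation 0-100, lightness 0-100) for '#rrggbb'."""
    hex_colour = hex_colour.lstrip('#')
    r, g, b = (int(hex_colour[i:i + 2], 16) / 255.0 for i in (0, 2, 4))
    h, l, s = colorsys.rgb_to_hls(r, g, b)   # colorsys uses HLS ordering
    return round(h * 360), round(s * 100), round(l * 100)

print(hex_to_hsl_degrees('#3366cc'))   # (220, 60, 50)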
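# --- Hedged sketch: a per-author colour function for WordCloud -----------
# a1_color_func/a2_color_func above mix str.format with the old %-operator
# to emit an hsl() string. A single .format call does the same job; the hue
# and saturation values passed in are placeholders.
import random

def make_color_func(hue, saturation):
    """Return a WordCloud-compatible color_func with a fixed hue/saturation."""
    def color_func(word, font_size, position, orientation, random_state=None, **kwargs):
        # Vary only the lightness so every word stays in the author's colour.
        return "hsl({}, {}%, {}%)".format(hue, saturation, random.randint(60, 90))
    return color_func

# Usage (assuming `wordcloud` is an already-generated WordCloud instance):
#   plt.imshow(wordcloud.recolor(color_func=make_color_func(220, 60)),
#              interpolation="bilinear")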
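# --- Hedged sketch: monthly average compound sentiment with pandas -------
# sentiment_data() above accumulates per-month sums and counts by hand. An
# equivalent aggregation: take the year-month prefix of the date, drop
# neutral (0.0) scores, and average per author. Column names are assumptions
# based on the surrounding code; the rows are made up.
import pandas as pd

demo = pd.DataFrame({
    'Date':      ['2021-01-03', '2021-01-15', '2021-02-02', '2021-02-20'],
    'Author':    ['Alice', 'Bob', 'Alice', 'Bob'],
    'Sentiment': [0.6, -0.2, 0.0, 0.4],
})
demo['Month'] = demo['Date'].str.slice(0, 7)        # '2021-01', '2021-02', ...
monthly = (demo[demo['Sentiment'] != 0.0]
           .groupby(['Month', 'Author'])['Sentiment']
           .mean()
           .unstack(fill_value=0))
print(monthly)   # one row per month, one column per author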
test_subj_docs = subj_docs[80:100]
train_obj_docs = obj_docs[:80]
test_obj_docs = obj_docs[80:100]
training_docs = train_subj_docs + train_obj_docs
testing_docs = test_subj_docs + test_obj_docs

sentim_analyzer = SentimentAnalyzer()
all_words_neg = sentim_analyzer.all_words(
    [mark_negation(doc) for doc in training_docs])
unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
sentim_analyzer.add_feat_extractor(extract_unigram_feats,
                                   unigrams=unigram_feats)
training_set = sentim_analyzer.apply_features(training_docs)
test_set = sentim_analyzer.apply_features(testing_docs)

trainer = NaiveBayesClassifier.train
classifier = sentim_analyzer.train(trainer, training_set)
for key, value in sorted(sentim_analyzer.evaluate(test_set).items()):
    print('{0}: {1}'.format(key, value))

sid = SentimentIntensityAnalyzer()

# Guard the stream setup so connection/auth errors are reported rather than
# crashing the script (the bare `except` below needs a matching `try`).
try:
    auth = OAuthHandler(ckey, csecret)
    auth.set_access_token(atoken, asecret)
    twitterStream = Stream(auth, listener())
    # twitterStream.filter(track=["googl", "google", "goog"])
    twitterStream.filter(track=["parker hannifin"])
except Exception as e:
    print(e)
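# --- Hedged sketch: a minimal listener for the stream set up above --------
# The script above passes `listener()` to Stream(), but the listener class
# is defined elsewhere. This is one plausible shape for it, written against
# the tweepy 3.x-style API that matches the OAuthHandler/Stream calls above;
# it scores each incoming status with the same SentimentIntensityAnalyzer.
# Credentials and track terms are placeholders, not part of the original.
import json
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from tweepy import OAuthHandler, Stream
from tweepy.streaming import StreamListener

class listener(StreamListener):
    def __init__(self):
        super(listener, self).__init__()
        self.sid = SentimentIntensityAnalyzer()

    def on_data(self, data):
        tweet = json.loads(data)
        text = tweet.get('text', '')
        print(self.sid.polarity_scores(text)['compound'], text[:80])
        return True             # keep the stream open

    def on_error(self, status_code):
        print('stream error:', status_code)
        return False            # disconnect on error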
class QuoteFinder: def __init__(self): self.sentim_analyzer = SentimentAnalyzer() self.genre_dict = read_file("jsons/movie_genre_quote_dict_2.json") context_file = "jsons/final_context.json" movie_file = "jsons/final_movies.json" quote_file = "jsons/final_quotes.json" year_rating_file = "jsons/final_year_rating.json" self.context = read_file(context_file) self.movies = read_file(movie_file) self.quotes = read_file(quote_file) self.year_rating_dict = read_file(year_rating_file) # Reincode to unicode for i in range(len(self.context)): self.context[i] = self.context[i].encode("utf-8").decode("utf-8") self.movies[i] = self.movies[i].encode("utf-8").decode("utf-8") self.quotes[i] = self.quotes[i].encode("utf-8").decode("utf-8") self.context, self.quotes, self.movies = quote_pruner(self.context, self.quotes, self.movies) self.inverted_index = read_file("jsons/f_inverted_index.json") self.idf = read_file("jsons/f_idf.json") # Initialize query tokenizer self.tokenizer = TreebankWordTokenizer() # Compute document norms self.norms = compute_doc_norms(self.inverted_index, self.idf, len(self.context)) word_co_filename = "jsons/word_co.json" word_count_filename = "jsons/word_count_dict.json" pmi_dict_filename = "jsons/pmi_dict.json" # Read files self.word_co = read_file(word_co_filename) self.word_count_dict = read_file(word_count_filename) self.pmi_dict = read_file(pmi_dict_filename) def find_basic_cooccurence(self, word_list): """ Initialize the base word co-occurrance list from our context and quotes. Arguments ========= word_list: the list of words which are in our movie space Returns ======= word_co : a dictionary representing the word_occurrance matrix """ # Get English stop words stop_words = stopwords.words('english') # Merge context and quotes quote_list = self.quotes new_quote_list = [] for q in quote_list: new_q = punct_strip(q) if new_q not in self.context: new_quote_list.append(new_q) context_quotes = self.context + new_quote_list # Find co occurences in context data, based co-occurences in a document word_co = defaultdict(list) word_count_dict = defaultdict(int) for doc in context_quotes: # Double loop to count word co-occurences tkns = self.tokenizer.tokenize(doc) for i in range(len(tkns)): if tkns[i] not in stop_words: word_count_dict[tkns[i]] += 1 for j in range(len(tkns)): if not (j == i) and (tkns[j] in word_list): word_co[tkns[i]] = update_word_counts(word_co[tkns[i]], tkns[j]) return word_co, word_count_dict def update_cooccurence(self, word_co_old, word_count_dict_old, word_list, docs): """ Updates the word co-occurrance mat and the word count dict with a new set of data. 
Arguments ========= word_co_old: a word co-occurrance matrix in the form of a dictionary word_count_dict_old: a dictionary that keeps track of the total occurences of a word word_list: the list of words which are in our movie space docs: a list of new docs we're using to update our word co-occurence Returns ======= word_co, word_count_dict : new word co-occurence dict/mat and new word count dictionary """ # Get English stop words stop_words = stopwords.words('english') # Make init dict word_co = defaultdict(list) word_count_dict = defaultdict(int) word_co.update(word_co_old) word_count_dict.update(word_count_dict_old) # Find co occurences in context data, based on document (content) for doc in docs: # Double loop to count word co-occurences tkns = self.tokenizer.tokenize(punct_strip(doc)) for i in range(len(tkns)): if tkns[i] not in stop_words: word_count_dict[tkns[i]] += 1 for j in range(len(tkns)): if not (j == i) and (tkns[j] in word_list): word_co[tkns[i]] = update_word_counts(word_co[tkns[i]], tkns[j]) return word_co, word_count_dict def query_vectorize(self, q, sw=False): # Remove punctuation, lowercase, and encode to utf query = punct_strip(q.lower().encode("utf-8").decode("utf-8")) # Tokenize query and check query stopword cutoff query_words = self.tokenizer.tokenize(query) # Remove stop words if necessary stop_words = stopwords.words('english') # Get English stop words if (sw): new_query = [] for x in query_words: if x not in stop_words: new_query.append(x) query_words = new_query # Make query tfidf query_tfidf = defaultdict(int) for word in query_words: query_tfidf[word] += 1 for word in query_tfidf: if word in self.idf: query_tfidf[word] *= self.idf[word] else: query_tfidf[word] = 0 # Find query norm query_norm = 0 for word in query_tfidf: query_norm += math.pow(query_tfidf[word], 2) query_norm = math.sqrt(query_norm) return query_tfidf, query_norm def pseudo_rocchio(self, query_tfidf, query_norm, relevant, sw=False, a=.3, b=.4, clip=True): """ Arguments: query: a string representing the name of the movie being queried for relevant: a list of int representing the indices of relevant movies for query irrelevant: a list of strings representing the names of irrelevant movies for query a,b: floats, corresponding to the weighting of the original query, relevant queriesrespectively. clip: boolean, whether or not to clip all returned negative values to 0 Returns: q_mod: a dict representing the modified query vector. this vector should have no negatve weights in it! 
""" relevant_id = [] for s, i in relevant: relevant_id.append(i) if query_norm == 0: return self.find_random() # Calculate alpha*query_vec query_vec = query_tfidf for word in query_vec: query_vec[word] /= query_norm query_vec[word] *= a # Get words in relevant docs relevant_words = [] relevant_context = [] for i in relevant_id: relevant_context.append(self.context[i]) for context in relevant_context: context_tkns = self.tokenizer.tokenize(context) for tkn in context_tkns: if tkn not in relevant_words: relevant_words.append(tkn) # Collect relevant doc vector sums relevant_docs = defaultdict(int) for word in relevant_words: if word in self.inverted_index: for quote_id, tf in self.inverted_index[word]: if quote_id in relevant_id: relevant_docs[word] += (tf / self.norms[quote_id]) # Calculate beta term beta_term = b * (1.0 / len(relevant)) for key in relevant_docs: relevant_docs[key] *= beta_term # Sum query and relevant q_mod = {k: query_vec.get(k, 0) + relevant_docs.get(k, 0.0) for k in set(query_vec) | set(relevant_docs)} # negative checks for terms, if clip if (clip): for key in q_mod: if q_mod[key] < 0: q_mod[key] = 0 return q_mod else: return q_mod def find_random(self): r = random.randint(0, len(self.quotes)) return [[self.quotes[r], self.movies[r], self.context[r]]] def find_similar(self, query): query_words = self.tokenizer.tokenize(query) query_tfidf = defaultdict(int) for word in query_words: query_tfidf[word] += 1 for word in query_tfidf: if word in self.idf: query_tfidf[word] *= self.idf[word] else: query_tfidf[word] = 0 query_norm = 0 for word in query_tfidf: query_norm += math.pow(query_tfidf[word], 2) query_norm = math.sqrt(query_norm) if query_norm == 0: return self.find_random() scores = [0 for _ in self.quotes] for word in query_tfidf: if word in self.inverted_index: for quote_id, tf in self.inverted_index[word]: scores[quote_id] += query_tfidf[word] * tf * self.idf[word] results = [] for i, s in enumerate(scores): if self.norms[i] != 0: results.append((s / (self.norms[i] * query_norm), i)) top_res_num = 5 results.sort(reverse=True) return [[self.quotes[i], self.movies[i], self.context[i]] for _, i in results[:top_res_num]] def find_final(self, q, rocchio=True, pseudo_rocchio_num=5, sw=False, pmi_num=8, ml=False): """ Arguments: q: a string representing the query rocchio: a boolean representing whether or not to use pseudo relevance feedback with Rocchio psudo_rocchio_num: and int representing the number of top documents to consider relevant for rocchio sw: a boolean on whether or not to include stop words. pmi_num: an int representing the number of items to add to the query to expand it with PMI. 
Returns: result_quotes: a list of the top x results """ # Vectorize query query_tfidf, query_norm = self.query_vectorize(q, sw) if query_norm == 0: r = random.randint(0, len(self.quotes)) return [[self.quotes[r], self.movies[r], self.context[r]]] # Expand query using PMI # http://www.jofcis.com/publishedpapers/2011_7_1_17_24.pdf pmi_expansion = defaultdict(float) pmi_norm = 1 for word in query_tfidf: # Sum PMI lists if word in self.pmi_dict.keys(): pmi_list = self.pmi_dict[word][:pmi_num] pmi_score_list = [] for word, score in pmi_list: pmi_expansion[word] += score pmi_score_list.append(score) temp_norm = 0 for s in pmi_score_list: temp_norm += math.pow(s, 2) temp_norm = math.sqrt(query_norm) pmi_norm *= temp_norm query_tfidf.update(pmi_expansion) query_norm = query_norm * 2 * pmi_num * pmi_norm # Find query norm query_norm = 0 for word in query_tfidf: query_norm += math.pow(query_tfidf[word], 2) query_norm = math.sqrt(query_norm) # Get scores scores = [0 for _ in self.quotes] for word in query_tfidf: if word in self.inverted_index: for quote_id, tf in self.inverted_index[word]: scores[quote_id] += query_tfidf[word] * tf * self.idf[word] results = [] for i, s in enumerate(scores): if self.norms[i] != 0: results.append((s / (self.norms[i] * query_norm), i)) # Weight scores with year and rating for i in range(len(results)): score = results[i][0] index = results[i][1] year = self.year_rating_dict[self.movies[i]][0] rating = self.year_rating_dict[self.movies[i]][1] results[i] = (year_rating_weight(float(year), float(rating), score), index) # sort results results.sort(reverse=True) if rocchio: # Do pseudo-relevance feedback with Rocchio mod_query = self.pseudo_rocchio(query_tfidf, query_norm, results[:pseudo_rocchio_num], sw) mod_query_norm = 0 for word in mod_query: mod_query_norm += math.pow(mod_query[word], 2) mod_query_norm = math.sqrt(mod_query_norm) # Re-find scores and reweight with year and rating scores = [0 for _ in self.quotes] for word in mod_query: if word in self.inverted_index: for quote_id, tf in self.inverted_index[word]: scores[quote_id] += mod_query[word] * tf * self.idf[word] results = [] for i, s in enumerate(scores): if self.norms[i] != 0: results.append((s / (self.norms[i] * mod_query_norm), i)) d_score_updates = {} if ml is True: d_score_updates = self.find_ml(q) # Weight scores with year and rating for i in range(len(results)): score = results[i][0] index = results[i][1] year = self.year_rating_dict[self.movies[i]][0] rating = self.year_rating_dict[self.movies[i]][1] results[i] = (year_rating_weight(float(year), float(rating), score), index) if ml is True and index in d_score_updates: results[i] = (results[i][0]*0.9 + d_score_updates[index], results[i][1]) # Sort and return results top_res_num = 5 results.sort(reverse=True) used_quotes = [] return_res = [] counter = 0 while len(return_res) <= top_res_num: # Avoid duplicate quotes score, i = results[counter] if self.quotes[i] not in used_quotes: used_quotes.append(self.quotes[i]) return_res.append((score, i)) else: counter += 1 result_quotes = [[self.quotes[i], self.movies[i], self.context[i]] for _, i in return_res[:top_res_num]] return result_quotes def sentiment_analysis(self, td): with open('jsons/all_words_neg.pickle', 'rb') as f: all_words_neg = pickle.load(f) with open('jsons/training_docs.pickle', 'rb') as f: training_docs = pickle.load(f) genres = ['action', 'crime', 'comedy', 'drama'] testing_docs = [(td, genre) for genre in genres] all_words_neg = self.sentim_analyzer.all_words([mark_negation(doc) for doc in 
training_docs])
        unigram_feats = self.sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
        self.sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
        training_set = self.sentim_analyzer.apply_features(training_docs)
        test_set = self.sentim_analyzer.apply_features(testing_docs)
        trainer = NaiveBayesClassifier.train
        classifier = self.sentim_analyzer.train(trainer, training_set)
        # f = open('my_classifier_test.pickle', 'rb')
        # classifier = pickle.load(f)
        # f.close()
        # classifier = nltk.data.load("my_classifier.pickle")
        genre_accuracy = []
        for key, value in sorted(self.sentim_analyzer.evaluate(test_set).items()):
            # print('{0}: {1}'.format(key, value))
            if key == 'Precision [action]':
                genre_accuracy.append(('action', value))
            if key == 'Precision [comedy]':
                genre_accuracy.append(('comedy', value))
            if key == 'Precision [drama]':
                genre_accuracy.append(('drama', value))
            if key == 'Precision [crime]':
                genre_accuracy.append(('crime', value))
        return genre_accuracy

    # Takes in a query.
    # Outputs a dictionary mapping movie indices to weights; each weight is
    # added to all quote scores of that movie.
    def find_ml(self, td):
        f_tokenizer = TreebankWordTokenizer()
        query_words = f_tokenizer.tokenize(td)
        genres = self.sentiment_analysis(query_words)
        weighted_genres = []
        genre_weights = {}
        for x in genres:
            if x[1] is not None:
                weighted_genres.append(x[0])
                genre_weights[x[0]] = x[1]
        d_score_updates = {}
        for movie in self.movies:
            g = self.genre_dict[movie][0]
            total_genre_score = 0
            if u'Comedy' in g and 'comedy' in weighted_genres:
                total_genre_score += genre_weights['comedy']
            if u'Action' in g and 'action' in weighted_genres:
                total_genre_score += genre_weights['action']
            if u'Crime' in g and 'crime' in weighted_genres:
                total_genre_score += genre_weights['crime']
            if u'Drama' in g and 'drama' in weighted_genres:
                total_genre_score += genre_weights['drama']
            d_score_updates[self.movies.index(movie)] = total_genre_score * .1
        return d_score_updates
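# --- Hedged sketch: the term-at-a-time cosine scoring used above ----------
# find_similar()/find_final() accumulate scores by walking the inverted
# index one query term at a time, then normalise by the document and query
# norms. A self-contained toy version with a three-document index; all
# numbers and words are made up for illustration.
import math
from collections import defaultdict

inverted_index = {             # word -> list of (doc_id, term_frequency)
    'night':   [(0, 1), (2, 2)],
    'driving': [(0, 1)],
    'love':    [(1, 1)],
}
idf = {'night': 0.4, 'driving': 1.1, 'love': 1.1}
doc_norms = [1.2, 1.1, 0.9]    # precomputed tf-idf norms, one per document

def score_query(words):
    # Build the query tf-idf vector.
    q = defaultdict(float)
    for w in words:
        q[w] += 1
    for w in q:
        q[w] *= idf.get(w, 0)
    q_norm = math.sqrt(sum(v * v for v in q.values())) or 1.0

    # Term-at-a-time accumulation of dot products.
    scores = [0.0] * len(doc_norms)
    for w, weight in q.items():
        for doc_id, tf in inverted_index.get(w, []):
            scores[doc_id] += weight * tf * idf[w]
    return sorted(((s / (doc_norms[i] * q_norm), i)
                   for i, s in enumerate(scores) if doc_norms[i]), reverse=True)

print(score_query(['driving', 'at', 'night']))   # doc 0 should rank first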
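# --- Hedged sketch: the Rocchio-style update behind pseudo_rocchio() ------
# pseudo_rocchio() blends the normalised query vector (weight a) with the
# centroid of the top-ranked documents (weight b) and clips negatives. Toy
# vectors only; a and b mirror the defaults used above (0.3 and 0.4).
def rocchio(query_vec, relevant_vecs, a=0.3, b=0.4, clip=True):
    centroid = {}
    for vec in relevant_vecs:
        for word, weight in vec.items():
            centroid[word] = centroid.get(word, 0.0) + weight / len(relevant_vecs)
    q_mod = {w: a * query_vec.get(w, 0.0) + b * centroid.get(w, 0.0)
             for w in set(query_vec) | set(centroid)}
    if clip:
        q_mod = {w: max(weight, 0.0) for w, weight in q_mod.items()}
    return q_mod

print(rocchio({'night': 1.0}, [{'night': 0.5, 'dark': 0.8}, {'dark': 0.4}]))
# {'night': 0.4, 'dark': 0.24}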
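# --- Hedged sketch: PMI scores from co-occurrence counts ------------------
# find_basic_cooccurence()/update_cooccurence() collect word co-occurrence
# and total word counts; a PMI dictionary like pmi_dict can then be derived
# as log(P(x, y) / (P(x) * P(y))). Toy counts only, Python 3 division.
import math

word_count = {'car': 8, 'night': 5, 'driving': 4}
cooccurrence = {'car': {'driving': 3, 'night': 1}}   # pair counts within a document
total = sum(word_count.values())

def pmi(x, y):
    p_xy = cooccurrence.get(x, {}).get(y, 0) / total
    p_x, p_y = word_count[x] / total, word_count[y] / total
    return math.log(p_xy / (p_x * p_y)) if p_xy > 0 else float('-inf')

print(round(pmi('car', 'driving'), 3))   # positive: the pair co-occurs often
print(round(pmi('car', 'night'), 3))     # negative: rarer than chance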
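# --- Hedged sketch: collecting the top-N unique quotes --------------------
# find_final() walks the ranked results and skips duplicate quote texts, but
# only advances its cursor in the else branch. This variant advances through
# the results unconditionally, so a result is never re-examined, and it
# stops cleanly when the list runs out.
def top_unique(results, quotes, n=5):
    seen, picked = set(), []
    for score, i in results:             # results are (score, index), best first
        if quotes[i] not in seen:
            seen.add(quotes[i])
            picked.append((score, i))
        if len(picked) == n:
            break
    return picked

print(top_unique([(0.9, 0), (0.8, 1), (0.7, 0), (0.6, 2)],
                 ['a', 'b', 'a', 'c'], n=2))   # [(0.9, 0), (0.8, 1)]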