def sentiment_analysis(self, testing_data, training_data=None):
    if training_data is None:
        training_data = self.training_data
    ## Apply sentiment analysis to data to extract new "features"
    # Initialize sentiment analyzer object
    sentiment_analyzer = SentimentAnalyzer()
    # Mark all negated words in the training data
    all_negative_words = sentiment_analyzer.all_words([mark_negation(data) for data in training_data])
    unigram_features = sentiment_analyzer.unigram_word_feats(all_negative_words, min_freq=4)
    sentiment_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_features)
    training_final = sentiment_analyzer.apply_features(training_data)
    testing_final = sentiment_analyzer.apply_features(testing_data)
    ## Train and test the model
    model = NaiveBayesClassifier.train
    classifier = sentiment_analyzer.train(model, training_final)
    for key, value in sorted(sentiment_analyzer.evaluate(testing_final).items()):
        print("{0}: {1}".format(key, value))
def train():
    n_instances = 100  # was undefined in this snippet; inferred from the 80/100 split below
    subj_docs = [(sent, 'subj')
                 for sent in subjectivity.sents(categories='subj')[:n_instances]]
    obj_docs = [(sent, 'obj')
                for sent in subjectivity.sents(categories='obj')[:n_instances]]
    train_subj_docs = subj_docs[:80]
    test_subj_docs = subj_docs[80:100]
    train_obj_docs = obj_docs[:80]
    test_obj_docs = obj_docs[80:100]
    training_docs = train_subj_docs + train_obj_docs
    testing_docs = test_subj_docs + test_obj_docs
    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words(
        [mark_negation(doc) for doc in training_docs])
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    training_set = sentim_analyzer.apply_features(training_docs)
    test_set = sentim_analyzer.apply_features(testing_docs)
    trainer = NaiveBayesClassifier.train
    classifier = sentim_analyzer.train(trainer, training_set)
    for key, value in sorted(sentim_analyzer.evaluate(test_set).items()):
        print('{0}: {1}'.format(key, value))
def trainSubjectivity():
    # Subjective vs. objective sentence classifier. Borrows from the NLTK documentation.
    # Planned for use as a pre-processing step in a larger machine-learning sentiment model,
    # which must differentiate between objective and subjective sentences.
    subjDocs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')]
    objDocs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')]
    nSubj = len(subjDocs)
    nObj = len(objDocs)
    # 90% Training, 10% Test
    subjTrain = int(.9 * nSubj)
    objTrain = int(.9 * nObj)
    trainSubj = subjDocs[:subjTrain]
    testSubj = subjDocs[subjTrain:nSubj]
    trainObj = objDocs[:objTrain]
    testObj = objDocs[objTrain:nObj]
    trainDocs = trainSubj + trainObj
    testDocs = testSubj + testObj
    # Create sentiment analyzer, mark negation, create unigram features
    sentiment = SentimentAnalyzer()
    markNegation = sentiment.all_words([mark_negation(doc) for doc in trainDocs])
    unigramFeats = sentiment.unigram_word_feats(markNegation, min_freq=4)
    sentiment.add_feat_extractor(extract_unigram_feats, unigrams=unigramFeats)
    training = sentiment.apply_features(trainDocs)
    testing = sentiment.apply_features(testDocs)
    # Train classifier
    trainer = NaiveBayesClassifier.train
    subjectivityClassifier = sentiment.train(trainer, training)
    joblib.dump(subjectivityClassifier, 'subjectivity.pkl')
    for key, value in sorted(sentiment.evaluate(testing).items()):
        print('{0}: {1}'.format(key, value))
def __init__(self): #document represented by a tuple (sentence,labelt) n_instances = 100 subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]] #split subj and objinstances to keep a balanced uniform class distribution in both train and test sets. train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:100] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:100] training_docs = train_subj_docs+train_obj_docs testing_docs = test_subj_docs+test_obj_docs #train classifier sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs]) #use simple unigram word features, handling negation unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) #apply features to obtain a feature_value representations of our datasets training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) self.trainer = NaiveBayesClassifier.train self.classifier = sentim_analyzer.train(self.trainer, training_set) for key,value in sorted(sentim_analyzer.evaluate(test_set).items()): print('{0}: {1}'.format(key, value)) self.sid = SentimentIntensityAnalyzer()
def sentiment_classifier(df): df = df.copy() # prepping data df = df[['txgot_binary', 'Convo_1']].dropna() text_process_col = pre.process_corpus(np.asarray(df['Convo_1']), []) txgot_col = np.asarray(df['txgot_binary']) # turns into list of tuples (convo, label) docs = list(zip(text_process_col, txgot_col)) shuffle(docs) training_docs = docs[:int(len(docs) * 2 / 3)] test_docs = docs[int(len(docs) * 2 / 3):] # sentiment analyzer sentim_analyzer = SentimentAnalyzer() # simple unigram word features, handling negation all_words_neg = sentim_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # train classifier training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(test_docs) trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) # show results for key, value in sentim_analyzer.evaluate(test_set).items(): print('{}: {}'.format(key, value))
def load_data(self, classifier=None):
    # source: http://www.nltk.org/book/ch06.html, http://www.nltk.org/howto/sentiment.html
    print("Loading training data...", end="")
    sys.stdout.flush()
    training_docs, testing_docs = self.load_web_reviews()
    # documents = [(word_tokenize(movie_reviews.raw(fileid)), category)
    #              for category in movie_reviews.categories()
    #              for fileid in movie_reviews.fileids(category)]
    # random.shuffle(documents)
    # cutoff = int(len(documents) * 0.1)
    # training_docs, testing_docs = documents[cutoff:], documents[:cutoff]
    print("Done!")
    print("Extracting unigram features and applying to training data...", end="")
    sys.stdout.flush()
    sentim_analyzer = SentimentAnalyzer(classifier=classifier)
    all_words = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words)  # , top_n=5000)
    # print(len(unigram_feats))
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats,
                                       handle_negation=True)
    training_set = sentim_analyzer.apply_features(training_docs)
    testing_set = sentim_analyzer.apply_features(testing_docs)
    print("Done!")
    return sentim_analyzer, training_set, testing_set
def train():
    positive_tweets = read_tweets('/root/295/new/positive.txt', 'positive')
    negative_tweets = read_tweets('/root/295/new/negative.txt', 'negative')
    print(len(positive_tweets))
    print(len(negative_tweets))
    # pos_train = positive_tweets[:2000]
    # neg_train = negative_tweets[:2000]
    # pos_test = positive_tweets[2001:3000]
    # neg_test = negative_tweets[2001:3000]

    # 80/20 split; use integer division and the length of the matching list for each class
    pos_train = positive_tweets[:len(positive_tweets) * 80 // 100]
    neg_train = negative_tweets[:len(negative_tweets) * 80 // 100]
    pos_test = positive_tweets[len(positive_tweets) * 80 // 100:]
    neg_test = negative_tweets[len(negative_tweets) * 80 // 100:]
    training_data = pos_train + neg_train
    test_data = pos_test + neg_test

    sentim_analyzer = SentimentAnalyzer()
    all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_data])
    # print(all_words_neg)
    unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
    # print(unigram_feats)
    print(len(unigram_feats))
    sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    training_set = sentim_analyzer.apply_features(training_data)
    test_set = sentim_analyzer.apply_features(test_data)
    print(test_set)
    trainer = NaiveBayesClassifier.train
    classifier = sentim_analyzer.train(trainer, training_set)
    for key, value in sorted(sentim_analyzer.evaluate(test_set).items()):
        print('{0}: {1}'.format(key, value))
    print(sentim_analyzer.classify(tokenize_sentance('I hate driving car at night')))
    return sentim_analyzer
def runSentanal(train, test): sentanal = SentimentAnalyzer() all_words_neg = sentanal.all_words([mark_negation(doc) for doc in train]) unigramFeats = sentanal.unigram_word_feats(all_words_neg, min_freq=4) sentanal.add_feat_extractor(extract_unigram_feats, unigrams=unigramFeats, handle_negation=True) # bigramFeats = sentanal. # sentanal.add_feat_extractor(extract_bigram_feats, bigrams=bigramFeats) trainList = sentanal.apply_features(train) testList = sentanal.apply_features(test) trainer = NaiveBayesClassifier.train classifier = sentanal.train(trainer, trainList) classifier.show_most_informative_features() # creates array for storing values values = [] # display results for key, value in sorted(sentanal.evaluate(testList).items()): print('{0}: {1}'.format(key, value)) values.append(value) # write results to csv with open(OUTPUT_CSV, mode='a') as csvFile: writer = csv.writer(csvFile, delimiter=',') writer.writerow(values)
def demo_subjectivity(trainer, save_analyzer=False, n_instances=None, output=None): """ Train and test a classifier on instances of the Subjective Dataset by Pang and Lee. The dataset is made of 5000 subjective and 5000 objective sentences. All tokens (words and punctuation marks) are separated by a whitespace, so we use the basic WhitespaceTokenizer to parse the data. :param trainer: `train` method of a classifier. :param save_analyzer: if `True`, store the SentimentAnalyzer in a pickle file. :param n_instances: the number of total sentences that have to be used for training and testing. Sentences will be equally split between positive and negative. :param output: the output file where results have to be reported. """ from nltk.sentiment import SentimentAnalyzer from nltk.corpus import subjectivity if n_instances is not None: n_instances = int(n_instances/2) subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]] # We separately split subjective and objective instances to keep a balanced # uniform class distribution in both train and test sets. train_subj_docs, test_subj_docs = split_train_test(subj_docs) train_obj_docs, test_obj_docs = split_train_test(obj_docs) training_docs = train_subj_docs+train_obj_docs testing_docs = test_subj_docs+test_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs]) # Add simple unigram word features handling negation unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # Apply features to obtain a feature-value representation of our datasets training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) classifier = sentim_analyzer.train(trainer, training_set) try: classifier.show_most_informative_features() except AttributeError: print('Your classifier does not provide a show_most_informative_features() method.') results = sentim_analyzer.evaluate(test_set) if save_analyzer == True: save_file(sentim_analyzer, 'sa_subjectivity.pickle') if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown(output, Dataset='subjectivity', Classifier=type(classifier).__name__, Tokenizer='WhitespaceTokenizer', Feats=extr, Instances=n_instances, Results=results) return sentim_analyzer
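# Usage sketch for demo_subjectivity (assumes the function above plus NLTK are importable):
# train with a Naive Bayes trainer and reuse the returned analyzer on new, whitespace-tokenized
# text. The n_instances value and example sentence are illustrative, not from the original.
from nltk.classify import NaiveBayesClassifier

subjectivity_analyzer = demo_subjectivity(NaiveBayesClassifier.train, save_analyzer=True,
                                          n_instances=2000)
print(subjectivity_analyzer.classify("the plot drags and the jokes never land".split()))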
def addfeatures(cleaned_tokens_list): sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words( [mark_negation(token_list) for token_list in cleaned_tokens_list]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
def GetSampleTrainDataForNLTK(self, trainSet): sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words( [mark_negation(doc) for doc in trainSet]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) sampleTrainData = sentim_analyzer.apply_features(trainSet) return sampleTrainData
def main(): x, y = load_datasets(["../datasets/sentiment_uci/yelp_labelled.txt"]) stopwords = set() with open('../stopwords.txt', 'r') as f: for w in f: stopwords.add(w.strip()) tok = TweetTokenizer() x = [remove_stopwords(tok.tokenize(s.lower()), stopwords) for s in x] x = np.array(x) accumulate = dict() folds = 10 for train_idx, test_idx in StratifiedKFold(y=y, n_folds=folds, shuffle=True): train_x, train_y = x[train_idx], y[train_idx] test_x, test_y = x[test_idx], y[test_idx] # train_x = [remove_stopwords(tok.tokenize(s), stopwords) for s in train_x] # test_x = [remove_stopwords(tok.tokenize(s), stopwords) for s in test_x] train_docs = [(sent, label) for sent, label in zip(train_x, train_y)] test_docs = [(sent, label) for sent, label in zip(test_x, test_y)] cls = SentimentAnalyzer() # train words_with_neg = cls.all_words([mark_negation(a) for a in train_x]) unigram_feats = cls.unigram_word_feats(words_with_neg) # bigram_feats = cls.bigram_collocation_feats(train_x) cls.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats, handle_negation=True) # cls.add_feat_extractor(extract_bigram_feats, bigrams=bigram_feats) training_set = cls.apply_features(train_docs, labeled=True) cls.train(MaxentClassifier.train, training_set, max_iter=10, trace=0) # test & evaluate test_set = cls.apply_features(test_docs) for key, value in sorted(cls.evaluate(test_set).items()): print('\t{0}: {1}'.format(key, value)) accumulate.setdefault(key, 0.0) accumulate[key] += value if value is not None else 0.0 print("Averages") for key, value in sorted(accumulate.items()): print('\tAverage {0}: {1}'.format(key, value / folds))
def train_lr(training_set): sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_set]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_set) trainer = logreg.train classifier = sentim_analyzer.train(trainer, training_set) return [sentim_analyzer,classifier]
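# Usage sketch for train_lr: `training_docs` is assumed to be a list of (token_list, label)
# pairs built like the other snippets in this file, and `logreg` must be the classifier
# wrapper train_lr was written against (not shown here). The example sentence is illustrative.
analyzer, lr_classifier = train_lr(training_docs)
print(analyzer.classify(['the', 'service', 'was', 'not', 'good']))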
def demo_movie_reviews(trainer, n_instances=None, output=None): """ Train classifier on all instances of the Movie Reviews dataset. The corpus has been preprocessed using the default sentence tokenizer and WordPunctTokenizer. Features are composed of: - most frequent unigrams :param trainer: `train` method of a classifier. :param n_instances: the number of total reviews that have to be used for training and testing. Reviews will be equally split between positive and negative. :param output: the output file where results have to be reported. """ from nltk.corpus import movie_reviews from nltk.sentiment import SentimentAnalyzer if n_instances is not None: n_instances = int(n_instances/2) pos_docs = [(list(movie_reviews.words(pos_id)), 'pos') for pos_id in movie_reviews.fileids('pos')[:n_instances]] neg_docs = [(list(movie_reviews.words(neg_id)), 'neg') for neg_id in movie_reviews.fileids('neg')[:n_instances]] # We separately split positive and negative instances to keep a balanced # uniform class distribution in both train and test sets. train_pos_docs, test_pos_docs = split_train_test(pos_docs) train_neg_docs, test_neg_docs = split_train_test(neg_docs) training_docs = train_pos_docs+train_neg_docs testing_docs = test_pos_docs+test_neg_docs sentim_analyzer = SentimentAnalyzer() all_words = sentim_analyzer.all_words(training_docs) # Add simple unigram word features unigram_feats = sentim_analyzer.unigram_word_feats(all_words, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # Apply features to obtain a feature-value representation of our datasets training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) classifier = sentim_analyzer.train(trainer, training_set) try: classifier.show_most_informative_features() except AttributeError: print('Your classifier does not provide a show_most_informative_features() method.') results = sentim_analyzer.evaluate(test_set) if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown(output, Dataset='Movie_reviews', Classifier=type(classifier).__name__, Tokenizer='WordPunctTokenizer', Feats=extr, Results=results, Instances=n_instances)
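# Usage sketch: run the movie-reviews demo with a Naive Bayes trainer on 1000 reviews
# (500 per class) and write the evaluation table; the output filename is illustrative.
from nltk.classify import NaiveBayesClassifier

demo_movie_reviews(NaiveBayesClassifier.train, n_instances=1000, output='movie_reviews_results.md')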
def sentiment_analysis(data): from nltk.classify import NaiveBayesClassifier from nltk.corpus import subjectivity from nltk.sentiment import SentimentAnalyzer from nltk.sentiment.util import * n_instances = 100 subj_docs = [ (sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances] ] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]] train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:100] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:100] training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) for key, value in sorted(sentim_analyzer.evaluate(test_set).items()): print('{0}: {1}'.format(key, value)) from nltk.sentiment.vader import SentimentIntensityAnalyzer from nltk import tokenize sid = SentimentIntensityAnalyzer() for line in data: ss = sid.polarity_scores(line['line_text']) line['compound'] = ss['compound'] line['neg'] = ss['neg'] line['pos'] = ss['pos'] line['neu'] = ss['neu']
def train_sentiment(): instances = 8000 subj = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:instances]] obj = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:instances]] train_subj = subj train_obj = obj train_set = train_subj + train_obj sentiment = SentimentAnalyzer() all_neg = sentiment.all_words([mark_negation(doc) for doc in train_set]) uni_g = sentiment.unigram_word_feats(all_neg, min_freq=4) sentiment.add_feat_extractor(extract_unigram_feats, unigrams=uni_g) trained_set = sentiment.apply_features(train_set) nb = NaiveBayesClassifier.train classifier = sentiment.train(nb, trained_set) return classifier
def train_model(training): ## Apply sentiment analysis to data to extract new "features" # Initialize sentiment analyzer object sentiment_analyzer = SentimentAnalyzer() # Mark all negative words in training data, using existing list of negative words all_negative_words = sentiment_analyzer.all_words([mark_negation(data) for data in training]) unigram_features = sentiment_analyzer.unigram_word_feats(all_negative_words, min_freq=4) len(unigram_features) sentiment_analyzer.add_feat_extractor(extract_unigram_feats,unigrams=unigram_features) training_final = sentiment_analyzer.apply_features(training) return [training_final]
def subjectivity_classifier(): from nltk.classify import NaiveBayesClassifier from nltk.corpus import subjectivity from nltk.sentiment import SentimentAnalyzer from nltk.sentiment.util import * """ Initializes and trains categorical subjectivity analyzer """ N_INSTANCES = 100 subj_docs = [ (sent, 'subj') for sent in subjectivity.sents(categories='subj')[:N_INSTANCES] ] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:N_INSTANCES]] train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:] training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs sent_analyzer = SentimentAnalyzer() all_words_neg = sent_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = sent_analyzer.unigram_word_feats(all_words_neg, min_freq=4) print(f"unigram feats: {len(unigram_feats)}") sent_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sent_analyzer.apply_features(training_docs) test_set = sent_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = sent_analyzer.train(trainer, training_set) for k, v in sorted(sent_analyzer.evaluate(test_set).items()): print(f"{k}: {v}") return sent_analyzer
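# Usage sketch: the returned analyzer keeps its registered unigram feature extractor, so it
# can classify new tokenized sentences directly; the example sentence is illustrative.
analyzer = subjectivity_classifier()
print(analyzer.classify("the film is a quiet , moving meditation on loss".split()))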
def get_nltk_NB(NEG_DATA, POS_DATA, num_train): train_neg, test_neg = get_nltk_train_test(NEG_DATA, 'neg', num_train) train_pos, test_pos = get_nltk_train_test(POS_DATA, 'pos', num_train) training_docs = train_neg + train_pos testing_docs = test_neg + test_pos sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) #results = [] for key,value in sorted(sentim_analyzer.evaluate(test_set).items()): print('{0}: {1}'.format(key,value))
def analyze_sentiment(paragraph): n_instances = 100 subj_docs = [ (sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances] ] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]] train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:100] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:100] training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) sid = SentimentIntensityAnalyzer() total_sum = 0 count = 0.0 sentences = sent_tokenize(paragraph) for sentence in sentences: total_sum += sid.polarity_scores(sentence)["compound"] count += 1 return total_sum * 10 / count
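# Usage sketch for analyze_sentiment. Note that the returned value comes only from the VADER
# compound scores averaged over sentences (scaled by 10); the Naive Bayes subjectivity
# classifier trained inside the function is not consulted for the score. Example text is illustrative.
score = analyze_sentiment("The staff were friendly. The wait, however, was far too long.")
print(score)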
def run_sa_twitt(train, test):
    a = SentimentAnalyzer()
    tr = NaiveBayesClassifier.train
    all_words = [word for word in a.all_words(train)]
    # Add simple unigram word features
    unigram_feats = a.unigram_word_feats(all_words, top_n=1000)
    a.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
    # Add bigram collocation features (built from the training tweets passed in as `train`)
    bigram_collocs_feats = a.bigram_collocation_feats(
        [tweet[0] for tweet in train], top_n=100, min_freq=12)
    a.add_feat_extractor(extract_bigram_feats, bigrams=bigram_collocs_feats)
    tr_set = a.apply_features(train)
    test_set = a.apply_features(test)
    # Training
    clf = a.train(tr, tr_set)
    res = a.evaluate(test_set)
    print(res)
def get_objectivity_analyzer(): n_instances = 100 subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]] train_subj_docs = subj_docs train_obj_docs = obj_docs training_docs = train_subj_docs+train_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_docs) trainer = NaiveBayesClassifier.train sentiment_classifier = sentim_analyzer.train(trainer, training_set) return sentim_analyzer
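# Usage sketch: classify a new sentence with the returned objectivity analyzer; the tokenizer
# choice and the example sentence are illustrative.
from nltk.tokenize import word_tokenize

objectivity_analyzer = get_objectivity_analyzer()
print(objectivity_analyzer.classify(word_tokenize("The film was released in 2009 and runs 120 minutes.")))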
def getTrainer():
    if not LOAD:
        dic = getData("/home/cioni/git/sentimentw/inputFolder/positive.csv",
                      "/home/cioni/git/sentimentw/inputFolder/negative.csv", 5000)
        train = dic
        snt = SentimentAnalyzer()
        wrds = snt.all_words(dic, True)
        feat = snt.unigram_word_feats(wrds, min_freq=3)
        snt.add_feat_extractor(nltk.sentiment.util.extract_unigram_feats, unigrams=feat)
        train = snt.apply_features(train)
        trainer = NaiveBayesClassifier.train
        # train on the feature-encoded data and keep the fitted classifier
        classifier = snt.train(trainer, train)
        with open("classifierSmall2.pickle", "wb+") as clFile:
            pickle.dump(snt, clFile)
        return snt
    else:
        load_cls = open("classifier.pickle", "rb")
        snt = pickle.load(load_cls)
        return snt
def prepare_review(review):
    global word_counts
    global review_counter
    review_counter += 1
    print(review_counter)
    review = review.lower()
    review = word_tokenize(review)
    s_a = SentimentAnalyzer()
    # mark_negation tags words between a negation word (e.g., 'not') and the next punctuation with '_NEG'
    # review = s_a.all_words([mark_negation(review)])
    review = [word for word in review if word]  # remove empty words
    # Word lemmatizers have a difficult time handling the word 'hate' and think it stems from
    # the word 'hat', so map 'hate'/'hatin...' forms back to 'hate' manually.
    review = [
        word if word[:4] != 'hate' and word[:5] != 'hatin' else 'hate'
        for word in review
    ]
    pos_tags = pos_tag(review)
    review = [
        WordNetLemmatizer().lemmatize(word[0], get_wordnet_pos(word[1]))
        for word in pos_tags
    ]
    review = s_a.all_words([mark_negation(review)])
    review = [word for word in review if word not in stopwords.words('english')]
    # https://stackoverflow.com/questions/5843518/remove-all-special-characters-punctuation-and-spaces-from-string
    review = [re.sub(r'[^a-zA-Z\s]', '', word) for word in review]
    review = [word for word in review if len(word) > 1]
    for word in review:
        if word in word_counts:
            word_counts[word] += 1
        else:
            word_counts[word] = 1
    review = ' '.join(review)
    return review
neg_data['tweet'] = negative_json
neg_data['senti'] = 'neg'

result = pd.concat([pos_data, neg_data])
result = result.sample(frac=1).reset_index(drop=True)
# print(result)
training_tweets, testing_tweets = split_train_test(result)
# x_train, x_test, y_train, y_test = train_test_split(result['tweet'], result['senti'], test_size=0.20, random_state=0)

sentim_analyzer = SentimentAnalyzer()
stopwords = stopwords.words('english')
all_words = [word for word in sentim_analyzer.all_words(training_tweets)
             if word.lower() not in stopwords]
print(all_words)

# Add simple unigram word features
unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000)
sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)

# Add bigram collocation features
bigram_collocs_feats = sentim_analyzer.bigram_collocation_feats(
    [tweet[0] for tweet in training_tweets], top_n=100, min_freq=12)
sentim_analyzer.add_feat_extractor(extract_bigram_feats, bigrams=bigram_collocs_feats)

training_set = sentim_analyzer.apply_features(training_tweets)
test_set = sentim_analyzer.apply_features(testing_tweets)

# `trainer` was not defined in this snippet; Naive Bayes matches the other examples here
trainer = NaiveBayesClassifier.train
classifier = sentim_analyzer.train(trainer, training_set)
def get_tweets(self, query, count=10): tweets = [] try: #get the tweets from twitter fetched_tweets = self.api.search(q=query, count=count) n_instances = 100 subj_docs = [ (sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances] ] obj_docs = [ (sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances] ] train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:100] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:100] training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs emotion_analyzer = SentimentAnalyzer() #get the negative words for feature extraction all_radical_slurs = emotion_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = emotion_analyzer.unigram_word_feats( all_radical_slurs, min_freq=4) emotion_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = emotion_analyzer.apply_features(training_docs) test_set = emotion_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = emotion_analyzer.train(trainer, training_set) #test sentences sentences = [ "Ravi is the worst boy in class", "The story is full of mean bitchy characters", "I had a good day!", "The day was okay", "The day was very bad", "Harry potter is a good book", "New Tata electric car is a piece of shit", "It has been a long time since I had a good food", "Stop acting as a asshole" ] sid = SentimentIntensityAnalyzer() for sentence in sentences: print(sentence) ss = sid.polarity_scores(sentence) for k in sorted(ss): print('{0}: {1}, '.format(k, ss[k]), end='') print() for tweet in fetched_tweets: print(tweet.text) ss = sid.polarity_scores(tweet.text) for k in sorted(ss): print('{0}: {1}, '.format(k, ss[k]), end='') print() return tweets except tweepy.TweepError as e: print("Error : " + str(e))
class SuicideClassifier(object): def __init__(self, sentiment_only, num_phrases_to_track=20): # neg_phrases = filter_negative_phrases(load_csv_sentences('thoughtsandfeelings.csv')) # pos_phrases = filter_positive_phrases(load_csv_sentences('spiritualforums.csv')) # file_pos = open("pos_phrases.txt", 'w') # file_neg = open("neg_phrases.txt", 'w') # for item in pos_phrases: # print>>file_pos, item # for item in neg_phrases: # print>>file_neg, item self.recent_sentiment_scores = [] neg_file = open("ALL_neg_phrases_filtered.txt", "r") pos_file = open("webtext_phrases_with_lots_of_words.txt", "r") neg_phrases = neg_file.readlines() pos_phrases = pos_file.readlines() neg_docs = [] pos_docs = [] for phrase in neg_phrases: neg_docs.append((phrase.split(), 'suicidal')) for phrase in pos_phrases[:len(neg_phrases)]: pos_docs.append((phrase.split(), 'alright')) print len(neg_docs) print len(pos_docs) # negcutoff = len(neg_docs) * 3 / 4 # poscutoff = len(pos_docs) * 3 / 4 negcutoff = -200 poscutoff = -200 train_pos_docs = pos_docs[:poscutoff] test_pos_docs = pos_docs[poscutoff:] train_neg_docs = neg_docs[:negcutoff] test_neg_docs = neg_docs[negcutoff:] training_docs = train_pos_docs + train_neg_docs testing_docs = test_pos_docs + test_neg_docs self.sentim_analyzer = SentimentAnalyzer() if not sentiment_only: all_words = self.sentim_analyzer.all_words([doc for doc in training_docs]) unigram_feats = self.sentim_analyzer.unigram_word_feats(all_words, min_freq=1) self.sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) self.sentim_analyzer.add_feat_extractor(vader_sentiment_feat) # bigram_feats = self.sentim_analyzer.bigram_collocation_feats(all_words, min_freq=1) # self.sentim_analyzer.add_feat_extractor(extract_bigram_feats, bigrams=bigram_feats) training_set = self.sentim_analyzer.apply_features(training_docs) test_set = self.sentim_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train self.classifier = self.sentim_analyzer.train(trainer, training_set) for key, value in sorted(self.sentim_analyzer.evaluate(test_set).items()): print('{0}: {1}'.format(key, value)) self.classifier.show_most_informative_features(20) def test(self, phrase): return self.sentim_analyzer.classify(phrase.split()) def update_sentiments(self, value): now = datetime.datetime.now() self.recent_sentiment_scores.append([now, value]) self.recent_sentiment_scores = [x for x in self.recent_sentiment_scores if x[ 0] > now - datetime.timedelta(seconds=60)] print sum([x[1] for x in self.recent_sentiment_scores]) / len(self.recent_sentiment_scores) return sum([x[1] for x in self.recent_sentiment_scores]) / len(self.recent_sentiment_scores)
objective_sentences = [(sent, 'obj')
                       for sent in subjectivity.sents(categories='obj')[:instances]]

# Divide each dataset into 20% test, 80% train
train_subjective = subjective_sentences[:80]
test_subjective = subjective_sentences[80:]
train_objective = objective_sentences[:80]
test_objective = objective_sentences[80:]
training_docs = train_objective + train_subjective
testing_docs = test_objective + test_subjective

analyzer = SentimentAnalyzer()
negative_words = analyzer.all_words(
    [mark_negation(doc) for doc in training_docs])
features = analyzer.unigram_word_feats(negative_words, min_freq=4)
analyzer.add_feat_extractor(extract_unigram_feats, unigrams=features)
training_set = analyzer.apply_features(training_docs)
test_set = analyzer.apply_features(testing_docs)

## Training the classifier
trainer = NaiveBayesClassifier.train
classifier = analyzer.train(trainer, training_set)
for key, value in sorted(analyzer.evaluate(test_set).items()):
    print('{0}: {1}'.format(key, value))
print("NLP classifier ready")
    return math.log(len(bloblist) / (1 + n_containing(word, bloblist)))

def tfidf(word, blob, bloblist):
    return tf(word, blob) * idf(word, bloblist)

bloblist = [text for text in df.head(100)['body']]
for i, blob in enumerate(bloblist):
    print("Top words in document {}".format(i + 1))
    scores = {word: tfidf(word, blob, bloblist) for word in blob.words}
    sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
    for word, score in sorted_words[:3]:
        print("\tWord: {}, TF-IDF: {}".format(word, round(score, 5)))

# polarity_scores belongs to the VADER SentimentIntensityAnalyzer, not SentimentAnalyzer
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.vader import SentimentIntensityAnalyzer

sid = SentimentIntensityAnalyzer()
for sentence in bloblist:
    print(sentence)
    ss = sid.polarity_scores(sentence)
    for k in sorted(ss):
        print('{0}: {1}, '.format(k, ss[k]), end='')
    print()

sentim_analyzer = SentimentAnalyzer()
all_words_neg = sentim_analyzer.all_words([doc for doc in bloblist])

tokens = df['tokens'][2]
tokens
tagged = nltk.pos_tag(tokens)
test_objective = objective[int(0.8 * n):n]

# Now aggregate the training and test sets
training = training_subjective + training_objective
test = test_subjective + test_objective

## Apply sentiment analysis to data to extract new "features"
# Initialize sentiment analyzer object
sentiment_analyzer = SentimentAnalyzer()
# Mark all negated words in the training data
all_negative_words = sentiment_analyzer.all_words([mark_negation(data) for data in training])
unigram_features = sentiment_analyzer.unigram_word_feats(all_negative_words, min_freq=4)
sentiment_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_features)
training_final = sentiment_analyzer.apply_features(training)
test_final = sentiment_analyzer.apply_features(test)

## Train and test the model
model = NaiveBayesClassifier.train
classifier = sentiment_analyzer.train(model, training_final)
for key, value in sorted(sentiment_analyzer.evaluate(test_final).items()):
    print("{0}: {1}".format(key, value))
import re
import sys
import nltk
from nltk.tokenize import word_tokenize
from nltk.classify import NaiveBayesClassifier
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.util import *

f = open("training_set.txt", 'r')
sa = SentimentAnalyzer()
trainingset = []
for line in f:
    senti = line.split(",")[0]
    content = line[len(senti) + 1:]
    tokens = word_tokenize(content.rstrip())
    trainingset.append((tokens, senti))

all_words_neg = sa.all_words([mark_negation(doc) for doc in trainingset])
unigram_feats = sa.unigram_word_feats(all_words_neg, min_freq=4)
sa.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
training_set = sa.apply_features(trainingset)

for line in sys.stdin:
    if "username" in line:
        continue
    tweetWords = []
    tweet = line.split(";")[4]
    likes = line.split(";")[3]
    likes = int(likes)
    if likes == 0:
        num = 1
    else:
# In[13]:

# Get the list of English stopwords (plus their _NEG variants)
stopwords_all = []
for word in stopwords.words('english'):
    stopwords_all.append(word)
    stopwords_all.append(word + '_NEG')

# In[14]:

# Take 10,000 tweets from the training dataset and keep all words that are not stopwords
dataset_treino_amostra = dataset_treino.take(10000)

# In[15]:

all_words_neg = sentiment_analyzer.all_words(
    [mark_negation(doc) for doc in dataset_treino_amostra])
all_words_neg_nostops = [x for x in all_words_neg if x not in stopwords_all]

# In[16]:

# Build unigram (n-gram: sequence of words) features and register the extractor
unigram_feats = sentiment_analyzer.unigram_word_feats(all_words_neg_nostops, top_n=200)
sentiment_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
training_set = sentiment_analyzer.apply_features(dataset_treino_amostra)

# In[17]:

type(training_set)
# (100, 100)
subj_docs[0]
# (['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one',
#   'thing', 'is', 'a', 'small', 'gem', '.'], 'subj')
train_subj_docs = subj_docs[:80]
test_subj_docs = subj_docs[80:100]
train_obj_docs = obj_docs[:80]
test_obj_docs = obj_docs[80:100]
training_docs = train_subj_docs + train_obj_docs
testing_docs = test_subj_docs + test_obj_docs
sentim_analyzer = SentimentAnalyzer()
all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=1)
len(unigram_feats)
sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
training_set = sentim_analyzer.apply_features(training_docs)
test_set = sentim_analyzer.apply_features(testing_docs)
trainer = NaiveBayesClassifier.train
classifier = sentim_analyzer.train(trainer, training_set)
for key, value in sorted(sentim_analyzer.evaluate(test_set).items()):
    print('{0}: {1}'.format(key, value))

from nltk.sentiment.vader import SentimentIntensityAnalyzer

sentences = ["VADER is smart, handsome, and funny.",   # positive sentence example
             "VADER is smart, handsome, and funny!",   # punctuation emphasis handled correctly (sentiment intensity adjusted)
class SentimentAnalyzerTry(object): def __init__(self): self.n_instances = 1000 self.n_training = int(self.n_instances * 0.8) self.n_testing = int(self.n_instances * 0.2) self.sentim_analyzer = SentimentAnalyzer() def prepare_training_and_test_data(self): """ Each document is represented by a tuple (sentence, label). The sentence is tokenized, so it is represented by a list of strings. E.g: (['smart', 'and', 'alert', ',', 'thirteen', 'conversations', 'about', 'one', 'thing', 'is', 'a', 'small', 'gem', '.'], 'subj') """ subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:self.n_instances]] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:self.n_instances]] # We separately split subjective and objective instances to keep a balanced uniform class distribution in both train and test sets. training_end = self.n_training testing_start = training_end testing_end = testing_start + self.n_testing train_subj_docs = subj_docs[:training_end] test_subj_docs = subj_docs[testing_start:testing_end] train_obj_docs = obj_docs[:training_end] test_obj_docs = obj_docs[testing_start:testing_end] self.training_docs = train_subj_docs + train_obj_docs self.testing_docs = test_subj_docs + test_obj_docs def extract_training_test_features(self): # We use simple unigram word features, handling negation. self.all_words_neg = self.mark_negative_sentence(self.training_docs) self.unigram_feats = self.sentim_analyzer.unigram_word_feats(self.all_words_neg, min_freq=4) self.sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=self.unigram_feats) # We apply features to obtain a feature-value representation of our datasets. self.training_set = self.sentim_analyzer.apply_features(self.training_docs) self.test_set = self.sentim_analyzer.apply_features(self.testing_docs) def mark_negative_sentence(self, docs): all_words_neg = self.sentim_analyzer.all_words([mark_negation(doc) for doc in docs]) return all_words_neg def train_sentiment_analyzer(self, evaluate=True): self.prepare_training_and_test_data() self.extract_training_test_features() # We can now train our classifier on the training set, and subsequently output the evaluation results self.trainer = NaiveBayesClassifier.train self.classifier = self.sentim_analyzer.train(self.trainer, self.training_set) if evaluate: self.evaluate_classifier() def evaluate_classifier(self): for key, value in sorted(self.sentim_analyzer.evaluate(self.test_set).items()): print('{0}: {1}'.format(key, value)) def classify_text(self, text): self.sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=self.unigram_feats) return self.classifier.classify(self.sentim_analyzer.extract_features(tokenize.word_tokenize(text)))
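# Usage sketch for SentimentAnalyzerTry: train on the subjectivity corpus, print the
# evaluation metrics, then classify raw text; the example sentence is illustrative.
sa = SentimentAnalyzerTry()
sa.train_sentiment_analyzer(evaluate=True)
print(sa.classify_text("This was a surprisingly thoughtful and well acted film."))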
#call the function on each row in the dataset train_data = train_data_raw.map(lambda line: get_row(line)) #create a SentimentAnalyzer object sentim_analyzer = SentimentAnalyzer() #get list of stopwords (with _NEG) to use as a filter stopwords_all = [] for word in stopwords.words('english'): stopwords_all.append(word) stopwords_all.append(word + '_NEG') #take 10,000 Tweets from this training dataset for this example and get all the words #that are not stop words train_data_sample = train_data.take(10000) all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in train_data_sample]) all_words_neg_nostops = [x for x in all_words_neg if x not in stopwords_all] #create unigram features and extract features unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg_nostops, top_n=200) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(train_data_sample) #train the model trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) #classify test sentences test_sentence1 = [(['this', 'program', 'is', 'bad'], '')] test_sentence2 = [(['tough', 'day', 'at', 'work', 'today'], '')] test_sentence3 = [(['good', 'wonderful', 'amazing', 'awesome'], '')]
text = tokenizer.tokenize(line[5].decode("utf-8")) text = [token for token in text if token != u'\ufffd'] test.append((text, sent)) return test, train # Read in annotated data NUM_TRAIN = 10000 NUM_TEST = 2500 test, train = read_input("train.csv",NUM_TRAIN,NUM_TEST) sentiment_analyzer = SentimentAnalyzer() #all_words = sentiment_analyzer.all_words([mark_negation(doc[0]) for doc in train]) all_words = sentiment_analyzer.all_words([doc[0] for doc in train]) unigrams = sentiment_analyzer.unigram_word_feats(all_words, min_freq=4) # print unigrams sentiment_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigrams) training_set=sentiment_analyzer.apply_features(train) test_set=sentiment_analyzer.apply_features(test) trainer = NaiveBayesClassifier.train classifier = sentiment_analyzer.train(trainer, training_set) save_file(sentiment_analyzer, "sentiment_classifier.pkl") for key,value in sorted(sentiment_analyzer.evaluate(test_set).items()): print("{0}: {1}".format(key,value))
def demo_tweets(trainer, n_instances=None, output=None): """ Train and test Naive Bayes classifier on 10000 tweets, tokenized using TweetTokenizer. Features are composed of: - 1000 most frequent unigrams - 100 top bigrams (using BigramAssocMeasures.pmi) :param trainer: `train` method of a classifier. :param n_instances: the number of total tweets that have to be used for training and testing. Tweets will be equally split between positive and negative. :param output: the output file where results have to be reported. """ from nltk.tokenize import TweetTokenizer from nltk.sentiment import SentimentAnalyzer from nltk.corpus import twitter_samples, stopwords # Different customizations for the TweetTokenizer tokenizer = TweetTokenizer(preserve_case=False) # tokenizer = TweetTokenizer(preserve_case=True, strip_handles=True) # tokenizer = TweetTokenizer(reduce_len=True, strip_handles=True) if n_instances is not None: n_instances = int(n_instances / 2) fields = ['id', 'text'] positive_json = twitter_samples.abspath("positive_tweets.json") positive_csv = 'positive_tweets.csv' json2csv_preprocess(positive_json, positive_csv, fields, limit=n_instances) negative_json = twitter_samples.abspath("negative_tweets.json") negative_csv = 'negative_tweets.csv' json2csv_preprocess(negative_json, negative_csv, fields, limit=n_instances) neg_docs = parse_tweets_set(negative_csv, label='neg', word_tokenizer=tokenizer) pos_docs = parse_tweets_set(positive_csv, label='pos', word_tokenizer=tokenizer) # We separately split subjective and objective instances to keep a balanced # uniform class distribution in both train and test sets. train_pos_docs, test_pos_docs = split_train_test(pos_docs) train_neg_docs, test_neg_docs = split_train_test(neg_docs) training_tweets = train_pos_docs + train_neg_docs testing_tweets = test_pos_docs + test_neg_docs sentim_analyzer = SentimentAnalyzer() # stopwords = stopwords.words('english') # all_words = [word for word in sentim_analyzer.all_words(training_tweets) if word.lower() not in stopwords] all_words = [word for word in sentim_analyzer.all_words(training_tweets)] # Add simple unigram word features unigram_feats = sentim_analyzer.unigram_word_feats(all_words, top_n=1000) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) # Add bigram collocation features bigram_collocs_feats = sentim_analyzer.bigram_collocation_feats( [tweet[0] for tweet in training_tweets], top_n=100, min_freq=12) sentim_analyzer.add_feat_extractor(extract_bigram_feats, bigrams=bigram_collocs_feats) training_set = sentim_analyzer.apply_features(training_tweets) test_set = sentim_analyzer.apply_features(testing_tweets) classifier = sentim_analyzer.train(trainer, training_set) # classifier = sentim_analyzer.train(trainer, training_set, max_iter=4) try: classifier.show_most_informative_features() except AttributeError: print( 'Your classifier does not provide a show_most_informative_features() method.' ) results = sentim_analyzer.evaluate(test_set) if output: extr = [f.__name__ for f in sentim_analyzer.feat_extractors] output_markdown(output, Dataset='labeled_tweets', Classifier=type(classifier).__name__, Tokenizer=tokenizer.__class__.__name__, Feats=extr, Results=results, Instances=n_instances)
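# Usage sketch: same calling convention as the other demos; train and evaluate on 2000
# labelled tweets and write a markdown results table (the output filename is illustrative).
from nltk.classify import NaiveBayesClassifier

demo_tweets(NaiveBayesClassifier.train, n_instances=2000, output='tweets_results.md')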
subj_docs = [ (sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances] ] obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]] len(subj_docs), len(obj_docs) subj_docs[0] train_subj_docs = subj_docs[:80] test_subj_docs = subj_docs[80:100] train_obj_docs = obj_docs[:80] test_obj_docs = obj_docs[80:100] training_docs = train_subj_docs + train_obj_docs testing_docs = test_subj_docs + test_obj_docs sentim_analyzer = SentimentAnalyzer() all_words_neg = sentim_analyzer.all_words( [mark_negation(doc) for doc in training_docs]) unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4) len(unigram_feats) sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats) training_set = sentim_analyzer.apply_features(training_docs) test_set = sentim_analyzer.apply_features(testing_docs) trainer = NaiveBayesClassifier.train classifier = sentim_analyzer.train(trainer, training_set) for key, value in sorted(sentim_analyzer.evaluate(test_set).items()): print('{0}: {1}'.format(key, value)) sid = SentimentIntensityAnalyzer() auth = OAuthHandler(ckey, csecret)
training_ratio = 0.80
pos_ratio = int(training_ratio * len(pos_reviews))
neg_ratio = int(training_ratio * len(neg_reviews))

# partitioning the docs into training and testing
training_docs = pos_docs[:pos_ratio] + neg_docs[:neg_ratio]
testing_docs = pos_docs[pos_ratio:] + neg_docs[neg_ratio:]

print("preparing the classifier...")

# fetching all the words which will make the most_frequent_features.txt
# all_words_neg = sentiment_analyzer.all_words([w for (w,p) in training_docs])

# distributing words based on frequency
word_features = nltk.FreqDist(
    sentiment_analyzer.all_words([w for (w, p) in training_docs]))
# then taking the keys which are most frequent
word_features = sorted(word_features.items(), key=operator.itemgetter(1))[-2000:]
word_features = [x for (x, y) in word_features]

# generating the feature set based on the word_features
feature_set = [(document_features(d, word_features), c) for (d, c) in training_docs]

print("training...")
# selecting and training the NaiveBayesClassifier from nltk packages
classifier = NaiveBayesClassifier.train(feature_set)
class QuoteFinder: def __init__(self): self.sentim_analyzer = SentimentAnalyzer() self.genre_dict = read_file("jsons/movie_genre_quote_dict_2.json") context_file = "jsons/final_context.json" movie_file = "jsons/final_movies.json" quote_file = "jsons/final_quotes.json" year_rating_file = "jsons/final_year_rating.json" self.context = read_file(context_file) self.movies = read_file(movie_file) self.quotes = read_file(quote_file) self.year_rating_dict = read_file(year_rating_file) # Reincode to unicode for i in range(len(self.context)): self.context[i] = self.context[i].encode("utf-8").decode("utf-8") self.movies[i] = self.movies[i].encode("utf-8").decode("utf-8") self.quotes[i] = self.quotes[i].encode("utf-8").decode("utf-8") self.context, self.quotes, self.movies = quote_pruner(self.context, self.quotes, self.movies) self.inverted_index = read_file("jsons/f_inverted_index.json") self.idf = read_file("jsons/f_idf.json") # Initialize query tokenizer self.tokenizer = TreebankWordTokenizer() # Compute document norms self.norms = compute_doc_norms(self.inverted_index, self.idf, len(self.context)) word_co_filename = "jsons/word_co.json" word_count_filename = "jsons/word_count_dict.json" pmi_dict_filename = "jsons/pmi_dict.json" # Read files self.word_co = read_file(word_co_filename) self.word_count_dict = read_file(word_count_filename) self.pmi_dict = read_file(pmi_dict_filename) def find_basic_cooccurence(self, word_list): """ Initialize the base word co-occurrance list from our context and quotes. Arguments ========= word_list: the list of words which are in our movie space Returns ======= word_co : a dictionary representing the word_occurrance matrix """ # Get English stop words stop_words = stopwords.words('english') # Merge context and quotes quote_list = self.quotes new_quote_list = [] for q in quote_list: new_q = punct_strip(q) if new_q not in self.context: new_quote_list.append(new_q) context_quotes = self.context + new_quote_list # Find co occurences in context data, based co-occurences in a document word_co = defaultdict(list) word_count_dict = defaultdict(int) for doc in context_quotes: # Double loop to count word co-occurences tkns = self.tokenizer.tokenize(doc) for i in range(len(tkns)): if tkns[i] not in stop_words: word_count_dict[tkns[i]] += 1 for j in range(len(tkns)): if not (j == i) and (tkns[j] in word_list): word_co[tkns[i]] = update_word_counts(word_co[tkns[i]], tkns[j]) return word_co, word_count_dict def update_cooccurence(self, word_co_old, word_count_dict_old, word_list, docs): """ Updates the word co-occurrance mat and the word count dict with a new set of data. 
Arguments ========= word_co_old: a word co-occurrance matrix in the form of a dictionary word_count_dict_old: a dictionary that keeps track of the total occurences of a word word_list: the list of words which are in our movie space docs: a list of new docs we're using to update our word co-occurence Returns ======= word_co, word_count_dict : new word co-occurence dict/mat and new word count dictionary """ # Get English stop words stop_words = stopwords.words('english') # Make init dict word_co = defaultdict(list) word_count_dict = defaultdict(int) word_co.update(word_co_old) word_count_dict.update(word_count_dict_old) # Find co occurences in context data, based on document (content) for doc in docs: # Double loop to count word co-occurences tkns = self.tokenizer.tokenize(punct_strip(doc)) for i in range(len(tkns)): if tkns[i] not in stop_words: word_count_dict[tkns[i]] += 1 for j in range(len(tkns)): if not (j == i) and (tkns[j] in word_list): word_co[tkns[i]] = update_word_counts(word_co[tkns[i]], tkns[j]) return word_co, word_count_dict def query_vectorize(self, q, sw=False): # Remove punctuation, lowercase, and encode to utf query = punct_strip(q.lower().encode("utf-8").decode("utf-8")) # Tokenize query and check query stopword cutoff query_words = self.tokenizer.tokenize(query) # Remove stop words if necessary stop_words = stopwords.words('english') # Get English stop words if (sw): new_query = [] for x in query_words: if x not in stop_words: new_query.append(x) query_words = new_query # Make query tfidf query_tfidf = defaultdict(int) for word in query_words: query_tfidf[word] += 1 for word in query_tfidf: if word in self.idf: query_tfidf[word] *= self.idf[word] else: query_tfidf[word] = 0 # Find query norm query_norm = 0 for word in query_tfidf: query_norm += math.pow(query_tfidf[word], 2) query_norm = math.sqrt(query_norm) return query_tfidf, query_norm def pseudo_rocchio(self, query_tfidf, query_norm, relevant, sw=False, a=.3, b=.4, clip=True): """ Arguments: query: a string representing the name of the movie being queried for relevant: a list of int representing the indices of relevant movies for query irrelevant: a list of strings representing the names of irrelevant movies for query a,b: floats, corresponding to the weighting of the original query, relevant queriesrespectively. clip: boolean, whether or not to clip all returned negative values to 0 Returns: q_mod: a dict representing the modified query vector. this vector should have no negatve weights in it! 
""" relevant_id = [] for s, i in relevant: relevant_id.append(i) if query_norm == 0: return self.find_random() # Calculate alpha*query_vec query_vec = query_tfidf for word in query_vec: query_vec[word] /= query_norm query_vec[word] *= a # Get words in relevant docs relevant_words = [] relevant_context = [] for i in relevant_id: relevant_context.append(self.context[i]) for context in relevant_context: context_tkns = self.tokenizer.tokenize(context) for tkn in context_tkns: if tkn not in relevant_words: relevant_words.append(tkn) # Collect relevant doc vector sums relevant_docs = defaultdict(int) for word in relevant_words: if word in self.inverted_index: for quote_id, tf in self.inverted_index[word]: if quote_id in relevant_id: relevant_docs[word] += (tf / self.norms[quote_id]) # Calculate beta term beta_term = b * (1.0 / len(relevant)) for key in relevant_docs: relevant_docs[key] *= beta_term # Sum query and relevant q_mod = {k: query_vec.get(k, 0) + relevant_docs.get(k, 0.0) for k in set(query_vec) | set(relevant_docs)} # negative checks for terms, if clip if (clip): for key in q_mod: if q_mod[key] < 0: q_mod[key] = 0 return q_mod else: return q_mod def find_random(self): r = random.randint(0, len(self.quotes)) return [[self.quotes[r], self.movies[r], self.context[r]]] def find_similar(self, query): query_words = self.tokenizer.tokenize(query) query_tfidf = defaultdict(int) for word in query_words: query_tfidf[word] += 1 for word in query_tfidf: if word in self.idf: query_tfidf[word] *= self.idf[word] else: query_tfidf[word] = 0 query_norm = 0 for word in query_tfidf: query_norm += math.pow(query_tfidf[word], 2) query_norm = math.sqrt(query_norm) if query_norm == 0: return self.find_random() scores = [0 for _ in self.quotes] for word in query_tfidf: if word in self.inverted_index: for quote_id, tf in self.inverted_index[word]: scores[quote_id] += query_tfidf[word] * tf * self.idf[word] results = [] for i, s in enumerate(scores): if self.norms[i] != 0: results.append((s / (self.norms[i] * query_norm), i)) top_res_num = 5 results.sort(reverse=True) return [[self.quotes[i], self.movies[i], self.context[i]] for _, i in results[:top_res_num]] def find_final(self, q, rocchio=True, pseudo_rocchio_num=5, sw=False, pmi_num=8, ml=False): """ Arguments: q: a string representing the query rocchio: a boolean representing whether or not to use pseudo relevance feedback with Rocchio psudo_rocchio_num: and int representing the number of top documents to consider relevant for rocchio sw: a boolean on whether or not to include stop words. pmi_num: an int representing the number of items to add to the query to expand it with PMI. 
    def find_final(self, q, rocchio=True, pseudo_rocchio_num=5, sw=False, pmi_num=8, ml=False):
        """
        Arguments:
            q: a string representing the query
            rocchio: a boolean, whether or not to use pseudo-relevance feedback with Rocchio
            pseudo_rocchio_num: an int, the number of top documents to treat as relevant for Rocchio
            sw: a boolean, whether or not to remove stop words from the query
            pmi_num: an int, the number of terms to add to the query when expanding it with PMI
            ml: a boolean, whether or not to re-weight results with the genre sentiment model

        Returns:
            result_quotes: a list of [quote, movie, context] triples for the top results
        """
        # Vectorize query
        query_tfidf, query_norm = self.query_vectorize(q, sw)
        if query_norm == 0:
            return self.find_random()

        # Expand query using PMI
        # http://www.jofcis.com/publishedpapers/2011_7_1_17_24.pdf
        pmi_expansion = defaultdict(float)
        pmi_norm = 1
        for word in query_tfidf:
            # Sum the PMI lists for each query term
            if word in self.pmi_dict:
                pmi_list = self.pmi_dict[word][:pmi_num]
                pmi_score_list = []
                for pmi_word, score in pmi_list:
                    pmi_expansion[pmi_word] += score
                    pmi_score_list.append(score)
                temp_norm = 0
                for s in pmi_score_list:
                    temp_norm += math.pow(s, 2)
                temp_norm = math.sqrt(temp_norm)
                pmi_norm *= temp_norm
        query_tfidf.update(pmi_expansion)
        query_norm = query_norm * 2 * pmi_num * pmi_norm

        # Find the query norm (recomputed from the expanded vector)
        query_norm = 0
        for word in query_tfidf:
            query_norm += math.pow(query_tfidf[word], 2)
        query_norm = math.sqrt(query_norm)

        # Get scores
        scores = [0 for _ in self.quotes]
        for word in query_tfidf:
            if word in self.inverted_index:
                for quote_id, tf in self.inverted_index[word]:
                    scores[quote_id] += query_tfidf[word] * tf * self.idf[word]
        results = []
        for i, s in enumerate(scores):
            if self.norms[i] != 0:
                results.append((s / (self.norms[i] * query_norm), i))

        # Weight scores with year and rating
        for i in range(len(results)):
            score = results[i][0]
            index = results[i][1]
            year = self.year_rating_dict[self.movies[index]][0]
            rating = self.year_rating_dict[self.movies[index]][1]
            results[i] = (year_rating_weight(float(year), float(rating), score), index)

        # Sort results
        results.sort(reverse=True)

        if rocchio:
            # Do pseudo-relevance feedback with Rocchio
            mod_query = self.pseudo_rocchio(query_tfidf, query_norm, results[:pseudo_rocchio_num], sw)
            mod_query_norm = 0
            for word in mod_query:
                mod_query_norm += math.pow(mod_query[word], 2)
            mod_query_norm = math.sqrt(mod_query_norm)

            # Re-score with the modified query
            scores = [0 for _ in self.quotes]
            for word in mod_query:
                if word in self.inverted_index:
                    for quote_id, tf in self.inverted_index[word]:
                        scores[quote_id] += mod_query[word] * tf * self.idf[word]
            results = []
            for i, s in enumerate(scores):
                if self.norms[i] != 0:
                    results.append((s / (self.norms[i] * mod_query_norm), i))

        d_score_updates = {}
        if ml:
            d_score_updates = self.find_ml(q)

        # Weight scores with year and rating (and the genre model, if enabled)
        for i in range(len(results)):
            score = results[i][0]
            index = results[i][1]
            year = self.year_rating_dict[self.movies[index]][0]
            rating = self.year_rating_dict[self.movies[index]][1]
            results[i] = (year_rating_weight(float(year), float(rating), score), index)
            if ml and index in d_score_updates:
                results[i] = (results[i][0] * 0.9 + d_score_updates[index], results[i][1])

        # Sort and return the top results, skipping duplicate quotes
        top_res_num = 5
        results.sort(reverse=True)
        used_quotes = []
        return_res = []
        counter = 0
        while len(return_res) < top_res_num and counter < len(results):
            score, i = results[counter]
            if self.quotes[i] not in used_quotes:
                used_quotes.append(self.quotes[i])
                return_res.append((score, i))
            counter += 1
        result_quotes = [[self.quotes[i], self.movies[i], self.context[i]] for _, i in return_res]
        return result_quotes
    def sentiment_analysis(self, td):
        # Load the precomputed training data
        with open('jsons/all_words_neg.pickle', 'rb') as f:
            all_words_neg = pickle.load(f)
        with open('jsons/training_docs.pickle', 'rb') as f:
            training_docs = pickle.load(f)

        genres = ['action', 'crime', 'comedy', 'drama']
        testing_docs = [(td, genre) for genre in genres]

        # Recompute negation-marked words and build unigram features
        all_words_neg = self.sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
        unigram_feats = self.sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
        self.sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
        training_set = self.sentim_analyzer.apply_features(training_docs)
        test_set = self.sentim_analyzer.apply_features(testing_docs)

        # Train the genre classifier
        trainer = NaiveBayesClassifier.train
        classifier = self.sentim_analyzer.train(trainer, training_set)
        # f = open('my_classifier_test.pickle', 'rb')
        # classifier = pickle.load(f)
        # f.close()
        # classifier = nltk.data.load("my_classifier.pickle")

        # Collect the per-genre precision scores
        genre_accuracy = []
        for key, value in sorted(self.sentim_analyzer.evaluate(test_set).items()):
            # print('{0}: {1}'.format(key, value))
            if key == 'Precision [action]':
                genre_accuracy.append(('action', value))
            if key == 'Precision [comedy]':
                genre_accuracy.append(('comedy', value))
            if key == 'Precision [drama]':
                genre_accuracy.append(('drama', value))
            if key == 'Precision [crime]':
                genre_accuracy.append(('crime', value))
        return genre_accuracy

    # Takes in a query string.
    # Outputs a dictionary mapping movie indices to weights, where each weight is
    # added to the scores of all quotes from that movie.
    def find_ml(self, td):
        f_tokenizer = TreebankWordTokenizer()
        query_words = f_tokenizer.tokenize(td)
        genres = self.sentiment_analysis(query_words)

        # Keep only the genres the classifier produced a precision score for
        weighted_genres = []
        genre_weights = {}
        for x in genres:
            if x[1] is not None:
                weighted_genres.append(x[0])
                genre_weights[x[0]] = x[1]

        d_score_updates = {}
        for idx, movie in enumerate(self.movies):
            g = self.genre_dict[movie][0]
            total_genre_score = 0
            if u'Comedy' in g and 'comedy' in weighted_genres:
                total_genre_score += genre_weights['comedy']
            if u'Action' in g and 'action' in weighted_genres:
                total_genre_score += genre_weights['action']
            if u'Crime' in g and 'crime' in weighted_genres:
                total_genre_score += genre_weights['crime']
            if u'Drama' in g and 'drama' in weighted_genres:
                total_genre_score += genre_weights['drama']
            d_score_updates[idx] = total_genre_score * .1
        return d_score_updates
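# ----------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes the
# JSON/pickle assets referenced in __init__ exist on disk and that the
# helper functions used above (read_file, punct_strip, quote_pruner,
# compute_doc_norms, update_word_counts, year_rating_weight) are defined
# elsewhere in this project. The example queries are arbitrary.
if __name__ == "__main__":
    finder = QuoteFinder()

    # Plain tf-idf retrieval over the quote contexts
    for quote, movie, context in finder.find_similar("life is like a box of chocolates"):
        print(movie, "-", quote)

    # Full pipeline: PMI query expansion plus pseudo-relevance feedback with
    # Rocchio; ml=False skips training the genre classifier on each query.
    for quote, movie, context in finder.find_final("revenge", rocchio=True, ml=False):
        print(movie, "-", quote)
# ----------------------------------------------------------------------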