def train(self, classifier, tweets):
    # build the bag-of-words list using the 1k most frequent words in
    # the corpus
    bag_of_words = {}
    for tweet in tweets:
        words = [w.lower() for w, t in pre_process(tweet['MESSAGE'])
                 if w not in stopwords and not w.isdigit()]
        for word in words:
            bag_of_words[word] = bag_of_words.get(word, 0) + 1

    # keep the 1000 most frequent words
    self.bag_of_words = [w for w, freq in sorted(bag_of_words.items(),
                                                 key=itemgetter(1),
                                                 reverse=True)[:1000]]

    # perform the training step
    for tweet in tweets:
        classifier.train(self.extract_features(pre_process(tweet['MESSAGE'])),
                         type=tweet['SENTIMENT'])
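# extract_features is called in the training loop above but not shown in
# this file. A minimal sketch, assuming binary presence features over the
# 1k-word vocabulary built above (the feature scheme is an assumption,
# not confirmed by this file):
def extract_features(self, tweet_tokens):
    # tweet_tokens is a list of (word, tag) pairs, as produced by pre_process
    words = set(w.lower() for w, t in tweet_tokens)
    return {word: (word in words) for word in self.bag_of_words}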
def output_individual_scores(self, tweets):
    tweet_texts = [tweet_message for tweet_message, label in tweets]
    tweet_labels = [label for tweet_message, label in tweets]

    # write the log
    fp = codecs.open('individual_scores.tab', 'w', encoding='utf8')
    line = 'pos_score_rule\tneg_score_rule\tpos_score_lex\tneg_score_lex\tpos_conf\tneg_conf\tneutral_conf\tclass\tmessage\n'
    fp.write(line)

    # 0. Pre-process the text (emoticons, misspellings, tagger)
    tweet_tokens_list = pre_process(tweet_texts)

    for index, tweet_tokens in enumerate(tweet_tokens_list):
        line = ''

        # 1. Rule-based classifier: looks mainly for emoticons
        positive_score, negative_score = self.rules_classifier.classify(tweet_tokens)
        line += str(positive_score) + '\t' + str(negative_score) + '\t'

        # 2. Lexicon-based classifier (using url_score obtained from RulesClassifier)
        positive_score, negative_score = self.lexicon_classifier.classify(tweet_tokens)
        line += str(positive_score) + '\t' + str(negative_score) + '\t'

        # 3. Machine-learning classifier - uses the training set to define
        # the best features to classify new instances
        result = self.ml_classifier.decision_function(tweet_tokens)
        line += str(result['positive']) + '\t' + str(result['negative']) + \
                '\t' + str(result['neutral']) + '\t'

        line += tweet_labels[index] + '\t"' + tweet_texts[index].replace('"', '') + '"\n'
        fp.write(line)

    print('Individual scores saved in the file: individual_scores.tab')
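# The file written above is tab-separated, one row per tweet. An
# illustrative row (the values are made up for the example; only the
# column layout comes from the header above):
#
# pos_score_rule  neg_score_rule  pos_score_lex  neg_score_lex  pos_conf  neg_conf  neutral_conf  class     message
# 1               0               2              -1             0.62      0.21      0.17          positive  "nice day :)"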
def classify(self, tweet_text):
    # 0. Pre-process the tweets (tokenization, tagger, normalizations)
    print('Preprocessing the string')
    tweet_tokens_list = pre_process([tweet_text])

    predictions = []

    # iterate over the tweet_tokens
    for index, tweet_tokens in enumerate(tweet_tokens_list):

        # 1. Rule-based classifier: looks mainly for emoticons. If any
        # rule fires, classify the tweet here; otherwise fall through to
        # the lexicon classifier.
        positive_score, negative_score = self.rules_classifier.classify(tweet_tokens)
        if positive_score >= 1 and negative_score == 0:
            predictions.append(('positive', 'RB'))
            continue
        elif positive_score == 0 and negative_score <= -1:
            predictions.append(('negative', 'RB'))
            continue

        # 2. Lexicon-based classifier. If the scores are within the
        # threshold, classify the tweet here; otherwise fall through to
        # the ML classifier.
        positive_score, negative_score = self.lexicon_classifier.classify(tweet_tokens)
        if positive_score >= 1 and negative_score == 0:
            predictions.append(('positive', 'LB'))
            continue
        elif negative_score <= -2:
            predictions.append(('negative', 'LB'))
            continue

        # 3. Machine-learning classifier - uses the Train+Dev set to
        # define the best features to classify new instances
        result = self.ml_classifier.classify(tweet_tokens)
        positive_conf = result['positive']
        negative_conf = result['negative']
        neutral_conf = result['neutral']

        if negative_conf >= -0.4:
            sentiment = ('negative', 'ML')
        elif positive_conf > neutral_conf:
            sentiment = ('positive', 'ML')
        else:
            sentiment = ('neutral', 'ML')
        predictions.append(sentiment)

    return predictions
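# The method above tries cheap, high-precision stages first (rules, then
# lexicon) and only falls back to the ML model when they abstain. A
# self-contained sketch of that control flow with stand-in stage
# functions (the lambdas are illustrative, not the project's classifiers):
def cascade_classify(text, stages, fallback):
    # return the first stage that produces a label, tagged with its name
    for name, stage in stages:
        label = stage(text)
        if label is not None:
            return (label, name)
    return (fallback(text), 'ML')

print(cascade_classify('nice day :)',
                       [('RB', lambda t: 'positive' if ':)' in t else None),
                        ('LB', lambda t: 'negative' if 'hate' in t.lower() else None)],
                       lambda t: 'neutral'))
# -> ('positive', 'RB')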
def classify(self, tweet_text):
    # 0. Pre-process the text (emoticons, misspellings, tagger)
    tweet_text = pre_process(tweet_text)

    # 1. Rule-based classifier: looks mainly for emoticons. If any rule
    # fires, classify the tweet here; otherwise fall through to the
    # lexicon classifier.
    positive_score, negative_score = self.rules_classifier.classify(tweet_text)
    rules_score = positive_score + negative_score
    if rules_score != 0:
        return 'positive' if rules_score > 0 else 'negative'

    # 2. Lexicon-based classifier. If the lexicon score is 0 (strictly
    # neutral), >= 3 (confidently positive) or <= -3 (confidently
    # negative), classify the tweet here; otherwise fall through to the
    # SVM classifier.
    positive_score, negative_score = self.lexicon_classifier.classify(tweet_text)
    lexicon_score = positive_score + negative_score
    if lexicon_score == 0:
        return 'neutral'
    if lexicon_score >= 3:
        return 'positive'
    if lexicon_score <= -3:
        return 'negative'

    # 3. Machine-learning classifier - uses the training set to define
    # the best features to classify new instances. If the positive or
    # negative confidence (probability) is >= 0.3, classify with that
    # sentiment; otherwise classify as neutral.
    scores = self.ml_classifier.classify(tweet_text)
    positive_conf = scores[0][1]
    negative_conf = scores[1][1]
    neutral_conf = scores[2][1]

    if positive_conf >= 0.3 and negative_conf < positive_conf:
        sentiment = 'positive'
    elif negative_conf >= 0.3:
        sentiment = 'negative'
    else:
        sentiment = 'neutral'
    return sentiment
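# LexiconClassifier.classify above returns separate positive and negative
# scores. A minimal sketch of the usual lexicon approach, assuming a
# word -> polarity-score dictionary (the lexicon contents and plain-string
# token format here are illustrative assumptions):
def lexicon_scores(tokens, lexicon):
    # sum the positive and negative polarities separately, matching the
    # two-value return used in the code above
    scores = [lexicon.get(w.lower(), 0) for w in tokens]
    positive = sum(s for s in scores if s > 0)
    negative = sum(s for s in scores if s < 0)
    return positive, negative

print(lexicon_scores(['Great', 'movie', 'awful', 'seats'],
                     {'great': 2, 'awful': -3}))
# -> (2, -3)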
def __init__(self, tweets=[]):
    # initialize internal variables
    self.rules_classifier = RulesClassifier()
    self.lexicon_classifier = LexiconClassifier()
    self.ml_classifier = None

    # if the ML model has already been generated, load it from the pickle
    python_version = sys.version_info[0]
    model_name = (str(var.model_classifier) + '-model_python' +
                  str(python_version) + '.pkl')
    if os.path.exists(model_name):
        print('Reading the ' + str(var.model_classifier) +
              ' model from ' + model_name)
        self.ml_classifier = pickle.load(open(model_name, 'rb'))

    if self.ml_classifier is None:
        # Preprocess the data and train a new model
        print('Preprocessing the training data')
        tweet_messages = [tweet_message for tweet_message, label in tweets]
        tweet_labels = [label for tweet_message, label in tweets]

        # preprocess all the tweet_messages (tokenization, POS and normalization)
        tweet_tokens = pre_process(tweet_messages)

        # compile a trainset with tweet_tokens and labels (positive,
        # negative or neutral)
        trainset = [(tweet_tokens[i], tweet_labels[i])
                    for i in range(len(tweets))]

        # initialize the classifier and train it
        classifier = MachineLearningClassifier(trainset)

        # dump the model into the pickle
        print('Saving the trained model at ' + model_name)
        pickle.dump(classifier, open(model_name, 'wb'))
        self.ml_classifier = classifier
def __init__(self, tweets=[]):
    # initialize internal variables
    self.rules_classifier = RulesClassifier()
    self.lexicon_classifier = LexiconClassifier()
    self.ml_classifier = None

    # if the ML model has already been generated, load it from the pickle
    python_version = sys.version_info[0]
    model_name = 'model_python' + str(python_version) + '.pkl'
    if os.path.exists(model_name):
        print('Reading the model from ' + model_name)
        self.ml_classifier = pickle.load(open(model_name, 'rb'))

    if self.ml_classifier is None:
        # Preprocess the data and train a new model
        print('Preprocessing the training data')
        tweet_messages = [tweet_message for tweet_message, label in tweets]
        tweet_labels = [label for tweet_message, label in tweets]

        # preprocess all the tweet_messages (tokenization, POS and normalization)
        tweet_tokens = pre_process(tweet_messages)

        # compile a trainset with tweet_tokens and labels (positive,
        # negative or neutral)
        trainset = [(tweet_tokens[i], tweet_labels[i])
                    for i in range(len(tweets))]

        # initialize the classifier and train it
        classifier = MachineLearningClassifier(trainset)

        # dump the model into the pickle
        print('Saving the trained model at ' + model_name)
        pickle.dump(classifier, open(model_name, 'wb'))
        self.ml_classifier = classifier
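# The load-or-train pattern in the __init__ above (check for a pickled
# model, otherwise train and pickle a new one) is generic. A self-contained
# sketch of the same idea with a stand-in model; the filename and train
# function are illustrative, not the project's. Unlike the bare open()
# calls above, the context managers here also close the files:
import os
import pickle

def load_or_train(path, train_fn):
    # reuse a cached model when present, otherwise build and cache one
    if os.path.exists(path):
        with open(path, 'rb') as fh:
            return pickle.load(fh)
    model = train_fn()
    with open(path, 'wb') as fh:
        pickle.dump(model, fh)
    return model

# example: model = load_or_train('demo.pkl', lambda: {'weights': [0.1, 0.9]})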
def classify_batch(self, tweet_texts):
    # 0. Pre-process the tweets (tokenization, tagger, normalizations)
    if len(tweet_texts) == 0:
        return []

    print('Preprocessing the test data')
    tweet_tokens_list = pre_process(tweet_texts)

    predictions = []
    total_tweets = len(tweet_tokens_list)
    line_save = []

    # iterate over the tweet_tokens
    for index, tweet_tokens in enumerate(tweet_tokens_list):
        print('Testing for tweet n. {}/{}'.format(index + 1, total_tweets))

        '''
        Commented out to classify all the messages using only the ML
        method (airtonbjunior):

        # 1. Rule-based classifier: looks mainly for emoticons. If any
        # rule fires, classify the tweet here; otherwise fall through.
        positive_score, negative_score = self.rules_classifier.classify(tweet_tokens)
        if positive_score >= 1 and negative_score == 0:
            predictions.append(('positive', 'RB'))
            continue
        elif positive_score == 0 and negative_score <= -1:
            predictions.append(('negative', 'RB'))
            continue

        # 2. Lexicon-based classifier. If within the threshold, classify
        # the tweet here; otherwise fall through to the ML classifier.
        positive_score, negative_score = self.lexicon_classifier.classify(tweet_tokens)
        if positive_score >= 1 and negative_score == 0:
            predictions.append(('positive', 'LB'))
            continue
        elif negative_score <= -2:
            predictions.append(('negative', 'LB'))
            continue
        '''

        # 3. Machine-learning classifier - uses the Train+Dev set to
        # define the best features to classify new instances
        result = self.ml_classifier.classify(tweet_tokens)
        positive_conf = result['positive']
        negative_conf = result['negative']
        neutral_conf = result['neutral']

        line_save.append(str(positive_conf) + '\t' + str(negative_conf) +
                         '\t' + str(neutral_conf))

        if var.model_classifier == "svm":
            if negative_conf >= -0.4:
                sentiment = ('negative', 'ML')
            elif positive_conf > neutral_conf:
                sentiment = ('positive', 'ML')
            else:
                sentiment = ('neutral', 'ML')
        elif var.model_classifier == "randomForest":
            if positive_conf > negative_conf and positive_conf > neutral_conf:
                sentiment = ('positive', 'ML')
            elif negative_conf > positive_conf and negative_conf > neutral_conf:
                sentiment = ('negative', 'ML')
            elif neutral_conf > positive_conf and neutral_conf > negative_conf:
                sentiment = ('neutral', 'ML')
            else:
                # break ties with neutral in favor of the polar class
                if positive_conf == neutral_conf:
                    sentiment = ('positive', 'ML')
                elif negative_conf == neutral_conf:
                    sentiment = ('negative', 'ML')
                else:
                    sentiment = ('neutral', 'ML')
        elif var.model_classifier == "naive":
            # the naive path leaves the per-tweet sentiment empty; its
            # predictions are written from var.naive_predicts below
            sentiment = ""
        elif var.model_classifier in ("lreg", "sgd"):
            if positive_conf > negative_conf and positive_conf > neutral_conf:
                sentiment = ('positive', 'ML')
            elif negative_conf > positive_conf and negative_conf > neutral_conf:
                sentiment = ('negative', 'ML')
            else:
                # covers the neutral-max case and ties (the original
                # branches left sentiment unset on ties)
                sentiment = ('neutral', 'ML')

        predictions.append(sentiment)

    print('Saving the predictions values of ' + str(var.model_classifier) +
          ' on file ' + str(var.model_classifier) + '_test_results.txt')
    with open(str(var.model_classifier) + '_test_results.txt', 'a') as fr:
        for ii, pred in enumerate(line_save):
            if var.model_classifier == "randomForest":
                fr.write(pred + '\t' + str(var.rf_predicts[ii])[2:-2] + '\n')
            elif var.model_classifier == "svm":
                fr.write(pred + '\t' + str(var.svm_predicts[ii][2:-2]) + '\n')
            elif var.model_classifier == "naive":
                fr.write(pred + '\t' + str(var.naive_predicts[ii][2:-2]) + '\n')
            elif var.model_classifier == "lreg":
                fr.write(pred + '\t' + str(var.lreg_predicts[ii]) + '\n')
            elif var.model_classifier == "sgd":
                fr.write(pred + '\t' + str(var.sgd_predicts[ii]) + '\n')

    return predictions
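# The randomForest/lreg/sgd branches above all pick the label with the
# highest confidence. A compact equivalent over the result dict (a sketch;
# tie-breaking differs from the randomForest branch, which prefers
# positive, then negative, on ties with neutral):
def argmax_sentiment(result):
    label = max(result, key=result.get)
    return (label, 'ML')

print(argmax_sentiment({'positive': 0.2, 'negative': 0.5, 'neutral': 0.3}))
# -> ('negative', 'ML')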
import os
import sys
import time
import numpy as np
import warnings

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
warnings.filterwarnings("ignore")

import tensorflow as tf

sys.path.append(os.path.join(os.path.abspath(os.path.dirname(__file__)),
                             os.pardir, os.pardir))

from Model import *
from Chatbot.config import chatBotConfig
from PreProcess import pre_process

(questionswords2int, answerswords2int, sorted_clean_questions,
 sorted_clean_answers, answersints2word) = pre_process()

# Defining a session
tf.reset_default_graph()
session = tf.InteractiveSession()

# Loading the model inputs
inputs, targets, lr, keep_prob = model_inputs()

# Setting the sequence length
sequence_length = tf.placeholder_with_default(25, None, name='sequence_length')

# Getting the shape of the inputs tensor
input_shape = tf.shape(inputs)
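# Note: tf.reset_default_graph, tf.InteractiveSession and
# tf.placeholder_with_default are TensorFlow 1.x APIs. If this script has
# to run under a TensorFlow 2.x install, the usual stopgap is the v1
# compatibility module (a sketch, assuming TF 2.x is installed):
#
# import tensorflow.compat.v1 as tf
# tf.disable_v2_behavior()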