def sentiment_analysis(tweet_sample, aggregate=True, mode='trinary'):
    senti = PySentiStr()
    senti.setSentiStrengthPath(sentistrength_jar_full_path)
    senti.setSentiStrengthLanguageFolderPath(sentistrength_lan_full_path_en)
    sentiment_dict = {}
    if type(tweet_sample) is not dict:
        return 'No matches'
    for topic in tweet_sample.keys():
        # Scores: scale, dual, binary and trinary
        sentiment = senti.getSentiment(tweet_sample[topic], score=mode)
        if aggregate:
            sentisum = 0
            summary = {}
            for sent in sentiment:
                # The trinary score returns a tuple, unlike the others;
                # sent[2] is the overall trinary class (-1, 0 or 1) per text
                sentisum += sent[2]
            summary['value'] = sentisum
            if sentisum > 0:
                summary['sentiment'] = 'positive'
            else:
                summary['sentiment'] = 'negative'
            sentiment = summary
        sentiment_dict[topic] = sentiment
    return sentiment_dict
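A minimal usage sketch, assuming the two module-level path variables above are set; the topic names and tweets are made up for illustration:

tweets = {
    'weather': ['I love sunny days', 'Rain again, awful'],
    'traffic': ['Stuck for two hours, terrible'],
}
# aggregate=True -> one summary dict per topic,
#   e.g. {'value': -1, 'sentiment': 'negative'};
# aggregate=False -> the raw per-text trinary tuples for each topic.
summaries = sentiment_analysis(tweets)
raw_scores = sentiment_analysis(tweets, aggregate=False)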
def main():
    #==========================================================================#
    # create the SentiStrength object and set the paths to the auxiliary
    # files
    #==========================================================================#
    obj_sentistrength = PySentiStr()
    obj_sentistrength.setSentiStrengthPath(SENTISTRENGTH_JAR_PATH)
    obj_sentistrength.setSentiStrengthLanguageFolderPath(
        SENTISTRENGTH_DATA_PATH)

    #==========================================================================#
    # read the file frases.txt and put its lines into the list file_lines
    # (file.readlines() returns that list)
    #==========================================================================#
    with open('frases.txt', 'r') as file:
        file_lines = file.readlines()

    #==========================================================================#
    # iterate over the list file_lines and run sentiment analysis on each
    # text, obtaining 3 similar, proportional scores (dual, trinary and
    # scale) for the same input text
    #==========================================================================#
    for line in file_lines:
        text = line.strip()  # remove the trailing \n from the line
        result_scale = obj_sentistrength.getSentiment(text, score='scale')
        result_dual = obj_sentistrength.getSentiment(text, score='dual')
        result_trinary = obj_sentistrength.getSentiment(text, score='trinary')
        print('text: {0}\nresult_scale: {1}\nresult_dual: {2}\nresult_trinary: {3}\n'
              .format(text, str(result_scale), str(result_dual),
                      str(result_trinary)))
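For reference, a sketch of the result shape each score mode produces; the numeric values are illustrative only (not real SentiStrength output), and senti stands for any PySentiStr instance configured as above:

texts = ['What a great day']
senti.getSentiment(texts, score='scale')    # e.g. [3]          positive + negative per text
senti.getSentiment(texts, score='dual')     # e.g. [(4, -1)]    (positive, negative)
senti.getSentiment(texts, score='trinary')  # e.g. [(4, -1, 1)] (positive, negative, overall -1/0/1)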
def tweet_word_sentiment(data):
    '''
    input: whole corpus
    output: one dict for tweet_word_sentiment,
            keys: tweet_id, values: [max, min, distance]
            max -- highest sentiment score among all words
            min -- lowest sentiment score among all words
            distance -- difference between the highest and lowest scores
    '''
    feature_dict = {}
    senti = PySentiStr()
    senti.setSentiStrengthPath('./SentiStrength.jar')
    senti.setSentiStrengthLanguageFolderPath('./SentiStrengthData/')
    for tweet in data:
        tokenized = tweet.tweet_words()
        new_words = [word for word in tokenized if word.isalnum()]
        if not new_words:
            # same [max, min, distance] shape as the main path below
            feature_dict[tweet.tweet_id] = [0, 0, 0]
            continue
        result = senti.getSentiment(new_words)
        max_, min_ = result[0], result[0]
        for score in result:
            max_ = max(max_, score)
            min_ = min(min_, score)
        feature_dict[tweet.tweet_id] = [max_, min_, max_ - min_]
    return feature_dict
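A sketch of the expected input, using a hypothetical Tweet stand-in that exposes the tweet_id attribute and tweet_words() method the function relies on:

class Tweet:
    # Hypothetical stand-in for the corpus objects this function expects.
    def __init__(self, tweet_id, text):
        self.tweet_id = tweet_id
        self._words = text.split()

    def tweet_words(self):
        return self._words


features = tweet_word_sentiment([Tweet(1, 'lovely day horrible traffic')])
# e.g. {1: [3, -4, 7]} -- [max, min, max - min] per tweet_id (values illustrative)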
def sentistr(x):
    senti = PySentiStr()
    senti.setSentiStrengthPath("SentiStrength.jar")
    senti.setSentiStrengthLanguageFolderPath("SentStrength_Data")
    # trinary score: positive rating, negative rating and neutral rating
    result = senti.getSentiment(x, score='trinary')
    return result
def __initialize_senti(self):
    self.senti = PySentiStr()
    self.senti.setSentiStrengthPath(
        str(Path.cwd() / 'lib' / 'SentiStrengthCom.jar'))
    self.senti.setSentiStrengthLanguageFolderPath(str(Path.cwd() / 'lang'))
    # simple test to make sure senti works
    test = self.senti.getSentiment(['You are beautiful'], 'dual')
    assert type(test) is list
    assert type(test[0]) is tuple
def get_sentistrength(df):
    senti = PySentiStr()
    senti.setSentiStrengthPath('~/softwares/SentiStrengthCom.jar')
    senti.setSentiStrengthLanguageFolderPath(
        '~/softwares/SentStrength_Data_Sept2011/')
    # avoid passing empty strings to SentiStrength
    df["text"] = [t if t != "" else " " for t in df['text']]
    result = senti.getSentiment(df["text"], score='trinary')
    df["sentistrength_pos"] = [r[0] for r in result]
    df["sentistrength_neg"] = [r[1] for r in result]
    df["sentistrength_neutral"] = [r[2] for r in result]
    return df
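A quick usage sketch with a toy DataFrame; pandas is imported here for completeness, and the jar/data paths are taken from the snippet as-is:

import pandas as pd

df = pd.DataFrame({'text': ['I love this', '', 'This is terrible']})
df = get_sentistrength(df)
# adds sentistrength_pos / sentistrength_neg / sentistrength_neutral columns,
# one component each of the per-row trinary score
print(df)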
def main():
    # change the input file here
    with open(
            './Comentarios_csv/Test/OPOVOOnline sobre escolha do novo reitor UFC.csv'
    ) as csv_file:
        csv_dict_reader = csv.DictReader(csv_file)
        senti = PySentiStr()
        senti.setSentiStrengthPath(
            "/home/caio/Documentos/Projeto Analise Comentarios Facebook/SentiStrength.jar"
        )
        senti.setSentiStrengthLanguageFolderPath(
            "/home/caio/Documentos/Projeto Analise Comentarios Facebook/SentStrength_Data/portuguese/"
        )
        # change the output file here
        with open('./Comentarios_csv/Test/teste.csv', 'w') as csvfile:
            spamwriter = csv.writer(csvfile)
            spamwriter.writerow(
                ["Comentário", "notaPositiva", "notaNegativa", "Sentimento"])
            for row in csv_dict_reader:
                # set the name of the column that holds the comment
                if row["message"]:
                    sentence = row["message"]
                    #sentence = RemoveAccent(sentence)
                    sentence = Tokenize(sentence)
                    if sentence:
                        sentence = RemoveStopWords(sentence)
                        if sentence:
                            sentence = Stemming(sentence)
                            sentence = " ".join(sentence)
                            # SentiStrength
                            result = senti.getSentiment(sentence,
                                                        score='binary')
                            if result[0][0] + result[0][1] <= -1:
                                sentiment = 'negativo'
                            elif result[0][0] + result[0][1] >= 1:
                                sentiment = 'positivo'
                            else:
                                sentiment = 'neutro'
                            spamwriter.writerow([
                                row["message"], result[0][0], result[0][1],
                                sentiment
                            ])
    print("finish!")
    if score > 0:
        return 'positive'
    elif score < 0:
        return 'negative'
    else:
        return 'neutral'


afinn = Afinn()


def afinn_polarity(text):
    score = afinn.score(text)
    if score > 0:
        return 'positive'
    elif score < 0:
        return 'negative'
    else:
        return 'neutral'


senti = PySentiStr()
senti.setSentiStrengthPath(senti_strength_jar_filepath)
senti.setSentiStrengthLanguageFolderPath(senti_strength_data_dirname)


def sentistrength_polarity(text):
    score = senti.getSentiment([text])[0]
    if score > 0:
        return 'positive'
    elif score < 0:
        return 'negative'
    else:
        return 'neutral'


mpqa_df = pd.read_csv(mpqa_filepath)


def mpqa_polarity(text):
def __aggregate(_posts, _media, _comments, session, logger):
    _entries = list()

    # Initialize sentistrength variable
    senti = PySentiStr()
    setup_sentistrength_path(senti)

    for p in _posts:
        try:
            """ Id, name of a post """
            entry = [p.id, p.name]

            """ Number of versions of a post """
            version = __extract_version(p.discussion_url)
            entry = entry + [version]

            """ Number of tags for a product """
            tags_number = session.query(func.count(
                Topic.name)).filter(Topic.post_id == p.id).scalar()
            entry = entry + [tags_number]

            entry = entry + [p.featured, p.votes_count, p.day, p.created_at]

            """ Time features """
            launch_day = get_day_name_from_date(p.created_at.year,
                                                p.created_at.month,
                                                p.created_at.day)
            best_launch_time = is_best_posted_time(p.created_at.hour,
                                                   p.created_at.minute,
                                                   p.created_at.second)
            best_launch_day = is_best_launched_day(p.created_at.hour,
                                                   p.created_at.minute,
                                                   p.created_at.second,
                                                   launch_day)
            max_follower = session.query(func.max(
                User.followers_count)).scalar()
            maker_id = session.query(
                Apps.maker_id).filter(Apps.post_id == p.id).one()[0]
            maker = session.query(
                User.id, User.name, User.twitter_username, User.website_url,
                User.followers_count).filter(User.id == maker_id).one()
            weekend = is_weekend(maker.followers_count, max_follower,
                                 launch_day)
            entry = entry + [
                launch_day, best_launch_time, best_launch_day, weekend
            ]

            """ Presentation features """
            entry = entry + [p.description]
            if p.description:
                """ Extraction of maker sentiment based on the description
                of his post """
                maker_description_sentiment = __extract_sentiment(
                    senti, p.description)
                entry = entry + [
                    maker_description_sentiment[0][0],
                    maker_description_sentiment[0][1], '', '', ''
                ]

                # Text length
                entry = entry + [len(p.description)]

                # Sentence length
                sentence = get_sentence(p.description)
                sentence_length_sum = 0
                for i in range(0, len(sentence)):
                    sentence_length_sum = sentence_length_sum + len(
                        sentence[i])
                try:
                    sentence_length_average = sentence_length_sum / len(
                        sentence)
                except ZeroDivisionError:
                    sentence_length_average = 0.0
                entry = entry + [round(sentence_length_average)]

                # Bullet points / Explicit features
                bullet_points_explicit_features = __extract_bullet_points_explicit_features(
                    sentence)
                entry = entry + [bullet_points_explicit_features]

                # Emoji in description
                emoji_description = __extract_emoji(p.description)
                entry = entry + [emoji_description]
            else:
                entry = entry + [1, -1, '', '', '', 0, 0, 'No', 'No']

            entry = entry + [p.tagline]
            if p.tagline:
                # Tagline length
                entry = entry + [len(p.tagline)]
                # Emoji in tagline
                emoji_tagline = __extract_emoji(p.tagline)
                entry = entry + [emoji_tagline]
            else:
                entry = entry + [0, 'No']

            # Video, tweetable images, gif and gif count for a post
            video = []
            tweetable_images = []
            gif = []
            index_media = 0
            while index_media < len(_media):
                # check if the current post_id is equal to the post_id of
                # the current media
                if p.id == _media[index_media][0]:
                    # check if the media type is 'video'
                    if _media[index_media][1] == 'video':
                        # append to the list the link of the video
                        video = video + [_media[index_media][2]]
                    # calculate the image size passing its width and its height
                    roughly, ratio = calculate_aspect_ratio(
                        _media[index_media][3], _media[index_media][4])
                    # check if the image is a tweetable image
                    if (roughly == 2) and (ratio == 1):
                        # append to the list the image url
                        tweetable_images = tweetable_images + [
                            _media[index_media][5]
                        ]
                    # check if the image is a gif image passing its url
                    found = is_gif(_media[index_media][5])
                    if found:
                        # append to the list the image url
                        gif = gif + [_media[index_media][5]]
                index_media = index_media + 1

            if video:
                entry = entry + ['Yes']
            else:
                entry = entry + ['No']
            if tweetable_images:
                entry = entry + ['Yes']
            else:
                entry = entry + ['No']
            if gif:
                entry = entry + [gif, len(gif)]
            else:
                entry = entry + ['', len(gif)]

            # Offers, promo/discount codes, questions, maker_inside,
            # hunter_inside in comment bodies for a post
            offers = []
            questions = []
            promo_codes = []
            # maker_follows_up_on_comments = 0
            hunter_follows_up_on_comments = 0
            maker_comments = []
            others_comments = []
            comm_in_thread = []
            hunter_id = session.query(
                Hunts.hunter_id).filter(Hunts.post_id == p.id).one()[0]
            index_comment = 0
            while index_comment < len(_comments):
                # check if the current post_id is equal to the post_id of
                # the current comment
                if p.id == _comments[index_comment][3]:
                    # extract offers passing the comment body
                    offer = __extract_offers(_comments[index_comment][1])
                    if offer:
                        offers = offers + offer
                    # extract questions passing the comment body
                    question = __extract_questions(_comments[index_comment][1])
                    if question:
                        questions = questions + question
                    # extract promo_codes passing the comment body
                    promo_code = __extract_promo_codes(
                        _comments[index_comment][1])
                    if promo_code:
                        promo_codes = promo_codes + promo_code
                    # # check if the maker follows up on the current comment
                    # if _comments[index_comment][4] == maker_id:
                    #     maker_follows_up_on_comments = 1
                    # put comments in comm list
                    # (comment_id, comment_body, created_at, user_id)
                    comm_in_thread.append([
                        _comments[index_comment][0],
                        _comments[index_comment][1],
                        _comments[index_comment][2],
                        _comments[index_comment][4]
                    ])
                    # check if the hunter follows up on the current comment
                    if _comments[index_comment][4] == hunter_id:
                        hunter_follows_up_on_comments = 1
                    """ Extraction of maker sentiment based on his post
                    comments written the day of launch """
                    if _comments[index_comment][4] == maker_id:
                        # date of maker's comment written the day the post
                        # was launched
                        comment_date = _comments[index_comment][2]
                        # cut the comments written days after the post was
                        # launched
                        if (p.created_at.year == comment_date.year) and (
                                p.created_at.month == comment_date.month) and (
                                    p.created_at.day == comment_date.day):
                            if not maker_comments:
                                maker_comments = [_comments[index_comment][1]]
                            else:
                                maker_comments = maker_comments + [
                                    _comments[index_comment][1]
                                ]
                    """ Extraction of other users' sentiment based on their
                    post comments written the day of launch """
                    if (_comments[index_comment][4] != maker_id) and (
                            maker_id != hunter_id):
                        # date of others' comment written the day the post
                        # was launched
                        comment_date = _comments[index_comment][2]
                        # cut the comments written days after the post was
                        # launched
                        if (p.created_at.year == comment_date.year) and (
                                p.created_at.month == comment_date.month) and (
                                    p.created_at.day == comment_date.day):
                            if not others_comments:
                                others_comments = [_comments[index_comment][1]]
                            else:
                                others_comments = others_comments + [
                                    _comments[index_comment][1]
                                ]
                index_comment = index_comment + 1

            if offers:
                entry = entry + ['Yes']
            else:
                entry = entry + ['No']
            if promo_codes:
                entry = entry + ['Yes']
            else:
                entry = entry + ['No']
            if questions:
                entry = entry + ['Yes']
            else:
                entry = entry + ['No']

            # check if the maker writes the first comment in the thread
            maker_started_comment_thread = 0
            if comm_in_thread:
                if (p.created_at.year == comm_in_thread[0][2].year) and (
                        p.created_at.month == comm_in_thread[0][2].month) and (
                            p.created_at.day == comm_in_thread[0][2].day):
                    if comm_in_thread[0][3] == maker_id:
                        maker_started_comment_thread = 1

            # calculate maker comment ratio
            # ((number of maker comments / number of all comments) * 100)
            number_maker_comments = 0
            number_others_comments = 0
            if comm_in_thread:
                for i in range(0, len(comm_in_thread)):
                    if (p.created_at.year == comm_in_thread[i][2].year) and (
                            p.created_at.month == comm_in_thread[i][2].month
                    ) and (p.created_at.day == comm_in_thread[i][2].day):
                        if comm_in_thread[i][3] == maker_id:
                            number_maker_comments = number_maker_comments + 1
                        else:
                            number_others_comments = number_others_comments + 1
            thread_length = number_maker_comments + number_others_comments
            try:
                if maker_started_comment_thread == 1:
                    maker_comment_ratio = (number_maker_comments /
                                           thread_length) * 100
                else:
                    maker_comment_ratio = 0.0
            except ZeroDivisionError:
                maker_comment_ratio = 0.0

            # Hunter reputation
            hunter = session.query(
                User.id, User.name, User.twitter_username, User.website_url,
                User.followers_count,
                User.apps_made_count).filter(User.id == hunter_id).one()
            entry = entry + [
                hunter.id, hunter.name, hunter.twitter_username,
                hunter.website_url, hunter.followers_count,
                hunter.apps_made_count, hunter_follows_up_on_comments
            ]

            # Maker reputation
            entry = entry + [
                maker.id, maker.name, maker.twitter_username,
                maker.website_url, maker.followers_count,
                maker_started_comment_thread,
                round(maker_comment_ratio, 2), thread_length
            ]

            # check if the hunter is also the maker and append the variable
            # hunter_is_maker to the list entry
            hunter_is_maker = 0
            if hunter_id == maker_id:
                hunter_is_maker = 1
            entry = entry + [hunter_is_maker]

            # Append to the list the maker comment sentiment
            if maker_comments:
                comment = '\n'.join(maker_comments)
                sentiment = __extract_sentiment(senti, comment)
            else:
                comment = ''
                sentiment = [[1, -1]]
            entry = entry + [
                comment, sentiment[0][0], sentiment[0][1], '', '', ''
            ]

            # Append to the list the others' comment sentiment
            if others_comments:
                comment = '\n'.join(others_comments)
                sentiment = __extract_sentiment(senti, comment)
            else:
                comment = ''
                sentiment = [[1, -1]]
            entry = entry + [
                comment, sentiment[0][0], sentiment[0][1], '', '', ''
            ]

            _entries.append(entry)
        except NoResultFound as ex:
            logger.error(str(ex))
            continue
        except MultipleResultsFound as ex:
            logger.error(str(ex))
            continue
    return _entries
def main():
    with open(
            './OPOVOOnline sobre escolha do novo reitor UFC.csv') as csv_file:
        csv_dict_reader = csv.DictReader(csv_file)
        senti = PySentiStr()
        senti.setSentiStrengthPath(
            "/home/caio/Documentos/Projeto Analise Comentarios Facebook/SentiStrength.jar"
        )
        senti.setSentiStrengthLanguageFolderPath(
            "/home/caio/Documentos/Projeto Analise Comentarios Facebook/SentStrength_Data/portuguese/"
        )
        prev_message = ""
        with open(
                '/home/caio/Documentos/Projeto Analise Comentarios Facebook/Frases_Neutras.csv',
                'w') as csvfile:
            spamwriter = csv.writer(csvfile)
            spamwriter.writerow(["Frase", "notaPositiva", "notaNegativa"])
            # SentiStrength
            for row in csv_dict_reader:
                if prev_message != row["message"] and row["message"]:
                    sentence = row["message"]
                    #sentence = RemoveAccent(sentence)
                    sentence = Tokenize(sentence)
                    if sentence:
                        sentence = RemoveStopWords(sentence)
                        if sentence:
                            sentence = Stemming(sentence)
                            sentence = " ".join(sentence)
                            result = senti.getSentiment(sentence,
                                                        score='binary')
                            if result[0][0] + result[0][1] == 0:
                                # save the tokenized sentence
                                #spamwriter.writerow([sentence, result[0][0], result[0][1]])
                                # save the whole sentence
                                spamwriter.writerow([
                                    row["message"], result[0][0], result[0][1]
                                ])
                # post with comment replies
                if row["object_link.connections.comments.message"] != 'null' and row[
                        "object_link.connections.comments.message"]:
                    sentence = row["object_link.connections.comments.message"]
                    #sentence = RemoveAccent(sentence)
                    sentence = Tokenize(sentence)
                    if sentence:
                        sentence = RemoveStopWords(sentence)
                        if sentence:
                            sentence = Stemming(sentence)
                            sentence = " ".join(sentence)
                            result = senti.getSentiment(sentence,
                                                        score='binary')
                            if result[0][0] + result[0][1] == 0:
                                # show the tokenized sentence
                                #spamwriter.writerow([sentence, result[0][0], result[0][1]])
                                # show the whole sentence
                                spamwriter.writerow([
                                    row["object_link.connections.comments.message"],
                                    result[0][0], result[0][1]
                                ])
                prev_message = row["message"]
    print("finish!")
def pre_process_and_predict(sentence):
    wordnet_lemmatizer = WordNetLemmatizer()

    # Replace double quotes with single quotes within the string
    sentence = sentence.replace("\"", "\'")

    # Remove unnecessary special characters, keeping only , ! ?
    sentence = re.sub(r"[^!?,a-zA-Z0-9\ ]+", '', sentence)

    # Lemmatization on verbs
    sentence = ' '.join([
        wordnet_lemmatizer.lemmatize(word, pos='v')
        for word in word_tokenize(sentence)
    ])

    sn = SenticNet()
    senti = PySentiStr()
    senti.setSentiStrengthPath(CODE_PATH + '/sentistrength/SentiStrength.jar')
    senti.setSentiStrengthLanguageFolderPath(
        CODE_PATH + '/sentistrength/SentStrength_Data/')

    sentiment_score = []
    for sen in sent_tokenize(sentence):
        senti_pos, senti_neg = senti.getSentiment(sen, score='dual')[0]
        senti_pos -= 1
        if senti_neg == -1:
            senti_neg = 0
        sum_pos_score = 0
        sum_neg_score = 0
        for word in word_tokenize(sen):
            try:
                w_score = float(sn.polarity_intense(word)) * 5
            except KeyError:
                w_score = 0
            if w_score > 0:
                sum_pos_score = sum_pos_score + w_score
            elif w_score < 0:
                sum_neg_score = sum_neg_score + w_score
        sum_pos_score = (sum_pos_score + senti_pos) / 2
        sum_neg_score = (sum_neg_score + senti_neg) / 2
        sentiment_score.append((sum_pos_score, sum_neg_score))

    additional_features_s = []
    additional_features_ns = []
    contra = []
    pos_low = []
    pos_medium = []
    pos_high = []
    neg_low = []
    neg_medium = []
    neg_high = []
    for sum_pos_score, sum_neg_score in sentiment_score:
        contra.append(int(sum_pos_score > 0 and abs(sum_neg_score) > 0))
        pos_low.append(int(sum_pos_score < 0))
        pos_medium.append(int(sum_pos_score >= 0 and sum_pos_score <= 1))
        pos_high.append(int(sum_pos_score >= 2))
        neg_low.append(int(sum_neg_score < 0))
        neg_medium.append(int(sum_neg_score >= 0 and sum_neg_score <= 1))
        neg_high.append(int(sum_neg_score >= 2))

    additional_features_s = additional_features_s + [
        max(pos_medium), max(pos_high), max(neg_medium), max(neg_high)
    ]
    additional_features_ns = additional_features_ns + [
        max(pos_low), max(neg_low)
    ]

    tweet = sentence
    punctuation_count = SequencePunctuationCount(tweet)
    character_count = SequenceCharacterCount(tweet)
    capitalized_count = CapitalizedCount(tweet)
    exclamation_count = ExclamationCount(tweet)
    # emoji_count = EmojiCount(tweet)
    f_count = [
        punctuation_count, character_count, capitalized_count,
        exclamation_count
    ]
    for count in f_count:
        f_low = int(count == 0)
        f_medium = int(count >= 1 and count <= 3)
        f_high = int(count >= 4)
        additional_features_s = additional_features_s + [f_medium, f_high]
        additional_features_ns = additional_features_ns + [f_low]

    X = [sentence]
    in_file = open(os.path.join(PICKLES_PATH, "vocab.pickle"), "rb")
    vocab = pickle.load(in_file)
    in_file.close()
    in_file = open(os.path.join(PICKLES_PATH, "model.pickle"), "rb")
    model = pickle.load(in_file)
    in_file.close()
    vectorizer = TfidfVectorizer(vocabulary=vocab)
    X = vectorizer.fit_transform(X)
    ans = int(sum(model.predict(X)))

    print('Sentence : ', sentence)
    print('Sarcastic features : ', additional_features_s)
    print('Not Sarcastic features : ', additional_features_ns)
    print('Contradict : ', max(contra))
    print('Model Predict : ', ans)
    print('My obs : ',
          int((sum(additional_features_s) >= sum(additional_features_ns))
              and max(contra) == 1))
    print('Final Prd : ', end='')
    if ans == 1 or ((sum(additional_features_s) >= sum(additional_features_ns))
                    and max(contra) == 1):
        return True
    else:
        return False
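A minimal call sketch; it assumes CODE_PATH and PICKLES_PATH are defined at module level and that the pickled vocab and model files exist on disk:

# Hypothetical call; prints the intermediate features and returns the
# final sarcasm verdict as a bool.
is_sarcastic = pre_process_and_predict("Oh great, another Monday, I just love it!")
print('sarcastic' if is_sarcastic else 'not sarcastic')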
import xml.etree.ElementTree as xml
from sentistrength import PySentiStr

# initialize SentiStrength
sstrength = PySentiStr()
sstrength.setSentiStrengthPath("SentiStrength.jar")
sstrength.setSentiStrengthLanguageFolderPath("SentiStrength_Data")


# Given a list of answers, return a list with the sentiment values
# produced by SentiStrength
def analise_sentistr(respostas):
    return sstrength.getSentiment(respostas)
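A quick usage sketch; the sample answers are invented, and the call relies on PySentiStr's default score mode, which returns one combined value per text:

respostas = ['Adorei o atendimento', 'Péssima experiência']
valores = analise_sentistr(respostas)
# e.g. [3, -4] -- one combined sentiment value per answer (values illustrative)
print(valores)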