def sentiment(input):
    """Score the document-level sentiment of *input* and react with sound.

    Tries IBM Watson NLU first; on any failure (network error, bad payload,
    missing keys) falls back to NLTK VADER's compound score.  The score is
    printed and passed to react_with_sound().

    Returns:
        int: the constant 6 (kept unchanged for caller compatibility).
    """
    try:
        # using IBM Watson document-level sentiment
        response = naturalLanguageUnderstanding.analyze(
            text=input,
            features=Features(
                sentiment=SentimentOptions(document=None, targets=None))
        ).get_result()
        # fix: get_result() already returns a plain dict, so the original
        # json.dumps()/json.loads() round-trip was a no-op and is removed.
        sentiment_value = float(response['sentiment']['document']['score'])
    except Exception:
        # fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; fall back to NLTK VADER.
        sentiment_value = sid().polarity_scores(input)['compound']
    print(sentiment_value)
    react_with_sound(sentiment_value)
    return 6
def general_sentiment(inpt):
    """Return the mean VADER compound sentiment over a review collection.

    Args:
        inpt: table-like object (presumably a pandas DataFrame — confirm
            against callers) whose 'reviews.text' column is indexed 0..n-1.

    Returns:
        float: average compound score in [-1, 1]; 0.0 for empty input
        (fix: the original raised ZeroDivisionError when len(inpt) == 0).
    """
    n = len(inpt)
    if n == 0:
        return 0.0
    # import kept local, as in the original, so nltk is only required here
    from nltk.sentiment.vader import SentimentIntensityAnalyzer as sid
    senti_analyzer = sid()
    # idiom: comprehension replaces the manual append loop
    scores = [
        senti_analyzer.polarity_scores(str(inpt['reviews.text'][i]))['compound']
        for i in range(n)
    ]
    return sum(scores) / n
def confirm(this):
    """Repeatedly ask the user to confirm by voice.

    Returns True when the reply scores positive under VADER, False when it
    scores negative; a neutral score re-prompts.
    """
    while True:
        speak("Please confirm if this text is okay")
        reply = this.recognize_text(2)
        analyzer = sid()
        compound = analyzer.polarity_scores(reply)['compound']
        if compound > 0:
            return True
        if compound < 0:
            return False
        speak("Sorry I didn't understand that")
def sentiment(input):
    """Score the document-level sentiment of *input* and react with sound.

    Duplicate of the earlier variant except for the sentinel return value.
    Tries IBM Watson NLU first; on any failure falls back to NLTK VADER's
    compound score.  The score is printed and passed to react_with_sound().

    Returns:
        int: the constant 7 (kept unchanged for caller compatibility).
    """
    try:
        response = naturalLanguageUnderstanding.analyze(
            text=input,
            features=Features(
                sentiment=SentimentOptions(document=None, targets=None))
        ).get_result()
        # fix: get_result() already returns a plain dict, so the original
        # json.dumps()/json.loads() round-trip was a no-op and is removed.
        sentiment_value = float(response['sentiment']['document']['score'])
    except Exception:
        # fix: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; fall back to NLTK VADER.
        sentiment_value = sid().polarity_scores(input)['compound']
    print(sentiment_value)
    react_with_sound(sentiment_value)
    return 7
def main():
    """Capture one spoken utterance, score it with NLTK VADER, and play the
    R2 sound clip matching the sentiment bucket."""
    recognizer = sr.Recognizer()
    ### open the microphone and grab a single utterance
    microphone = sr.Microphone(0)
    with microphone as source:
        recognizer.adjust_for_ambient_noise(source)
        print("\n\n\nYou may begin talking:\n\n\n")
        audio = recognizer.listen(source)
        try:
            ### speech -> text via Google's recognizer
            spoken = recognizer.recognize_google(audio)
            print("The following text was said:\n\n" + spoken)
            ### VADER compound score in [-1, 1]
            compound = sid().polarity_scores(spoken)['compound']
            print ("On a -1 to 1 scale (< 0 is negative, > 0 is positive, = 0 is neutral), the text is: " + str(compound))
            #TODO: change this section to be more specific to perform more specific analysis
            #added sound output
            lead_folder = "/home/pi/r2-voice_recognition/R2FinalSounds/"
            sounds = {"angry":"R2Angry.wav" , "good":"R2Good.wav" , "happy":"R2Happy.wav" , "neutral":"R2Neutral.wav", "sad":"R2Sad.wav"}
            # bucket the score, then play the one matching clip
            if compound < -0.5:
                mood = "angry"
            elif compound < 0:
                mood = "sad"
            elif compound == 0:
                mood = "neutral"
            elif compound > 0.5:
                mood = "happy"
            else:
                mood = "good"
            play_sound(lead_folder + sounds[mood])
            #TODO: change this section to be more specific to perform more specific analysis
        except sr.UnknownValueError:
            print ("What are you saying?")
def main():
    """Text-driven R2 control loop (console-input variant).

    Waits for a wake phrase, then dispatches each typed command to a
    greeting / item-fetching handler, a Cornell Cup info dump, or Periscope,
    falling back to Watson/VADER sentiment analysis with a sound reaction.
    """
    methodcnt = False
    #method dispatcher to connect to functions
    dispatcher = {
        'wave1': wave,
        'greet1': greet,
        'take_attendance1': take_attendance,
        'grab_item1': grab_item
    }
    # https://www.reddit.com/r/Python/comments/7udbs1/using_python_dict_to_call_functions_based_on_user/
    #test run to see if all r2 functionality working as expected
    fndictGreetingsKeys = {"wave", "hello", "hi", "hey", "check", "attendance"}
    fndictGetItemsKeys = {"water", "bottle", "stickers"}
    #fndictGetGamesKey = {"None", "rock paper scissors"}
    #in formation of dictionaries, all functions being called
    fndictGreetings = {
        "wave": dispatcher['wave1'],
        "hello": dispatcher['greet1'],
        "hi": dispatcher['greet1'],
        "hey": dispatcher['greet1'],
        "check": dispatcher['take_attendance1'],
        "attendance": dispatcher['take_attendance1']
    }
    fndictGetItems = {
        "water": dispatcher['grab_item1'],
        "bottle": dispatcher['grab_item1'],
        "stickers": dispatcher['grab_item1']
    }
    #fndictGames = {"game":game("None"), "games":game("None"), "rock paper scissors":game("rock paper scissors")}
    methodcnt = True
    ### opens microphone instance that takes speech from human to convert to text
    #r = sr.Recognizer()
    #mic = sr.Microphone(2)
    # tells R2 to wake up
    while (True):
        spoken_text = input("enter text here: ")
        #spoken_text = listen(r, mic)
        #spoken_text = spoken_text.lower()
        print("The following startup phrase was said:\n" + spoken_text + "\n")
        # R2 unsure of input
        if (spoken_text == ""):
            print("What?")
            react_with_sound(no_clue_final)
        elif ("r2 stop" in spoken_text):
            write(spoken_text)
            stop()
        elif ("hey r2" in spoken_text):
            print("awake")
            react_with_sound(wakeup_final)
            break
    # R2 waits to hear what user wants - CHANGE PROMPTS HERE
    while (True):
        spoken = input("enter text here 2: ")
        #spoken = simplify_text(listen (r, mic))
        #spoken = spoken.lower()
        print("The following text was said:\n" + spoken + "\n")
        t1 = threading.Thread(target=write, args=(spoken, ))
        if ("r2 stop" in spoken):
            stop()
        # R2 unsure of input
        elif (spoken == ""):
            print("What?")
            react_with_sound(no_clue_final)
        #use NLTK to determine part of speech of first word spoken
        # NOTE(review): there is no `else:` here, so the POS/keyword analysis
        # below runs even when `spoken` was empty — tagged[0] would then raise
        # IndexError on an empty token list; likely needs a guard. Confirm.
        tokens = nltk.word_tokenize(spoken)
        tagged = nltk.pos_tag(tokens)
        print(tagged[0])
        keywords = liteClient.getKeywords(spoken)
        #if question desired about Cornell Cup
        if ("cup" in keywords and "cornell" in keywords or "competition" in keywords):
            spit_info()
        #run through commands first
        elif ("VB" in tagged[0]):
            if ("high five" in spoken):
                keywords.append("high five")
            if "wave" in keywords:
                wave()
                break
            else:
                # try each extracted keyword against the dispatch tables;
                # stop at the first match
                for x in range(0, len(keywords)):
                    word = keywords[x]
                    print(word)
                    react_with_sound(confirmation_final)
                    if (word in fndictGreetingsKeys):
                        print(fndictGreetings[word](methodcnt))
                        print("in fndictGreetingKeys")
                        break
                    elif (word in fndictGetItemsKeys):
                        print(fndictGetItems[word](word, methodcnt))
                        print("in fndictGetItemsKey")
                        break
        #tell R2 to open Periscope
        elif ("periscope" in keywords):
            open_periscope()
            # commented-out game feature kept as an inert string literal
            """
            #tell R2 to play a game
            elif ("rock paper scissors" in keywords or "game" in keywords):
                game("rock paper scissors")
            """
        else:
            #sentiment analysis: Watson NLU first, VADER fallback on any error
            try:
                response = naturalLanguageUnderstanding.analyze(
                    text=spoken,
                    features=Features(sentiment=SentimentOptions(
                        document=None, targets=None))).get_result()
                parsed_json = json.loads(json.dumps(response, indent=2))
                sentiment = parsed_json['sentiment']
                document = sentiment['document']
                score = document['score']
                sentiment_value = float(score)
            except:
                # NOTE(review): bare except also hides programming errors
                sentiment_value = sid().polarity_scores(spoken)['compound']
            print(sentiment_value)
            react_with_sound(sentiment_value)
        #write(spoken)
        # log the utterance on a worker thread, then wait for it
        t1.start()
        t1.join()
# IBM Watson Natural Language Understanding client setup plus a batch
# sentiment pass over a tab-separated sentences file.
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 \
    import Features, EntitiesOptions, KeywordsOptions, SentimentOptions

# NOTE(review): hard-coded IAM API key committed to source — rotate this
# credential and load it from the environment or a config file instead.
naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
    version='2018-11-16',
    iam_apikey='ZpNv1kcHqUvvzupBoxNRa-PvNKf-vbLnL6QLjBZTvHmr')

with open("C:\PythonProjects\\r2-voice_recognition\CSVSentences\sentences.txt",
          newline='', encoding='utf-8') as f:
    reader = csv.reader(f, delimiter='\t')
    csvData = []
    for row in reader:
        spoken = row[0]
        # VADER compound score first; appended to the row below
        sentiment_value = sid().polarity_scores(spoken)['compound']
        print(sentiment_value)
        print(row)
        print(spoken)
        row.append(sentiment_value)
        # then overwrite with Watson's document-level score
        # NOTE(review): this `try` has no `except`/`finally` visible in this
        # chunk — the handler presumably follows elsewhere in the file; verify.
        try:
            response = naturalLanguageUnderstanding.analyze(
                text=spoken,
                features=Features(sentiment=SentimentOptions(
                    document=None, targets=None))).get_result()
            parsed_json = json.loads(json.dumps(response, indent=2))
            sentiment = parsed_json['sentiment']
            document = sentiment['document']
            score = document['score']
            sentiment_value = float(score)
def main():
    """R2 control loop (GUI-writer variant, keyboard input).

    Waits for a wake phrase (with fuzzy matches for common speech-recognition
    misreadings), then dispatches commands to greeting / item handlers,
    object detection, or friend-making, falling back to Watson/VADER
    sentiment analysis, and writes the outcome to the GUI on worker threads.
    """
    methodcnt = False
    # method dispatcher to connect to functions (https://www.reddit.com/r/Python/comments/7udbs1/using_python_dict_to_call_functions_based_on_user/)
    dispatcher = {
        'wave1': wave,
        'greet1': greet,
        'take_attendance1': take_attendance,
        'grab_item1': grab_item
    }
    # test run to see if all r2 functionality working as expected
    fndictGreetingsKeys = {"wave", "hello", "hi", "hey", "check", "attendance"}
    fndictGetItemsKeys = {
        "water", "bottle", "stickers", "periscope", "nerf", "guns", "gun"
    }  # NEED TO CHECK SPELLING OF PERISCOPE FOR VOICE RECOGNITION
    # in formation of dictionaries, all functions being called
    fndictGreetings = {
        "wave": dispatcher['wave1'],
        "hello": dispatcher['greet1'],
        "hi": dispatcher['greet1'],
        "hey": dispatcher['greet1'],
        "check": dispatcher['take_attendance1'],
        "attendance": dispatcher['take_attendance1']
    }
    fndictGetItems = {
        "water": dispatcher['grab_item1'],
        "bottle": dispatcher['grab_item1'],
        "stickers": dispatcher['grab_item1'],
        "periscope": dispatcher['grab_item1'],
        "nerf": dispatcher['grab_item1'],
        "guns": dispatcher['grab_item1'],
        "gun": dispatcher['grab_item1']
    }
    methodcnt = True
    setup_bool = True
    # opens microphone instance that takes speech from human to convert to text
    r = sr.Recognizer()
    mic = sr.Microphone(device_index)
    r.dynamic_energy_threshold = True
    # tells R2 to wake up
    while (True):
        spoken = input("enter command or sentence: ")
        print("The following startup phrase was said:\n" + spoken + "\n")
        # phrases the recognizer commonly returns instead of the wake phrase
        close_enough = [
            "ar2", "or to blue", "hey arthur", "ai2", "they are two", "ko2",
            "halo 2", "naruto", "ar jail", "ar-10", "airtel", "rdr2 hello",
            "q38", "hey r2", "yo are two", "zr2", "you are two", "hey or two",
            "hey are two"
        ]
        # R2 unsure of input
        if (spoken == ""):
            print("What?")
            react_with_sound(no_clue_final)
        elif ("r2 stop" in spoken):
            stop()
        elif (spoken in close_enough):
            print("awake")
            react_with_sound(wakeup_final)
            break
    # R2 waits to hear what user wants - CHANGE PROMPTS HERE
    while (True):
        spoken = input("enter command or sentence: ")
        print("The following text was said:\n" + spoken + "\n")
        if ("r2 stop" in spoken):
            stop()
        # R2 unsure of input
        elif (spoken == ""):
            print("What?")
            react_with_sound(no_clue_final)
        #calling object detection
        elif ("what do you see" in spoken):
            object_detection()
        #calling make friends
        elif ("call me " in spoken):
            make_friends(spoken)
        else:
            # use NLTK to determine part of speech of first word spoken
            tokens = nltk.word_tokenize(spoken)
            tagged = nltk.pos_tag(tokens)
            print(tagged[0])
            #obtain keywords from uttered phrase
            keywords = liteClient.getKeywords(spoken)
            # run through commands first
            if ("wave" in spoken or "high five" in spoken or "VB" in tagged[0]
                    or "JJ" in tagged[0]):
                if ("high five" in spoken):
                    keywords.append(
                        "high five"
                    )  #need to add a case for r2 to give a high five to user
                else:
                    # try each keyword against the dispatch tables; stop at
                    # the first match
                    for x in range(0, len(keywords)):
                        word = keywords[x]
                        print(word)
                        react_with_sound(confirmation_final)
                        if (word in fndictGreetingsKeys):
                            print(fndictGreetings[word](methodcnt))
                            print("in fndictGreetingKeys")
                            break
                        elif (word in fndictGetItemsKeys):
                            print(fndictGetItems[word](word, methodcnt))
                            print("in fndictGetItemsKey")
                            break
            # sentiment analysis
            else:
                try:
                    global sentiment_value
                    response = naturalLanguageUnderstanding.analyze(
                        text=spoken,
                        features=Features(sentiment=SentimentOptions(
                            document=None, targets=None))).get_result()
                    parsed_json = json.loads(json.dumps(response, indent=2))
                    sentiment = parsed_json['sentiment']
                    document = sentiment['document']
                    score = document['score']
                    sentiment_value = float(score)
                except:
                    # NOTE(review): bare except also hides programming errors
                    sentiment_value = sid().polarity_scores(spoken)['compound']
                print(sentiment_value)
                react_with_sound(sentiment_value)
            #writing outcome to gui
            # NOTE(review): if the command branch above ran instead of the
            # sentiment branch, the global sentiment_value may be unset on the
            # first pass and t2 would raise NameError — verify against the
            # module-level initialization.
            t1 = threading.Thread(target=writeToVoice, args=(spoken, ))
            t2 = threading.Thread(target=writeToSentiment,
                                  args=(sentiment_value, ))
            t1.start()
            t2.start()
            t1.join()
            t2.join()
# Persist the positive reviews and the ratings dict, then score each review's
# sentiment with NLTK VADER after filtering stop-words, punctuation and
# out-of-vocabulary (misspelled) tokens.
pickle.dump(positive_reviews, save_Preview_data)
save_Preview_data.close()
save_rating_data = open("Rating.pickle","wb")
pickle.dump(d_rating, save_rating_data)
save_rating_data.close()

# =============================================================================
# Filter out stop-words, punctuation and misspelled words and perform sentiment
# analysis to find the compound sentiment of the review. Save the sentiments as
# a pickle file
# =============================================================================
stop_words = set(stopwords.words("english"))
# NOTE(review): words.words() is a plain list, so the `w in word_list` test
# below is O(len(list)) per token — wrapping it in set() would be much faster.
word_list = words.words()
senti_analyzer = sid()
m = len(d_review)
for j in range(m):
    sent = []
    word = word_tokenize(d_review[j])
    for w in word:
        # keep only in-vocabulary, non-stopword, non-punctuation tokens
        if w not in stop_words and w not in string.punctuation \
                and w in word_list:
            sent.append(w)
    sent = ' '.join(sent)
    ss = senti_analyzer.polarity_scores(sent)
    sentiment[j] = ss['compound']  # compound of neg, pos and neutral sentiment
    print("Sentiment score for review number {0}: {1} is {2}".format(
        (j + 1), d_review[j], sentiment[j]))
def main():
    """Voice-driven R2 control loop.

    Waits for the "wake up droid" phrase, then handles sleep, attendance,
    movement commands (setting a global command code), and — for utterances
    starting with "droid" — VADER sentiment analysis with a sound reaction.
    """
    r = sr.Recognizer()
    ### opens microphone and takes speech from human to convert to text
    mic = sr.Microphone(2)
    ### wake up call
    while (True):
        spoken_text = listen(r, mic)
        print("The following startup phrase was said:\n" + spoken_text + "\n")
        if ("wakeupdroid" in simplify_text(spoken_text)
                or "wake-updroid" in simplify_text(spoken_text)):
            print ("awake")
            # file_object_correct = open("data-yes.csv", "a")
            # file_object_wrong = open("data-no.csv", "a")
            # file_object_r2 = open("r2sayings.txt", "a")
            react_with_sound(wakeup_final)
            break
    while (True):
        spoken = listen (r, mic)
        print("The following text was said:\n" + spoken + "\n")
        # R2 unsure of input
        if (spoken == ""):
            print ("What?")
            react_with_sound(no_clue_final)
        # shut down R2
        elif ("sleepdroid" in simplify_text(spoken)):
            print ("sleeping")
            # file_object_correct.close()
            # file_object_wrong.close()
            react_with_sound(sleep_final)
            break
        # have R2 take attendance
        elif ("takeattendancedroid" in simplify_text(spoken)):
            print ("checking in - F.R.")
            react_with_sound(attendance_final)
        # moving R2
        elif (("move" in simplify_text(spoken)
               or "turn" in simplify_text(spoken))
              and "droid" in simplify_text(spoken)):
            spoken = simplify_text(spoken)
            # command code published via a module-level global; presumably
            # consumed by a motor/socket process elsewhere — confirm.
            global data
            if (spoken.lower() == "moveforwarddroid"
                    or spoken.lower() == "moveforwardsdroid"):
                data = "1"
                #data["r2"] = "fwd"
            elif (spoken.lower() == "movebackwarddroid"
                    or spoken.lower() == "movebackwardsdroid"):
                data = "2"
                #data["r2"] = "rvr"
            elif (spoken.lower() == "moveleftdroid"
                    or spoken.lower() == "turnleftdroid"):
                data = "3"
                #data["r2"] = "left"
            elif (spoken.lower() == "moverightdroid"
                    or spoken.lower() == "turnrightdroid"):
                data = "4"
                #data["r2"] = "right"
            print(data)
            react_with_sound(move_final)
            #time.sleep(0.1)
            #data["r2"] = "-1"
            #sendSocket.sendall(json.dumps(data).encode())
        # R2 analyzing speech
        elif (spoken[:5].lower() == "droid"):
            #phrase = spoken[6:]
            ### use basic NLTK sentiment analysis algo Vader to assess speech
            phrase = spoken
            sentiment_value = sid().polarity_scores(phrase)['compound']
            print ("On a -1 to 1 scale (< 0 is negative, > 0 is positive, = 0 is neutral), the text is: " + str(sentiment_value))
            #TODO: change this section to be more specific to perform more specific analysis
            #write to file
            #print ("good? y or n")
            #answer = input()
            #if (answer == "y"):
            #    file_object_correct.write (phrase + "," + str(sentiment_value) + "\n")
            #elif (answer == "n"):
            #    file_object_wrong.write (phrase + "," + str(sentiment_value) + "\n")
            ### sound output
            react_with_sound(sentiment_value)