def mic():
    # Record from the microphone, then transcribe the saved WAV file with Google Speech Recognition.
    record_audio()
    r = sr.Recognizer()
    audio = "C:\\Users\\Omkar\\Desktop\\file.wav"
    with sr.AudioFile(audio) as source:
        txt_audio = r.record(source)
    t = str(r.recognize_google(txt_audio))
    txt.insert(END, t)
def convert():
    # Transcribe the recorded WAV file, then translate the English text to Hindi.
    record_audio()
    r = sr.Recognizer()
    audio = "C:\\Users\\Omkar\\Desktop\\file.wav"
    with sr.AudioFile(audio) as source:
        txt_audio = r.record(source)
    t = str(r.recognize_google(txt_audio))
    a = translator.translate(t, src='en', dest='hi')
    txt.insert(END, a.text)
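# A minimal sketch of the module-level setup mic() and convert() assume:
# speech_recognition for transcription, googletrans for the English-to-Hindi
# translation, and a Tkinter Text widget named `txt` for output. The
# record_audio() helper here is a hypothetical stand-in that captures a short
# clip from the default microphone into the WAV file both handlers read back.
import speech_recognition as sr
from googletrans import Translator
from tkinter import Tk, Text, END

translator = Translator()
root = Tk()
txt = Text(root)
txt.pack()

def record_audio(path="C:\\Users\\Omkar\\Desktop\\file.wav", seconds=5):
    # Hypothetical helper: listen on the microphone for up to `seconds`
    # seconds and save the captured audio as a WAV file.
    r = sr.Recognizer()
    with sr.Microphone() as source:
        clip = r.listen(source, phrase_time_limit=seconds)
    with open(path, "wb") as f:
        f.write(clip.get_wav_data())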
def ask_playback(urls, q):
    if main.run == True:
        url = str(urls)
        main.voice_data = recorder.record_audio(
            "Let me know if you would like me to play any video from the search results.", q)
        # print("Voice data is: " + str(main.voice_data))
        if main.person_says(["first", "1", "one", "1st", "start", "yes", "sure", "yep", "yeah", "any", "okay", "ok"]):
            watchvideo(url, 0, q)
        elif main.person_says(["second", "2", "two", "2nd"]):
            watchvideo(url, 1, q)
        elif main.person_says(["third", "3", "three", "3rd"]):
            watchvideo(url, 2, q)
        elif main.person_says(["fourth", "4", "four", "4th"]):
            watchvideo(url, 3, q)
        elif main.person_says(["fifth", "5", "five", "5th"]):
            watchvideo(url, 4, q)
        elif main.person_says(["sixth", "6", "six", "6th"]):
            watchvideo(url, 5, q)
        elif main.person_says(["seventh", "7", "seven", "7th"]):
            watchvideo(url, 6, q)
        elif main.person_says(["eighth", "8", "eight", "8th"]):
            watchvideo(url, 7, q)
        elif main.person_says(["ninth", "9", "nine", "9th"]):
            watchvideo(url, 8, q)
        elif main.person_says(["tenth", "10", "ten", "10th"]):
            watchvideo(url, 9, q)
        elif main.person_says(["eleventh", "11", "eleven", "11th"]):
            watchvideo(url, 10, q)
        elif main.person_says(["no", "nope", "sorry", "nah", "quit"]):
            q.put(main.asis_obj.name + ": " + "Okay!" + "\n")
            speaker.speech_output("Okay!")
        else:
            q.put(main.asis_obj.name + ": " + "Sorry, I could not grasp what you meant." + "\n")
            speaker.speech_output("Sorry, I could not grasp what you meant.")
    else:
        pass
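# A hedged, data-driven alternative to the ordinal elif chain above: map each
# group of spoken ordinals to a result index and look it up once. `main` and
# main.person_says() are the project's own module and helper, assumed (as in
# the original) to check whether any of the given phrases occurs in
# main.voice_data.
import main

ORDINALS = [
    ["first", "1", "one", "1st", "start", "yes", "sure", "yep", "yeah", "any", "okay", "ok"],
    ["second", "2", "two", "2nd"],
    ["third", "3", "three", "3rd"],
    ["fourth", "4", "four", "4th"],
    ["fifth", "5", "five", "5th"],
    ["sixth", "6", "six", "6th"],
    ["seventh", "7", "seven", "7th"],
    ["eighth", "8", "eight", "8th"],
    ["ninth", "9", "nine", "9th"],
    ["tenth", "10", "ten", "10th"],
    ["eleventh", "11", "eleven", "11th"],
]

def pick_result_index():
    # Return the index of the first ordinal group the user mentioned, or None.
    for index, words in enumerate(ORDINALS):
        if main.person_says(words):
            return index
    return None

# Usage inside ask_playback(): a found index maps straight to the search result,
# e.g. index = pick_result_index() followed by watchvideo(url, index, q).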
def recognize_speech(audiofile, duration):
    # Record for `duration` seconds, then send the WAV bytes to the Wit.ai speech endpoint.
    record_audio(duration, audiofile)
    audio = read_audio(audiofile)
    headers = {
        'authorization': 'Bearer ' + wit_access_token,
        'Content-Type': 'audio/wav'
    }
    resp = requests.post(API_ENDPOINT + API_FUNCTION_SPEECH, headers=headers, data=audio)
    data = json.loads(resp.content)
    print(data)
    text = ''
    if 'text' in data:
        text = data['text']
    # Return the text from the response, or an empty string if none was found.
    return text
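# A minimal sketch of the Wit.ai pieces recognize_speech() relies on. The
# token value is a placeholder, read_audio() is an assumed helper, and the
# endpoint constants follow the public Wit.ai HTTP speech API that the
# function above posts to.
import json
import requests

API_ENDPOINT = 'https://api.wit.ai'
API_FUNCTION_SPEECH = '/speech'
wit_access_token = 'YOUR_WIT_ACCESS_TOKEN'  # placeholder, not a real token

def read_audio(audiofile):
    # Read the recorded WAV file as raw bytes for the request body.
    with open(audiofile, 'rb') as f:
        return f.read()

# Example call: record five seconds of audio and print the transcription.
# print(recognize_speech('speech.wav', 5))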
def definitions(q):
    if main.run == True or main.crun == True:
        import recorder
        if main.person_says(["what is"]) and 'definition' not in main.voice_data and 'definitions' not in main.voice_data:
            search_term = main.voice_data.split("what is")[-1]
            search_term = search_term.replace('is', ' ')
            wiki_search(search_term, q)
        elif main.person_says(["definition of", "definitions of"]):
            search_term = main.voice_data.split("of")[-1]
            wiki_search(search_term, q)
        elif main.person_says(["definition for", "definitions for"]):
            search_term = main.voice_data.split("for")[-1]
            wiki_search(search_term, q)
        else:
            if main.run == True:
                # No usable phrase was caught, so ask for the word again.
                main.voice_data = recorder.record_audio(
                    "What do you need the definition of? Please tell me the word again.", q)
                search_term = main.voice_data
                wiki_search(search_term, q)
            else:
                pass
    else:
        pass
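# A hedged sketch of the wiki_search() helper the handler above assumes, using
# the third-party `wikipedia` package; the real project presumably also speaks
# the summary through its speaker module, which is omitted here.
import wikipedia

def wiki_search(search_term, q):
    # Look up a short summary and push it onto the output queue.
    try:
        summary = wikipedia.summary(search_term, sentences=2)
    except wikipedia.exceptions.WikipediaException:
        summary = "Sorry, I could not find anything on " + search_term.strip() + "."
    q.put(summary + "\n")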
def core(q):
    import main
    if main.run == True:
        import speaker
        import recorder
        moves = ["rock", "paper", "scissor"]
        # Also accept common misrecognitions of "scissor" and "paper".
        pmoves = ["rock", "paper", "scissor", "caesar", "peppa"]
        main.voice_data = recorder.record_audio(
            "Choose among rock, paper or scissor:", q)
        if main.voice_data not in pmoves:
            q.put(main.asis_obj.name + ": " +
                  "Sorry I did not understand. Would you like to try again?" + "\n")
            speaker.speech_output(
                "Sorry I did not understand. Would you like to try again?")
            error(q)
        else:
            import random
            cmove = random.choice(moves)
            pmove = main.voice_data
            pmove = name_check(pmove)
            q.put(main.asis_obj.name + ": " + "You chose " + pmove + "\n")
            speaker.speech_output("You chose " + pmove)
            q.put(main.asis_obj.name + ": " + "I chose " + cmove + "\n")
            speaker.speech_output("I chose " + cmove)
            if pmove == cmove or (pmove == "peppa" and cmove == "paper") or (
                    pmove == "caesar" and cmove == "scissor"):
                q.put(main.asis_obj.name + ": " +
                      "The match is a draw. Haha. We both are out of luck today." + "\n")
                speaker.speech_output(
                    "The match is a draw. Haha. We both are out of luck today.")
            elif pmove == "rock" and cmove == "scissor":
                q.put(main.asis_obj.name + ": " + "Rock crushes scissor. You won " +
                      main.person_obj.name + "! You are quite lucky today." + "\n")
                speaker.speech_output("Rock crushes scissor. You won " +
                                      main.person_obj.name + "! You are quite lucky today.")
            elif pmove == "rock" and cmove == "paper":
                q.put(main.asis_obj.name + ": " + "Paper beats rock. You lost " +
                      main.person_obj.name + ". Sorry, I am luckier than you today!" + "\n")
                speaker.speech_output("Paper beats rock. You lost " + main.person_obj.name +
                                      ". Sorry, I am luckier than you today!")
            elif (pmove == "paper" or pmove == "peppa") and cmove == "rock":
                q.put(main.asis_obj.name + ": " + "Paper beats rock. You won " +
                      main.person_obj.name + "! Looks like someone is luckier today!" + "\n")
                speaker.speech_output("Paper beats rock. You won " + main.person_obj.name +
                                      "! Looks like someone is luckier today!")
            elif (pmove == "paper" or pmove == "peppa") and cmove == "scissor":
                q.put(main.asis_obj.name + ": " + "Scissor cuts paper. You lost " +
                      main.person_obj.name + "! I hope I did not play you out. Hehe." + "\n")
                speaker.speech_output("Scissor cuts paper. You lost " + main.person_obj.name +
                                      "! I hope I did not play you out. Hehe.")
            elif (pmove == "scissor" or pmove == "caesar") and cmove == "paper":
                q.put(main.asis_obj.name + ": " + "Scissor cuts paper. You're the winner " +
                      main.person_obj.name + ". Aha! You're awesome." + "\n")
                speaker.speech_output("Scissor cuts paper. You're the winner " +
                                      main.person_obj.name + ". Aha! You're awesome.")
            elif (pmove == "scissor" or pmove == "caesar") and cmove == "rock":
                q.put(main.asis_obj.name + ": " + "Rock crushes scissor. I'm the winner " +
                      main.asis_obj.name + "! I love rock and roll! Haha" + "\n")
                speaker.speech_output("Rock crushes scissor. I'm the winner " +
                                      main.asis_obj.name + "! I love rock and roll! Haha")
            main.voice_data = recorder.record_audio("Do you want to play again?", q)
            if main.person_says(["quit", "no", "nope", "nah"]):
                pass
            elif main.person_says(["play", "yes", "sure", "okay", "ok"]):
                core(q)
            else:
                q.put(main.asis_obj.name + ": " +
                      "Sorry I did not understand. Would you like to try again?" + "\n")
                speaker.speech_output(
                    "Sorry I did not understand. Would you like to try again?")
                error(q)
    else:
        pass
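# A hedged, compact alternative for deciding the winner in core(): normalise
# the two misheard aliases first, then consult a "what beats what" table
# instead of spelling out every pairing.
ALIASES = {"caesar": "scissor", "peppa": "paper"}
BEATS = {"rock": "scissor", "scissor": "paper", "paper": "rock"}

def decide(pmove, cmove):
    # Return "draw", "player", or "computer" for a single round.
    pmove = ALIASES.get(pmove, pmove)
    if pmove == cmove:
        return "draw"
    return "player" if BEATS[pmove] == cmove else "computer"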
def listen(ask, q):
    voice_data = recorder.record_audio(ask, q)  # get the voice input
def main_page(q):
    while run:
        voice_data = recorder.record_audio("", q)  # get the voice input
        # logging()
        respond.response(voice_data, q, 1)
def record_button(self, display, template, speaker, session, number):
    rec.record_audio(template, speaker, session, number)
    plotter.show_waveplot(display, self.templateSelect[:-5], self.speakerSelect[:-5],
                          self.session, self.counter, 0, 0)