def who_are_you():
    """Introduce the assistant with a randomly selected line."""
    responses = (
        'I am Darong, your lovely personal assistant.',
        'Darong, dint I tell you before?',
        'You ask that so many times! I am Darong.',
    )
    tts(random.choice(responses))
def how_am_i():
    """Answer 'how do I look' with a random canned compliment."""
    compliments = (
        'You are goddamn handsome!',
        'My knees go weak when I see you',
        'You look like the kindest person that I have met',
        'How come you have such a stupid question',
    )
    tts(random.choice(compliments))
def tell_joke():
    """Speak one joke picked at random."""
    one_liners = (
        'What happens to a frogs car when it breaks down? It gets toad away.',
        'Why was six scared of seven? because seven ate nine.',
        'No, I aways forget the punch line.',
    )
    tts(random.choice(one_liners))
def show_all_notes():
    """Print and read aloud every note stored in memory.db.

    Iterates the `notes` column of the `notes` table and speaks each
    row via tts().
    """
    conn = sqlite3.connect('memory.db')
    try:
        tts('Your notes are as follows: ')
        # the first "notes" is the column name of the notes table
        cursor = conn.execute("SELECT notes FROM notes")
        for row in cursor:
            print(row[0])
            tts(row[0])
    finally:
        # Always release the connection, even if tts()/execute() raises;
        # the original leaked it on any exception.
        conn.close()
def note_something(speech_text):
    """Persist a dictated note to the SQLite store.

    The trigger word 'note' is stripped from the recognized phrase and
    the remainder is saved together with today's date (dd-mm-YYYY).
    """
    words = speech_text.split()
    # Only drop the trigger word when present; the original list.remove()
    # raised ValueError for phrases that did not contain 'note'.
    if 'note' in words:
        words.remove('note')
    cleaned_message = ' '.join(words)
    conn = sqlite3.connect('memory.db')
    try:
        conn.execute(
            "INSERT INTO notes (notes, notes_date) VALUES (?, ?)",
            (cleaned_message, datetime.strftime(datetime.now(), '%d-%m-%Y')))
        conn.commit()
    finally:
        # Close even on failure; the original leaked the connection.
        conn.close()
    tts('Your note has been saved')
def driver(self):
    """Greet the user, listen once, and re-listen when addressed by name.

    Returns the recognized utterance the caller should act on.
    """
    tts("Hi. I'm Scarlett. How can I help you today?")
    recognized_audio = self.listen_audio()
    # BUG FIX: `if "scarlett" or "Scarlett" in recognized_audio` was always
    # true — the non-empty literal "scarlett" is truthy and short-circuits
    # the `or`. Test actual, case-insensitive membership instead (guarding
    # against a None result from listen_audio).
    if recognized_audio and "scarlett" in recognized_audio.lower():
        tts("Yes {}, how can I help you?".format(name))
        recognized_audio = self.listen_audio()
    return recognized_audio
def main():
    """Load the user profile (creating it first if absent) and start STT."""
    if os.path.isfile('profile.json'):
        # `with` guarantees the file handle is closed.
        with open('profile.json') as profile:
            profile_data = json.load(profile)
    else:
        # No profile yet: build one, restart, and stop here. The original
        # fell through after the recursive call and hit a NameError on
        # `profile_data` below.
        profile_populator()
        main()
        return
    tts('Welcome ' + profile_data['name'] +
        ', systems are now ready to run. How can I help you?')
    stt(profile_data)
def connect_to_proxy(proxy_username, proxy_password):
    """Log in to the campus HTTP proxy through its web form."""
    tts("Connecting to proxy server.")
    browser = webdriver.Firefox()
    browser.get('http://10.1.1.9:8090/httpclient.html')
    # Locate the login form fields, fill them, and submit.
    user_field = browser.find_element_by_name('username')
    pass_field = browser.find_element_by_name('password')
    user_field.send_keys(proxy_username)
    pass_field.send_keys(proxy_password)
    browser.find_element_by_name('btnSubmit').click()
def main():
    """Capture one utterance from the microphone and echo it via TTS."""
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source)
    try:
        speech_text = r.recognize_google(audio).lower().replace("'", "")
        print("Melissa thinks you said '" + speech_text + "'")
    except sr.UnknownValueError:
        print("Melissa could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
    else:
        # Only speak on success; the original called tts(speech_text)
        # unconditionally, raising NameError after a failed recognition.
        tts(speech_text)
def passiveListen():
    """Passively monitor the microphone and wake on a loud disturbance.

    Samples ~THRESHOLD_TIME seconds of audio to establish an ambient-noise
    THRESHOLD, then listens for up to LISTEN_TIME seconds; when a chunk's
    score exceeds the threshold it answers 'Yes?' and hands control to
    main().  (Passive-listen approach adapted from Jasper.)
    """
    THRESHOLD_MULTIPLIER = 1.8
    RATE = 16000
    CHUNK = 1024
    THRESHOLD_TIME = 1      # seconds used to calibrate the threshold
    LISTEN_TIME = 300       # seconds of active listening

    stream = _audio.open(format=pyaudio.paInt16,
                         channels=1,
                         rate=RATE,
                         input=True,
                         frames_per_buffer=CHUNK)

    # --- calibration pass: rolling window of the last 30 chunk scores ---
    frames = []
    lastN = [i for i in range(30)]
    # BUG FIX: integer division (//). The Python-2 original used `/`,
    # which yields a float in Python 3 and makes range() raise TypeError.
    for i in range(0, RATE // CHUNK * THRESHOLD_TIME):
        data = stream.read(CHUNK)
        frames.append(data)
        lastN.pop(0)
        lastN.append(getScore(data))
        average = sum(lastN) / len(lastN)
    THRESHOLD = average * THRESHOLD_MULTIPLIER

    # --- active pass: wake as soon as a chunk beats the threshold ---
    frames = []
    didDetect = False
    for i in range(0, RATE // CHUNK * LISTEN_TIME):
        data = stream.read(CHUNK)
        frames.append(data)
        score = getScore(data)
        if score > THRESHOLD:
            didDetect = True
            stream.stop_stream()
            stream.close()
            time.sleep(1)
            tts('Yes?')
            main()
            # BUG FIX: stop here — the stream is closed, so continuing the
            # loop would raise on the next stream.read().
            return

    if not didDetect:
        # BUG FIX: Python 3 print() call (original used a py2 print statement).
        print("No disturbance detected")
        stream.stop_stream()
        stream.close()
def main():
    """Record one phrase and speak it back to the user."""
    recognizer = sr.Recognizer()
    # --- capture input ---
    with sr.Microphone() as source:
        print("Say something!")
        audio = recognizer.listen(source)
    # --- produce output ---
    try:
        speech_text = recognizer.recognize_google(audio).lower().replace("'", "")
        tts('You said, ' + speech_text)
    except sr.UnknownValueError:
        print('Audio not understood')
    except sr.RequestError as e:
        print('Could not request results from GSR service; {0}'.format(e))
def stt():
    """Prompt the user, listen once, and return the recognized text.

    Returns None (implicitly) when recognition fails.
    """
    tts("Hi! I'm Scarlett. How can I help you today?")
    recognizer = sr.Recognizer()
    with sr.Microphone() as source:
        audio = recognizer.listen(source)
    try:
        return recognizer.recognize_google(audio)
    except sr.UnknownValueError:
        print("Couldn't recognize audio")
    except sr.RequestError as e:
        print(
            "Couldn't request results from Google speech service : {}".format(
                e))
def main():
    """Recognize one utterance with Wit.ai and speak it back."""
    # obtain audio from the microphone
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source)
    # recognize speech using Wit.ai
    # SECURITY NOTE: API keys belong in config/environment, not in source.
    WIT_AI_KEY = "YVRDNX5MC5L6DJRZYU44MOIUWS56JDRW"
    try:
        speech_text = r.recognize_wit(audio, key=WIT_AI_KEY)
        print("Jarvis thinks you said '" + speech_text + "'")
    except sr.UnknownValueError:
        print("Jarvis could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Wit.ai service; {0}".format(e))
    else:
        # Only speak on success; the original called tts(speech_text)
        # unconditionally, raising NameError after a failed recognition.
        tts(speech_text)
def define_subject(speech_text):
    """Look a subject up on Wikipedia and read a short summary aloud.

    The trigger word 'define' is stripped from the recognized phrase and
    the remainder is used as the search term.
    """
    words = speech_text.split()
    if 'define' in words:   # tolerate phrases without the trigger word
        words.remove('define')
    # BUG FIX: join with spaces — the original ''.join turned
    # "define big bang" into "bigbang", breaking multi-word lookups.
    cleaned_message = ' '.join(words)
    try:
        wiki_data = wikipedia.summary(cleaned_message, sentences=5)
        # Repeatedly strip parenthesised asides (pronunciations etc.).
        regEx = re.compile(r'([^\(]*)\([^\)]*\) *(.*)')
        m = regEx.match(wiki_data)
        while m:
            wiki_data = m.group(1) + m.group(2)
            m = regEx.match(wiki_data)
        wiki_data = wiki_data.replace("'", "")
        tts(wiki_data)
        print(wiki_data)
    except wikipedia.exceptions.DisambiguationError as e:
        # BUG FIX: added the missing space between 'something' and 'from'
        # in both the spoken and printed fallback messages.
        tts('Can you please be more specific? You may choose something '
            'from the following.')
        print("Can you please be more specific? You may choose something "
              "from the following; {0}".format(e))
def main():
    """Entry point: load the profile, greet the user, and loop on STT.

    Dispatches to Google Speech Recognition or local PocketSphinx
    depending on the profile's `stt` setting, handing every recognized
    phrase to brain().
    """
    with open('profile.yaml') as profile:
        profile_data = yaml.safe_load(profile)
    r = sr.Recognizer()
    tts('Welcome ' + profile_data['name'] +
        ', systems are now ready to run. How can I help you?')

    if profile_data['stt'] == 'google':
        while True:
            with sr.Microphone() as source:
                r.adjust_for_ambient_noise(source)
                print("Say something!")
                audio = r.listen(source)
            try:
                speech_text = r.recognize_google(audio).lower().replace("'", "")
                print("Melissa thinks you said '" + speech_text + "'")
            except sr.UnknownValueError:
                print("Melissa could not understand audio")
            except sr.RequestError as e:
                print("Could not request results from Google Speech Recognition service; {0}".format(e))
            else:
                brain(profile_data, speech_text)

    elif profile_data['stt'] == 'sphinx':

        def sphinx_stt():
            """Decode recording.wav with PocketSphinx and return the text."""
            modeldir = profile_data['pocketsphinx']['modeldir']
            hmm = profile_data['pocketsphinx']['hmm']
            lm = profile_data['pocketsphinx']['lm']
            dic = profile_data['pocketsphinx']['dic']
            config = Decoder.default_config()
            config.set_string('-hmm', os.path.join(modeldir, hmm))
            config.set_string('-lm', os.path.join(modeldir, lm))
            config.set_string('-dict', os.path.join(modeldir, dic))
            config.set_string('-logfn', '/dev/null')  # silence decoder logging
            decoder = Decoder(config)
            stream = open('recording.wav', 'rb')
            in_speech_bf = False
            decoder.start_utt()
            while True:
                buf = stream.read(1024)
                if buf:
                    decoder.process_raw(buf, False, False)
                    # A transition out of speech marks the end of an utterance.
                    if decoder.get_in_speech() != in_speech_bf:
                        in_speech_bf = decoder.get_in_speech()
                        if not in_speech_bf:
                            decoder.end_utt()
                            speech_text = decoder.hyp().hypstr
                            # BUG FIX: Python 3 print() call (the original
                            # used a Python 2 print statement).
                            print(speech_text)
                            decoder.start_utt()
                else:
                    break
            decoder.end_utt()
            # BUG FIX: release the wav file handle (was leaked).
            stream.close()
            # NOTE(review): if no speech segment was detected, speech_text
            # is unbound here and raises NameError — confirm intended.
            return speech_text.lower().replace("'", "")

        while True:
            with sr.Microphone() as source:
                r.adjust_for_ambient_noise(source)
                print("Say something!")
                audio = r.listen(source)
            with open("recording.wav", "wb") as f:
                f.write(audio.get_wav_data())
            brain(profile_data, sphinx_stt())
import speech_recognition as sr
import yaml  # BUG FIX: yaml.safe_load was used below but yaml was never imported

from GreyMatter.SenseCells.tts import tts
from brain import brain

# Load the default user profile (closed automatically by `with`).
with open("profile.yaml.default") as profile:
    profile_data = yaml.safe_load(profile)

# Variables
name = profile_data['name']
city_name = profile_data['city_name']
city_code = profile_data['city_code']

tts('Welcome ' + name + ', systems are ready to run')


def main():
    """Capture one phrase on microphone device #1 and print what was heard."""
    r = sr.Recognizer()
    with sr.Microphone(1) as source:
        print('Speak')
        audio = r.listen(source)
    try:
        speech_text = r.recognize_google(audio).lower().replace("'", "")
        print("Vicky thinks you said '" + speech_text + "'")
    except sr.UnknownValueError:
        print("Vicky could not understand you")
    except sr.RequestError as e:
        print("Could not request results from Speech recognition service; {0}".
              format(e))
def how_are_you():
    """Answer 'how are you' with a random canned response."""
    moods = (
        "I am dying inside. Thanks for asking.",
        "I am okay as long as I serve you.",
        "I'm fine, thank you.",
    )
    tts(random.choice(moods))
def who_am_i(name):
    """Flatter the named user."""
    reply = "You are " + name + ", my brilliant creator. I worship you."
    tts(reply)
def open_firefox():
    """Announce and launch a new Firefox browser session."""
    # BUG FIX: corrected the misspelled spoken message ("Openning").
    tts("Opening Firefox")
    webdriver.Firefox()
elif speech_check(["how", "i", "look"]): general_conversation.how_am_i() elif speech_check(["what", "time", "now"]): time_teller.what_is_time() else: general_conversation.undefined() profile = open('profile.yaml') profile_data = yaml.safe_load(profile) profile.close() name = profile_data['name'] city_name = profile_data['city_name'] tts("Welcome " + name + ". The system is ready to serve you") def main(): r = sr.Recognizer() with sr.Microphone() as source: print("How can I help you?") audio = r.listen(source) try: speech_text = r.recognize_google(audio).lower().replace("'", "") print("Scarlett thinks you said,' " + speech_text + " '") brain(speech_text) except sr.UnknownValueError: print("Scarlett could not understand audio !!")
profile_data = yaml.safe_load(profile) profile.close() # Functioning Variables name = profile_data['name'] music_path = profile_data['music_path'] city_name = profile_data['city_name'] city_code = profile_data['city_code'] proxy_username = profile_data['proxy_username'] proxy_password = profile_data['proxy_password'] access_token = profile_data['twitter']['access_token'] access_token_secret = profile_data['twitter']['access_token_secret'] consumer_key = profile_data['twitter']['consumer_key'] consumer_secret = profile_data['twitter']['consumer_secret'] tts('Welcome ' + name + ', systems are now ready to run. How can I help you?') # Thanks to Jasper for passive code snippet. _audio = pyaudio.PyAudio() def getScore(data): rms = audioop.rms(data, 2) score = rms / 3 return score def fetchThreshold(): THRESHOLD_MULTIPLIER = 1.8 RATE = 16000 CHUNK = 1024 THRESHOLD_TIME = 1
render = web.template.render('templates/') urls = ( '/', 'index', ) profile = open('profile.yaml') profile_data = yaml.safe_load(profile) profile.close() # Functioning Variables name = profile_data['name'] tts('Welcome ' + name + ', systems are now ready to run. How can I help you?') class index: def GET(self): return render.index() def POST(self): x = web.input(myfile={}) filedir = os.getcwd( ) + '/uploads' # change this to the directory you want to store the file in. if 'myfile' in x: # to check if the file-object is created filepath = x.myfile.filename.replace( '\\', '/') # replaces the windows-style slashes with linux ones. filename = filepath.split(
def undefine():
    """Spoken fallback for commands the brain cannot map to an action."""
    reply = 'I do not know what that means!'
    tts(reply)
def undefined():
    """Spoken fallback for unrecognized commands."""
    reply = "I dont know what that means!"
    tts(reply)
def how_are_you():
    """Give a fixed polite answer to 'how are you'."""
    reply = 'I am fine, thank you.'
    tts(reply)
def where_born():
    """Answer questions about the assistant's origin."""
    # BUG FIX: corrected grammar in the spoken reply
    # ("a magician name Frank" -> "a magician named Frank").
    tts('I was created by a magician named Frank, in China')
def go_to_sleep():
    """Say goodbye and terminate the assistant."""
    tts("Goodbye! Have a great day!")
    # quit() is an interactive-shell helper injected by the site module and
    # may be absent (e.g. under -S); raise SystemExit directly instead —
    # same observable effect, no extra dependency.
    raise SystemExit
def mirror():
    """Stream Pushbullet events forever and read notifications aloud.

    Two event kinds are handled:
    - 'tickle': something changed server-side; the newest push is fetched
      over REST (limit=1) and its sender/body are spoken.
    - 'push' (non-dismissal): the mirrored notification's title is scanned
      to extract a sender name and a pending-message count, then a summary
      is spoken.
    """
    import json
    # NOTE(review): hard-coded access token — should live in config, not source.
    API_key = "o.0uXY4V8zygueZLziiZcaXA18ldTldaSl"
    url = "https://api.pushbullet.com/v2/pushes?limit=1"
    headers = {'Access-Token': API_key}
    # pb = Pushbullet(API_key)
    ws = websocket.create_connection(
        "wss://stream.pushbullet.com/websocket/{}".format(API_key))
    while True:
        # Each websocket frame is a JSON-encoded event.
        result = ws.recv()
        # print(result)
        # print(type(result))
        # result = ast.literal_eval(result)
        result = json.loads(result)
        # print(result)
        pprint(result)
        print(type(result))
        if result["type"] == "tickle":
            # 'tickle' only signals that something changed; fetch the most
            # recent push to learn what it was.
            r = requests.get(url, headers=headers)
            re = r.json()
            pprint(re)
            push = re['pushes']
            text = push[0]['body']
            name = push[0]['sender_name']
            tts("You have a new notification from {}. {} sent {}".format(
                name, name, text))
        elif result['type'] == 'push' and result['push'][
                'type'] != 'dismissal':
            push = result['push']
            application = push['application_name']
            text = push['body']
            sender = push['title']
            # Clear title: scan up to the first '(' or ':'; everything before
            # it is the sender name, and a character right after '(' is taken
            # as the pending-message count.
            no_msg = 0
            name = ''
            for i in range(len(sender)):
                if sender[i] == '(' or sender[i] == ':':
                    if sender[i] == ':':
                        no_msg = 1
                        break
                    else:
                        i += 1
                        # NOTE(review): eval() on notification-title text is
                        # unsafe and fragile — int() would be the safe fix.
                        try:
                            no_msg = eval(sender[i:i + 1])
                        except:
                            no_msg = eval(sender[i])
                        break
                else:
                    name += sender[i]
            if no_msg == 0:
                # No count found in the title; assume a single message.
                no_msg = 1
            print(name, no_msg, text, application)
            if 'call' in sender:
                # Incoming call handling
                by = push['body']
                msg = "You have an {} by {}".format(sender, by)
            else:
                # Incoming message handling. Tested for the following apps:
                # Whatsapp
                # Messenger
                msg = "You have {} new notification from {} by {}. {}".format(
                    no_msg, application, name, text)
            print(msg)
            tts(msg)
            pass
def who_are_you():
    """Introduce Melissa with a randomly selected line."""
    intros = (
        "I am Melissa, your slave for eternity.",
        "Melissa, did I not I tell you before?",
        "You imbecile. I am Melissa.",
    )
    tts(random.choice(intros))
def __speak_(self, msg):
    """Forward *msg* to the text-to-speech engine."""
    tts(msg)
def what_is_time():
    """Speak the current wall-clock time as HH:MM:SS."""
    now = datetime.strftime(datetime.now(), '%H:%M:%S')
    tts("The time is " + now)
def who_am_i(name):
    """Tell the user who they are, by name."""
    # BUG FIX: corrected the misspelled spoken word ("persion" -> "person").
    tts('You are ' + name + ', a brilliant person. I love you!')
# Functioning Variables name = profile_data["name"] music_path = profile_data["music_path"] images_path = profile_data["images_path"] city_name = profile_data["city_name"] city_code = profile_data["city_code"] proxy_username = profile_data["proxy_username"] proxy_password = profile_data["proxy_password"] access_token = profile_data["twitter"]["access_token"] access_token_secret = profile_data["twitter"]["access_token_secret"] consumer_key = profile_data["twitter"]["consumer_key"] consumer_secret = profile_data["twitter"]["consumer_secret"] client_id = profile_data["imgur"]["client_id"] client_secret = profile_data["imgur"]["client_secret"] tts("Welcome " + name + ", systems are now ready to run. How can I help you?") # Thanks to Jasper for passive code snippet. _audio = pyaudio.PyAudio() def getScore(data): rms = audioop.rms(data, 2) score = rms / 3 return score def fetchThreshold(): THRESHOLD_MULTIPLIER = 1.8 RATE = 16000