def stt(self):
    txt = dict()
    result = ''
    ret = False
    if self.real:
        result = wit.voice_query_auto(self.wit_token)
        ans = self.getKey(json.loads(result))
        if ans['intent'] == 'attention':
            self.logger.info('[*] Listening')
            self.playSound('../sounds/misc/beep_hi.wav')
            result = wit.voice_query_auto(self.wit_token)
            self.logger.info('[*] Done listening')
            self.playSound('../sounds/misc/beep_lo.wav')
            ret = True
        else:
            self.logger.info('[-] Error, no audio detected')
            ret = False
    else:
        print('You:')
        result = self.stdin.readline()
        result = wit.text_query(result, self.wit_token)
        ret = True
    txt = self.getKey(json.loads(result))
    if txt['intent'] == 'error':
        ret = False
    return txt, ret
def main():
    audio = pyaudio.PyAudio()
    hue = phue.Bridge('172.17.172.101')
    token = harmony.auth.login('*****@*****.**', '1yD27amH1')
    session_token = harmony.auth.swap_auth_token('172.17.172.100', 5222, token)
    harmony_client = harmony.client.create_and_connect_client('172.17.172.100', 5222, session_token)
    config = harmony_client.get_config()
    activity_map = {act['label']: act['id'] for act in config['activity']}

    while True:
        # triggered = listen_for_trigger(audio, 'Eddie')
        triggered = True
        if triggered:
            say('What can I do for you?')
            wit.init()
            query = wit.voice_query_auto(WIT_TOKEN)
            query = json.loads(query)
            process_query(hue, query, harmony_client, activity_map)

    # Wrapping up
    wit.close()
def application(self, message):
    """Main application logic of the module.

    Args:
        message (dict): received data as a dictionary

    Returns:
        dict: data to send back as dictionary
    """
    action = message['action']
    if action == 'listen':
        wit.init()
        response = wit.voice_query_auto(self.access_token)
        print('Response: {}'.format(response))
        print('msg_id')
        jresponse = json.loads(response)
        print(jresponse["_text"])
        wit.close()
        data = {'response': jresponse["_text"], 'JSON': response}
        # return result
        return data
    else:
        return False
def index(self):
    wit.init()
    response = wit.voice_query_auto(access_token)
    cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
    # cherrypy.response.headers['Content-Type'] = 'application/json'
    wit.close()
    return response
def application(self, message):
    """Main application logic of the module.

    Args:
        message (dict): received data as a dictionary

    Returns:
        dict: data to send back as dictionary
    """
    action = message['action']
    if action == 'listen':
        wit.init()
        response = wit.voice_query_auto(self.access_token)
        print('Response: {}'.format(response))
        print('msg_id')
        jresponse = json.loads(response)
        print(jresponse["_text"])
        wit.close()
        data = {'request': jresponse["_text"], 'JSON': response}
        # return result
        return data
    else:
        return False
def listen_and_interpret(rosrequest):
    rospy.logdebug("About to record audio")
    response = json.loads(wit.voice_query_auto(APIKEY))
    rospy.logdebug("Response: {0}".format(response))
    if not response:
        return None
    return parse_response(response, ListenAndInterpretResponse)
def take_command():
    tts.textToSpeech("How may I help you?")
    wit.init()
    # wit returns a JSON string; parse it before handing it to the intent parser
    commandJSON = json.loads(wit.voice_query_auto(tok))
    parsed = jsonparser.parse(commandJSON)
    verify(parsed)
    wit.close()
def query():
    access_token = ACCESS_TOKEN
    wit.init()
    response = json.loads(wit.voice_query_auto(access_token))
    print('Response: {}'.format(response))
    wit.close()
    return response
def get_wit_response():
    wit.init()
    response = wit.voice_query_auto(access_token)
    print('Raw: {}'.format(response))
    wit.close()
    if not response:
        return None
    response = json.loads(response)
    response = get_outcome_response(response)
    return response
def talker():
    pub = rospy.Publisher('chatter', String, queue_size=10)
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(1)  # 1 Hz
    while not rospy.is_shutdown():
        response = wit.voice_query_auto(access_token)
        # extract the "_text" field from the raw JSON response
        abc = szukaj("_text\":\"(.+?)\"", response)
        rospy.loginfo(abc)
        pub.publish(abc)
        rate.sleep()
    rate.sleep()
def ecoute(passage):
    # passage values: 1 = starting up, 2 = listening and waiting, 3 = command not understood, 4 = no message
    response = None
    if passage == 1:
        parole("Je suis za ton ecoute")  # "I'm listening" (phonetic spelling for the TTS)
    if passage == 2:
        parole("Je suis za l'ecoute pour une autre commande")  # "I'm listening for another command"
    if passage == 3:
        parole("Merci de reipaitai la commande, je n'ai pas compris")  # "Please repeat the command, I didn't understand"
    while response is None:
        response = wit.voice_query_auto(access_token)
    texte_json(response)
def speech_to_text():
    # user is prompted to talk
    speech_response = json.loads(wit.voice_query_auto(wit_access_token))  # response
    question = urllib.quote_plus(speech_response['_text'])
    resp = subprocess.check_output([
        'curl',
        'https://www.houndify.com/textSearch?query=' + question + '&clientId=e7SgQJ_wwXjv5cUx1nLqKQ%3D%3D&clientKey=Pi_smrHYQhCA_nLgukp4C4nnQE2WyQvk3l3Bhs8hcbchrLAmjl5LWS3ewq1U8LMser8j890OfhklwNm77baPTw%3D%3D',
        '-H', 'Accept-Encoding: gzip, deflate, sdch',
        '-H', 'Accept-Language: en-US,en;q=0.8',
        '-H', 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36',
        '-H', 'Accept: */*',
        '-H', 'Referer: https://www.houndify.com/try/986dcfd1-0b91-4346-a5a0-6d53f0d18da2',
        '-H', 'Cookie: houndify-sess=s%3Ar-94jGq48cQMay2q1fgRwSolHIV4ZQpk.Y3Wns0NNtM5LCgWUcaAc8MUdH3Z0elclREmfzZ%2BJzLY; _gat=1; _ga=GA1.2.1948120585.1453572520',
        '-H', 'Connection: keep-alive',
        '-H', 'Hound-Request-Info: {"ClientID":"e7SgQJ_wwXjv5cUx1nLqKQ==","UserID":"houndify_try_api_user","PartialTranscriptsDesired":true,"SDK":"web","SDKVersion":"0.1.6"}',
        '--compressed'])
    answer = json.loads(resp)
    talk_answer = answer["AllResults"][0]['SpokenResponseLong']
    # do something with answer
    # speak the answer
    espeak.synth(talk_answer)
    IS_TALKING = False
def mainQuery():
    # initialize WIT.AI
    wit.init()
    try:
        response = wit.voice_query_auto(WIT_AI_KEY)
        chooseIntent(response)
    except TypeError:
        main()
        wit.close()
        return
    wit.close()
def listen2(intents):
    # wait for the wake intent before entering active mode
    while True:
        response = wit.voice_query_auto(wit_access_token)
        response = json.loads(response)
        print(response)
        if len(response['outcomes']) > 0 and response['outcomes'][0]['intent'] == 'start_listen':
            break

    shutdown = False
    while not shutdown:
        print('\nActive Mode\n')
        response = wit.voice_query_auto(wit_access_token)
        response = json.loads(response)
        print('Response: {}'.format(response))
        for outcome in response['outcomes']:
            if outcome['intent'] in intents:
                if outcome['intent'] == 'youtube':
                    print('sending request')
                    intents[outcome['intent']](outcome['entities']['search_query'][0]['value'])
                elif intents[outcome['intent']]() == True:
                    return listen2(intents)
                break
def ecoute(passage):
    # passage values: 1 = starting up, 2 = listening OK and waiting, 3 = command not understood, 4 = no message
    response = None
    if passage == 1:
        parole("Je suis pret")  # "I'm ready"
    if passage == 2:
        parole("Je suis prêt pour une autre commande")  # "I'm ready for another command"
    if passage == 3:
        parole("Merci de répéter la commande, je n'ai pas compris")  # "Please repeat the command, I didn't understand"
    while response is None:
        response = wit.voice_query_auto(access_token)
    texte_json(response)
def main(interval=2):
    i = 0
    total_fucks = 0
    while i < interval:
        access_token = '5OOPLQECDO32JWXIAN5TAPE7JZ7J4UHX'
        wit.init()
        response = wit.voice_query_auto(access_token)
        parse_for_fucks = json.loads(response)
        total_fucks = total_fucks + count_fucks(parse_for_fucks)
        wit.close()
        i = i + 1
    return total_fucks
def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
    """
    Records until a second of silence or times out after 12 seconds
    Returns a list of the matching options or None
    """
    RATE = 16000
    CHUNK = 1024
    LISTEN_TIME = 12

    # check if no threshold provided
    if THRESHOLD is None:
        THRESHOLD = self.fetchThreshold()

    self._logger.warning("playing hi")
    self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

    # prepare recording stream
    # stream = self._audio.open(format=pyaudio.paInt16,
    #                           channels=1,
    #                           rate=RATE,
    #                           input=True,
    #                           frames_per_buffer=CHUNK)
    #
    # frames = []
    # increasing the range results in longer pause after command generation
    # lastN = [THRESHOLD * 1.2 for i in range(30)]
    # for i in range(0, RATE / CHUNK * LISTEN_TIME):
    #     data = stream.read(CHUNK)
    #     frames.append(data)
    #     score = self.getScore(data)
    #
    #     lastN.pop(0)
    #     lastN.append(score)
    #
    #     average = sum(lastN) / float(len(lastN))
    #     # TODO: 0.8 should not be a MAGIC NUMBER!
    #     if average < THRESHOLD * 0.8:
    #         break

    wit.init()
    response = wit.voice_query_auto(self.token)
    wit.close()

    self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))
    self._logger.warning("playing lo")

    # save the audio data
    # stream.stop_stream()
    # stream.close()
    # with tempfile.SpooledTemporaryFile(mode='w+b') as f:
    #     wav_fp = wave.open(f, 'wb')
    #     wav_fp.setnchannels(1)
    #     wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
    #     wav_fp.setframerate(RATE)
    #     wav_fp.writeframes(''.join(frames))
    #     wav_fp.close()
    #     f.seek(0)
    # return json.dumps(self.active_stt_engine.transcribe(f))

    return response
import wit

if __name__ == "__main__":
    wit.init()
    response = wit.voice_query_auto("ACCESS_TOKEN")
    print("Response: {}".format(response))
    wit.close()
def test_wit():
    wit.init()
    # response = wit.text_query('play music', wit_access_token)
    response = wit.voice_query_auto(wit_access_token)
    print('Response: {}'.format(response))
    wit.close()
#!/usr/bin/env python
import wit
# import wit2 as wit
import json

# access_token = 'NUFTF6VJH6EC3BN5S2EK2STCPYGKPPNR'  # Jakub Konrad database
access_token = '3LH5Q3E3KEIYYVINQKBLOL2QGAIQ2IRG'  # Jakub Drapela database

wit.init()
response = wit.voice_query_auto(access_token)
print('Response: {}'.format(response))
print('msg_id')
r = json.loads(response)
print(r["_text"])
wit.close()
    try:
        text = r.recognize_google(audio)
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
    return text

def handle_response(self, response):
    try:
        response = json.loads(response)
        if response['_text'] == 'stop':
            self.bot.stop_talking()
        # continuously call the listen function
        self.listen()
    except Exception as e:
        print(e)

if __name__ == '__main__':
    wit.init()

    # Let's hack!
    resp = wit.text_query('Turn on the lights', TOKEN)
    response = wit.voice_query_auto(TOKEN)
    print('Response: {}'.format(resp))
    print('Response: {}'.format(response))

    # Wrapping up
    wit.close()
if __name__ == "__main__": wit.init() pub = rospy.Publisher('chatter', Num, queue_size=10) rospy.init_node('talker', anonymous=True) rate = rospy.Rate(10) # 10hz while not rospy.is_shutdown(): #While(True) try: dataDict = wit.voice_query_auto(key) except: print "Sth wrong with end of speech detection" if not dataDict: continue; try: intent= dataDict["outcomes"][0]["intent"] print ("Try intent") try: number=dataDict["outcomes"][0]["entities"]['number'][0]['value'] print ("Try number") if(number): if not(number.isdigit()):
print("Google Speech Recognition could not understand audio") except sr.RequestError as e: print( "Could not request results from Google Speech Recognition service; {0}" .format(e)) return text def handle_response(self, response): try: response = json.loads(response) if response['_text'] == 'stop': self.bot.stop_talking() # continuously call the listen function self.listen() except Exception as e: print e if __name__ == '__main__': wit.init() # Let's hack! resp = wit.text_query('Turn on the lights', TOKEN) response = wit.voice_query_auto(TOKEN) print('Response: {}'.format(resp)) print('Response: {}'.format(response)) # Wrapping up wit.close()