def application(self, message):
    """Main application logic of the module.

    Args:
        message (dict): received data as a dictionary; must contain
            an 'action' key.

    Returns:
        dict: data to send back ({'request': transcript, 'JSON': raw
        response}), or False when the action is not 'listen'.
    """
    action = message['action']
    if action == 'listen':
        wit.init()
        try:
            response = wit.voice_query_auto(self.access_token)
            print('Response: {}'.format(response))
            # Removed leftover debug print('msg_id') — it printed the
            # literal string, not the message id.
            jresponse = json.loads(response)
            print(jresponse["_text"])
        finally:
            # Always release the wit session, even if decoding fails.
            wit.close()
        data = {'request': jresponse["_text"], 'JSON': response}
        # return result
        return data
    else:
        return False
def application(self, message):
    """Main application logic of the module.

    Args:
        message (dict): received data as a dictionary; must contain
            an 'action' key.

    Returns:
        dict: data to send back ({'response': transcript, 'JSON': raw
        response}), or False when the action is not 'listen'.
    """
    action = message['action']
    if action == 'listen':
        wit.init()
        try:
            response = wit.voice_query_auto(self.access_token)
            print('Response: {}'.format(response))
            # Removed leftover debug print('msg_id') — it printed the
            # literal string, not the message id.
            jresponse = json.loads(response)
            print(jresponse["_text"])
        finally:
            # Always release the wit session, even if decoding fails.
            wit.close()
        data = {'response': jresponse["_text"], 'JSON': response}
        # return result
        return data
    else:
        return False
def get_response():
    """Record ~2 seconds of audio, send it to Wit, and return the parsed JSON.

    Returns:
        dict: JSON-decoded Wit response (also printed for debugging).
    """
    wit.init()
    try:
        # TODO(review): hard-coded access token — move into configuration.
        wit.voice_query_start('I5FO3XJWG7M5NYLBTDIN44VUQ6YEOBGI')  # start recording
        time.sleep(2)  # let's speak for 2 seconds
        a = json.loads(wit.voice_query_stop())
    finally:
        # Release the wit session even when recording/decoding fails.
        wit.close()
    print(a)
    # Fix: the function is named get_response but previously returned None.
    return a
def main():
    """Main interaction loop: wait for a trigger, run one Wit voice query,
    and dispatch the parsed command to the home-automation backends."""
    audio = pyaudio.PyAudio()
    hue = phue.Bridge('172.17.172.101')
    # NOTE(review): credentials are hard-coded here — consider configuration.
    token = harmony.auth.login('*****@*****.**', '1yD27amH1')
    session_token = harmony.auth.swap_auth_token('172.17.172.100', 5222, token)
    harmony_client = harmony.client.create_and_connect_client(
        '172.17.172.100', 5222, session_token)
    config = harmony_client.get_config()
    activity_map = {activity['label']: activity['id']
                    for activity in config['activity']}
    while True:
        # triggered = listen_for_trigger(audio, 'Eddie')  # wake-word disabled
        triggered = True
        if not triggered:
            continue
        say('What can I do for you?')
        wit.init()
        raw_query = wit.voice_query_auto(WIT_TOKEN)
        parsed_query = json.loads(raw_query)
        process_query(hue, parsed_query, harmony_client, activity_map)
        # Wrapping up after handling one command.
        wit.close()
def index(self):
    """CherryPy handler: run one Wit voice query and return the raw JSON body.

    Returns:
        str: raw JSON response string from Wit.
    """
    wit.init()
    try:
        response = wit.voice_query_auto(access_token)
    finally:
        # Release the wit session even if the query raises.
        wit.close()
    # Allow cross-origin access for browser clients.
    cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
    # cherrypy.response.headers['Content-Type'] = 'application/json'
    return response
def main():
    """Event loop: capture a voice command through Wit and hand it to the
    query processor, using the Hue bridge and Harmony hub configured below."""
    audio = pyaudio.PyAudio()
    hue = phue.Bridge('172.17.172.101')
    # NOTE(review): hard-coded login credentials.
    token = harmony.auth.login('*****@*****.**', '1yD27amH1')
    session_token = harmony.auth.swap_auth_token('172.17.172.100', 5222, token)
    harmony_client = harmony.client.create_and_connect_client(
        '172.17.172.100', 5222, session_token)
    config = harmony_client.get_config()
    activity_map = {entry['label']: entry['id'] for entry in config['activity']}
    while True:
        # triggered = listen_for_trigger(audio, 'Eddie')  # trigger detection disabled
        triggered = True
        if triggered:
            say('What can I do for you?')
            wit.init()
            decoded = json.loads(wit.voice_query_auto(WIT_TOKEN))
            process_query(hue, decoded, harmony_client, activity_map)
            # Wrapping up
            wit.close()
def search_text(_text):
    """Send *_text* to Wit and return the entities of the first outcome.

    Args:
        _text (str): text to analyse.

    Returns:
        dict: the 'entities' mapping of the first outcome.
    """
    wit.init()
    try:
        # TODO(review): hard-coded access token — move into configuration.
        result = wit.text_query(_text, 'VAVQDA6WFDRZBY7W62HER5QTDTEHOOR2')
        j = json.loads(result)
    finally:
        # Release the wit session even if the query or decoding fails.
        wit.close()
    return j['outcomes'][0]['entities']
def take_command():
    """Prompt the user, run one Wit voice query, and act on the parsed command."""
    tts.textToSpeech("How may I help you?")
    wit.init()
    try:
        # The former "'{}'.format(...)" wrapper was a no-op; parse directly.
        commandJSON = json.loads(wit.voice_query_auto(tok))
        parsed = jsonparser.parse(commandJSON)
        verify(parsed)
    finally:
        # Release the wit session even if parsing/verification raises.
        wit.close()
def ask_wit(message):
    """Send *message* to Wit as a text query.

    Args:
        message (str): text forwarded to Wit.

    Returns:
        str: 'Response: <raw wit response>'.
    """
    # TODO(review): hard-coded access token — move into configuration.
    access_token = 'PWKWIJSS4XX3EJDMC6AHHI6N7VQ5JJXW'
    wit.init()
    try:
        # response = wit.voice_query_auto(access_token)  # voice alternative
        response = wit.text_query(message, access_token)
    finally:
        # Release the wit session even if the query raises.
        wit.close()
    return 'Response: {}'.format(response)
def __init__(self, wit_token, real, stdin):
    """Store configuration on the instance and bring up the Wit backend.

    Args:
        wit_token: Wit.ai access token used for queries.
        real: passed-through flag stored on the instance.
        stdin: input stream stored on the instance.
    """
    # Configure logging first so later steps can log.
    logging.basicConfig(level=logging.INFO)
    self.logger = logging.getLogger('Jarvis')
    self.real = real
    self.stdin = stdin
    self.wit_token = wit_token
    wit.init()
def __init__(self, wit_token, real, stdin):
    """Initialize the assistant: remember its settings and start Wit.

    Args:
        wit_token: Wit.ai access token.
        real: flag kept on the instance as-is.
        stdin: input stream kept on the instance as-is.
    """
    self.wit_token = wit_token
    self.real = real
    self.stdin = stdin
    logging.basicConfig(level=logging.INFO)
    self.logger = logging.getLogger('Jarvis')
    wit.init()
def query():
    """Run one Wit voice query and return the decoded response.

    Returns:
        dict: JSON-decoded Wit response.
    """
    access_token = ACCESS_TOKEN
    wit.init()
    try:
        response = json.loads(wit.voice_query_auto(access_token))
        print('Response: {}'.format(response))
    finally:
        # Release the wit session even if the query or decoding fails.
        wit.close()
    return response
def get_wit_response():
    """Run one Wit voice query and return the extracted outcome.

    Returns:
        The value of get_outcome_response() on the decoded response,
        or None when Wit returned nothing.
    """
    wit.init()
    try:
        response = wit.voice_query_auto(access_token)
        print('Raw: {}'.format(response))
    finally:
        # Release the wit session even if the query raises.
        wit.close()
    if not response:
        return None
    response = json.loads(response)
    response = get_outcome_response(response)
    return response
def getRequest(_text):
    """Query Wit with *_text* and return the first how_to_text entity value.

    Args:
        _text (str): text to analyse (lower-cased before sending).

    Returns:
        The entity's 'value' if present, otherwise None.
    """
    wit.init()
    try:
        # TODO(review): hard-coded access token — move into configuration.
        result = wit.text_query(_text.lower(), 'VAVQDA6WFDRZBY7W62HER5QTDTEHOOR2')
        j = json.loads(result)
        data = j['outcomes'][0]['entities']['how_to_text'][0]
    finally:
        # Release the wit session even if the query or decoding fails.
        wit.close()
    # check if the value exists; 'in' replaces dict.has_key(), which was
    # removed in Python 3.
    if 'value' in data:
        return data['value']
    else:
        return None
def setup():
    """One-time hardware initialization: ADC, LCD, GPIO pins and the Wit backend."""
    ADC.setup(0x48)  # I2C ADC at address 0x48
    LCD.init(0x27, 1)  # LCD at I2C address 0x27 (second arg presumably backlight — TODO confirm)
    LCD.write(0, 0, 'System startup...')
    time.sleep(1)  # leave the startup banner visible for a second
    LCD.clear()
    GPIO.setup(THERMISTOR_PIN, GPIO.IN)
    GPIO.setup(GAS_SENSOR_PIN, GPIO.IN)
    GPIO.setup(BUZZ_PIN, GPIO.OUT)
    GPIO.setup(H2O_PIN, GPIO.OUT)
    # NOTE(review): presumably HIGH = buzzer off (active-low) — confirm wiring.
    GPIO.output(BUZZ_PIN, GPIO.HIGH)
    wit.init()
def main():
    """Capture a fixed three-second voice query through Wit and dispatch
    the detected intent to checkStartup()."""
    # initialize WIT.AI
    wit.init()
    wit.voice_query_start(WIT_AI_KEY)
    time.sleep(3)  # fixed 3-second recording window
    raw_response = wit.voice_query_stop()
    wit.close()
    # case for the keyword voice command
    checkStartup(getIntent(raw_response))
def mainQuery():
    """Run one Wit voice query and dispatch its intent.

    Falls back to main() when handling raises TypeError (e.g. Wit
    returned None for the response).
    """
    # initialize WIT.AI
    wit.init()
    try:
        response = wit.voice_query_auto(WIT_AI_KEY)
        chooseIntent(response)
    except TypeError:
        main()
    finally:
        # One close in finally replaces the duplicated wit.close() calls
        # that the original executed on each exit path.
        wit.close()
    return
def text_command(text):
    """Parse *text* through Wit and hand the result to the command pipeline.

    Falls back to a voice query (take_command) when parse confidence is low.
    """
    import wit
    wit.init()
    # Let's hack!
    try:
        # The former "'{}'.format(...)" wrapper was a no-op; parse directly.
        commandJSON = json.loads(wit.text_query(text, tok))
    finally:
        # Release the wit session even if the query or decoding fails.
        wit.close()
    parsed = jsonparser.parseAlt(text, commandJSON)
    # if confidence is low, retry by voice; otherwise act on the command
    if parsed[1] < .5:
        take_command()
    else:
        verify(parsed)
def run(self, text, application=None):
    """Send *text* to Wit using the token registered for *application*.

    Args:
        text: query string forwarded to Wit.
        application: application name; falls back to
            self._default_application when omitted.

    Returns:
        dict: JSON-decoded Wit response.

    Raises:
        Exception: when no application can be resolved, or the resolved
            application has no API key registered.
    """
    if not application and self._default_application:
        app = self._default_application
    elif application:
        app = application
    else:
        raise Exception('Unknown application: %s' % application)
    # .get() already returns None for a missing key; no explicit default needed.
    access_token = self._applications.get(app)
    if not access_token:
        raise Exception('Missing API key for application %s' % application)
    wit.init()
    try:
        response = wit.text_query(text, access_token)
    finally:
        # Release the wit session even if the query raises.
        wit.close()
    return json.loads(response)
def main(interval=2):
    """Run *interval* voice queries and return the cumulative expletive count.

    Args:
        interval (int): number of voice queries to perform.

    Returns:
        int: total count across all queries.
    """
    # Hoisted: the token is constant, no need to rebind it per iteration.
    # TODO(review): hard-coded access token — move into configuration.
    access_token = '5OOPLQECDO32JWXIAN5TAPE7JZ7J4UHX'
    total_fucks = 0
    # for-range replaces the manual while-counter of the original.
    for _ in range(interval):
        wit.init()
        try:
            response = wit.voice_query_auto(access_token)
            total_fucks += count_fucks(json.loads(response))
        finally:
            # Release the wit session even if a query fails mid-run.
            wit.close()
    return total_fucks
def handle_query(text):
    """Send a text-based query to Wit.

    Args:
        text (str): query text.

    Returns:
        str: reply message (DEFAULT_MESSAGE unless a weather intent matched).
    """
    wit.init()
    message = DEFAULT_MESSAGE
    try:
        response = wit.text_query(text, private.WIT_API_TOKEN)
    finally:
        # The original never closed the wit session; close it here.
        wit.close()
    result = json.loads(response)
    # Guard the outcomes list BEFORE indexing into it — the original read
    # outcomes[0]['intent'] unconditionally, raising IndexError when Wit
    # returned no outcomes.
    if result['outcomes'] and 'entities' in result['outcomes'][0]:
        intent = result['outcomes'][0]['intent']
        entity = result['outcomes'][0]['entities']
        if 'get_weather' in intent:
            message = get_weather(entity)
    return message
def setup_wit():
    """Initialize the global wit backend; must run before any wit query."""
    wit.init()
def wit_function(access_token, string):
    """Send *string* to Wit as a text query and return the first outcome.

    Args:
        access_token (str): Wit.ai API token.
        string (str): text to analyse.

    Returns:
        dict: first element of the response's 'outcomes' list.
    """
    wit.init()
    try:
        response = json.loads(wit.text_query(string, access_token))
    finally:
        # Release the wit session even if the query or decoding fails.
        wit.close()
    return response['outcomes'][0]
def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
    """
    Records until a second of silence or times out after 12 seconds

    Returns a list of the matching options or None
    """
    # check if no threshold provided
    # NOTE(review): THRESHOLD is fetched but no longer used below, since the
    # local pyaudio capture was replaced by wit's own recording; the fetch is
    # kept in case fetchThreshold() has side effects — confirm and drop.
    if THRESHOLD is None:
        THRESHOLD = self.fetchThreshold()

    self._logger.warning("playing hi")
    self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))

    # Wit drives the microphone itself; the old hand-rolled pyaudio capture
    # (dead commented-out code) has been removed. Ensure the session is
    # always closed, even when the query raises.
    wit.init()
    try:
        response = wit.voice_query_auto(self.token)
    finally:
        wit.close()

    self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))
    self._logger.warning("playing lo")
    return response
import wit

if __name__ == "__main__":
    # Minimal smoke test of the wit bindings: one text query, printed raw.
    # NOTE(review): "ACCESS_TOKEN" is a placeholder string, not a real token.
    wit.init()
    response = wit.text_query("turn on the lights in the kitchen", "ACCESS_TOKEN")
    print("Response: {}".format(response))
    wit.close()
def test_wit():
    """Smoke-test the wit backend with a single automatic voice query."""
    wit.init()
    # response = wit.text_query('play music', wit_access_token)
    result = wit.voice_query_auto(wit_access_token)
    print('Response: {}'.format(result))
    wit.close()
def __init__(self):
    """Bring up the wit backend when the object is created."""
    wit.init()
response = wit.voice_query_stop() #print("Response: {}".format(response)) return response def pokaKolory(): for i in range(0,len(dostepneKolory)): kolor = "ZmienKolor " + dostepneKolory[i] coRob(kolor) def coRob(abc): rospy.loginfo(abc) pub.publish(abc) rate.sleep() if __name__ == "__main__": wit.init() pub = rospy.Publisher('chatter', String, queue_size=1) rospy.init_node('talker', anonymous=True) rate = rospy.Rate(1) # 10hz powtorzKomendy = "powtorzKomendy 8 3" PrzywolajZolwia = "PrzywolajZolwia" ZmianaKontroli = "Zmiana_kontroli 2" zmienKolor = "ZmienKolor czerwony" zmienKolor2 = "ZmienKolor niebieski" zmienKolor3 = "ZmienKolor zielony" zmienKolor4 = "ZmienKolor biały" Przod = "Przod" Tyl = "Tyl" Prawo = "Prawo" Lewo = "Lewo"
sendurl('LED1=ON', "J'ai allumé la petite lampe de la chambre") if val_On_Off == 'off': sendurl('LED1=OFF', "J'ai éteint la petite lampe de la chambre") if intent == 'lustre_chambre': if val_On_Off == 'on': sendurl('LEDA=ON', "J'ai allumé le lustre de la chambre") if val_On_Off == 'off': sendurl('LEDA=OFF', "J'ai éteint le lustre de la chambre") ecoute(2) # Fonction envoie de l'URL def sendurl(arg, texte): # arg argument passé à la requete URL, Texte = Texte parole suivant commande url = 'http://192.168.0.34/?' + arg request = urllib.urlopen(url) parole(texte) # Connexion du signal à notre fonction signal.signal(signal.SIGINT, fermer_programme) espeak.set_voice('fr', '', 1, 0, 0) # Initialisation language parole("Je me prépare") wit.init() #Lancement de wit # Mise en route et lancement de l'écoute ecoute(1)
"J'ai éteint la petite lampe de la chambre") if intent == 'lustre_chambre': if val_On_Off == 'on': sendurl('LEDA=ON', "J'ai allumé le lustre de la chambre") if val_On_Off == 'off': sendurl('LEDA=OFF', "J'ai éteint le lustre de la chambre") ecoute(2) # Fonction envoie de l'URL def sendurl( arg, texte ): # arg argument passé à la requete URL, Texte = Texte parole suivant commande url = 'http://192.168.0.34/?' + arg request = urllib.urlopen(url) parole(texte) # Connexion du signal à notre fonction signal.signal(signal.SIGINT, fermer_programme) espeak.set_voice('fr', '', 1, 0, 0) # Initialisation language parole("Je me prépare") wit.init() #Lancement de wit # Mise en route et lancement de l'écoute ecoute(1)