def callbacks():
    """Hotword callback: beep, stop the detector, record, run ASR, then re-arm."""
    global detector
    snowboydecoder.play_audio_file()  # audible cue that the hotword fired
    detector.terminate()  # release the microphone so RecordFun can use it
    RecordFun(record_file)
    res = AsrFun(record_file)  # NOTE(review): ASR result is unused here — confirm intended
    wake_up()  # restart hotword detection
def detected_callback(timeout=0):
    """Hotword callback: beep, stop the detector, handle one utterance, re-arm.

    `timeout` is accepted but unused by this implementation.
    """
    snowboydecoder.play_audio_file()  # audible cue that the hotword fired
    detector.terminate()  # free the audio device for listen()
    listen()
    wait_for_hotword()  # re-arm hotword detection
def rec(out_file, rec_time):
    """Record `rec_time` seconds of mono 16 kHz 16-bit audio into `out_file` (WAV).

    Plays a prompt sound just before recording starts.

    Args:
        out_file: path of the WAV file to write.
        rec_time: recording duration in seconds.
    """
    CHUNK = 1024
    FORMAT = pyaudio.paInt16  # 16-bit samples
    CHANNELS = 1              # mono
    RATE = 16000              # 16 kHz sample rate
    p = pyaudio.PyAudio()
    # Open the capture stream.
    stream = p.open(
        format=FORMAT,
        channels=CHANNELS,
        rate=RATE,
        input=True,
        frames_per_buffer=CHUNK)
    print("666")
    snowboydecoder.play_audio_file()
    frames = []  # recorded audio chunks
    try:
        for _ in range(0, int(RATE / CHUNK * rec_time)):
            # Don't raise on input overflow; an occasional dropped frame is
            # acceptable for this use case.
            data = stream.read(CHUNK, exception_on_overflow=False)
            frames.append(data)
    finally:
        # FIX: always release the audio device, even if reading fails
        # (previously an exception mid-loop leaked the stream and PyAudio).
        stream.stop_stream()
        stream.close()
        p.terminate()
    # Save the WAV; the context manager guarantees the file is closed.
    with wave.open(out_file, 'wb') as wf:
        wf.setnchannels(CHANNELS)
        wf.setsampwidth(p.get_sample_size(FORMAT))
        wf.setframerate(RATE)
        wf.writeframes(b''.join(frames))
def play_ding():
    """If no speech is already in progress, ding, capture a command and answer it."""
    if not cek_talking():
        # FIX: Python 2 `print` statement rewritten as a function call so the
        # code is also valid on Python 3 (output identical on both).
        print("Play ding")
        snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
        respn = get_speech()                # user's spoken command
        spik = find_cmd(a.respond(respn))   # map the bot response to an action
        talk(spik)                          # speak the result
def main(FILE_PATH):
    """Send an audio file to the NLP endpoint, report the answer and speak it via TTS.

    Posts the recording, extracts the first NLP intent from the response,
    forwards the dialogue text over a socket, then synthesizes and plays the
    answer. Prints a no-voice message when no NLP intent is found.
    """
    r = requests.post(URL, headers=buildHeader(), data=readFile(FILE_PATH))
    dic_json = r.json()
    for i in dic_json['data']:
        if i['sub'] == "nlp" and i['intent'] != {}:
            # FIX: narrowed from a bare `except:` — only missing/odd-shaped
            # answer fields should trigger the canned fallback reply.
            try:
                text = i['intent']['answer']['text']
            except (KeyError, TypeError):
                text = "不明白你再说什么,你还是说中文吧"
            print('我 :' + i['intent']['text'])
            print('机器人 :' + text)
            s = socket.socket()  # creat socket
            try:
                s.connect((host, port))  # connect serve
                s.send(('我 :' + i['intent']['text'] + '<br/>机器人 :' + text).encode('UTF-8'))
            finally:
                # FIX: always close the socket, even if connect/send fails.
                s.close()
            r = requests.post(TTS_URL, headers=getHeader(), data={'text': text})
            contentType = r.headers['Content-Type']
            if contentType == "audio/mpeg":
                sid = r.headers['sid']
                if AUE == "raw":
                    writeFile("tts.wav", r.content)
                else:
                    writeFile("tts.mp3", r.content)
            else:
                print(r.text)
            snowboydecoder.play_audio_file("tts.wav")
            os.remove("tts.wav")  # FIX: portable, no shell spawn (was os.system("rm ..."))
            break
    else:
        # for-else: no NLP intent was found in any entry.
        print('没有检测到人声')
def listener():
    """Record a command, send it to wit.ai, and dispatch the recognized intent."""
    global detector
    global client
    detector.terminate()  # free the audio device for recording
    print('Started Recording')
    snowboydecoder.play_audio_file()
    record_to_file('jarvis_detect.wav')
    snowboydecoder.play_audio_file()
    snowboydecoder.play_audio_file('jarvis_detect.wav')  # play back what was heard
    print('Detected! Sending...')
    # FIX: the WAV payload must be read in binary mode ('rb') — text mode can
    # corrupt the bytes and fails outright on Python 3; `with` guarantees the
    # handle is closed after the upload (it was never closed before).
    # SECURITY NOTE(review): the bearer token is hard-coded — move to config.
    with open('jarvis_detect.wav', 'rb') as f:
        response = requests.post(
            url='https://api.wit.ai/speech?v=20160526',
            data=f,
            headers={'Authorization': 'Bearer Z4EWWHOU5UHRAL22EZ4CFO3RYPV7RSYJ',
                     'Content-Type': 'audio/wav'})
    j = response.json()
    print('Response: ' + str(j))
    if 'intent' in j['entities'] and j['entities']['intent']:
        intent = j['entities']['intent'][0]['value']
        if intent == 'play_music':
            print('playing music........')
            response = requests.post(
                url='http://mopidy.musky.duckdns.org/mopidy/rpc',
                data='{"jsonrpc": "2.0", "id": 1, "method": "core.playback.play"}')
            print('Respo:' + str(response))
        elif intent == 'pause_music':
            print('pausing music........')
            response = requests.post(
                url='http://mopidy.musky.duckdns.org/mopidy/rpc',
                data='{"jsonrpc": "2.0", "id": 1, "method": "core.playback.pause"}')
            print('Respo:' + str(response))
        else:
            # Unknown intent: acknowledge with the fallback sound.
            snowboydecoder.play_audio_file("resources/dong.wav")
    else:
        # No intent recognized at all.
        snowboydecoder.play_audio_file("resources/dong.wav")
    decoder_loop()  # re-arm hotword detection
def openKitchen():
    """Handle the 'Kitchen' command: ding, update the LCD, pulse servo channel 0."""
    print("Kitchen")
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
    os.system("lcd 4 5 Kitchen")  # write the label to the LCD via a shell helper
    pwm.set_pwm(0, 0, 300)        # move servo channel 0 to the "open" position
    time.sleep(2)                 # hold for two seconds
    pwm.set_pwm(0, 0, servo_max)  # return the servo to its rest position
def openOther():
    """Handle the 'Other' command: dong, update the LCD, pulse servo channel 1."""
    print("Other")
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)
    os.system("lcd 4 5 Other")    # write the label to the LCD via a shell helper
    pwm.set_pwm(1, 0, 302)        # move servo channel 1 to the "open" position
    time.sleep(2)                 # hold for two seconds
    pwm.set_pwm(1, 0, servo_max)  # return the servo to its rest position
def callbacks():
    """Custom hotword callback: ding, stop the detector, run speech, re-arm."""
    global detector
    print("Wake UP!....")
    snowboydecoder.play_audio_file()  # ding
    detector.terminate()  # close the detector so Speech() can own the microphone
    Speech(access_token)
    wake_up()  # restart hotword detection
def test():
    """Hotword callback: record, transcribe, act on Korean keywords, then re-arm."""
    print("hi")
    snowboydecoder.play_audio_file()
    detector.terminate()  # free the microphone for recording
    file_name = make_sound_file.main()
    return_text = stt.main(file_name)
    print(return_text)
    # FIX: the original mixed `.find(x) != -1` and `.find(x) >= 0`; the `in`
    # operator expresses the same membership test with one consistent idiom.
    if "안녕" in return_text:  # "hello"
        file_name = tts.main("안녕하세요")
        os.system("mpg123 {}".format(file_name))
    elif "날씨" in return_text:  # "weather"
        temp, humi = weather_api.main()
        weather_text = "온도는 {}도, 습도는 {}% 입니다.".format(temp, humi)
        file_name = tts.main(weather_text)
        os.system("mpg123 {}".format(file_name))
    elif "조명" in return_text:  # "light"
        if "밝게" in return_text:  # "brighter"
            ser.write(led_on_str.encode())
        elif "어둡게" in return_text:  # "dimmer"
            ser.write(led_off_str.encode())
    # Re-arm hotword detection with this function as its own callback.
    detector.start(detected_callback=test,
                   interrupt_check=interrupt_callback,
                   sleep_time=0.03)
def callbacks():
    """Hotword callback: beep, stop the detector, run the main routine, re-arm."""
    global detector
    snowboydecoder.play_audio_file()  # audible cue that the hotword fired
    detector.terminate()  # free the audio device for hebing.main()
    hebing.main()
    wake_up()  # restart hotword detection
def startNLP(model):
    """Run the hotword -> Google Assistant loop for the given Snowboy model."""
    #model = sys.argv[1]
    def detectedCallback():
        # Hotword heard: hand the audio device to the assistant, then re-arm.
        detector.terminate()  # So google Assistant can use audio device
        snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
        assistant.startAssist()
        snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)
        detector.start(detected_callback=detectedCallback,
                       interrupt_check=interrupt_callback,
                       sleep_time=0.03)

    i2c = I2C(SLAVE_ADDR)
    assistant = GoogleAssistant(i2c)
    # capture SIGINT signal, e.g., Ctrl+C
    signal.signal(signal.SIGINT, signal_handler)
    # The obj contains the hotword detection; startup chime (ding + dong).
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)
    detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5)
    print('Listening... Press Ctrl+C to exit')
    # main loop
    detector.start(
        detected_callback=detectedCallback,  #snowboydecoder.play_audio_file,
        interrupt_check=interrupt_callback,
        sleep_time=0.03)
    detector.terminate()
def vad_from_queue(self):
    """Pull audio frames from the queue, run VAD, send voiced audio to Baidu ASR.

    Returns the collected voiced segments (falsy when no speech was found).
    """
    vad = webrtcvad.Vad(1)  # aggressiveness 1 (0 = least, 3 = most aggressive)
    print('waiting for order')
    snowboydecoder.play_audio_file()
    # Collect voiced frames: 16 kHz audio, 30 ms frames, 300 ms padding window.
    segments = vad_collector(16000, 30, 300, vad, self.q, False)
    if segments:
        # dev_pid 1936 — presumably Baidu's Mandarin far-field model; confirm
        res = self.baidu_client.asr(segments, 'wav', 16000, {
            'dev_pid': 1936,
        })
        if res and res['err_no'] == 0:
            # Join the first recognition result, dropping commas.
            asr = ''.join(res['result'][0].split(','))
            if len(asr) > 0:
                print('')
                self.post(asr)  # forward the recognized text downstream
    print('waiting for hotword')
    return segments
def listen_for_command():
    """Record 4 s of speech, recognize it with Google, run matching actions, re-arm."""
    detector.terminate()  # free the microphone for speech_recognition
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print('Listening...')
        snowboydecoder.play_audio_file()  # start-of-recording cue
        audio = r.record(source, duration=4)
        snowboydecoder.play_audio_file()  # end-of-recording cue
        print('Done Listening...')
    try:
        command = r.recognize_google(audio).lower()
        print('You said: ' + command + '\n')
        if 'camera' in command:
            talkToMe('turning cameras on')
        if 'play' in command:
            os.system('mpg123 inmyfeelings.mp3')
    except sr.UnknownValueError:
        # Recognition failed; report and fall through to re-arming.
        print('Your last command couldn\'t be heard')
    # Re-open the audio stream and re-arm hotword detection with this
    # function as its own callback.
    detector.open_stream()
    detector.start(detected_callback=listen_for_command,
                   interrupt_check=interrupt_callback,
                   sleep_time=0.03)
def conversation():
    """Drive a voice conversation: listen with patience counters, query
    Dialogflow, speak the reply, and run any local action for the intent.

    The `count` bookkeeping implements a give-up limit: repeated silent
    attempts move the counter toward `wait_limit` (positive direction) or
    `-wait_limit*2` (negative direction); hitting either bound ends the loop.
    """
    #while True:
    snowboydecoder.play_audio_file()
    count = 0
    while True:
        user_said = None
        # Inner loop: keep listening until something was said or we give up.
        while (user_said == None):
            detector.terminate()
            print('now say...')
            user_said, talking = VoiceUsingChrome.chrome_detect()
            if (user_said == None and count > 0 and not talking):
                count = count + 1
                print('Terminate listening: ', wait_limit - count)
                if (count == wait_limit):
                    break
            if (user_said == None and count <= 0 and not talking):
                count = count - 1
                print('Terminate listening: ', wait_limit + count)
                if (count == -wait_limit * 2):
                    break
        # Propagate the give-up condition out of the outer loop too.
        if (count == wait_limit):
            break
        if (count == -wait_limit * 2):
            break
        else:
            count = 1  # speech heard: reset toward the "engaged" state
        print('\n\n')
        print(user_said)
        try:
            a = datetime.datetime.now()
            dialogflow_response = dialog(user_said)
            b = datetime.datetime.now()
            print("dialogflow time: ", b - a)
            save_and_load_speech(dialogflow_response)
            mixer.music.play()
            # Only act when Dialogflow says the action is complete.
            if not response_json["result"]['actionIncomplete']:
                local_response = take_action(response_json['result']['metadata']['intentName'])
                # Wait for the current utterance to finish before reloading.
                while (mixer.music.get_busy()):
                    time.sleep(.1)
                mixer.music.load("test2.mp3")
                if local_response != '':
                    save_and_load_speech(local_response)
                    mixer.music.play()
                    while (mixer.music.get_busy()):
                        time.sleep(.1)
                    mixer.music.load("test2.mp3")
            while (mixer.music.get_busy()):
                time.sleep(.1)
            mixer.music.load("test2.mp3")
            print("end")
        except KeyboardInterrupt:
            raise
        except:
            # NOTE(review): bare except deliberately keeps the loop alive on
            # any Dialogflow/audio failure — consider logging the error.
            pass
def detectedCallback(): detector.terminate() # So google Assistant can use audio device snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING) assistant.startAssist() snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG) detector.start(detected_callback=detectedCallback, interrupt_check=interrupt_callback, sleep_time=0.03)
def detect_callback():
    """Hotword callback: ding, run the assistant, dong, then re-arm detection."""
    detector.terminate()  # free the audio device for the assistant
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
    assistant.assist()
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)
    # Re-arm hotword detection with this function as its own callback.
    detector.start(detected_callback=detect_callback,
                   interrupt_check=interrupt_callback,
                   sleep_time=0.03)
def onhotword():
    """Play a prompt beep, record up to 5 s (silence-trimmed) via sox, then beep again."""
    snowboydecoder.play_audio_file(
        "/home/homeautomation/homeassistant/media/computerbeep_5.wav")
    # sox `rec`: 16 kHz, max 5 s, stops after ~1.2 s of silence below 3% level.
    os.system(
        "AUDIODEV=hw:1,0 rec /tmp/recording.wav rate 16k trim 0 5 silence -l 1 0.1 3% 1 1.2 3%"
    )
    snowboydecoder.play_audio_file(
        "/home/homeautomation/homeassistant/media/computerbeep_65.wav")
def audioRecorderCallback(fname):
    """Transcribe the recorded file, query Dialogflow, and drive the light GPIO.

    Deletes `fname` when done.
    """
    snowboydecoder.play_audio_file()
    # FIX: the block mixed Python 2 `print` statements with Python 3 `print()`
    # calls (a syntax error on Python 3); all normalized to calls, output
    # unchanged.
    print("converting audio to text")
    r = sr.Recognizer()
    r.pause_threshold = 0.8
    r.phrase_threshold = 0.3
    r.non_speaking_duration = 0.5
    with sr.AudioFile(fname) as source:
        audio = r.record(source)  # read the entire audio file
    try:
        query = r.recognize_google(audio, language="zh-TW")
        print(query)
        lang = 'zh-tw'
        session_id = str(uuid.uuid1())
        timezone = 'Asia/Taipei'
        authorization = '<FIXME>'  # TODO: move the Dialogflow token to config
        headers = {"accept": "application/json",
                   "authorization": authorization}
        url = 'https://api.dialogflow.com/v1/query?v=20180712'
        params = {'query': str(query), 'lang': lang, 'sessionId': session_id,
                  'timezone': timezone}
        response = requests.request("GET", url, headers=headers, params=params)
        data = json.loads(response.text)
        status = data['status']['code']
        print("Status: {}".format(status))
        if status == 200:
            resolveQuery = data['result']['resolvedQuery']
            fulfillment = data['result']['fulfillment']['speech']
            print("Query: {}".format(resolveQuery))
            print("Response: {}".format(fulfillment))
            if fulfillment == 'turn_on_light_ok':
                GPIO.output(32, GPIO.HIGH)
            # elif fulfillment == 'turn_off_light_ok':
            # NOTE(review): any other fulfillment (not only turn_off_light_ok)
            # turns the light off — confirm this is intended.
            else:
                GPIO.output(32, GPIO.LOW)
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
    os.remove(fname)  # clean up the temporary recording
def detected():
    """Hotword callback: blink the LED on pin 22, ding, run the assistant, re-arm."""
    detector.terminate()  # free the audio device for the assistant
    GPIO.output(22, GPIO.HIGH)  # short LED blink as a visual cue
    time.sleep(.05)
    GPIO.output(22, GPIO.LOW)
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
    gassist.assist()
    # NOTE(review): re-arms with `callbacks`, not `detected` — confirm intended.
    detector.start(detected_callback=callbacks,
                   interrupt_check=interrupt_callback,
                   sleep_time=0.03)
def start():
    """Start the worker thread once; subsequent calls are no-ops while it runs."""
    global isWork
    if not isWork:
        isWork = True  # guard against starting a second thread
        print('called')
        m = myThread(1, 'Hello', 1)
        m.setDaemon(True)  # don't block interpreter exit on this thread
        threads.append(m)
        snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
        m.start()
def detected_callback():
    """Hotword callback: capture images and e-mail them, best-effort."""
    snowboydecoder.play_audio_file()
    # FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit; narrowed
    # to Exception while keeping the best-effort behavior. Python 2 `print`
    # statements rewritten as calls (output identical, Python 3 compatible).
    try:
        capture()
    except Exception:
        print("Error when capturing images")
    try:
        send()
    except Exception:
        print("Error when sending emails")
    print("Continue listening... Press Ctrl+C to exit")
def callbacks(): global detector # 关闭snowboy功能 detector.terminate() # 语音识别 snowboydecoder.play_audio_file() # 打开snowboy功能 wake_up() # wake_up —> monitor —> wake_up 递归调用
def main_handler():
    """Hotword callback: duck the player, take a voice query, dispatch it."""
    youtube.change_player_volume(5)  # duck playback while listening
    snowboydecoder.play_audio_file()
    try:
        query = assistant.recognize_from_mic()
        youtube.change_player_volume(youtube.current_player_volume)  # restore volume
        print('[*] Sending request...')
        qh.handle_query(query)
    # FIX: narrowed from a bare `except:` so Ctrl+C (KeyboardInterrupt) is no
    # longer swallowed; recognition/dispatch failures remain best-effort.
    except Exception:
        pass
    print('Listening... Press Ctrl+C to exit')
def play_ding():
    """When awake and not mid-question or talking, ding, take a command, answer it."""
    global sleep_state, yesno_state
    if not yesno_state:          # not waiting on a yes/no answer
        if not sleep_state:      # not in sleep mode
            if not cek_talking():  # not already speaking
                # FIX: Python 2 `print` statements rewritten as calls;
                # %-formatting keeps the output byte-identical on both
                # Python 2 and 3 (a comma arg would print a tuple on Py2).
                print("Play ding")
                snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
                respn = get_speech()
                print("respn: %s" % (respn,))
                spik = find_cmd(a.respond(respn))
                print("spik: %s" % (spik,))
                talk(spik)
def wakeupCallbacks(self): print(">> 语音唤醒") # 语音唤醒后,提示ding两声 snowboydecoder.play_audio_file() snowboydecoder.play_audio_file() # 关闭snowboy功能 self.detector.terminate() # 开启语音识别 ## 这里放唤醒后要执行的函数 self.mainSpeechFunc() # 打开snowboy功能 self.waitUntilAwakened() # wake_up —> monitor —> wake_up 递归调用
def detect_callback():
    """Hotword callback: update the UI label, run the assistant, re-arm unless stopped."""
    detector.terminate()  # free the audio device for the assistant
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
    gen_canvas[4]["text"] = "LISTENING ...."  # show listening state in the UI
    assist.assist(gen_canvas)
    gen_canvas[4]["text"] = "SAY ROGHEC"      # back to the idle prompt
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)
    print(stop_id())
    # Only re-arm when no stop was requested.
    if not stop_id():
        detector.start(detected_callback=detect_callback,
                       interrupt_check=interrupt_callback,
                       sleep_time=0.03)
def callbacks():
    """Hotword callback: double beep, announce wake-up, run voice control, re-arm."""
    global detector
    snowboydecoder.play_audio_file()
    snowboydecoder.play_audio_file()
    os.system('mplayer %s' % 'music/wake_up.mp3')  # spoken wake-up prompt
    detector.terminate()  # free the audio device for monitor()
    monitor.monitor()  # speech recognition and voice control
    # snowBoy -> monitor -> snowBoy recursion works around the audio IOError.
    snowBoy()
def passivewake():
    """Passive wake: beep, write 'passive' to the status file, then SIGINT self."""
    snowboydecoder.play_audio_file()
    # Open the status file non-blocking for read+write.
    wfd = os.open(STATUSTXT, os.O_NONBLOCK | os.O_RDWR)
    # NOTE(review): os.open raises OSError on failure, so this C-style check
    # can never fire; kept for parity with the original behavior.
    # FIX: Python 2 `print` statements rewritten as calls (identical output),
    # and the payload written as bytes — b"passive" is the same object as
    # "passive" on Python 2 and is what os.write requires on Python 3.
    if wfd < 0:
        print("open error\n")
    if os.write(wfd, b"passive") == -1:
        print("write error")
    os.close(wfd)
    # Interrupt the main loop (handled like Ctrl+C by the SIGINT handler).
    os.kill(os.getpid(), signal.SIGINT)
def state_menu(): print('MENU') # Reduce volume player.audio_set_volume(vol_low) # Play menu tone snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING) # Start detection for commands command_detector.start(detected_callback=callbacks, interrupt_check=interrupt_callback, sleep_time=0.03) # Stop detection detector.terminate()
def state_sleep(): print('SLEEP') # Restore volume to normal player.audio_set_volume(vol) # Play sleep tone snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG) # Start detection for hotword detector.start(detected_callback=state_menu, interrupt_check=interrupt_callback, sleep_time=0.03) # Stop detection detector.terminate()
def hotword_detected_callback():
    """Hotword callback: log the detection and play the ding cue."""
    print("!Hotword Detected")
    snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
# FIX: was `interrupted = True`, which made interrupt_callback() return True
# immediately, so detector.start() exited before ever listening. It must start
# False; the SIGINT handler is what flips it to True on Ctrl+C.
interrupted = False


def interrupt_callback():
    """Tell the detector loop whether it should stop (set by the SIGINT handler)."""
    global interrupted
    return interrupted


if len(sys.argv) != 3:
    print("Error: need to specify 2 model names")
    print("Usage: python demo.py 1st.model 2nd.model")
    sys.exit(-1)

models = sys.argv[1:]

# capture SIGINT signal, e.g., Ctrl+C
signal.signal(signal.SIGINT, signal_handler)

sensitivity = [0.5] * len(models)
detector = snowboydecoder.HotwordDetector(models, sensitivity=sensitivity)
# One callback per model: the first model dings, the second dongs.
callbacks = [lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING),
             lambda: snowboydecoder.play_audio_file(snowboydecoder.DETECT_DONG)]
print('Listening... Press Ctrl+C to exit')

# main loop
# make sure you have the same numbers of callbacks and models
detector.start(detected_callback=callbacks,
               interrupt_check=interrupt_callback,
               sleep_time=0.03)

detector.terminate()