def main():
    """Record audio until the button is pressed again, then play it back.

    The button drives all three phases: start recording, stop recording,
    and trigger playback; the LED shows the current state.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--filename", "-f", default="~/recording.wav")
    args = parser.parse_args()
    # Bug fix: '~' is not expanded by open()/record_file(), so the default
    # path would fail. Expand it explicitly.
    filename = os.path.expanduser(args.filename)

    with Board() as board:
        print("Press button to start recording.")
        board.led.state = Led.BEACON
        board.button.wait_for_press()

        done = threading.Event()
        board.led.state = Led.BLINK
        board.button.when_pressed = done.set

        def wait():
            # Runs while record_file() captures audio; returning (when the
            # button sets the event) stops the recording.
            start = time.monotonic()
            while not done.is_set():
                duration = time.monotonic() - start
                print("Recording: %.02f seconds [Press button to stop]" % duration)
                time.sleep(0.5)

        record_file(AudioFormat.CD, filename=filename, wait=wait, filetype="wav")

        print("Press button to play recorded sound.")
        board.led.state = Led.BEACON
        board.button.wait_for_press()

        print("Playing...")
        board.led.state = Led.ON
        play_wav(filename)
        print("Done.")
def main():
    """Record until the button is pressed again, then play the file back."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='enregistrement.wav')
    args = parser.parse_args()

    with Board() as board:
        print('Press button to start recording.')
        board.button.wait_for_press()

        done = threading.Event()
        board.button.when_pressed = done.set

        def wait():
            # Progress printer; returns once the button sets `done`.
            started_at = time.monotonic()
            while not done.is_set():
                elapsed = time.monotonic() - started_at
                print('Recording: %.02f seconds [Press button to stop]' % elapsed)
                time.sleep(0.5)

        record_file(AudioFormat.CD, filename=args.filename, wait=wait,
                    filetype='wav')

        print('Press button to play recorded sound.')
        board.button.wait_for_press()
        print('Playing...')
        play_wav(args.filename)
        print('Done.')
def record_candy():
    """Record 300 seconds of CD-quality audio, then play it back.

    Bug fix: record_file()'s `wait` parameter must be a callable that
    returns when recording should stop; the original passed the integer
    300, which raises TypeError when record_file tries to call it. The
    intended fixed duration is expressed as a time.sleep() callable.
    (Dead commented-out button/LED code removed.)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()
    record_file(AudioFormat.CD, filename=args.filename,
                wait=lambda: time.sleep(300), filetype='wav')
    print('Playing...')
    play_wav(args.filename)
    print('Done.')
def play_candy(self):
    """Play back the recorded scene (refusing to eat durian) as a .wav."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--filename', '-f', default='recording.wav')
    parsed = arg_parser.parse_args()
    print('Playing...')
    play_wav(parsed.filename)
    print('Done.')
def play_birthday(self):
    """Play the pre-recorded birthday song .wav."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--filename2', '-f', default='birthday.wav')
    parsed = arg_parser.parse_args()
    print('Playing...')
    play_wav(parsed.filename2)
    print('Done.')
def play_sound():
    """Play the test sound and return whether the user wants a repeat."""
    print('Playing a test sound...')
    play_wav(TEST_SOUND_PATH)
    # Same result as the original if/return-True/return-False ladder.
    return bool(ask('Again?'))
def check_speaker_works():
    """Play the test sound; report ERROR_NO_SPEAKER_SOUND if unheard."""
    print('Playing a test sound...')
    play_wav(TEST_SOUND_PATH)
    if ask('Did you hear the test sound?'):
        return True
    error(ERROR_NO_SPEAKER_SOUND)
    return False
def check_speaker_works():
    """Verify the speaker by playing a test sound and asking the user."""
    print('Playing a test sound...')
    play_wav(TEST_SOUND_PATH)
    heard = ask('Did you hear the test sound?')
    if not heard:
        error(ERROR_NO_SPEAKER_SOUND)
        return False
    return True
def main():
    """Entry point: poll the TV connection state and route button presses.

    connection_status values: 0 = disconnected (button retries connection),
    2 = connected (button shuts down, voice commands are processed);
    1 is presumably "connecting", set by conn_attempt() -- TODO confirm.
    """
    global board, connection_status
    connection_status = 0
    board.led.state = Led.OFF
    play_wav('bell.wav')
    print('starting...')
    # NOTE(review): busy loop with no sleep -- spins at full CPU while
    # polling connection_status; also pokes the private _pressed_callback
    # attribute of the button instead of a public API.
    while True:
        if connection_status == 2:
            board.button._pressed_callback = finish
            voiceToTvCmd()
        elif connection_status == 0:
            board.button._pressed_callback = conn_attempt
def check_microphone_works():
    """Record a short clip, play it back, and have the user confirm it."""
    with tempfile.NamedTemporaryFile() as tmp:
        input('When you are ready, press Enter and say "Testing, 1 2 3"...')
        print('Recording for %d seconds...' % RECORD_DURATION_SECONDS)
        record_file(AudioFormat.CD, filename=tmp.name,
                    wait=lambda: time.sleep(RECORD_DURATION_SECONDS),
                    filetype='wav')
        print('Playing back recorded audio...')
        play_wav(tmp.name)
        if not ask('Did you hear your own voice?'):
            error(ERROR_NO_RECORDED_SOUND)
            return False
        return True
def main():
    """Replay the recorded file each time the button is pressed.

    Loops forever; the LED beacons while waiting and turns solid during
    playback.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--filename", "-f", default="~/recorded.wav")
    args = parser.parse_args()
    # Bug fix: '~' is never expanded automatically; without expanduser()
    # the default path cannot be opened by play_wav().
    filename = os.path.expanduser(args.filename)
    with Board() as board:
        while True:
            print("Press button to play recorded sound.")
            board.led.state = Led.BEACON
            board.button.wait_for_press()
            print("Playing...")
            board.led.state = Led.ON
            play_wav(filename)
            print("Done.")
def check_microphone_works():
    """Self-test the microphone: record, replay, and ask for confirmation."""
    with tempfile.NamedTemporaryFile() as capture:
        input('When you are ready, press Enter and say "Testing, 1 2 3"...')
        print('Recording for %d seconds...' % RECORD_DURATION_SECONDS)
        record_file(AudioFormat.CD, filename=capture.name, filetype='wav',
                    wait=lambda: time.sleep(RECORD_DURATION_SECONDS))
        print('Playing back recorded audio...')
        play_wav(capture.name)
        heard = ask('Did you hear your own voice?')
        if heard:
            return True
        error(ERROR_NO_RECORDED_SOUND)
        return False
def main():
    """Voice-command demo: recognize speech and drive the LED / play sounds.

    Recognized phrases (substring match, checked in order): turn on/off the
    light, blink the light, goodbye (exits), happy, creep, cheer.
    """
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()
    with Board() as board:
        #board.led.state = Led.ON
        with Leds() as leds:
            while True:
                if hints:
                    logging.info('Say something, e.g. %s.' % ', '.join(hints))
                else:
                    logging.info('Say something.')
                text = client.recognize(language_code=args.language,
                                        hint_phrases=hints)
                if text is None:
                    logging.info('You said nothing.')
                    continue
                logging.info('You said: "%s"' % text)
                text = text.lower()
                # Order matters: the first matching phrase wins.
                if 'turn on the light' in text:
                    board.led.state = Led.ON
                elif 'turn off the light' in text:
                    board.led.state = Led.OFF
                elif 'blink the light' in text:
                    board.led.state = Led.BLINK
                elif 'goodbye' in text:
                    break
                elif 'happy' in text:
                    leds.pattern = Pattern.blink(50)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    audio.play_wav('laugh.wav')
                elif 'creep' in text:
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_on(color))
                elif 'cheer' in text:
                    leds.pattern = Pattern.blink(5)
                    color = (230, 0, 115)
                    leds.update(Leds.rgb_on(color))
                    audio.play_wav('people-cheering.wav')
def main():
    """On each button press: light the LED, roll a die, play that sound."""
    print('LED is ON while button is pressed (Ctrl-C for exit).')
    with Board() as board:
        while True:
            board.button.wait_for_press()
            print('ON')
            board.led.state = Led.ON
            board.button.wait_for_release()
            # Pick one of the six response sounds at random.
            roll = random.randint(1, 6)
            print("Random number between 1 and 6 is % s" % (roll))
            wav = "/home/pi/breatheresponsibly/sounds/%s.wav" % roll
            print(wav)
            play_wav(wav)
            print('OFF')
            board.led.state = Led.OFF
def recognize(language_code='en_US', wait=None, duration=5):
    """Record audio and send it to the remote speech service.

    Args:
        language_code: BCP-47 code forwarded to the service.
        wait: optional callable that returns when recording should stop;
            when None, records for `duration` seconds via _wait_for_duration.
        duration: recording length in seconds when `wait` is None.

    Returns:
        The transcript string, or '' on any failure.
    """
    global seconds
    global play_file
    status, host, client_id = check_file_exist()
    if not status:  # was `status == False`; test truthiness directly
        print("ERROR: Can't find the credential file")
        sys.exit(1)
    timestamp = math.ceil(time.time())
    filename = '/tmp/file-' + str(timestamp)
    if wait != None:
        record_file(AudioFormat.CD, filename=filename, wait=wait, filetype='wav')
    else:
        seconds = duration
        record_file(AudioFormat.CD, filename=filename, wait=_wait_for_duration,
                    filetype='wav')
    try:
        url = host + "/voice/" + client_id
        # Bug fix: close the upload handle deterministically -- the
        # original left the file object open after the POST.
        with open(filename, 'rb') as audio_file:
            files = {'file': audio_file}
            values = {'languageCode': language_code}
            req = requests.post(url, files=files, data=values)
        if play_file == True:
            play_wav(filename)
        os.remove(filename)
        reply = json.loads(req.text)
        if reply["status"] == True:
            return reply["transcript"]
        else:
            if reply["error"] != None:
                print("ERROR:", reply["error"])
            return ""
    except Exception as err:
        # Best-effort contract: any failure (network, JSON, missing keys)
        # yields an empty transcript.
        return ""
def voiceToTvCmd():
    """Recognize a 'change channel' / 'change volume' command and apply it.

    Expects the module-level speech_client plus tvInputControl and
    tvMediaControl set up by conn_attempt(); no-op until connected.
    """
    global speech_client, tvInputControl, tvMediaControl
    if not tvInputControl or not tvMediaControl:
        return
    text = speech_client.recognize(language_code='en-US',
                                   hint_phrases=('change channel', 'change volume'))
    if text is None:
        return
    text = text.lower()
    if text == 'change channel' or text == 'change volume':
        play_wav('selectsound.wav')
        print('ok')
        # Second utterance carries the channel or volume number.
        num_text = speech_client.recognize(language_code='en-US', hint_phrases=None)
        if num_text is None:
            print('no number spoken')
            play_wav('errorsound.wav')
            return
        if text == 'change channel':
            # Accepts "12", "12.3", or "12 - 3" (whitespace in the pattern
            # is ignored under re.VERBOSE).
            num_regex = re.search(
                '^(\d+)$ | ^(\d+)\.(\d+)$ | ^(\d+)\s-\s(\d+)$', num_text,
                re.VERBOSE)
            num1 = None
            num2 = None
            if num_regex:
                if num_regex.group(1):
                    num1 = num_regex.group(1)
                elif num_regex.group(2) and num_regex.group(3):
                    num1 = num_regex.group(2)
                    num2 = num_regex.group(3)
                elif num_regex.group(4) and num_regex.group(5):
                    num1 = num_regex.group(4)
                    num2 = num_regex.group(5)
                changeChannel(num1, num2, tvInputControl)
                print('channel changed')
            else:
                print('invalid channel number')
                play_wav('errorsound.wav')
        elif text == 'change volume':
            num_regex = re.search('^(\d+)$', num_text, re.VERBOSE)
            if num_regex and num_regex.group(1):
                vol = int(num_regex.group(1))
                tvMediaControl.set_volume(vol)
                print('volume changed')
            else:
                print('invalid volume number')
                play_wav('errorsound.wav')
def main():
    """Main loop: on button press, record speech, classify intent, dispatch.

    User-facing audio prompts are pre-recorded Chinese/Taiwanese .wav files.
    """
    # play_wav('機器已啟動,按鈕開始啟動功能')   (commented-out startup prompt)
    firsttime = True
    while True:
        # play_wav("請按鈕呼叫機器人")   (commented-out "press to call" prompt)
        with Board() as button_ready:
            print("請按開關開始")  # "Press the switch to start"
            play_wav("final2/start.wav")
            button_ready.button.wait_for_press()
            if firsttime == True:
                # Extra instruction prompt only on the first iteration.
                play_wav('final2/button.wav')
                firsttime = False
            record()
            try:
                user_speak_text = taiwanese_recognize()
            except:
                # NOTE(review): bare except -- any recognition failure
                # silently exits main() with 0.
                return 0
            print(user_speak_text)
            intend = tending_justify(user_speak_text)
            function_select(intend)
def alarm(done, leds):
    """Ramp an alarm: raise volume and LED brightness until `done` is set.

    Args:
        done: threading.Event set by the button press to stop the alarm.
        leds: aiy Leds object driven with a breathing pattern.
    """
    print("alarm thread")
    intensity = 0
    start = time.monotonic()
    duration = 0
    while not done.is_set():
        if (intensity < 1):
            intensity += (5. / 70.)  # ~14 steps from silent to full blast
        if (intensity > 1):
            intensity = 1
        set_volume(intensity * MAX_VOLUME)
        # `map` here is presumably an Arduino-style range-remap helper
        # defined elsewhere in the project (not the builtin map, whose
        # iterator would not fit Pattern.breathe) -- TODO confirm.
        leds.pattern = Pattern.breathe(map(intensity, 0., 1., 1000., 100.))
        leds.update(Leds.rgb_pattern((0, 0, intensity * MAX_BRIGHTNESS)))
        duration = time.monotonic() - start
        print('Alarm [Press button to stop] %.02fs, intensity: %.02f' %
              (duration, intensity))
        play_wav(ALARM_SOUND_PATH)
        time.sleep(SLEEP_TIME)
def main():
    """Record speech, transcribe it via askForService, and route the reply.

    `outside` tracks whether the "出去玩" (go play outside) dialogue mode
    is active across loop iterations.
    """
    outside = 0
    while True:
        with Board():
            print("\n\npress to start")
            record()
            play_wav("r.wav")
            # Bug fix: the original bound the recorded bytes to the name
            # `record_file`, shadowing the imported record_file() helper,
            # and never closed the file handle.
            with open('./r.wav', 'rb') as wav_file:
                wav_bytes = wav_file.read()
            text = askForService(token, wav_bytes)
            print('+++++++++++++++++++')
            print(text)
            print('+++++++++++++++++++')
            textlist = text.split(':', 1)
            textlist = textlist[1].split('r', 1)
            print(textlist)
            print(type(textlist))
            print('---------------------')
            print(textlist[0])
            print('-----------------------')
            RniReply = Rni(textlist[0], outside)
            #RniReply = Rni('時間', outside)
            # The "出去玩" reply switches into dialogue mode; anything else
            # switches back out.
            if str(RniReply) != '出去玩':
                outside = 0
            if str(RniReply) == '出去玩':
                outside = 1
            # Behavior when NOT in the dialogue mode.
            if outside == 0:
                #tts = gTTS(text=RniReply, lang='zh-tw')
                #tts.save('ip.mp3')
                #os.system('mpg321 ip.mp3')
                print('刪除掉了 rni reply QQQQQ')
            # NOTE(review): a stray ''' followed here in the original
            # (likely the opener of a commented-out block) -- removed.
def main():
    """Run the wake-up alarm: start the alarm thread, stop on button press."""
    with Board() as board:
        with Leds() as leds:
            # Start silent and dark; the alarm thread ramps both up.
            set_volume(0)
            leds.pattern = Pattern.breathe(750)
            leds.update(Leds.rgb_pattern(Color.BLACK))

            done = threading.Event()
            board.button.when_pressed = done.set

            alarm_thread = threading.Thread(target=alarm, args=(done, leds),
                                            daemon=True)
            alarm_thread.start()

            stopped_by_button = done.wait(timeout=TIMEOUT_LIMIT)
            if stopped_by_button:
                set_volume(MAX_VOLUME)
                leds.update(Leds.rgb_on(Color.GREEN))
                print('GOOD MORNING!')
                play_wav(GOOD_MORNING_SOUND_PATH)
            else:
                print('Timed out.')
def detect_wakeword(self):
    """Listen for up to ~30s for a wake word via the Assistant.

    A wake word is acknowledged with a ding and listening lights; any
    other recognized phrase triggers a spoken retry prompt and restarts
    the 30s window. Relies on module-level `aplay_mutex`, `wake_words`,
    `tts`, and `lighting_controller` defined elsewhere.
    """
    self.greet_start_time = time.time()
    user_text = ""
    while (True):
        self.greet_end_time = time.time()
        if (self.greet_end_time - self.greet_start_time >= 30):
            break  # give up after 30 seconds without a wake word
        try:
            user_text = self.assistant.conversation()
        except KeyboardInterrupt:
            lighting_controller.led_controller.switch_off()
            break
        except Exception:
            # NOTE(review): assumes any other failure is connectivity.
            print("Internet out!!")
            lighting_controller.led_controller.blink()
        print('user_text is :', user_text)
        if (user_text in wake_words):
            # Serialize audio playback with other threads.
            aplay_mutex.acquire()
            try:
                logging.info("Locking in acknowledge fn")
                play_wav("ding.wav")
            finally:
                aplay_mutex.release()
            logging.info("Releasing in acknowledge fn")
            user_text = ""
            lighting_controller.led_controller.listening_lighting(
            )  #Listening lighting start
            break
        elif user_text != "":
            aplay_mutex.acquire()
            try:
                logging.info("Try again!")
                tts.say('Sorry, could not pick that up')
                self.greet_start_time = time.time()  # restart the window
            finally:
                aplay_mutex.release()
def conn_attempt():
    """Discover and register with a single LG webOS TV on the network.

    Updates the module-level connection_status (0 = disconnected,
    1 = connecting, 2 = connected) and the board LED; on success creates
    the input/media control channels used by voiceToTvCmd().
    """
    global board, connection_status, tvClient, tvInputControl, tvMediaControl
    print('Attempting to connect')
    connection_status = 1
    board.led.state = Led.PULSE_QUICK
    board.button._pressed_callback = None  # disable the button while connecting
    client_list = WebOSClient.discover()
    # NOTE(review): this also rejects the zero-TVs case, despite what the
    # message says.
    if len(client_list) != 1:
        play_wav('errorsound.wav')
        print('connection failed: more than 1 tv found')
        connection_status = 0
        board.led.state = Led.OFF
        return
    tvClient = client_list[0]
    try:
        tvClient.connect()
        # register() yields PROMPTED (user must accept on the TV) and then
        # REGISTERED once pairing succeeds.
        for status in tvClient.register(store):
            if status == WebOSClient.PROMPTED:
                play_wav('ding.wav')
                print('See TV prompt and press yes')
            elif status == WebOSClient.REGISTERED:
                play_wav('ding.wav')
                print('Successful connection')
                board.led.state = Led.ON
                connection_status = 2
                tvInputControl = InputControl(tvClient)
                tvInputControl.connect_input()
                tvMediaControl = MediaControl(tvClient)
    except Exception:
        play_wav('errorsound.wav')
        print('connection failed: you probably pressed no on the prompt')
        connection_status = 0
        board.led.state = Led.OFF
        return
def main():
    """Play the bundled sample file once and report progress."""
    print('Start playing...')
    audio.play_wav('sample.wav')
    print('Done.')
def main():
    """Interactive ENT triage: load the illness/symptom table, question the
    user by voice until a likely illness emerges, then announce it.

    Uses module-level shared state (data_list, cos_list, score, sym_list,
    sym_nolist, sym_count, ill_list, sym_name, yesno) maintained by
    symptom()/score_change()/score_count(). All prompts are pre-recorded
    .wav files under ../final2/.
    """
    file = io.open('./illness.csv', 'r', encoding='utf-8')
    lines = file.readlines()
    i = 1
    # Parse the CSV: row 0 holds symptom names; each later row is an
    # illness name followed by 0/1 symptom flags.
    while i < len(lines):
        data = lines[i].replace('\n', '').split(',')
        data_list.append(data)
        data = data[1:]
        data = list(map(int, data))
        cos_list.append(data)
        i += 1
    symptom_list = lines[0].replace('\n', '').split(',')
    while True:
        flag = 0
        play_wav('../final2/你有哪裡不舒服嗎.wav')  # "Where does it hurt?"
        text = taiwanese_recognize()
        if text is None:
            print('抱歉,我沒聽清楚。可以再說一次嗎?')
        else:
            print('你說:', text, '"')
            symptom(text, 0)
        # Keep questioning while: up to ~6 questions asked, best-match
        # similarity <= 0.75, and symptoms not yet exhausted.
        while sym_count <= 4 and max(
                score) <= 0.75 and flag == 0:
            if len(ill_list) == 1:  # only one candidate illness left
                for i in range(1, 39):  # walk the 39 symptom columns
                    # Ask about symptoms not yet confirmed/denied that the
                    # top-scoring illness exhibits.
                    if i not in sym_list and i not in sym_nolist and int(
                            data_list[score.index(max(score))][i]) == 1:
                        print('above_1s i : ' + str(i))
                        play_wav('../final2/那你會.wav')
                        play_wav('../final2/' + str(symptom_list[i]) + '.wav')
                        play_wav('../final2/嗎.wav')
                        text = taiwanese_recognize()
                        if text is None:
                            play_wav('../final2/不好意思我聽不懂,請再說一次.wav')
                        else:
                            # Negative answers (incl. Taiwanese variants).
                            if '沒有' in text or '不會' in text or '沒' in text or '無' in text or '背' in text or '袂' in text or '咧' in text:
                                score_change(i)
                                sym_name.append(symptom_list[i])
                                yesno.append('否')
                                break
                            # Affirmative answers.
                            elif '有' in text or '會' in text or '是' in text or '對' in text or '痛' in text or '會痛' in text or '有痛' in text or '嗚' in text or '呼' in text:
                                symptom('', i)
                                break
                            else:
                                play_wav('../final2/不好意思我聽不懂,請再說一次.wav')
                                break
                    elif i == 39:  # all symptoms asked
                        flag = 1
                        break
            else:  # two or more candidate illnesses
                if len(sym_list) < 2:  # only one symptom matched so far
                    play_wav('../final2/還有哪裡不舒服.wav')
                    if '沒有' in text:
                        play_wav('../final2/症狀過少無法判斷.wav')
                    #button.wait_for_press()
                    text = taiwanese_recognize()
                    print(text)
                    symptom(text, 0)
                else:  # two or more symptoms matched
                    for i in range(1, 39):
                        if i not in sym_list and i not in sym_nolist and int(
                                data_list[score.index(max(score))][i]) == 1:
                            print('above_2s i : ' + str(i))
                            play_wav('../final2/那你會.wav')
                            play_wav('../final2/' + str(symptom_list[i]) + '.wav')
                            play_wav('../final2/嗎.wav')
                            #button.wait_for_press()
                            text = taiwanese_recognize()
                            if text is None:
                                play_wav('../final2/不好意思我聽不懂,請再說一次.wav')
                            else:
                                if '沒有' in text or '不會' in text:
                                    score_change(i)
                                    sym_name.append(symptom_list[i])
                                    yesno.append('否')
                                    break
                                elif '有' in text or '會' in text:
                                    symptom('', i)
                                    break
                                else:
                                    play_wav('../final2/不好意思我聽不懂,請再說一次.wav')
                                    break
                        elif i == 39:
                            flag = 1
                            break
        play_wav('../final2/診斷完畢.wav')  # "Diagnosis complete"
        print('No.\t症狀\t\t是\否')
        for i in range(len(sym_name)):
            print(str(i) + '\t' + sym_name[i] + '\t\t' + yesno[i])
            i += 1  # NOTE(review): has no effect; range() controls i
        print('可能疾病:' + data_list[score.index(max(score))][0])
        play_wav('../final2/您可能患有的病症是.wav')
        play_wav('../final2/' + str(data_list[score.index(max(score))][0]) +
                 '.wav')
        play_wav('../final2/請盡速去耳鼻喉科掛號.wav')
        return False
    file.close()  # NOTE(review): unreachable -- the loop always returns
def symptom(word, sym_flag):
    """Match a spoken phrase (or an explicit symptom index) to a symptom.

    Args:
        word: recognized user utterance, keyword-matched against symptom
            phrases; effectively ignored when sym_flag is non-zero.
        sym_flag: 0 for keyword matching, otherwise the 1-based symptom
            index that the user just confirmed with a yes answer.

    Side effects: appends to the module-level sym_name/yesno lists and
    updates per-illness scores via score_count(); plays an
    "unrecognized symptom" prompt when nothing matches. Branch order
    matters -- the first matching condition wins.
    """
    if ('耳朵' in word and '癢' in word) or sym_flag == 1:
        sym = '耳朵癢'
        sym_name.append(sym)
        yesno.append('是')
        score_count(1)
    elif ('耳朵' in word and '異物' in word) or sym_flag == 2:
        sym = '耳朵異物感'
        sym_name.append(sym)
        yesno.append('是')
        score_count(2)
    elif ('拉' in word and '耳' in word and '痛' in word) or sym_flag == 3:
        sym = '拉耳廓會痛'
        sym_name.append(sym)
        yesno.append('是')
        score_count(3)
    elif ('張嘴' in word and '痛' in word) or sym_flag == 4:
        sym = '張嘴會疼痛'
        sym_name.append(sym)
        yesno.append('是')
        score_count(4)
    elif ('聽' in word and '不清楚' in word) or sym_flag == 5:
        sym = '聽不清楚'
        sym_name.append(sym)
        yesno.append('是')
        score_count(5)
    elif ('臉' in word and '壓痛' in word) or sym_flag == 6:
        sym = '鼻竇壓痛'
        sym_name.append(sym)
        yesno.append('是')
        score_count(6)
    elif ('喉嚨' in word and '痛' in word) or sym_flag == 7:
        sym = '喉嚨痛'
        sym_name.append(sym)
        yesno.append('是')
        score_count(7)
    elif '打噴嚏' in word or sym_flag == 8:
        sym = '打噴嚏'
        sym_name.append(sym)
        yesno.append('是')
        score_count(8)
    elif '流鼻水' in word or '流鼻' in word or '流(鼻)' in word or '鼻水' in word or sym_flag == 9:
        sym = '鼻水'
        # Special case: display name gets the '流' (runny) prefix.
        sym_name.append('流' + sym)
        yesno.append('是')
        score_count(9)
    elif '鼻塞' in word or sym_flag == 10:
        sym = '鼻塞'
        sym_name.append(sym)
        yesno.append('是')
        score_count(10)
    elif ('鼻子' in word and '癢' in word) or sym_flag == 11:
        sym = '鼻子癢'
        sym_name.append(sym)
        yesno.append('是')
        score_count(11)
    elif '結膜炎' in word or sym_flag == 12:
        sym = '結膜炎'
        sym_name.append(sym)
        yesno.append('是')
        score_count(12)
    elif ('嗅覺' in word and '差' in word) or sym_flag == 13:
        sym = '嗅覺變差'
        sym_name.append(sym)
        yesno.append('是')
        score_count(13)
    elif '頭痛' in word or '頭(疼)' in word or '(頭)(疼)' in word or sym_flag == 14:
        sym = '頭痛'
        sym_name.append(sym)
        yesno.append('是')
        score_count(14)
    elif '口臭' in word or sym_flag == 15:
        sym = '口臭'
        sym_name.append(sym)
        yesno.append('是')
        score_count(15)
    elif ('耳朵' in word and '痛' in word) or sym_flag == 16:
        sym = '耳朵痛'
        sym_name.append(sym)
        yesno.append('是')
        score_count(16)
    elif ('最近' in word and '感冒' in word) or sym_flag == 17:
        sym = '最近有感冒'
        sym_name.append(sym)
        yesno.append('是')
        score_count(17)
    elif '咳嗽' in word or sym_flag == 18:
        sym = '咳嗽'
        sym_name.append(sym)  # "symptom 0" in the original numbering
        yesno.append('是')
        score_count(18)
    elif '耳鳴' in word or sym_flag == 19:
        sym = '耳鳴'
        sym_name.append(sym)
        yesno.append('是')
        score_count(19)
    elif (('脖子' in word or '頸' in word) and '腫塊' in word) or sym_flag == 20:
        sym = '頸部腫塊'
        sym_name.append(sym)
        yesno.append('是')
        score_count(20)
    elif '流鼻血' in word or sym_flag == 21:
        sym = '流鼻血'
        sym_name.append(sym)
        yesno.append('是')
        score_count(21)
    elif ('臉' in word and '麻痺' in word) or sym_flag == 22:
        sym = '臉部麻痺'
        sym_name.append(sym)
        yesno.append('是')
        score_count(22)
    elif ('耳朵' in word and '流膿' in word) or sym_flag == 23:
        sym = '流膿'
        sym_name.append(sym)
        yesno.append('是')
        score_count(23)
    elif '發燒' in word or sym_flag == 24:
        sym = '發燒'
        sym_name.append(sym)
        yesno.append('是')
        score_count(24)
    elif '畏寒' in word or sym_flag == 25:
        sym = '畏寒'
        sym_name.append(sym)
        yesno.append('是')
        score_count(25)
    elif '寒顫' in word or sym_flag == 26:
        sym = '寒顫'
        sym_name.append(sym)
        yesno.append('是')
        score_count(26)
    elif '嘔吐' in word or sym_flag == 27:
        sym = '嘔吐'
        sym_name.append(sym)
        yesno.append('是')
        score_count(27)
    elif ('淋巴結' in word and ('痛' in word and '腫大' in word)) or sym_flag == 28:
        sym = '頸部淋巴結腫大'
        sym_name.append(sym)
        yesno.append('是')
        score_count(28)
    elif ('食慾' in word and ('不振' in word or '不好' in word or '不佳' in word)) or sym_flag == 29:
        sym = '食慾不振'
        sym_name.append(sym)
        yesno.append('是')
        score_count(29)
    elif ('腮腺' in word and ('發炎' in word or '腫大' in word)) or sym_flag == 30:
        sym = '腮腺發炎腫大'
        sym_name.append(sym)
        yesno.append('是')
        score_count(30)
    elif ('腮腺' in word and '痛' in word) or sym_flag == 31:
        sym = '腮腺疼痛'
        sym_name.append(sym)
        yesno.append('是')
        score_count(31)
    elif ('咀嚼' in word and '困難' in word) or sym_flag == 32:
        sym = '咀嚼困難'
        sym_name.append(sym)
        yesno.append('是')
        score_count(32)
    elif '胃痛' in word or sym_flag == 33:
        sym = '胃痛'
        sym_name.append(sym)
        yesno.append('是')
        score_count(33)
    elif ('喉嚨' in word and ('熱' in word or '燙' in word)) or sym_flag == 34:
        sym = '喉嚨灼熱'
        sym_name.append(sym)
        yesno.append('是')
        score_count(34)
    elif ('清' in word and '喉嚨' in word) or sym_flag == 35:
        sym = '常清喉嚨'
        sym_name.append(sym)
        yesno.append('是')
        score_count(35)
    elif ('喉嚨' in word and '異物' in word) or sym_flag == 36:
        sym = '喉嚨有異物感'
        sym_name.append(sym)
        yesno.append('是')
        score_count(36)
    elif ('聲音' in word and '啞' in word) or sym_flag == 37:
        sym = '聲音乾啞'
        sym_name.append(sym)
        yesno.append('是')
        score_count(37)
    elif ('肌肉' in word and '痠痛' in word) or sym_flag == 38:
        sym = '肌肉痠痛'
        sym_name.append(sym)
        yesno.append('是')
        score_count(38)
    elif '拉肚子' in word or sym_flag == 39:
        sym = '拉肚子'
        sym_name.append(sym)
        yesno.append('是')
        score_count(39)
    else:
        # No keyword matched and no explicit index given.
        play_wav('../final2/無法判別症狀.wav')
def say(text):
    """Speak `text` in Mandarin: synthesize via gTTS, convert, and play."""
    speech = gTTS(text, lang='zh-TW')
    speech.save('output.mp3')
    # The Voice Kit player needs wav, so transcode the synthesized mp3.
    clip = AudioSegment.from_mp3('output.mp3')
    clip.export('output.wav', format='wav')
    audio.play_wav('output.wav')
def play_random_file_from_dir(dirpath):
    """Pick a random file from `dirpath` and play it as a wav."""
    chosen = random.choice(os.listdir(dirpath))
    full_path = os.path.join(dirpath, chosen)
    print("attempting to play: {}".format(full_path))
    play_wav(full_path)
def finish():
    """Play the goodbye sound, then power the system off."""
    print('shutting down')
    play_wav('goodbye.wav')
    os.system('sudo shutdown -h now')
#!/usr/bin/env python3
"""Doorbell script: play the bell sound that sits next to this file."""

import os

from aiy.voice.audio import play_wav

# Resolve the sound relative to this script so it works from any cwd.
RING_SOUND = os.path.dirname(os.path.abspath(__file__)) + "/bell.wav"

play_wav(RING_SOUND)

# TODO have google assistant say something.
duration = time.monotonic() - start print('Recording: %.02f seconds [Press button to stop]' % duration) time.sleep(0.5) record_file(AudioFormat.CD, filename=args.filename, wait=wait, filetype='wav') print('Press button to play recorded sound.') board.button.wait_for_press() print('Playing...') play_wav(args.filename) print('Done.') if __name__ == '__main__': main()def wait(): start = time.monotonic() while not done.is_set(): duration = time.monotonic() - start print('Recording: %.02f seconds [Press button to stop]' % duration) time.sleep(0.5) record_file(AudioFormat.CD, filename=args.filename, wait=wait, filetype='wav') print('Press button to play recorded sound.') board.button.wait_for_press() print('Playing...') play_wav(args.filename) print('Done.') if __name__ == '__main__': main()
def listen_me():
    """Active-listening companion: record the user, gauge sentiment from
    the transcript (falling back to voice-tone analysis when the text is
    ambiguous), then react with a short synthesized interjection and an
    LED pattern.

    Uses module-level `text`/`duration` globals shared with the nested
    wait() recording callback, and text_to_audio() defined elsewhere.
    """
    global text, duration
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()
    # Load the Vokaturi voice-emotion native library.
    Vokaturi.load("/home/pi/lib/piZero.so")
    # Initialize Cloud Speech, Natural Language, and Text-to-Speech clients.
    client = CloudSpeechClient()
    nlp_client = language.LanguageServiceClient()
    tts_client = texttospeech.TextToSpeechClient()
    # Pre-synthesize canned reactions: positive / neutral / negative / intro.
    pos_wavs = []
    neut_wavs = []
    neg_wavs = []
    intro_wavs = []
    pos_wavs.append(text_to_audio(tts_client, '진짜?', '0.wav'))
    pos_wavs.append(text_to_audio(tts_client, '대박', '1.wav'))
    pos_wavs.append(text_to_audio(tts_client, '우와', '2.wav'))
    pos_wavs.append(text_to_audio(tts_client, '하하', '3.wav'))
    neut_wavs.append(text_to_audio(tts_client, '응', '10.wav'))
    neut_wavs.append(text_to_audio(tts_client, '그렇구나', '11.wav'))
    neut_wavs.append(text_to_audio(tts_client, '그래서?', '12.wav'))
    neut_wavs.append(text_to_audio(tts_client, '응응', '13.wav'))
    neg_wavs.append(text_to_audio(tts_client, '저런', '4.wav'))
    neg_wavs.append(text_to_audio(tts_client, '힘내', '5.wav'))
    neg_wavs.append(text_to_audio(tts_client, '에휴', '6.wav'))
    intro_wavs.append(text_to_audio(tts_client, '들어줄게. 얘기해봐', 'intro0.wav'))
    intro_wavs.append(text_to_audio(tts_client, '무슨 일 이야?', 'intro1.wav'))
    play_wav(random.choice(intro_wavs))
    logging.basicConfig(level=logging.INFO)
    with Board() as board:
        while True:
            print('말해보자.')  # "Let's talk."
            text = None
            duration = 0.
            emotion = None

            def wait():
                # Recording callback: returns once speech was recognized,
                # publishing the transcript and elapsed time via globals.
                global text, duration
                start = time.monotonic()
                while text is None:
                    text = client.recognize(language_code='ko-KR')
                    duration = time.monotonic() - start

            # Record while simultaneously recognizing.
            record_file(AudioFormat.CD, filename=args.filename, wait=wait,
                        filetype='wav')
            print(text)
            print('Recorded: %.02f seconds' % duration)
            # Farewell phrases end the session.
            if text in ['들어줘서 고마워', '내 얘기 들어줘서 고마워', '어시스턴트', '잘가', '잘 가']:
                return
            # Text sentiment analysis.
            document = types.Document(content=text,
                                      type=enums.Document.Type.PLAIN_TEXT)
            sentiment = nlp_client.analyze_sentiment(
                document=document).document_sentiment
            print('텍스트 감정 분석*********************************')
            print('Text: {}'.format(text))
            print('Sentiment: {}, {}'.format(sentiment.score, sentiment.magnitude))
            # Thresholds -- may be tuned after experiments.
            pos_standard = 0.6
            neg_standard = 0.1
            # magnitude_standard = 0.1
            # When the text score is decisive, skip the audio analysis.
            if (sentiment.score < neg_standard or sentiment.score > pos_standard):
                if sentiment.score < neg_standard:
                    emotion = False
                    print("@@@negative")
                else:
                    emotion = True
                    print("@@@positive")
            else:
                # Ambiguous text sentiment: analyze the recorded audio tone.
                print('오디오 감정 분석*********************************')
                (sample_rate, samples) = scipy.io.wavfile.read(args.filename)
                buffer_length = len(samples)
                print(" %d samples, %d channels" % (buffer_length, samples.ndim))
                c_buffer = Vokaturi.SampleArrayC(buffer_length)
                if samples.ndim == 1:  # mono
                    c_buffer[:] = samples[:] / 32768.0
                else:  # stereo: average both channels
                    c_buffer[:] = 0.5 * (samples[:, 0] + 0.0 + samples[:, 1]) / 32768.0
                voice = Vokaturi.Voice(sample_rate, buffer_length)
                voice.fill(buffer_length, c_buffer)
                quality = Vokaturi.Quality()
                emotionProbabilities = Vokaturi.EmotionProbabilities()
                voice.extract(quality, emotionProbabilities)
                if quality.valid:
                    # Fear is deliberately ignored: happiness vs sad+anger.
                    wave_score = emotionProbabilities.happiness - (
                        emotionProbabilities.sadness + emotionProbabilities.anger)
                    if wave_score > 0 and sentiment.score > 0.4:
                        print('@@@긍정')  # positive
                        emotion = True
                    elif wave_score < 0 and sentiment.score < 0.4:
                        print('@@@부정')  # negative
                        emotion = False
                    # Text vs audio disagreement stays neutral (emotion=None).
            # React with sound + LEDs.
            with Leds() as leds:
                if emotion is True:
                    play_wav(random.choice(pos_wavs))
                    leds.pattern = Pattern.blink(100)
                    color = (255, 255, 0)
                    leds.update(Leds.rgb_pattern(color))
                    time.sleep(1)
                    # play_wav('laugh.wav')
                elif emotion is False:
                    play_wav(random.choice(neg_wavs))
                    leds.pattern = Pattern.breathe(1000)
                    color = (102, 140, 255)
                    leds.update(Leds.rgb_on(color))
                    time.sleep(1)
                    # play_wav('people-cheering.wav')
                else:
                    # Neutral reaction.
                    play_wav(random.choice(neut_wavs))
                    leds.pattern = Pattern.blink(5)
                    color = (230, 0, 115)
                    leds.update(Leds.rgb_on(color))
                    time.sleep(1)
def wait(): start = time.monotonic() while not done.is_set(): duration = time.monotonic() - start print('Recording: %.02f seconds [Press button to stop]' % duration) time.sleep(0.5) record_file(AudioFormat.CD, filename=args.filename, wait=wait, filetype='wav') print('Press button to play recorded sound.') board.button.wait_for_press() print('Playing...') play_wav(args.filename) print('Done.') if __name__ == '__main__': main()def wait():