def reponse_bouton(est_juste, ecart, action):
    """Signal tuning feedback through the button LED.

    Args:
        est_juste: True when the string has reached the target frequency.
        ecart: frequency gap between the played and target note (may be
            negative when the string is too high).
        action: 1 to tighten the string (blue blink), anything else to
            loosen it (red blink).
    """
    with Leds() as leds:
        if est_juste:
            # Solid green for 3 seconds once the target frequency is reached.
            leds.update(Leds.rgb_on(Color.GREEN))
            time.sleep(3)
            print('Corde accordée')
            tts.say('Corde accordée', lang='fr-FR')  # "string is tuned"
        else:
            # Bug fix: ecart can be negative, and a negative blink period is
            # invalid — use the magnitude (as the two-argument variant of this
            # function elsewhere in the file already does).
            period = 10 * abs(ecart)
            leds.pattern = Pattern.blink(period)  # blink rate mirrors how far off we are
            print('Tourner la cheville')
            tts.say('Tourner la cheville', lang='fr-FR')  # "turn the tuning peg"
            if action == 1:
                # Blue blinking for 5 seconds: raise the pitch.
                leds.update(Leds.rgb_pattern(Color.BLUE))
            else:
                # Red blinking for 5 seconds: lower the pitch.
                leds.update(Leds.rgb_pattern(Color.RED))
            time.sleep(5)
def main():
    """Connect to the relay server as the VOICE client and serve requests.

    Runs until the process is killed. Bug fix: the original called main()
    recursively from the except block to reconnect, growing the call stack on
    every connection failure; this version reconnects inside a loop instead.
    """
    while True:
        client = connect_to_socket()
        # Identify this client to the server with an encrypted handshake token.
        validation_msg = crypt.encrypt(b"VOICE")
        client.sendall(validation_msg)
        try:
            # Serve until the connection drops.
            while True:
                stat = utils.start_handshake_recv(client)
                if not stat:
                    continue
                # Receive the payload chunks until the sender finishes.
                recv_data, choice = utils.recv_data(client, CHUNK=CHUNK)
                logging.info("Received data : {}".format(recv_data))
                logging.info("Choice : {}".format(choice))
                tts.say(recv_data, volume=80)
                if choice == 'b':
                    # Choice 'b' asks the user for a spoken reply to send back.
                    tts.say("Do you want to send any message to Ashraful?")
                    voice_data = utils.voice_to_text()
                    utils.send_data(client, voice_data)
                    time.sleep(0.5)
        except (ConnectionRefusedError, BrokenPipeError, ConnectionResetError) as err:
            logging.error(err)
            client.close()
def process_event(assistant, led, event):
    """Drive LED and TTS responses for a single Assistant event."""
    logging.info(event)
    etype = event.type
    if etype == EventType.ON_START_FINISHED:
        # Assistant is up and waiting for the hotword.
        led.state = Led.BEACON_DARK
        logging.info('Say "OK, Google" then speak, or press Ctrl+C to quit...')
    elif etype == EventType.ON_CONVERSATION_TURN_STARTED:
        led.state = Led.ON  # Listening.
    elif etype == EventType.ON_END_OF_UTTERANCE:
        led.state = Led.PULSE_QUICK  # Thinking.
    elif etype == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
        # Hand the transcript to the AutoML prediction model and speak its answer.
        assistant.stop_conversation()
        prediction = get_prediction(event.args['text'], 'testenvironment-223010',
                                    'TCN3813896006391298745')
        tts.say(prediction)
    elif etype in (EventType.ON_CONVERSATION_TURN_FINISHED,
                   EventType.ON_CONVERSATION_TURN_TIMEOUT,
                   EventType.ON_NO_RESPONSE):
        led.state = Led.BEACON_DARK  # Back to ready.
    elif etype == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']:
        sys.exit(1)
def accord_de_la_guitare():
    """Tune all six guitar strings, announcing each step in French."""
    for _ in range(6):
        # Prompt the player to move on to the next string ("tune the next string").
        print('Accorder la corde suivante')
        tts.say('Accorder la corde suivante', lang='fr-FR')
        accord_de_la_corde()
    # All strings done ("guitar is tuned").
    print('Guitare accordée')
    tts.say('Guitare accordée', lang='fr-FR')
def on_button_press():
    """Toggle VLC playback: pause while playing, resume while paused."""
    current = vlc_player.get_state()
    if current == vlc.State.Playing:
        vlc_player.pause()
        tts.say('Music is paused!')
    elif current == vlc.State.Paused:
        tts.say('Music will be resumed!')
        vlc_player.play()
def main():
    """Listen for speech, classify its sentiment, and speak the result.

    Loops until the user says "goodbye". The sentiment score is currently a
    placeholder (always 0) until the sentiment-analysis API call is wired in.
    """
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()

    # Run the loop until the user says "goodbye".
    while True:
        logging.info('Say something that you want to analyse')
        # Cloud speech-to-text: returns None when nothing was recognized.
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        if text is None:
            logging.info('You said nothing.')
            continue
        logging.info('You said: "%s"' % text)

        # TODO: send `text` to the sentiment analysis API; 0 is a placeholder.
        score = 0
        if score <= -0.25:
            sentiment = "Negative"
        elif score <= 0.25:
            sentiment = "Neutral"
        else:
            # Bug fix: this branch assigned `sentimentstatus`, leaving
            # `sentiment` undefined and crashing the say() call below.
            sentiment = "Positive"
        say(sentiment, volume=1, pitch=130, speed=80, device='default')

        text = text.lower()
        if 'goodbye' in text:
            break
def lbsay(text, volume=50, speed=100, isNotification=False, silent=False):
    """Speak `text` in French unless muted or notifications are disabled."""
    global notification_is_on
    log("I said: " + text)
    # Notification messages are suppressed entirely when the user turned them off.
    if isNotification is True and notification_is_on is False:
        return
    if silent is False:
        tts.say(text, lang="fr-FR", pitch=100, volume=volume, speed=speed)
def _break(self):
    """Announce and run one break period, then reset the completion flag."""
    tts.say("Break time! Relax your brain!")
    # Dim pulsing LED signals "break" mode.
    self._board.led.state = Led.BEACON_DARK
    # Blocks until the break interval has elapsed.
    self._wait("Break")
    tts.say("Your break is over!")
    # Clear the event so the next phase can wait on it again.
    self._done.clear()
def greet(self):
    """Say hello, at most once per greeting-threshold window."""
    self.greet_timer_end = time.time()
    elapsed = self.greet_timer_end - self.greet_timer_start
    if elapsed > self.greet_timer_threshold:
        aplay_mutex.acquire()
        try:
            tts.say('Hello there!')
        finally:
            # Restart the cooldown window and always release the audio mutex.
            self.greet_timer_start = time.time()
            aplay_mutex.release()
def main():
    """Korean voice assistant loop: record speech, query the backend, speak replies.

    Press the button to start a turn; saying "안녕" or "goodbye" exits.
    """
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default='ko-KR')
    args = parser.parse_args()

    logging.info('%s... 초기화중...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()
    with Board() as board:
        while True:
            logging.info('Press button to start conversation...')
            board.led.state = Led.ON
            board.button.wait_for_press()
            logging.info('Conversation started!')
            if hints:
                logging.info('??????')
            else:
                logging.info('말씀해주세요')
            text = client.recognize(language_code=args.language,
                                    hint_phrases=hints)
            if text is None:
                logging.info('아무것도 못 들었어요.')
                tts.say('You said nothing.')
                continue
            elif '안녕' in text:
                tts.say("Bye Bye")
                break
            elif 'goodbye' in text:
                tts.say("Goodbye")
                break
            logging.info("제가 제대로 들은게 맞나요? : '%s'" % text)

            # Forward the transcript to the SQL backend and speak each answer row.
            data = {'text': text}
            res = requests.post("http://3.34.174.254:3000/python/py", data=data)
            try:
                say = json.loads(res.text)
                board.led.state = Led.BLINK  # request/response round-trip succeeded
                for i in range(len(say)):
                    answer = str(say[i]['ans'])
                    logging.info(answer)
                    tts.say(answer)
                    google_say(answer + "입니다")
            except Exception:
                # Bug fix: was a bare `except:` that also swallowed
                # KeyboardInterrupt/SystemExit; narrowed to Exception.
                logging.info("잘못된 응답입니다.")
                board.led.state = Led.BEACON_DARK
                tts.say("Its SQL error.")
def start_conversation(self):
    """Main Voxx loop: wait for the wake word, answer from the pickled database.

    Exits after 2 minutes without a conversation or 30 seconds without a
    greeting.
    """
    # Bug fix: the original also did `database = open("responses.pkl")` without
    # closing it, leaking a file handle before immediately shadowing the name;
    # the `with` block below is the only read that is needed.
    with open('responses.pkl', 'rb') as file:
        database = pickle.load(file)
    while True:
        self.listen_start_time = time.time()
        user_text = ""
        logging.info('Speak hotword to start conversation')
        volumes = self.mixer.getvolume()
        print('Volume = ' + str(volumes[0]))
        self.detect_wakeword()
        self.listen_end_time = time.time()
        # Stop the loop after prolonged silence.
        if (self.listen_end_time - self.listen_start_time >= 120
                or self.greet_end_time - self.greet_start_time >= 30):
            break
        logging.info('Ask any question to Voxx! Conversation started!')
        try:
            # Retry once: the Assistant sometimes returns an empty transcript.
            for i in range(0, 2):
                user_text = self.assistant.conversation()
                if user_text != "":
                    break
        except Exception:
            print("Internet out!!")
            lighting_controller.led_controller.blink()
        # Start the lighting-effect threads while we look up the answer.
        if user_text != "":
            breathing_thread = threading.Thread(
                target=lighting_controller.led_controller.breathing_effect)
            thinking_thread = threading.Thread(
                target=lighting_controller.led_controller.wave_effect)
            breathing_thread.start()
            thinking_thread.start()
            time.sleep(1)
        if user_text.lower() in database.keys():
            try:
                aplay_mutex.acquire()
                tts.say(database[user_text.lower()])
                lighting_controller.led_controller.greeting_effect(0)
            finally:
                aplay_mutex.release()
                logging.info("Releasing in answer")
        elif user_text != "":
            try:
                aplay_mutex.acquire()
                tts.say('Sorry, I do not know the answer to that')
                lighting_controller.led_controller.greeting_effect(0)
            finally:
                aplay_mutex.release()
                logging.info("Releasing in answer")
        logging.info(user_text)
        time.sleep(0.01)
def play_music(name):
    """Look `name` up via youtube-dl and stream the first hit through VLC."""
    try:
        with youtube_dl.YoutubeDL(ydl_opts) as ydl:
            meta = ydl.extract_info(name, download=False)
    except Exception:
        tts.say('Sorry, I can\'t find that song.')
        return
    if not meta:
        return
    info = meta['entries'][0]
    vlc_player.set_media(vlc_instance.media_new(info['url']))
    # Strip punctuation from the title so TTS does not stumble over it.
    tts.say('Playing ' + re.sub(r'[^\s\w]', '', info['title']))
    vlc_player.play()
def say(words, lang=None, volume=60, pitch=130):
    """Speak the given words with the Google TTS engine.

    Args:
        words: the text to speak.
        lang: BCP-47 code such as "en-US"; when omitted, the language from
            aiy.i18n is used.
        volume: optional volume used to say the given words.
        pitch: optional pitch used to say the given words.

    Example:
        aiy.audio.say('This is an example', lang="en-US", volume=75, pitch=135)
    """
    chosen_lang = lang or aiy.i18n.get_language_code()
    tts.say(words, lang=chosen_lang, volume=volume, pitch=pitch)
def trouve_freq_souhaitee(frequence):
    """Return the Liste_frequences entry closest to the played frequency.

    Liste_frequences appears to be sorted by decreasing frequency, each entry
    being (name, frequency, spoken_label) — TODO confirm against its
    definition. The matched string is announced in French before returning.
    """
    def _annonce(entry):
        # Announce the detected string and hand back its table entry.
        # (Deduplicates the four identical announce blocks of the original;
        # the inconsistent "est:" / "est :" print spacing is normalized.)
        tts.say('La corde détectée est {}'.format(entry[2]), lang='fr-FR')
        print("la corde choisie est: ", entry[0])
        return entry

    # Walk the descending table until the played frequency is no longer below
    # the table frequency.
    k = 0
    while k < 6 and frequence < Liste_frequences[k][1]:
        k += 1
    if k == 0:
        # Played frequency is above every table entry: clamp to the first.
        return _annonce(Liste_frequences[0])
    if k == 6:
        # Played frequency is below every table entry: clamp to the last.
        return _annonce(Liste_frequences[5])
    # Otherwise pick whichever neighbour is closer.
    ecart_prec = Liste_frequences[k - 1][1] - frequence
    ecart_suiv = frequence - Liste_frequences[k][1]
    if ecart_prec < ecart_suiv:
        return _annonce(Liste_frequences[k - 1])
    return _annonce(Liste_frequences[k])
def _start_tracker(self):
    """Start every tracker service on its own background thread."""
    logging.info("Tracker started.")
    self._board.led.state = Led.ON
    self._tracker_active = True
    if not self._debug:
        googlevoice.say('Starting Barktracker.')
        time.sleep(2)
    # Bug fix: the lock was acquired/released manually, so an exception raised
    # by create_services() would leave it held forever; the context manager
    # guarantees release.
    with self._lock:
        self._services = create_services()
        for service in self._services:
            try:
                bg_thread = Thread(target=service.start)
                bg_thread.start()
            except Exception as e:
                logging.error("Could not start service of class {}. Error: {}".format(
                    service.__class__.__name__, e))
def fallback(self, path, args, types, src):
    """Handle an OSC message from the Muse headband and pulse the LED by focus.

    Bug fixes vs. the original (whose own comment said the timeout handling was
    "not working"): `deadtime` was printed without `self.` (NameError), and the
    `while` loop spun straight to the 1000 limit on the first non-beta message;
    it now counts one miss per message.
    """
    # Timeout handling: count messages that carry no beta-band data and shut
    # down after 1000 of them.
    if 'beta_absolute' not in path:
        self.deadtime += 1
        print("deadtime ", self.deadtime)
        if self.deadtime >= 1000:
            tts.say(
                "I am having trouble connecting to your Muse headband, shutting down, please try again."
            )
            sys.exit()
    # Greet the user once, on the first message after connecting.
    if self.haslooped == False:
        tts.say("Great, I have succesfully connected to your Muse.")
        tts.say(
            "Let's improve your concentration, please get ready with your muse headset. Now concentrate and try to slow the light pulse by calming your mind"
        )
        self.haslooped = True
    print(
        "Unknown message \n\t Source: '%s' \n\t Address: '%s' \n\t Types: '%s ' \n\t Payload: '%s'"
        % (src.url, path, types, args))
    # Four electrode readings, left to right.
    l_ear, l_forehead, r_forehead, r_ear = args
    l_ear_f = float(l_ear)
    l_forehead_f = float(l_forehead)
    r_forehead_f = float(r_forehead)
    r_ear_f = float(r_ear)
    # Only the beta-band subset of the EEG stream drives the LED feedback.
    if 'beta_absolute' in path:
        if self.count >= 300:
            # Session ends after 300 samples: report the best level reached.
            tts.say("Thank you ! Your maximum concentration level was " +
                    str(int(self.avgmax * 100)))
            return
        print(path)
        # Average the four electrodes, clamped below at zero.
        avgf = (l_ear_f + l_forehead_f + r_forehead_f + r_ear_f) / 4
        print("average eeg value (raw): ", avgf)
        avgp = max(0, avgf)
        print("absolute average eeg value", avgp)
        if avgp > self.avgmax:
            self.avgmax = avgp
        # Pulse period grows with the squared signal: a calmer mind slows
        # the pulse.
        self.board.led.state = Led.ON
        time.sleep(avgp * avgp)
        self.board.led.state = Led.OFF
        time.sleep(avgp * avgp)
        print(self.count)
        self.count += 1
def process_event(assistant, led, event):
    """React to one Assistant event: update the LED and run voice commands."""
    logging.info(event)
    etype = event.type
    if etype == EventType.ON_START_FINISHED:
        led.state = Led.BEACON_DARK  # Ready for the hotword.
        #printMatrix('Say "OK, Google" then speak, or press Ctrl+C to quit...')
    elif etype == EventType.ON_CONVERSATION_TURN_STARTED:
        led.state = Led.ON  # Listening.
    elif etype == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args:
        print('You said:', event.args['text'])
        text = event.args['text'].lower()
        if text == 'power off':
            assistant.stop_conversation()
            power_off_pi()
        elif text == 'reboot':
            assistant.stop_conversation()
            reboot_pi()
        elif text == 'ip address':
            assistant.stop_conversation()
            say_ip()
        elif text == 'led on':
            assistant.stop_conversation()
            led_on()
            pixels.show()
        elif text == 'led off':
            assistant.stop_conversation()
            led_off()
        elif text == 'goodbye':
            # Answer with a farewell on both speaker and matrix display.
            assistant.stop_conversation()
            msg = 'See you later allegator!'
            tts.say(msg)
            printMatrix(msg)
        elif text == 'see you later allegator!':
            assistant.stop_conversation()
    elif etype == EventType.ON_END_OF_UTTERANCE:
        led.state = Led.PULSE_QUICK  # Thinking.
    elif etype in (EventType.ON_CONVERSATION_TURN_FINISHED,
                   EventType.ON_CONVERSATION_TURN_TIMEOUT,
                   EventType.ON_NO_RESPONSE):
        led.state = Led.BEACON_DARK  # Ready.
    elif etype == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']:
        sys.exit(1)
def process_event(assistant, event):
    """Handle one Assistant event: LED feedback plus media voice commands.

    NOTE(review): unlike the other process_event variants in this file, `led`
    is not a parameter here — it must be a module-level object; confirm it is
    defined at import time.
    """
    if event.type == EventType.ON_START_FINISHED:
        led.state = Led.BEACON_DARK  # Ready.
        print('Say "OK, Google" then speak, or press Ctrl+C to quit...')
    elif event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        led.state = Led.ON  # Listening.
    elif event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args:
        print('You said:', event.args['text'].lower())
        text = event.args['text'].lower()
        # NOTE(review): the four startswith checks below are independent
        # `if` statements, so the final `elif 'ip address'` chains only to
        # the 'chinese input' check — presumably intentional; verify.
        if text.startswith('youtube '):
            assistant.stop_conversation()
            play_music(text[8:])  # drop the 'youtube ' prefix
        if text.startswith('vlc '):
            assistant.stop_conversation()
            player_action(text[4:])  # drop the 'vlc ' prefix
        if text.startswith('change volume to '):
            assistant.stop_conversation()
            change_volume(text.split(' ')[-1])  # last word is the target level
        if text.startswith('chinese input'):
            assistant.stop_conversation()
            tts.say('you may speak chinese now.')
            chinese_input()
        elif text == 'ip address':
            assistant.stop_conversation()
            say_ip()
    elif event.type == EventType.ON_END_OF_UTTERANCE:
        led.state = Led.PULSE_QUICK  # Thinking.
    elif (event.type == EventType.ON_CONVERSATION_TURN_FINISHED or
          event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT or
          event.type == EventType.ON_NO_RESPONSE):
        led.state = Led.BEACON_DARK  # Ready.
    elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args[
            'is_fatal']:
        sys.exit(1)
def main():
    """Run the button-triggered Assistant conversation loop."""
    logging.basicConfig(level=logging.DEBUG)
    # Exit cleanly when the service manager sends SIGTERM.
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    parser.add_argument('--volume', type=volume, default=100)
    args = parser.parse_args()

    with Board() as board:
        assistant = AssistantServiceClientWithLed(
            board=board,
            volume_percentage=args.volume,
            language_code=args.language)
        tts.say('hola mundo')  # audio smoke test on startup
        while True:
            logging.info('Press button to start conversation...')
            board.button.wait_for_press()
            logging.info('Conversation started!')
            assistant.conversation()
def open_connected_ev3():
    """Open the LEGO EV3 over the (already paired) Bluetooth serial interface.

    Returns:
        (EV3, port) on success, or (None, None) when no /dev/rfcomm port
        responds.
    """
    # openEv3() returns the port id if successful and False otherwise (per the
    # original author's comment).
    EV3, ev3PortOpen = ev3_rpi_ctrl_pkg.openEv3()
    # Bug fix: the failure value is False, not None, so the old
    # `if ev3PortOpen is not None` test treated a failed open as a success.
    if ev3PortOpen:
        print('\n-> Opened EV3 Brick on {}'.format(ev3PortOpen))
        print('*** EV3 type', type(EV3))
        print('\n-> EV3 Settings: name', EV3.name, EV3.get_settings())
        print('*** Returning from open_connected_ev3()')
        return EV3, ev3PortOpen
    # No open port was found: announce the failure audibly as well.
    print('\n** EV3 does not appear to be open on any /dev/rfcomm port\n')
    tts.say('EV3 does not appear to be open on any /dev/rfcomm port')
    print('*** Returning from open_connected_ev3()')
    return None, None
def process_event(assistant, led, event):
    """LED feedback plus Arduino light-control voice commands."""
    logging.info(event)
    etype = event.type
    if etype == EventType.ON_START_FINISHED:
        led.state = Led.BEACON_DARK  # Ready.
        print('Say "OK, Google" then speak, or press Ctrl+C to quit...')
    elif etype == EventType.ON_CONVERSATION_TURN_STARTED:
        led.state = Led.ON  # Listening.
    elif etype == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args:
        print('You said:', event.args['text'])
        text = event.args['text'].lower()
        if text == 'power off':
            assistant.stop_conversation()
            power_off_pi()
        elif text == 'reboot':
            assistant.stop_conversation()
            reboot_pi()
        elif text == 'ip address':
            assistant.stop_conversation()
            say_ip()
        elif text == 'turn on the light':
            # Byte 'o' tells the Arduino sketch to switch the light on.
            arduino.write(b'o')
            assistant.stop_conversation()
            tts.say('Yes I will turn on the light')
        elif text == 'turn off the light':
            # Byte 'l' tells the Arduino sketch to switch the light off.
            arduino.write(b'l')
            assistant.stop_conversation()
            tts.say('Yes I will turn off the light')
    elif etype == EventType.ON_END_OF_UTTERANCE:
        led.state = Led.PULSE_QUICK  # Thinking.
    elif etype in (EventType.ON_CONVERSATION_TURN_FINISHED,
                   EventType.ON_CONVERSATION_TURN_TIMEOUT,
                   EventType.ON_NO_RESPONSE):
        led.state = Led.BEACON_DARK  # Ready.
    elif etype == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']:
        sys.exit(1)
def process_event(assistant, led, event):
    """Math-quiz alarm clock: answer two questions correctly to stop the timer."""
    logging.info(event)
    etype = event.type
    if etype == EventType.ON_START_FINISHED:
        led.state = Led.BEACON_DARK  # Ready.
        print('Say "OK, Google" then speak, or press Ctrl+C to quit...')
    elif etype == EventType.ON_CONVERSATION_TURN_STARTED:
        led.state = Led.ON  # Listening.
    elif etype == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args:
        print("You said:", event.args["text"])
        text = event.args["text"].lower()
        if text == "start the clock":
            assistant.stop_conversation()
            timer()
            question1()
        elif text == "two":
            # Correct answer to question 1 unlocks question 2.
            tts.say("Good job! Answer one more question to stop the timer.")
            question2()
        elif text == "12":
            # Correct answer to question 2 stops the timer.
            tts.say("The timer has been turned off!")
        else:
            tts.say("Wrong answer, try again")
    elif etype == EventType.ON_END_OF_UTTERANCE:
        led.state = Led.PULSE_QUICK  # Thinking.
    elif etype in (EventType.ON_CONVERSATION_TURN_FINISHED,
                   EventType.ON_CONVERSATION_TURN_TIMEOUT,
                   EventType.ON_NO_RESPONSE):
        led.state = Led.BEACON_DARK  # Ready.
    elif etype == EventType.ON_ASSISTANT_ERROR and event.args and event.args["is_fatal"]:
        sys.exit(1)
def reponse_bouton(est_juste, ecart):
    """Report tuning state via the button LED colour and blink frequency."""
    with Leds() as leds:
        if est_juste:
            # Solid green for 3 seconds: the target frequency was reached.
            leds.update(Leds.rgb_on(Color.GREEN))
            time.sleep(3)
            print('Corde accordée')
            tts.say('Corde accordée', lang='fr-FR')
            return
        # Blink rate grows with the distance from the target pitch.
        period = 10 * abs(ecart)
        leds.pattern = Pattern.blink(period)
        print("TOURNER LA CHEVILLE")
        if ecart > 0:
            # Blue blinking for 5 seconds: tighten the string.
            tts.say('Tendre la corde', lang='fr-FR')
            leds.update(Leds.rgb_pattern(Color.BLUE))
        else:
            # Red blinking for 5 seconds: loosen the string.
            tts.say('Détendre la corde', lang='fr-FR')
            leds.update(Leds.rgb_pattern(Color.RED))
        time.sleep(5)
def _stop_tracker(self):
    """Stop all tracker services and read out a summary of the session."""
    logging.info("Tracker stopped.")
    self._board.led.state = Led.OFF
    self._tracker_active = False
    summaries = []
    # Bug fix: the lock was released manually, so any exception between
    # acquire() and release() (e.g. iterating when _services is None) would
    # leave it held forever; the context manager guarantees release.
    with self._lock:
        for service in self._services:
            try:
                service.stop()
                summaries.append(service.generate_summary())
            except Exception as e:
                logging.error("Could not stop service of class {}. Error: {}".format(
                    service.__class__.__name__, e))
        self._services = None
    if not self._debug:
        googlevoice.say(
            "Good {}. Welcome back. Here's your summary: ".format(Daytime.part_of_day()))
        # Skip empty/None summaries.
        for summary in filter(None, summaries):
            logging.info(summary)
            googlevoice.say(summary)
def detect_wakeword(self):
    """Block until a wake word is heard or 30 seconds elapse."""
    self.greet_start_time = time.time()
    user_text = ""
    while True:
        self.greet_end_time = time.time()
        # Stop listening after 30 seconds without a wake word.
        if self.greet_end_time - self.greet_start_time >= 30:
            break
        try:
            user_text = self.assistant.conversation()
        except KeyboardInterrupt:
            lighting_controller.led_controller.switch_off()
            break
        except Exception:
            print("Internet out!!")
            lighting_controller.led_controller.blink()
        print('user_text is :', user_text)
        if user_text in wake_words:
            # Acknowledge the wake word with a ding, then show "listening".
            aplay_mutex.acquire()
            try:
                logging.info("Locking in acknowledge fn")
                play_wav("ding.wav")
            finally:
                aplay_mutex.release()
                logging.info("Releasing in acknowledge fn")
            user_text = ""
            lighting_controller.led_controller.listening_lighting()
            break
        elif user_text != "":
            # Heard something that was not a wake word: apologise and
            # restart the 30-second window.
            aplay_mutex.acquire()
            try:
                logging.info("Try again!")
                tts.say('Sorry, could not pick that up')
                self.greet_start_time = time.time()
            finally:
                aplay_mutex.release()
def accord_de_la_guitare():
    """Tune every string of the guitar, with French voice prompts."""
    with Board() as board:
        # "Start tuning the guitar."
        tts.say("commencer à accorder la guitare", lang='fr-FR')
        print("commencer à accorder la guitare")
        # Tune the first five strings, announcing the move to the next one.
        for _ in range(5):
            accord_de_la_corde()
            tts.say('Accorder la corde suivante', lang='fr-FR')
            print('Accorder la corde suivante')
        # Sixth and last string, then announce the guitar is tuned.
        accord_de_la_corde()
        tts.say("la guitare est accordé", lang='fr-FR')
        print("la guitare est accordé")
def player_action(cmd):
    """Adjust the VLC volume up or down by 10, announcing the result."""
    global vlc_volume
    wants_louder = 'up' in cmd or 'louder' in cmd
    wants_softer = 'down' in cmd or 'softer' in cmd
    if wants_louder:
        if vlc_volume >= 100:
            tts.say('Volume already max!')
        else:
            vlc_volume += 10
    elif wants_softer:
        if vlc_volume <= 0:
            tts.say('Volume already off!')
        else:
            vlc_volume -= 10
    # Always read back the resulting level and apply it to the player.
    tts.say('Volume is ' + str(vlc_volume))
    vlc_player.audio_set_volume(vlc_volume)
def main():
    """Spanish voice-command loop controlling the board LED."""
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    hints = get_hints(args.language)
    client = CloudSpeechClient()

    with Board() as board:
        while True:
            if hints:
                logging.info('Say something, e.g. %s.' % ', '.join(hints))
            else:
                logging.info('Say something.')
            text = client.recognize(language_code=args.language,
                                    hint_phrases=hints)
            if text is None:
                logging.info('You said nothing.')
                continue
            logging.info('You said: "%s"' % text)
            text = text.lower()
            # Commands are in Spanish; spoken confirmations are English.
            if 'enciende la luz' in text:
                board.led.state = Led.ON
                tts.say('Light powered on')
            elif 'apaga la luz' in text:
                board.led.state = Led.OFF
                tts.say('Light powered off')
            elif 'parpadea' in text:
                board.led.state = Led.BLINK
                tts.say('Light is blinking')
            elif 'hasta luego' in text:
                tts.say('Shutting down now')
                break
def _work(self):
    """Record one work session, score its loudness, and read the score back."""
    tts.say("Time to work! The LED lights will turn off as time elapses.")
    self._board.led.state = Led.PULSE_SLOW
    record_file(
        AudioFormat.CD,
        filename=self._file,
        wait=self._wait,
        filetype="wav",
    )
    self._done.clear()
    # The recording's LRA (loudness range) is the session's noise score.
    loudness = pyloudness.get_loudness(self._file)
    session_score = loudness["Loudness Range"]["LRA"]
    tts.say("Session score: {:.2}".format(session_score))
    if session_score > 5:
        tts.say("Try to be a little more quiet next round")
    self._score += session_score
def _work(self):
    """Record one work session and accumulate its loudness score."""
    tts.say("Time to work!")
    self._board.led.state = Led.PULSE_SLOW
    record_file(
        AudioFormat.CD,
        filename=self._file,
        wait=self._wait,
        filetype="wav",
    )
    tts.say("Session ended")
    self._done.clear()
    # The recording's LRA (loudness range) is the session's noise score.
    loudness = pyloudness.get_loudness(self._file)
    session_score = loudness["Loudness Range"]["LRA"]
    if session_score > 40:
        tts.say("Try to be a little more quiet next round")
    self._score += session_score