def main():
    logging.basicConfig(level=logging.DEBUG)
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    parser.add_argument('--volume', type=volume, default=100)
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    with Board() as board:
        assistant = AssistantServiceClientWithLed(board=board,
                                                  volume_percentage=args.volume,
                                                  language_code=args.language)
        while True:
            logging.info('Conversation started!')
            # conversation2() and Listner() are custom methods added to the
            # assistant client in this project.
            if not assistant.conversation2():
                if assistant.Listner():
                    continue
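# The main() snippets in this file pass type=volume and default=locale_language()
# to argparse without defining those helpers. A minimal sketch of what they are
# assumed to look like, modelled on the AIY assistant demo (not the original
# definitions from this project):
import argparse
import locale

def volume(string):
    """argparse type: an integer volume in the 0..100 range."""
    value = int(string)
    if value < 0 or value > 100:
        raise argparse.ArgumentTypeError('Volume must be in [0...100] range.')
    return value

def locale_language():
    """Return the system locale's language code, e.g. 'en_GB'."""
    language, _ = locale.getdefaultlocale()
    return language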
def main():
    logging.basicConfig(level=logging.DEBUG)
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    parser.add_argument('--volume', type=volume, default=100)
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    client = CloudSpeechClient()

    with Board() as board:
        assistant = AssistantServiceClientWithLed(board=board,
                                                  volume_percentage=args.volume,
                                                  language_code=args.language)

        # Scripted morning dialogue: wait for the user to reply, then speak the
        # next line.
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Good morning Lauren, did you sleep well?')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Oh no.')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Maybe us watching a film so late wasn\'t a good idea. It\'s good to have some downtime from screens before bed.')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Why don\'t we read a book tonight instead?')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Perfect. I can remind you later to call her if you\'d like?')
        text = client.recognize(language_code=args.language, hint_phrases=hints)

        # Free-form loop: handle a few fixed commands, otherwise hand off to the
        # Assistant.
        while True:
            hs = False
            text = client.recognize(language_code=args.language, hint_phrases=hints)
            if text is None:
                # recognize() returns None when nothing was heard.
                continue
            if 'are you here' in text:
                aiy.voice.tts.say('yes I am here')
                hs = True
                continue
            elif 'power off' in text:
                aiy.voice.tts.say('goodbye for now')
                break
            elif not hs:
                logging.info('Conversation normal----!')
                assistant.conversation()
                continue
def main():
    logging.basicConfig(level=logging.DEBUG)
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    parser.add_argument('--volume', type=volume, default=100)
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    client = CloudSpeechClient()

    with Board() as board:
        assistant = AssistantServiceClientWithLed(board=board,
                                                  volume_percentage=args.volume,
                                                  language_code=args.language)

        # Scripted breakfast dialogue: wait for the user to reply, then speak
        # the next line.
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Good morning Lauren, did you sleep well?')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('I\'m glad that you\'re rested. Have you had breakfast yet?')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Your food shop isn\'t being delivered until tomorrow. We could either pop to Tesco or go to that nice deli on North End Road.')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Thought you might fancy their smashed avocado on toast!')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Don\'t forget your loyalty card. Pretty sure you\'re due a free coffee.')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Ha no problem. I\'ll wait in the living room until you\'re ready to go.')
        text = client.recognize(language_code=args.language, hint_phrases=hints)

        # Free-form loop: handle a few fixed commands, otherwise hand off to the
        # Assistant.
        while True:
            hs = False
            text = client.recognize(language_code=args.language, hint_phrases=hints)
            if text is None:
                # recognize() returns None when nothing was heard.
                continue
            if 'are you here' in text:
                aiy.voice.tts.say('yes I am here')
                hs = True
                continue
            elif 'power off' in text:
                aiy.voice.tts.say('goodbye for now')
                break
            elif not hs:
                logging.info('Conversation normal----!')
                assistant.conversation()
                continue
def main():
    logging.basicConfig(level=logging.DEBUG)
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    parser.add_argument('--volume', type=volume, default=100)
    args = parser.parse_args()

    logging.info('Initializing for language %s...', args.language)
    client = CloudSpeechClient()

    with Board() as board:
        assistant = AssistantServiceClientWithLed(board=board,
                                                  volume_percentage=args.volume,
                                                  language_code=args.language)

        # Scripted dialogue about inviting a friend round: wait for the user to
        # reply, then speak the next line.
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Good morning Lauren, did you sleep well?')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('I\'m glad that you\'re rested. Have you had breakfast yet?')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Your food shop isn\'t being delivered until tomorrow. We could either pop to Tesco or go to that nice deli on North End Road.')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Good idea! Why not invite Alisa round for some mezze and wine? You guys haven\'t caught up in a while.')
        text = client.recognize(language_code=args.language, hint_phrases=hints)
        aiy.voice.tts.say('Sure. Do you want to call her now to see if she\'s free?')
        text = client.recognize(language_code=args.language, hint_phrases=hints)

        # Free-form loop: handle a few fixed commands, otherwise hand off to the
        # Assistant.
        while True:
            hs = False
            text = client.recognize(language_code=args.language, hint_phrases=hints)
            if text is None:
                # recognize() returns None when nothing was heard.
                continue
            if 'are you here' in text:
                aiy.voice.tts.say('yes I am here')
                hs = True
                continue
            elif 'power off' in text:
                aiy.voice.tts.say('goodbye for now')
                break
            elif not hs:
                logging.info('Conversation normal----!')
                assistant.conversation()
                continue
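# The scripted-dialogue snippets above pass hint_phrases=hints to
# CloudSpeechClient.recognize() but never define hints. A plausible sketch,
# assuming hints is a module-level tuple used to bias recognition toward the
# phrases the loop checks for; the helper name get_hints and the 'en_GB'
# default are illustrative, not from the original code:
def get_hints(language_code):
    """Return recognition hint phrases for English locales, else None."""
    if language_code.startswith('en_'):
        return ('are you here', 'power off')
    return None

hints = get_hints('en_GB')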
def __init__(self, Board, volume, language):
    """Greet the user only after a 10 min interval, to avoid frequent triggers"""
    self.greet_timer_start = 0
    self.greet_timer_end = 0
    self.greet_timer_threshold = 600
    """For timeout from greet to idle"""
    self.greet_start_time = 0
    self.greet_end_time = 0
    """For timeout from listening to idle"""
    self.listen_start_time = 0
    self.listen_end_time = 0
    """Voice assistant"""
    self.mixer = alsaaudio.Mixer('Speaker')
    self.assistant = AssistantServiceClientWithLed(
        board=Board, volume_percentage=volume, language_code=language)
def main():
    logging.basicConfig(level=logging.DEBUG)
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    parser.add_argument('--volume', type=volume, default=100)
    args = parser.parse_args()

    with Board() as board:
        assistant = AssistantServiceClientWithLed(board=board,
                                                  volume_percentage=args.volume,
                                                  language_code=args.language)
        while True:
            logging.info('Press button to start conversation...')
            board.button.wait_for_press()
            logging.info('Conversation started!')
            assistant.conversation()
def main():
    logging.basicConfig(level=logging.DEBUG)
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    parser.add_argument('--volume', type=volume, default=100)
    args = parser.parse_args()

    with Board() as board:
        assistant = AssistantServiceClientWithLed(board=board,
                                                  volume_percentage=args.volume,
                                                  language_code=args.language)
        while True:
            logging.info('Press button to start conversation...')
            board.button.wait_for_press()
            logging.info('Conversation started!')
            assistant.conversation()
            text = assistant.DialogStateOut.supplemental_display_text()
            aiy.audio.say(text + 'test test')
def main():
    logging.basicConfig(level=logging.DEBUG)
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    parser.add_argument('--volume', type=volume, default=100)
    parser.add_argument('--filename', '-f', default='recording.wav')
    args = parser.parse_args()

    with Board() as board:
        assistant = AssistantServiceClientWithLed(board=board,
                                                  volume_percentage=args.volume,
                                                  language_code=args.language)
        # done = threading.Event()
        while True:
            # Earlier experiment, kept for reference: button-stopped recording
            # and a blinking LED pattern.
            # logging.info('Press button to start conversation...')
            # board.button.wait_for_press()
            # done = threading.Event()
            # board.button.when_pressed = done.set
            # def wait():
            #     start = time.monotonic()
            #     while not done.is_set():
            #         duration = time.monotonic() - start
            #         print('Recording: %.02f seconds [Press button to stop]' % duration)
            #         time.sleep(0.5)
            # record_file(AudioFormat.CD, filename=args.filename, wait=wait, filetype='wav')
            # with Leds() as leds:
            #     leds.pattern = Pattern.blink(500)
            #     leds.update(Leds.rgb_pattern(Color.GREEN))
            #     time.sleep(5)
            logging.info('Conversation started!')
            assistant.conversation2()
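# The commented-out block above sketches a button-stopped recording with an LED
# blink. A cleaned-up version of that pattern, assuming the AIY APIs the
# comments reference (Board, record_file, AudioFormat) are importable as below;
# record_until_button is a hypothetical helper, not part of the original file:
import threading
import time

from aiy.board import Board
from aiy.voice.audio import AudioFormat, record_file

def record_until_button(filename='recording.wav'):
    """Record CD-quality WAV audio to filename until the board button is pressed."""
    with Board() as board:
        done = threading.Event()
        board.button.when_pressed = done.set

        def wait():
            start = time.monotonic()
            while not done.is_set():
                print('Recording: %.02f seconds [Press button to stop]'
                      % (time.monotonic() - start))
                time.sleep(0.5)

        record_file(AudioFormat.CD, filename=filename, wait=wait, filetype='wav')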
def main():
    logging.basicConfig(level=logging.DEBUG)
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))

    parser = argparse.ArgumentParser(description='Assistant service example.')
    parser.add_argument('--language', default=locale_language())
    parser.add_argument('--volume', type=volume, default=100)
    parser.add_argument('--model', default='PCH.pmdl')
    args = parser.parse_args()

    detector = snowboydecoder.HotwordDetector(args.model, sensitivity=0.5)

    with Board() as board:
        assistant = AssistantServiceClientWithLed(board=board,
                                                  volume_percentage=args.volume,
                                                  language_code=args.language)
        while True:
            logging.info('Speak own hotword and speak')
            detector.start()
            logging.info('Conversation started!')
            assistant.conversation()
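# In the snippet above, detector.start() blocks inside Snowboy's listen loop, so
# assistant.conversation() only runs once that loop returns. A sketch of the
# usual detection pattern, assuming the stock snowboydecoder API where start()
# accepts detected_callback/interrupt_check and terminate() releases the mic;
# wait_for_hotword is a hypothetical helper, not part of the original code:
def wait_for_hotword(detector):
    """Block until the hotword fires once, then return."""
    heard = {'flag': False}

    def on_hotword():
        heard['flag'] = True

    detector.start(detected_callback=on_hotword,
                   interrupt_check=lambda: heard['flag'],
                   sleep_time=0.03)

# Usage inside the main loop, replacing the bare detector.start() call:
#     wait_for_hotword(detector)
#     logging.info('Conversation started!')
#     assistant.conversation()
# detector.terminate() should be called once on shutdown to release the mic.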
class VoxxAssistant:
    """VoxxAssistant acts as the voice assistant: it interacts with the user and
    responds to queries if they are present in the database.

    @params: Board, volume, language - args passed from the main application
    """

    def __init__(self, Board, volume, language):
        """Greet the user only after a 10 min interval, to avoid frequent triggers"""
        self.greet_timer_start = 0
        self.greet_timer_end = 0
        self.greet_timer_threshold = 600
        """For timeout from greet to idle"""
        self.greet_start_time = 0
        self.greet_end_time = 0
        """For timeout from listening to idle"""
        self.listen_start_time = 0
        self.listen_end_time = 0
        """Voice assistant"""
        self.mixer = alsaaudio.Mixer('Speaker')
        self.assistant = AssistantServiceClientWithLed(
            board=Board, volume_percentage=volume, language_code=language)

    def greet(self):
        """Voxx greets the user if they approach."""
        self.greet_timer_end = time.time()
        if (self.greet_timer_end - self.greet_timer_start > self.greet_timer_threshold):
            aplay_mutex.acquire()
            try:
                tts.say('Hello there!')
            finally:
                self.greet_timer_start = time.time()
                aplay_mutex.release()

    def detect_wakeword(self):
        """Looks for a list of wake words to enable conversation."""
        self.greet_start_time = time.time()
        user_text = ""
        while True:
            self.greet_end_time = time.time()
            if (self.greet_end_time - self.greet_start_time >= 30):
                break
            try:
                user_text = self.assistant.conversation()
            except KeyboardInterrupt:
                lighting_controller.led_controller.switch_off()
                break
            except Exception:
                print("Internet out!!")
                lighting_controller.led_controller.blink()
            print('user_text is :', user_text)
            if user_text in wake_words:
                aplay_mutex.acquire()
                try:
                    logging.info("Locking in acknowledge fn")
                    play_wav("ding.wav")
                finally:
                    aplay_mutex.release()
                    logging.info("Releasing in acknowledge fn")
                user_text = ""
                lighting_controller.led_controller.listening_lighting()  # Listening lighting start
                break
            elif user_text != "":
                aplay_mutex.acquire()
                try:
                    logging.info("Try again!")
                    tts.say('Sorry, could not pick that up')
                    self.greet_start_time = time.time()
                finally:
                    aplay_mutex.release()

    def start_conversation(self):
        """Starts conversation with Voxx after wake word detection.
        Timeout methodology to be decided!!
        """
        with open('responses.pkl', 'rb') as file:
            database = pickle.load(file)
        while True:
            self.listen_start_time = time.time()
            user_text = ""
            logging.info('Speak hotword to start conversation')
            volumes = self.mixer.getvolume()
            print('Volume = ' + str(volumes[0]))
            self.detect_wakeword()
            self.listen_end_time = time.time()
            if (self.listen_end_time - self.listen_start_time >= 120
                    or self.greet_end_time - self.greet_start_time >= 30):
                break
            logging.info('Ask any question to Voxx! Conversation started!')
            try:
                for i in range(0, 2):
                    user_text = self.assistant.conversation()
                    if user_text != "":
                        break
            except Exception:
                print("Internet out!!")
                lighting_controller.led_controller.blink()

            # Starts lighting effects threads
            if user_text != "":
                breathing_thread = threading.Thread(
                    target=lighting_controller.led_controller.breathing_effect)
                thinking_thread = threading.Thread(
                    target=lighting_controller.led_controller.wave_effect)
                breathing_thread.start()
                thinking_thread.start()
                time.sleep(1)

            if user_text.lower() in database.keys():
                try:
                    aplay_mutex.acquire()
                    tts.say(database[user_text.lower()])
                    lighting_controller.led_controller.greeting_effect(0)
                finally:
                    aplay_mutex.release()
                    logging.info("Releasing in answer")
            elif user_text != "":
                try:
                    aplay_mutex.acquire()
                    tts.say('Sorry, I do not know the answer to that')
                    lighting_controller.led_controller.greeting_effect(0)
                finally:
                    aplay_mutex.release()
                    logging.info("Releasing in answer")
            logging.info(user_text)
            time.sleep(0.01)
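# VoxxAssistant.start_conversation() above loads a question -> answer dict from
# responses.pkl and looks answers up by the lowercased user text. A hypothetical
# helper for producing that file (the example entries are illustrative only):
import pickle

def build_response_database(path='responses.pkl'):
    """Write a small question -> answer dictionary for VoxxAssistant to load."""
    responses = {
        'what is your name': 'I am Voxx.',
        'what can you do': 'I can answer questions from my response database.',
    }
    with open(path, 'wb') as f:
        pickle.dump(responses, f)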