def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--credentials', type=existing_file,
                        metavar='OAUTH2_CREDENTIALS_FILE',
                        default=os.path.join(
                            os.path.expanduser('/home/pi/.config'),
                            'google-oauthlib-tool',
                            'credentials.json'
                        ),
                        help='Path to store and read OAuth2 credentials')
    args = parser.parse_args()
    with open(args.credentials, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))

    with Assistant(credentials) as assistant:
        subprocess.Popen(
            ["aplay", "/home/pi/GassistPi/sample-audio-files/Startup.wav"],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        for event in assistant.start():
            process_event(event)
            usrcmd = event.args
            if 'trigger'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                Action(str(usrcmd).lower())
            if 'play'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                YouTube(str(usrcmd).lower())
            if 'stop'.lower() in str(usrcmd).lower():
                stop()
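# Note: the --credentials argument above relies on an existing_file argparse type. If your
# copy of the script does not already define that helper, a minimal sketch (hypothetical,
# not taken verbatim from the project) could look like this:
def existing_file(filename):
    """argparse type that only accepts paths pointing at an existing file."""
    if not os.path.isfile(filename):
        raise argparse.ArgumentTypeError('{} does not exist'.format(filename))
    return filename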
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--credentials', type=existing_file,
                        metavar='OAUTH2_CREDENTIALS_FILE',
                        default=os.path.join(
                            os.path.expanduser('~/.config'),
                            'google-oauthlib-tool',
                            'credentials.json'
                        ),
                        help='Path to store and read OAuth2 credentials')
    parser.add_argument('--device_model_id', type=str,
                        metavar='DEVICE_MODEL_ID', required=True,
                        help='The device model ID registered with Google.')
    parser.add_argument('--project_id', type=str,
                        metavar='PROJECT_ID', required=False,
                        help='The project ID used to register device '
                             'instances.')
    args = parser.parse_args()
    with open(args.credentials, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))

    with Assistant(credentials, args.device_model_id) as assistant:
        subprocess.Popen(
            ["aplay", "/home/pi/GassistPi/sample-audio-files/Startup.wav"],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        events = assistant.start()
        print('device_model_id:', args.device_model_id + '\n' +
              'device_id:', assistant.device_id + '\n')
        if args.project_id:
            register_device(args.project_id, credentials,
                            args.device_model_id, assistant.device_id)
        for event in events:
            process_event(event, assistant.device_id)
            usrcmd = event.args
            if 'trigger'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                Action(str(usrcmd).lower())
            if 'stream'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                YouTube(str(usrcmd).lower())
            if 'stop'.lower() in str(usrcmd).lower():
                stop()
            if 'tune into'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                radio(str(usrcmd).lower())
            if 'wireless'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                ESP(str(usrcmd).lower())
            if 'parcel'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                track()
            if ('news'.lower() in str(usrcmd).lower()
                    or 'feed'.lower() in str(usrcmd).lower()
                    or 'quote'.lower() in str(usrcmd).lower()):
                assistant.stop_conversation()
                feed(str(usrcmd).lower())
            if 'on kodi'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                kodiactions(str(usrcmd).lower())
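# Note: register_device() above is assumed to match the helper from the Google Assistant
# SDK hotword sample. If it is missing from your copy, the sketch below shows the general
# idea; the endpoint URL, payload fields, and use of AuthorizedSession are assumptions
# based on that sample, so check them against the SDK documentation before relying on it.
# (Assumes `import json` and `import google.auth.transport.requests` at the top of the file.)
def register_device(project_id, credentials, device_model_id, device_id):
    """Register the device instance with the project if it is not known yet."""
    base_url = '/'.join(['https://embeddedassistant.googleapis.com/v1alpha2',
                         'projects', project_id, 'devices'])
    device_url = '/'.join([base_url, device_id])
    session = google.auth.transport.requests.AuthorizedSession(credentials)
    r = session.get(device_url)
    if r.status_code == 404:
        print('Registering device instance...')
        r = session.post(base_url, data=json.dumps({
            'id': device_id,
            'model_id': device_model_id,
            'client_type': 'SDK_LIBRARY'
        }))
        if r.status_code != 200:
            raise Exception('failed to register device: ' + r.text)
        print('Device registered.')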
def assist(self):
    # Configure audio source and sink.
    self.audio_device = None
    self.audio_source = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))
    self.audio_sink = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    self.conversation_stream = audio_helpers.ConversationStream(
        source=self.audio_source,
        sink=self.audio_sink,
        iter_size=self.audio_iter_size,
        sample_width=self.audio_sample_width)

    restart = False
    continue_conversation = True
    try:
        while continue_conversation:
            continue_conversation = False
            subprocess.Popen(
                ["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"],
                stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            self.conversation_stream.start_recording()
            GPIO.output(5, GPIO.HIGH)
            led.ChangeDutyCycle(100)
            self.logger.info('Recording audio request.')

            def iter_converse_requests():
                for c in self.gen_converse_requests():
                    assistant_helpers.log_converse_request_without_audio(c)
                    yield c
                self.conversation_stream.start_playback()

            # This generator yields ConverseResponse proto messages
            # received from the gRPC Google Assistant API.
            for resp in self.assistant.Converse(iter_converse_requests(),
                                                self.grpc_deadline):
                assistant_helpers.log_converse_response_without_audio(resp)
                if resp.error.code != code_pb2.OK:
                    self.logger.error('server error: %s', resp.error.message)
                    break
                if resp.event_type == END_OF_UTTERANCE:
                    self.logger.info('End of audio request detected')
                    GPIO.output(5, GPIO.LOW)
                    led.ChangeDutyCycle(0)
                    self.conversation_stream.stop_recording()
                if resp.result.spoken_request_text:
                    usrcmd = resp.result.spoken_request_text
                    if 'trigger' in str(usrcmd).lower():
                        Action(str(usrcmd).lower())
                        return continue_conversation
                    if 'play'.lower() in str(usrcmd).lower():
                        YouTube(str(usrcmd).lower())
                        return continue_conversation
                    if 'stop'.lower() in str(usrcmd).lower():
                        stop()
                        return continue_conversation
                    if 'tune into'.lower() in str(usrcmd).lower():
                        radio(str(usrcmd).lower())
                        return continue_conversation
                    if 'wireless'.lower() in str(usrcmd).lower():
                        ESP(str(usrcmd).lower())
                        return continue_conversation
                    else:
                        continue
                    self.logger.info('Transcript of user request: "%s".',
                                     resp.result.spoken_request_text)
                    GPIO.output(5, GPIO.LOW)
                    GPIO.output(6, GPIO.HIGH)
                    led.ChangeDutyCycle(50)
                    self.logger.info('Playing assistant response.')
                if len(resp.audio_out.audio_data) > 0:
                    self.conversation_stream.write(resp.audio_out.audio_data)
                if resp.result.spoken_response_text:
                    self.logger.info(
                        'Transcript of TTS response '
                        '(only populated from IFTTT): "%s".',
                        resp.result.spoken_response_text)
                if resp.result.conversation_state:
                    self.conversation_state_bytes = resp.result.conversation_state
                if resp.result.volume_percentage != 0:
                    volume_percentage = resp.result.volume_percentage
                    self.logger.info('Volume should be set to %s%%',
                                     volume_percentage)
                if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                    continue_conversation = True
                    GPIO.output(6, GPIO.LOW)
                    GPIO.output(5, GPIO.HIGH)
                    led.ChangeDutyCycle(100)
                    self.logger.info('Expecting follow-on query from user.')

            self.logger.info('Finished playing assistant response.')
            GPIO.output(6, GPIO.LOW)
            GPIO.output(5, GPIO.LOW)
            led.ChangeDutyCycle(0)
            self.conversation_stream.stop_playback()
    except Exception as e:
        self._create_assistant()
        self.logger.exception('Skipping because of connection reset')
        restart = True
    try:
        self.conversation_stream.close()
        if restart:
            self.assist()
    except Exception:
        self.logger.error('Failed to close conversation_stream.')
def assist(self): """Send a voice request to the Assistant and playback the response. Returns: True if conversation should continue. """ continue_conversation = False device_actions_futures = [] subprocess.Popen( ["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.conversation_stream.start_recording() #Uncomment the following after starting the Kodi #status=mutevolstatus() #vollevel=status[1] #with open('/home/pi/.volume.json', 'w') as f: #json.dump(vollevel, f) #kodi.Application.SetVolume({"volume": 0}) GPIO.output(5, GPIO.HIGH) led.ChangeDutyCycle(100) logging.info('Recording audio request.') def iter_assist_requests(): for c in self.gen_assist_requests(): assistant_helpers.log_assist_request_without_audio(c) yield c self.conversation_stream.start_playback() # This generator yields AssistResponse proto messages # received from the gRPC Google Assistant API. for resp in self.assistant.Assist(iter_assist_requests(), self.deadline): assistant_helpers.log_assist_response_without_audio(resp) if resp.event_type == END_OF_UTTERANCE: logging.info('End of audio request detected') GPIO.output(5, GPIO.LOW) led.ChangeDutyCycle(0) self.conversation_stream.stop_recording() if resp.speech_results: usrcmd = resp.speech_results if 'trigger' in str(usrcmd).lower(): Action(str(usrcmd).lower()) return continue_conversation if 'stream'.lower() in str(usrcmd).lower(): YouTube(str(usrcmd).lower()) return continue_conversation if 'stop'.lower() in str(usrcmd).lower(): stop() return continue_conversation if 'tune into'.lower() in str(usrcmd).lower(): radio(str(usrcmd).lower()) return continue_conversation if 'wireless'.lower() in str(usrcmd).lower(): ESP(str(usrcmd).lower()) return continue_conversation if 'parcel'.lower() in str(usrcmd).lower(): track() return continue_conversation if 'news'.lower() in str(usrcmd).lower() or 'feed'.lower( ) in str(usrcmd).lower() or 'quote'.lower() in str( usrcmd).lower(): feed(str(usrcmd).lower()) return continue_conversation if 'on kodi'.lower() in str(usrcmd).lower(): kodiactions(str(usrcmd).lower()) return continue_conversation else: continue if resp.speech_results: logging.info( 'Transcript of user request: "%s".', ' '.join(r.transcript for r in resp.speech_results)) GPIO.output(5, GPIO.LOW) GPIO.output(6, GPIO.HIGH) led.ChangeDutyCycle(50) logging.info('Playing assistant response.') if len(resp.audio_out.audio_data) > 0: self.conversation_stream.write(resp.audio_out.audio_data) if resp.dialog_state_out.conversation_state: conversation_state = resp.dialog_state_out.conversation_state logging.debug('Updating conversation state.') self.conversation_state = conversation_state if resp.dialog_state_out.volume_percentage != 0: volume_percentage = resp.dialog_state_out.volume_percentage logging.info('Setting volume to %s%%', volume_percentage) self.conversation_stream.volume_percentage = volume_percentage if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON: continue_conversation = True GPIO.output(6, GPIO.LOW) GPIO.output(5, GPIO.HIGH) led.ChangeDutyCycle(100) logging.info('Expecting follow-on query from user.') elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE: continue_conversation = False if resp.device_action.device_request_json: device_request = json.loads( resp.device_action.device_request_json) fs = self.device_handler(device_request) if fs: device_actions_futures.extend(fs) if len(device_actions_futures): logging.info('Waiting for device executions to complete.') 
concurrent.futures.wait(device_actions_futures) logging.info('Finished playing assistant response.') GPIO.output(6, GPIO.LOW) GPIO.output(5, GPIO.LOW) led.ChangeDutyCycle(0) #Uncomment the following, after starting Kodi #with open('/home/pi/.volume.json', 'r') as f: #vollevel = json.load(f) #kodi.Application.SetVolume({"volume": vollevel}) self.conversation_stream.stop_playback() return continue_conversation
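# Note: every branch above repeats the same "if '<keyword>' in str(usrcmd).lower()" check
# before calling a custom action. Purely as an illustrative, hypothetical refactor (not part
# of the project code), the same dispatch could be written as a keyword-to-handler table,
# which keeps assist() short and collects new voice commands in one place:
CUSTOM_ACTIONS = [
    (('trigger',), lambda text: Action(text)),
    (('stream',), lambda text: YouTube(text)),
    (('stop',), lambda text: stop()),
    (('tune into',), lambda text: radio(text)),
    (('wireless',), lambda text: ESP(text)),
    (('parcel',), lambda text: track()),
    (('news', 'feed', 'quote'), lambda text: feed(text)),
    (('on kodi',), lambda text: kodiactions(text)),
]

def dispatch_custom_action(usrcmd):
    """Run the first handler whose keyword appears in the transcript.

    Returns True when a custom action was triggered, False otherwise.
    """
    text = str(usrcmd).lower()
    for keywords, handler in CUSTOM_ACTIONS:
        if any(keyword in text for keyword in keywords):
            handler(text)
            return True
    return False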