def _init_assistant(self):
    """Initialize the Assistant connection and audio pipeline.

    Opens an authorized gRPC channel from self.credentials, builds one
    full-duplex sound-device stream used as both microphone source and
    speaker sink, wraps it in a ConversationStream, and installs the
    device-action handlers.
    """
    # Local import keeps the heavy google.auth dependency off module load.
    from google.auth.transport.grpc import secure_authorized_channel

    self.interactions = []
    # Create an authorized gRPC channel.
    self.grpc_channel = secure_authorized_channel(
        self.credentials, self.http_request, self.api_endpoint)
    self.logger.info('Connecting to {}'.format(self.api_endpoint))
    # Configure audio source and sink.
    # The original `audio_device or SoundDeviceStream(...)` dance never
    # constructed a second stream (the first assignment made audio_device
    # truthy), so build the single shared device once and use it for both.
    audio_device = audio_helpers.SoundDeviceStream(
        sample_rate=self.audio_sample_rate,
        sample_width=self.audio_sample_width,
        block_size=self.audio_block_size,
        flush_size=self.audio_flush_size)
    audio_source = audio_device
    audio_sink = audio_device
    # Create conversation stream with the given audio source and sink.
    self.conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=self.audio_iter_size,
        sample_width=self.audio_sample_width,
    )
    self._install_device_handlers()
def setUp(self):
    """Build a ConversationStream over in-memory dummy streams for tests."""
    # Source pre-loaded with bytes to read; empty sink captures writes.
    self.source = DummyStream(b'audio data')
    self.sink = DummyStream()
    # Stream under test: 2-byte samples read in 1024-byte chunks.
    self.stream = audio_helpers.ConversationStream(
        source=self.source,
        sink=self.sink,
        iter_size=1024,
        sample_width=2,
    )
    # Full volume so volume scaling does not alter test audio.
    self.stream.volume_percentage = 100
def __assistantAudioSetup(self):
    """Create the shared sound device and wrap it in a ConversationStream."""
    # One full-duplex sound device acts as both microphone source and
    # speaker sink; keep a single object and alias it three ways, exactly
    # as callers of audioDevice/audioSource/audioSink expect.
    device = audio_helpers.SoundDeviceStream(
        sample_rate=self.audio_sample_rate,
        sample_width=self.audio_sample_width,
        block_size=self.audio_block_size,
        flush_size=self.audio_flush_size)
    self.audioDevice = device
    self.audioSource = device
    self.audioSink = device
    self.conversationStream = audio_helpers.ConversationStream(
        source=self.audioSource,
        sink=self.audioSink,
        iter_size=self.audio_iter_size,
        sample_width=self.audio_sample_width)
def assist(self):
    """Run one push-to-talk conversation turn with the Assistant.

    Records a voice request, streams it over the gRPC Converse API,
    dispatches recognized keyword commands (trigger/play/stop/tune
    into/wireless) to local handlers, and plays back the Assistant's
    audio reply while driving status LEDs on GPIO pins 5 and 6.

    Returns:
        The follow-on flag, but only via the early
        ``return continue_conversation`` keyword paths; normal
        completion returns None.
    """
    # Configure audio source and sink.
    self.audio_device = None
    # NOTE(review): the second `or` below never constructs a stream —
    # audio_device is truthy after the first assignment, so source and
    # sink share a single SoundDeviceStream.
    self.audio_source = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))
    self.audio_sink = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    self.conversation_stream = audio_helpers.ConversationStream(
        source=self.audio_source,
        sink=self.audio_sink,
        iter_size=self.audio_iter_size,
        sample_width=self.audio_sample_width)
    restart = False
    continue_conversation = True
    try:
        while continue_conversation:
            continue_conversation = False
            # Audible feedback that recording has started.
            subprocess.Popen(
                ["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            self.conversation_stream.start_recording()
            GPIO.output(5, GPIO.HIGH)  # "listening" LED on
            led.ChangeDutyCycle(100)
            self.logger.info('Recording audio request.')

            def iter_converse_requests():
                for c in self.gen_converse_requests():
                    assistant_helpers.log_converse_request_without_audio(c)
                    yield c
                # All request chunks sent; switch the stream to playback.
                self.conversation_stream.start_playback()

            # This generator yields ConverseResponse proto messages
            # received from the gRPC Google Assistant API.
            for resp in self.assistant.Converse(iter_converse_requests(),
                                                self.grpc_deadline):
                assistant_helpers.log_converse_response_without_audio(resp)
                if resp.error.code != code_pb2.OK:
                    self.logger.error('server error: %s', resp.error.message)
                    break
                if resp.event_type == END_OF_UTTERANCE:
                    self.logger.info('End of audio request detected')
                    GPIO.output(5, GPIO.LOW)
                    led.ChangeDutyCycle(0)
                    self.conversation_stream.stop_recording()
                if resp.result.spoken_request_text:
                    usrcmd = resp.result.spoken_request_text
                    # Local keyword commands bypass the normal Assistant
                    # response and return immediately.
                    if 'trigger' in str(usrcmd).lower():
                        Action(str(usrcmd).lower())
                        return continue_conversation
                    if 'play'.lower() in str(usrcmd).lower():
                        YouTube(str(usrcmd).lower())
                        return continue_conversation
                    if 'stop'.lower() in str(usrcmd).lower():
                        stop()
                        return continue_conversation
                    if 'tune into'.lower() in str(usrcmd).lower():
                        radio(str(usrcmd).lower())
                        return continue_conversation
                    if 'wireless'.lower() in str(usrcmd).lower():
                        ESP(str(usrcmd).lower())
                        return continue_conversation
                    else:
                        continue
                    # NOTE(review): as reconstructed from the collapsed
                    # source, every path above either returns or continues,
                    # making the lines below unreachable — confirm the
                    # intended indentation against the upstream project.
                    self.logger.info('Transcript of user request: "%s".',
                                     resp.result.spoken_request_text)
                    GPIO.output(5, GPIO.LOW)
                    GPIO.output(6, GPIO.HIGH)  # "responding" LED on
                    led.ChangeDutyCycle(50)
                    self.logger.info('Playing assistant response.')
                if len(resp.audio_out.audio_data) > 0:
                    self.conversation_stream.write(
                        resp.audio_out.audio_data)
                if resp.result.spoken_response_text:
                    self.logger.info(
                        'Transcript of TTS response '
                        '(only populated from IFTTT): "%s".',
                        resp.result.spoken_response_text)
                if resp.result.conversation_state:
                    # Carried into the next Converse call for context.
                    self.conversation_state_bytes = resp.result.conversation_state
                if resp.result.volume_percentage != 0:
                    volume_percentage = resp.result.volume_percentage
                    self.logger.info('Volume should be set to %s%%',
                                     volume_percentage)
                if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                    # Assistant expects another query: loop again.
                    continue_conversation = True
                    GPIO.output(6, GPIO.LOW)
                    GPIO.output(5, GPIO.HIGH)
                    led.ChangeDutyCycle(100)
                    self.logger.info(
                        'Expecting follow-on query from user.')
            self.logger.info('Finished playing assistant response.')
            GPIO.output(6, GPIO.LOW)
            GPIO.output(5, GPIO.LOW)
            led.ChangeDutyCycle(0)
            self.conversation_stream.stop_playback()
    except Exception as e:
        # Broad catch: rebuild the assistant and schedule a retry once
        # the stream has been closed below.
        self._create_assistant()
        self.logger.exception('Skipping because of connection reset')
        restart = True
    try:
        self.conversation_stream.close()
        if restart:
            # Retry the whole turn after a connection reset.
            self.assist()
    except Exception:
        self.logger.error('Failed to close conversation_stream.')
def assist(self):
    """Run one conversation turn against the gRPC Converse API.

    Opens a fresh sound-device-backed ConversationStream, records the
    user's request, plays back the Assistant's audio reply, and repeats
    while the Assistant signals a follow-on query. On any error the
    assistant is re-created and assist() retried after the stream is
    closed.
    """
    # Configure audio source and sink.
    self.audio_device = None
    # NOTE(review): the second `or` below never constructs a stream —
    # audio_device is truthy after the first assignment, so source and
    # sink share a single SoundDeviceStream.
    self.audio_source = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size
        )
    )
    self.audio_sink = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size
        )
    )
    # Create conversation stream with the given audio source and sink.
    self.conversation_stream = audio_helpers.ConversationStream(
        source=self.audio_source,
        sink=self.audio_sink,
        iter_size=self.audio_iter_size,
        sample_width=self.audio_sample_width
    )
    restart = False
    continue_dialog = True
    try:
        while continue_dialog:
            continue_dialog = False
            self.conversation_stream.start_recording()
            self.logger.info('Recording audio request.')

            def iter_converse_requests():
                for c in self.gen_converse_requests():
                    assistant_helpers.log_converse_request_without_audio(c)
                    yield c
                # All request chunks sent; switch the stream to playback.
                self.conversation_stream.start_playback()

            # This generator yields ConverseResponse proto messages
            # received from the gRPC Google Assistant API.
            for resp in self.assistant.Converse(iter_converse_requests(),
                                                self.grpc_deadline):
                assistant_helpers.log_converse_response_without_audio(resp)
                if resp.error.code != code_pb2.OK:
                    self.logger.error('server error: %s', resp.error.message)
                    break
                if resp.event_type == END_OF_UTTERANCE:
                    self.logger.info('End of audio request detected')
                    self.conversation_stream.stop_recording()
                if resp.result.spoken_request_text:
                    self.logger.info('Transcript of user request: "%s".',
                                     resp.result.spoken_request_text)
                    self.logger.info('Playing assistant response.')
                if len(resp.audio_out.audio_data) > 0:
                    self.conversation_stream.write(resp.audio_out.audio_data)
                if resp.result.spoken_response_text:
                    self.logger.info(
                        'Transcript of TTS response '
                        '(only populated from IFTTT): "%s".',
                        resp.result.spoken_response_text)
                if resp.result.conversation_state:
                    # Carried into the next Converse call for context.
                    self.conversation_state_bytes = resp.result.conversation_state
                if resp.result.volume_percentage != 0:
                    volume_percentage = resp.result.volume_percentage
                    self.logger.info('Volume should be set to %s%%',
                                     volume_percentage)
                if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                    # Assistant expects another query: loop again.
                    continue_dialog = True
                    self.logger.info('Expecting follow-on query from user.')
            self.logger.info('Finished playing assistant response.')
            self.conversation_stream.stop_playback()
    except Exception as e:
        # Broad catch: rebuild the assistant and schedule a retry once
        # the stream has been closed below.
        self._create_assistant()
        self.logger.exception('Skipping because of connection reset')
        restart = True
    try:
        self.conversation_stream.close()
        if restart:
            # Retry the whole turn after a connection reset.
            self.assist()
    except Exception:
        self.logger.error('Failed to close conversation_stream.')
def return_ga():
    """Build and return a fully configured SampleAssistant.

    Loads OAuth 2.0 credentials, opens an authorized gRPC channel,
    configures the audio pipeline, registers the device with the
    Assistant API when no local device config exists, and wires up the
    example device-action handlers. Exits the process on credential or
    registration failure.
    """
    # Hard-coded sample configuration (normally CLI options).
    api_endpoint = 'embeddedassistant.googleapis.com'
    project_id = "assistantdevice-1f1e8"
    device_model_id = "assistantdevice-1f1e8-ga1-v3slut"
    device_id = None
    credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'),
                               'credentials.json')
    device_config = os.path.join(click.get_app_dir('googlesamples-assistant'),
                                 'device_config.json')
    lang = "en-US"
    display = False
    verbose = False
    input_audio_file = None
    output_audio_file = None
    audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
    audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
    audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
    audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
    audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
    grpc_deadline = 60 * 3 + 5
    once = False  # NOTE(review): unused in this function as visible here.
    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            # Rebinds `credentials` from path string to Credentials object.
            credentials = google.oauth2.credentials.Credentials(
                token=None, **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)
    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logger.info('Connecting to %s', api_endpoint)
    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        # Reuses the device opened above when the source is not a file.
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logger.info("Using device model %s and device id %s",
                            device_model_id, device_id)
        except Exception as e:
            # No usable local config: register a new device instance.
            logging.warning('Device config not found: %s' % e)
            logger.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logger.info('Device registered: %s', device_id)
            # Persist the registration so the next run can reuse it.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Example handler: only logs the requested state.
        if on:
            logger.info('Turning device on')
        else:
            logger.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Example handler: logs `number` blinks paced by `speed`.
        logger.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logger.info('Device is blinking.')
            time.sleep(delay)

    logger.info("Start Google Assistant")
    assistant = SampleAssistant(lang, device_model_id, device_id,
                                conversation_stream, display,
                                grpc_channel, grpc_deadline,
                                device_handler)
    return assistant
def assist(self, canvas):
    """Run one conversation turn against the gRPC Assist API.

    Records a voice request, streams it to the Assistant, writes the
    transcript into ``canvas[1]['text']``, plays back the audio reply
    (unless a custom command intercepted the request), dispatches
    device actions, and repeats while the Assistant signals a
    follow-on query. Sets ``self.once = True`` when done.

    Args:
        canvas: UI container; index 1 receives the transcript text and
            it is forwarded to the custom-command handler / scrapper.
    """
    device_actions_futures = []
    # Configure audio source and sink.
    self.audio_device = None
    # The `or` never constructs a second stream — audio_device is truthy
    # after the first assignment, so source and sink share one device.
    self.audio_source = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))
    self.audio_sink = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    self.conversation_stream = audio_helpers.ConversationStream(
        source=self.audio_source,
        sink=self.audio_sink,
        iter_size=self.audio_iter_size,
        sample_width=self.audio_sample_width)
    restart = False
    continue_dialog = True
    try:
        while continue_dialog:
            continue_dialog = False
            self.conversation_stream.start_recording()
            self.logger.info('Recording audio request.')

            def iter_log_assist_requests():
                for c in self.gen_assist_requests():
                    assistant_helpers.log_assist_request_without_audio(c)
                    yield c
                logging.debug('Reached end of AssistRequest iteration.')

            # This generator yields AssistResponse proto messages
            # received from the gRPC Google Assistant API.
            for resp in self.assistant.Assist(iter_log_assist_requests(),
                                              self.grpc_deadline):
                assistant_helpers.log_assist_response_without_audio(resp)
                if resp.event_type == END_OF_UTTERANCE:
                    logging.info('End of audio request detected.')
                    logging.info('Stopping recording.')
                    self.conversation_stream.stop_recording()
                if resp.speech_results:
                    mess = ' '.join(r.transcript
                                    for r in resp.speech_results)
                    logging.info('Transcript of user request: "%s".', mess)
                    canvas[1]['text'] = mess
                    if self.once:
                        self.custom_command = google_control.custom_command_handler(
                            mess, canvas)
                if len(resp.audio_out.audio_data
                       ) > 0 and not self.custom_command:
                    if not self.conversation_stream.playing:
                        self.conversation_stream.stop_recording()
                        self.conversation_stream.start_playback()
                        logging.info('Playing assistant response.')
                    self.conversation_stream.write(
                        resp.audio_out.audio_data)
                if resp.dialog_state_out.conversation_state:
                    conversation_state = resp.dialog_state_out.conversation_state
                    logging.debug('Updating conversation state.')
                    self.conversation_state = conversation_state
                if resp.dialog_state_out.volume_percentage != 0:
                    volume_percentage = resp.dialog_state_out.volume_percentage
                    logging.info('Setting volume to %s%%',
                                 volume_percentage)
                    self.conversation_stream.volume_percentage = volume_percentage
                if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                    # BUG FIX: this previously set `continue_conversation`,
                    # which the `while continue_dialog` loop never reads —
                    # follow-on turns were silently dropped.
                    continue_dialog = True
                    logging.info('Expecting follow-on query from user.')
                elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                    # BUG FIX: same variable mismatch as above.
                    continue_dialog = False
                if resp.device_action.device_request_json:
                    device_request = json.loads(
                        resp.device_action.device_request_json)
                    fs = self.device_handler(device_request)
                    if fs:
                        device_actions_futures.extend(fs)
                if self.display and resp.screen_out.data and not self.custom_command:
                    system_browser = browser_helpers.system_browser
                    system_browser.display(resp.screen_out.data)
                    self.scrapper(canvas)
            self.logger.info('Finished playing assistant response.')
            self.conversation_stream.stop_playback()
    except Exception as e:
        # Broad catch: rebuild the assistant and schedule a retry once
        # the stream has been closed below.
        self._create_assistant()
        self.logger.exception('Skipping because of connection reset')
        restart = True
    try:
        self.conversation_stream.close()
        if restart:
            # BUG FIX: assist() requires the canvas argument; the bare
            # self.assist() call raised TypeError on every restart.
            self.assist(canvas)
    except Exception:
        self.logger.error('Failed to close conversation_stream.')
    self.once = True
def init_assistant(
    verbose: bool = False,
    language_code: str = 'en-US',
    credentials: str = os.path.join(os.path.expanduser('~/.config'),
                                    'google-oauthlib-tool',
                                    'credentials.json'),
    device_id: str = None,
    device_model_id: str = None,
    device_config: str = os.path.join(os.path.expanduser('~/.config'),
                                      'googlesamples-assistant',
                                      'device_config.json'),
    project_id: str = None,
    display: bool = False,
):
    """Create and return a SampleAssistant wired to the Assistant API.

    Loads OAuth 2.0 credentials, opens an authorized gRPC channel,
    builds a default-configuration audio pipeline, registers the device
    when no local config exists, and installs the example device
    handlers. Exits the process on credential or registration failure.
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            # Rebinds `credentials` from path string to Credentials object.
            credentials = google.oauth2.credentials.Credentials(
                token=None, **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)
    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, ASSISTANT_API_ENDPOINT)
    logging.info('Connecting to %s', ASSISTANT_API_ENDPOINT)
    # Configure audio source and sink: one full-duplex sound device
    # serves as both, using the library defaults.
    audio_source = audio_device = audio_sink = (
        audio_helpers.SoundDeviceStream(
            sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
            sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
            block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
            flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
        sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
    )
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            # No usable local config: register a new device instance.
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (ASSISTANT_API_ENDPOINT, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # Persist the registration so the next run can reuse it.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Example handler: only logs the requested state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Example handler: logs `number` blinks paced by `speed`.
        logging.info('Blinking device %s times.'
                     % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    return SampleAssistant(language_code, device_model_id, device_id,
                           conversation_stream, display,
                           grpc_channel, DEFAULT_GRPC_DEADLINE,
                           device_handler)
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Play a startup sound matching the configured voice gender.
    # NOTE(review): `gender`, `ROOT_PATH`, `models`, `custom_wakeword`,
    # `pushbuttontrigger`, `signal_handler` and `interrupt_callback` are
    # not parameters — presumably module-level globals; confirm they are
    # defined before main() runs.
    if gender == 'Male':
        subprocess.Popen(
            ["aplay",
             "{}/sample-audio-files/Startup-Male.wav".format(ROOT_PATH)],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
    else:
        subprocess.Popen(
            ["aplay",
             "{}/sample-audio-files/Startup-Female.wav".format(ROOT_PATH)],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            # Rebinds `credentials` from path string to Credentials object.
            credentials = google.oauth2.credentials.Credentials(
                token=None, **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)
    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)
    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        # Reuses the device opened above when the source is not a file.
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            # No usable local config: register a new device instance.
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices'
                % (api_endpoint, project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # Persist the registration so the next run can reuse it.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Example handler: only logs the requested state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Example handler: logs `number` blinks paced by `speed`.
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "slowly":
            delay = 2
        elif speed == "quickly":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        def detected():
            # Hotword callback: run one turn plus one follow-on turn.
            continue_conversation = assistant.assist()
            if continue_conversation:
                print('Continuing conversation')
                assistant.assist()

        signal.signal(signal.SIGINT, signal_handler)
        sensitivity = [0.5] * len(models)
        callbacks = [detected] * len(models)
        detector = snowboydecoder.HotwordDetector(models,
                                                  sensitivity=sensitivity)

        def start_detector():
            # Blocks until interrupt_callback fires; runs `detected` on
            # each hotword hit.
            detector.start(detected_callback=callbacks,
                           interrupt_check=interrupt_callback,
                           sleep_time=0.03)

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                if custom_wakeword:
                    start_detector()
                else:
                    # Push-button trigger: poll until the button is pressed.
                    button_state = GPIO.input(pushbuttontrigger)
                    if button_state == True:
                        continue
                    else:
                        pass
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
        detector.terminate()
def main(api_endpoint=ASSISTANT_API_ENDPOINT,
         credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                  'credentials.json'),
         project_id=None,
         device_model_id=None,
         device_id=None,
         device_config=os.path.join(
             click.get_app_dir('googlesamples-assistant'),
             'device_config.json'),
         lang='en-US',
         display=False,
         verbose=False,
         input_audio_file=None,
         output_audio_file=None,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         grpc_deadline=DEFAULT_GRPC_DEADLINE,
         once=False,
         *args, **kwargs):
    """Run the Assistant sample gated on the Porcupine hotword.

    Sets up credentials, gRPC channel, audio pipeline and device
    registration, then loops forever reading microphone frames through
    Porcupine and starting an Assistant turn each time the "bumblebee"
    keyword is detected.
    """
    # Setup logging.
    # logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
    logging.basicConfig(level=logging.INFO)
    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            # Rebinds `credentials` from path string to Credentials object.
            credentials = google.oauth2.credentials.Credentials(
                token=None, **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)
    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)
    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        # Reuses the device opened above when the source is not a file.
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            # No usable local config: register a new device instance.
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices'
                               % (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # Persist the registration so the next run can reuse it.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Example handler: only logs the requested state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Example handler: logs `number` blinks paced by `speed`.
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    audio_stream = None
    handle = None
    pa = None
    # Porcupine wake-word engine artifacts (Windows amd64 build).
    library_path = '../../lib/windows/amd64/libpv_porcupine.dll'
    model_file_path = '../../lib/common/porcupine_params.pv'
    keyword_file_paths = ['bumblebee_windows.ppn']
    num_keywords = len(keyword_file_paths)
    sensitivities = [0.7]  # 0.2
    handle = porcupine.Porcupine(library_path, model_file_path,
                                 keyword_file_paths=keyword_file_paths,
                                 sensitivities=sensitivities)
    pa = pyaudio.PyAudio()
    # Raw 16-bit mono microphone stream sized to Porcupine's frame length.
    audio_stream = pa.open(rate=handle.sample_rate,
                           channels=1,
                           format=pyaudio.paInt16,
                           input=True,
                           frames_per_buffer=handle.frame_length)
    conversation_stream.volume_percentage = 100
    print('Listening for keyword bumblebee...')
    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        while True:
            pcm = audio_stream.read(handle.frame_length)
            pcm = struct.unpack_from("h" * handle.frame_length, pcm)
            result = handle.process(pcm)
            if num_keywords > 0 and result:
                # Audible cues bracket each Assistant turn.
                play_audio_file('ding.wav')
                print('[%s] detected keyword!!!!' % str(datetime.now()))
                assistant.assist()
                play_audio_file('dong.wav')
                print('Listening for keyword bumblebee...')
    # NOTE(review): as reconstructed, the cleanup below is unreachable —
    # the `while True` above has no break; this likely belongs in a
    # try/finally. Confirm against the upstream source.
    print('stopping ...')
    if handle is not None:
        handle.delete()
    if audio_stream is not None:
        audio_stream.close()
    if pa is not None:
        pa.terminate()
def main(api_endpoint, credentials, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size,
         audio_flush_size, grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    # ``credentials`` is rebound from the file path to the loaded
    # Credentials object; the token is refreshed immediately so the gRPC
    # channel below starts out authorized.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
        http_request = google.auth.transport.requests.Request()
        credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        # This variant returns instead of exiting the process.
        return

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # ``audio_device`` caches the sound device so that, when neither a
    # WAV input nor a WAV output file is given, source and sink share a
    # single SoundDeviceStream (the ``audio_device or ...`` idiom).
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        # NOTE(review): the file handles opened here are presumably
        # closed by the WaveSource/WaveSink helpers — confirm.
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with SampleAssistant(conversation_stream,
                         grpc_channel, grpc_deadline) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.converse()
            return
        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        continue_conversation = assistant.converse()
def main(language, is_debug, is_answer, device_id=None, device_model_id=None):
    """Build and return a ``VoiceAssistant`` connected to the Google
    Assistant API.

    Loads OAuth credentials and the registered device identity from the
    standard ``google-oauthlib-tool`` / ``googlesamples-assistant``
    config directories, opens an authorized gRPC channel, and wires a
    shared sound-device stream as both audio source and sink.

    Args:
        language: language code passed through to ``VoiceAssistant``.
        is_debug: debug flag passed through to ``VoiceAssistant``.
        is_answer: answer flag passed through to ``VoiceAssistant``.
        device_id: optional device id; read from device_config if None.
        device_model_id: optional model id; read from device_config if None.

    Returns:
        A configured ``VoiceAssistant`` instance.

    Exits the process when credentials or the device config cannot be
    loaded.
    """
    ################################################################################
    #
    # Default configuration
    #
    ################################################################################
    grpc_deadline = 60 * 3 + 5
    credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'),
                               'credentials.json')
    device_config = os.path.join(click.get_app_dir('googlesamples-assistant'),
                                 'device_config.json')
    audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
    audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
    audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
    audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
    audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE

    ################################################################################
    #
    # Load OAuth 2.0 credentials.
    #
    ################################################################################
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
        http_request = google.auth.transport.requests.Request()
        credentials.refresh(http_request)
    except Exception as e:
        print('Error loading CREDENTIALS: %s' % e)
        print('Run google-oauthlib-tool to initialize ',
              'new OAuth 2.0 CREDENTIALS.')
        sys.exit(-1)

    ################################################################################
    #
    # Create an authorized gRPC channel.
    #
    ################################################################################
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, ASSISTANT_API_ENDPOINT)
    # print("connection -> %s" % ASSISTANT_API_ENDPOINT)

    ################################################################################
    #
    # Configure audio source and sink.
    # A single sound device is shared as both source and sink.
    #
    ################################################################################
    audio_device = None
    audio_sink = audio_source = (audio_device or audio_helpers.SoundDeviceStream(
        sample_rate=audio_sample_rate,
        sample_width=audio_sample_width,
        block_size=audio_block_size,
        flush_size=audio_flush_size))

    ################################################################################
    #
    # Create conversation stream with the given audio source and sink.
    #
    ################################################################################
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Fall back to the saved device config when ids are not supplied.
    if device_id is None or device_model_id is None:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                # Bug fix: the original passed the format args to
                # print() as extra positional arguments (logging style),
                # which printed the '%s' placeholders literally.
                print("Using device model %s and device id %s"
                      % (device_model_id, device_id))
        except Exception as e:
            print(e)
            sys.exit(1)

    ################################################################################
    #
    # Device command callbacks.
    #
    ################################################################################
    device_handler = device_helpers.DeviceRequestHandler(device_id)
    print(device_id, device_model_id)

    ################################################################################
    #
    # Start the assistant.
    #
    ################################################################################
    return VoiceAssistant(language, device_model_id, device_id,
                          conversation_stream, IS_DISPLAY, grpc_channel,
                          grpc_deadline, device_handler, is_debug, is_answer)
    def assist(self):
        """Run one voice interaction turn with the Google Assistant.

        Opens a fresh sound-device conversation stream, records a
        request, streams it over the Converse gRPC API, plays back the
        audio response, and intercepts local "trigger" voice commands
        (GPIO device switching and shutdown).

        Returns ``continue_conversation`` early from the trigger branch;
        on the normal path it returns None. On any error the assistant
        is re-created and ``assist()`` is retried once via ``restart``.
        """
        # Configure audio source and sink.
        self.audio_device = None
        self.audio_source = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))
        # ``self.audio_device`` is non-None after the assignment above,
        # so the sink reuses the same sound device.
        self.audio_sink = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))
        # Create conversation stream with the given audio source and sink.
        self.conversation_stream = audio_helpers.ConversationStream(
            source=self.audio_source,
            sink=self.audio_sink,
            iter_size=self.audio_iter_size,
            sample_width=self.audio_sample_width)
        restart = False
        continue_conversation = True
        try:
            while continue_conversation:
                continue_conversation = False
                # Audible feedback: play the "listening" chime.
                subprocess.Popen(
                    ["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"],
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
                self.conversation_stream.start_recording()
                self.logger.info('Recording audio request.')

                def iter_converse_requests():
                    # Yield the request protos, then switch the stream to
                    # playback once the request side is exhausted.
                    for c in self.gen_converse_requests():
                        assistant_helpers.log_converse_request_without_audio(c)
                        yield c
                    self.conversation_stream.start_playback()

                # This generator yields ConverseResponse proto messages
                # received from the gRPC Google Assistant API.
                for resp in self.assistant.Converse(iter_converse_requests(),
                                                    self.grpc_deadline):
                    assistant_helpers.log_converse_response_without_audio(resp)
                    if resp.error.code != code_pb2.OK:
                        self.logger.error('server error: %s',
                                          resp.error.message)
                        break
                    if resp.event_type == END_OF_UTTERANCE:
                        self.logger.info('End of audio request detected')
                        self.conversation_stream.stop_recording()
                    if resp.result.spoken_request_text:
                        usr = resp.result.spoken_request_text
                        if 'trigger' in str(usr):
                            # Local command handling: "shut down" powers
                            # off the Pi; otherwise match device names in
                            # ``var`` and switch the paired GPIO pin.
                            if 'shut down'.lower() in str(usr).lower():
                                subprocess.Popen(
                                    ["aplay",
                                     "/home/pi/GassistPi/sample-audio-files/Pi-Close.wav"],
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
                                time.sleep(10)
                                os.system("sudo shutdown -h now")
                                break
                            else:
                                # ``var``/``gpio`` are parallel module-level
                                # sequences: spoken device name -> pin.
                                for num, name in enumerate(var):
                                    if name.lower() in str(usr).lower():
                                        pinout = gpio[num]
                                        if 'on'.lower() in str(usr).lower():
                                            GPIO.output(pinout, 1)
                                            subprocess.Popen(
                                                ["aplay",
                                                 "/home/pi/GassistPi/sample-audio-files/Device-On.wav"],
                                                stdin=subprocess.PIPE,
                                                stdout=subprocess.PIPE,
                                                stderr=subprocess.PIPE)
                                        elif 'off'.lower() in str(usr).lower():
                                            GPIO.output(pinout, 0)
                                            subprocess.Popen(
                                                ["aplay",
                                                 "/home/pi/GassistPi/sample-audio-files/Device-Off.wav"],
                                                stdin=subprocess.PIPE,
                                                stdout=subprocess.PIPE,
                                                stderr=subprocess.PIPE)
                                return continue_conversation
                        else:
                            continue
                        # NOTE(review): the two log lines below appear
                        # unreachable — the 'trigger' branch above always
                        # returns/breaks and the other branch continues.
                        # Confirm the intended nesting.
                        self.logger.info('Transcript of user request: "%s".',
                                         resp.result.spoken_request_text)
                        self.logger.info('Playing assistant response.')
                    if len(resp.audio_out.audio_data) > 0:
                        self.conversation_stream.write(
                            resp.audio_out.audio_data)
                    if resp.result.spoken_response_text:
                        self.logger.info(
                            'Transcript of TTS response '
                            '(only populated from IFTTT): "%s".',
                            resp.result.spoken_response_text)
                    if resp.result.conversation_state:
                        # Carry dialog state into the next request.
                        self.conversation_state_bytes = resp.result.conversation_state
                    if resp.result.volume_percentage != 0:
                        volume_percentage = resp.result.volume_percentage
                        self.logger.info('Volume should be set to %s%%',
                                         volume_percentage)
                    if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                        # The Assistant expects another query; loop again.
                        continue_conversation = True
                        self.logger.info(
                            'Expecting follow-on query from user.')
                self.logger.info('Finished playing assistant response.')
                self.conversation_stream.stop_playback()
        except Exception as e:
            # Deliberate broad catch: rebuild the gRPC assistant and retry
            # once below rather than crashing on a connection reset.
            self._create_assistant()
            self.logger.exception('Skipping because of connection reset')
            restart = True
        try:
            self.conversation_stream.close()
            if restart:
                self.assist()
        except Exception:
            self.logger.error('Failed to close conversation_stream.')
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config, lang, display, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size,
         audio_flush_size, grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials; ``credentials`` is rebound from the
    # file path to the loaded Credentials object.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
        http_request = google.auth.transport.requests.Request()
        credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink. ``audio_device`` caches the sound
    # device so source and sink share one stream when no files are given.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: load the saved config, or register a new
    # device instance with the Assistant API and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    # Device command callbacks invoked by the Assistant for this device.
    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
            logging.warning('U CAN DO SOMETHING IN THERE FOR ON!!!')
        else:
            logging.info('Turning device off')
            logging.warning('U CAN DO SOMETHING IN THERE FOR OFF!!!')

    @device_handler.command('action.devices.commands.BrightnessAbsolute')
    def brightnessCheck(brightness):
        if brightness > 50:
            logging.info('brightness > 50')
        else:
            logging.info('brightness <= 50')

    @device_handler.command('action.devices.commands.ColorAbsolute')
    def color(color):
        # ``color`` is a dict-like payload; only the name field is used.
        if color.get('name') == "blue":
            logging.info('color is blue')
        else:
            logging.info('color is not blue')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config, lang, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size,
         audio_flush_size, grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials; ``credentials`` is rebound from the
    # file path to the loaded Credentials object.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
        http_request = google.auth.transport.requests.Request()
        credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink; source and sink share a single
    # sound device when no WAV files are supplied.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    # Device command callback invoked by the Assistant for OnOff traits.
    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    # Resolve device identity: load the saved config, or register a new
    # device instance and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url,
                             data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            os.makedirs(os.path.dirname(device_config), exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                # Poll the push button on GPIO pin 22; a high reading
                # (presumably pull-up, i.e. not pressed — confirm wiring)
                # loops back to polling.
                # NOTE(review): this is a busy-wait with no sleep — it
                # will peg a CPU core while idle; also ``== True`` would
                # be more idiomatic as a plain truth test.
                button_state = GPIO.input(22)
                if button_state == True:
                    continue
                else:
                    pass
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def main():
    """Run the hands-free Google Assistant sample configured via INFO_FILE.

    Loads runtime settings from the ``INFO_FILE`` module, authorizes a
    gRPC channel, registers the device instance when no saved config
    exists, then loops: wait for a trigger (GPIO button on pin 22, or a
    keystroke when no GPIO is available) and run one conversation turn.

    Exits the process when credentials cannot be loaded or device
    registration fails.
    """
    # Load the user-editable settings file as a module.
    # NOTE(review): ``imp`` has been deprecated since Python 3.4 in
    # favour of importlib; kept to avoid a behaviour change.
    args = imp.load_source('args', INFO_FILE)
    if not hasattr(args, 'credentials'):
        args.credentials = os.path.join(os.path.expanduser('~/.config'),
                                        'google-oauthlib-tool',
                                        'credentials.json')
    if not hasattr(args, 'device_config'):
        args.device_config = os.path.join(os.path.expanduser('~/.config'),
                                          'googlesamples-assistant',
                                          'device_config.json')

    # Fixed runtime configuration (no CLI flags in this variant).
    verbose = False
    credentials = args.credentials
    project_id = args.project_id
    device_config = args.device_config
    device_id = ''
    device_model_id = args.device_model_id
    api_endpoint = 'embeddedassistant.googleapis.com'
    audio_sample_rate = audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE
    audio_sample_width = audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH
    audio_block_size = audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
    audio_flush_size = audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
    audio_iter_size = audio_helpers.DEFAULT_AUDIO_ITER_SIZE
    grpc_deadline = 60 * 3 + 5
    lang = 'en-US'
    once = False

    # Audible feedback that the sample is starting.
    play_audio_file(resources['startup'])

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials; ``credentials`` is rebound from the
    # file path to the loaded Credentials object.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
        http_request = google.auth.transport.requests.Request()
        credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink: one shared sound device.
    audio_device = None
    audio_source = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size))
    audio_sink = audio_device = (
        audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    # Device command callback invoked by the Assistant for OnOff traits.
    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    # Resolve device identity: load the saved config, or register a new
    # device instance and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # Keep recording voice requests using the microphone and playing
        # back assistant responses using the speaker. When the once flag
        # is set, don't wait for a trigger; otherwise wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                if GPIO is not None:  # fixed: was ``GPIO != None``
                    button_state = GPIO.input(22)
                else:
                    # Use keyboard as a trigger when no GPIO is available.
                    button_state = get_key_stroke()
                # Fixed: was ``if button_state == False: continue`` with a
                # dead ``else: pass`` branch.
                if not button_state:
                    continue
            continue_conversation = assistant.assist()
            # Wait for a user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation
            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config, lang, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size,
         audio_flush_size, grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    This variant integrates with respeakerd over D-Bus: a 'trigger'
    signal from the hotword engine wakes the assistant thread, and LED
    state events are published back on the bus.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials; ``credentials`` is rebound from the
    # file path to the loaded Credentials object.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
        http_request = google.auth.transport.requests.Request()
        credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # D-Bus preparation: the condition variable is notified by the
    # 'trigger' signal handler to wake the assistant thread; ``eventq``
    # carries LED-state events to the D-Bus publisher thread.
    cv_trigger = Condition(Lock())
    eventq = Queue()
    dbus_obj = DBusSignals()
    loop = GLib.MainLoop()
    system_bus = SystemBus()

    def dbus_handler(sender, object, iface, signal, params):
        # logging.info(sender)
        # logging.info(object)
        # logging.info(signal)
        # logging.info(params)
        logging.debug('Received D-Bus signal: {}'.format(signal))
        if signal == 'trigger':
            cv_trigger.acquire()
            cv_trigger.notify()
            cv_trigger.release()

    pub = system_bus.publish("io.respeaker.respeakerd", dbus_obj)
    sub = system_bus.subscribe(iface='respeakerd.signal',
                               signal_fired=dbus_handler)

    def exit_dbus():
        # Tear down the bus subscription/publication on every exit path.
        sub.unsubscribe()
        pub.unpublish()

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink; source and sink share a single
    # sound device when no WAV files are supplied.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: load the saved config, or register a new
    # device instance and persist it. ``exit_dbus()`` precedes every
    # sys.exit so the bus is cleaned up.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                exit_dbus()
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                exit_dbus()
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                exit_dbus()
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    # Device command callback invoked by the Assistant for OnOff traits.
    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    # If file arguments are supplied:
    # exit after the first turn of the conversation.
    if input_audio_file or output_audio_file:
        with SampleAssistant(lang, device_model_id, device_id,
                             conversation_stream,
                             grpc_channel, grpc_deadline,
                             device_handler, eventq) as assistant:
            dbus_obj.ready()
            assistant.assist()
        exit_dbus()
        return

    # else it's a long term run
    def assistant_thread(conversation_stream_):
        with SampleAssistant(lang, device_model_id, device_id,
                             conversation_stream_,
                             grpc_channel, grpc_deadline,
                             device_handler, eventq) as assistant:
            dbus_obj.ready()
            # If no file arguments supplied:
            # keep recording voice requests using the microphone
            # and playing back assistant response using the speaker.
            # When the once flag is set, don't wait for a trigger.
            # Otherwise, wait.
            wait_for_user_trigger = not once
            # capture device should always be opened when not playing
            conversation_stream_.start_recording()
            while True:
                if wait_for_user_trigger:
                    logging.info("speak hotword to wake up")
                    # Block until dbus_handler receives 'trigger'.
                    cv_trigger.acquire()
                    cv_trigger.wait()
                    cv_trigger.release()
                    logging.info("wake up!")
                continue_conversation = False
                try:
                    continue_conversation = assistant.assist()
                except PortAudioError as e:
                    # Best-effort recovery: log and fall back to idle.
                    logging.warn('PortAudio Error: {}'.format(str(e)))
                    eventq.put('on_idle')
                # wait for user trigger if there is no follow-up turn in
                # the conversation.
                wait_for_user_trigger = not continue_conversation
                # If we only want one conversation, break.
                if once and (not continue_conversation):
                    break
        exit_dbus()
        logging.debug('Exit from the assistant thread...')
        loop.quit()

    def event_process_thread():
        # Forward queued assistant state events to the D-Bus object so
        # respeakerd can drive the LED ring.
        while True:
            event = eventq.get()
            if event == 'on_listen':
                dbus_obj.on_listen()
            elif event == 'on_think':
                dbus_obj.on_think()
            elif event == 'on_speak':
                dbus_obj.on_speak()
            elif event == 'on_idle':
                dbus_obj.on_idle()
            time.sleep(0.5)

    def on_exit(sig):
        exit_dbus()
        loop.quit()
        logging.info("Quit...")

    setup_signals(
        signals=[sys_signal.SIGINT, sys_signal.SIGTERM, sys_signal.SIGHUP],
        handler=on_exit)

    # make conversation_stream writable inside thread
    thrd1 = Thread(target=assistant_thread, args=(conversation_stream, ))
    thrd2 = Thread(target=event_process_thread)
    thrd1.daemon = True
    thrd2.daemon = True
    thrd1.start()
    thrd2.start()

    logging.info("Glib mainloop start running...")
    loop.run()
    dbus_obj.on_idle()
def main(keyword_file_paths, library_path, model_file_path, asensitivities,
         input_audio_device_index, output_path, show_audio_devices_info,
         api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Entry point: authenticate, configure audio, resolve/register the
    device identity, then run the Porcupine hotword demo against the
    Google Assistant gRPC API.

    Exits the process (sys.exit(-1)) when credentials cannot be loaded or
    device registration fails; raises ValueError when no keyword files are
    supplied.
    """
    # Load OAuth 2.0 credentials. Note: `credentials` arrives as a file
    # path and is rebound to the Credentials object once loaded.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
        http_request = google.auth.transport.requests.Request()
        credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink. When neither file argument is given,
    # the `audio_device or ...` idiom makes source and sink share a single
    # SoundDeviceStream instance.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: prefer explicit ids, fall back to the saved
    # config file, and finally register a brand-new device instance.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # Persist the registration so subsequent runs reuse it.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Demo handler: just log the requested power state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Demo handler: "blink" by logging once per requested blink,
        # pacing the loop by the requested speed.
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "slowly":
            delay = 2
        elif speed == "quickly":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    if show_audio_devices_info:
        PorcupineDemo.show_audio_devices_info()
    else:
        if not keyword_file_paths:
            raise ValueError('keyword file paths are missing')
        keyword_file_paths = [x.strip() for x in keyword_file_paths.split(',')]
        # A single float applies the same sensitivity to every keyword;
        # otherwise parse a comma-separated per-keyword list.
        if isinstance(asensitivities, float):
            sensitivities = [asensitivities] * len(keyword_file_paths)
        else:
            sensitivities = [float(x) for x in asensitivities.split(',')]
        PorcupineDemo(lang, device_model_id, device_id, conversation_stream,
                      display, grpc_channel, grpc_deadline, device_handler,
                      library_path, model_file_path, keyword_file_paths,
                      sensitivities,
                      input_device_index=input_audio_device_index,
                      output_path=output_path).run()
    # Fixed: was Python 2 statement syntax (`print x`), a SyntaxError
    # under Python 3 which this file otherwise targets.
    print(input_audio_device_index)
def __init__(self,
             credentials_file=os.path.join(os.path.expanduser('~'),
                                           '.config', 'google-oauthlib-tool',
                                           'credentials.json'),
             device_config=os.path.join(os.path.expanduser('~'),
                                        '.config', 'googlesamples-assistant',
                                        'device_config.json'),
             lang='en-US',
             conversation_start_fifo=os.path.join(os.path.sep, 'tmp',
                                                  'pushtotalk.fifo'),
             *args, **kwargs):
    """
    Params:
        credentials_file -- Path to the Google OAuth credentials file
            (default: ~/.config/google-oauthlib-tool/credentials.json)
        device_config -- Path to device_config.json. Register your device and
            create a project, then run the pushtotalk.py script from
            googlesamples to create your device_config.json
        lang -- Assistant language (default: en-US)
        conversation_start_fifo -- Path of the FIFO used to signal the start
            of a conversation (default: /tmp/pushtotalk.fifo)

    Raises on missing/invalid credentials or device config.
    """
    super().__init__(*args, **kwargs)
    self.lang = lang
    self.credentials_file = credentials_file
    self.device_config = device_config
    self.conversation_start_fifo = conversation_start_fifo

    # Create the conversation-start FIFO; one left over from a previous
    # run is fine.
    try:
        os.mkfifo(self.conversation_start_fifo)
    except FileExistsError:
        pass

    with open(self.device_config) as f:
        device = json.load(f)
        self.device_id = device['id']
        self.device_model_id = device['model_id']

    # Load OAuth 2.0 credentials.
    # Fixed: the original used a bare `except:` but then referenced `e`,
    # so any credentials failure raised NameError instead of logging the
    # real cause.
    try:
        with open(self.credentials_file, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(
                token=None, **json.load(f))
        http_request = google.auth.transport.requests.Request()
        credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        raise

    # Create an authorized gRPC channel.
    self.grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, self.api_endpoint)
    logging.info('Connecting to %s', self.api_endpoint)

    # Configure audio source and sink: one SoundDeviceStream serves as both.
    # (The original `audio_device = None; x = audio_device = (audio_device
    # or ...)` dance always produced exactly one shared device instance.)
    # NOTE(review): the audio_* settings are presumably attributes set by
    # the parent class's __init__ — confirm against the base class.
    audio_device = audio_helpers.SoundDeviceStream(
        sample_rate=self.audio_sample_rate,
        sample_width=self.audio_sample_width,
        block_size=self.audio_block_size,
        flush_size=self.audio_flush_size)
    audio_source = audio_device
    audio_sink = audio_device

    # Create conversation stream with the given audio source and sink.
    self.conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=self.audio_iter_size,
        sample_width=self.audio_sample_width,
    )

    self.device_handler = device_helpers.DeviceRequestHandler(
        self.device_id)