def _create_config_request(self):
    """Build the first AssistRequest, which carries configuration only.

    The returned request holds audio-in/out formats, the device identity
    and the current dialog state; it contains no audio data.
    """
    assist_config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
            volume_percentage=50,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.model_id,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            conversation_state=self._conversation_state,
            language_code=aiy.i18n.get_language_code(),
        ),
    )
    return embedded_assistant_pb2.AssistRequest(config=assist_config)
def gen_assist_requests2(self, text):
    """Yield one config-only AssistRequest asking for a translation of *text*.

    Builds a text query (in Japanese) that asks the Assistant to translate
    *text* into ``self.toLang``, defaulting the target language when unset.

    Args:
        text: the phrase to be translated.  (Renamed from ``str``, which
            shadowed the builtin.)

    Yields:
        embedded_assistant_pb2.AssistRequest: the initial request carrying
        the AssistConfig with the translation text query and no audio data.
    """
    # Default target language ('英語' = English) when none was chosen.
    if self.toLang == '':
        self.toLang = '英語'
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        text_query='"' + text + '" を' + self.toLang + 'に翻訳して')
    if self.display:
        config.screen_out_config.screen_mode = PLAYING
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    # The first AssistRequest must contain the AssistConfig
    # and no audio data.
    yield embedded_assistant_pb2.AssistRequest(config=config)
def gen_assist_requests(self):
    """Yields: AssistRequest messages to send to the API."""
    sample_rate = self.conversation_stream.sample_rate
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=self.conversation_state,
        is_new_conversation=self.is_new_conversation,
    )
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=sample_rate,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=dialog_state,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
    )
    if self.display:
        config.screen_out_config.screen_mode = PLAYING
    # Later requests continue the conversation started by this one.
    self.is_new_conversation = False
    # First request: configuration only, never audio.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    # Remaining requests: audio chunks only, never configuration.
    for chunk in self.conversation_stream:
        yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
def gen_text_assist_requests(self, text_query):
    """Yields: AssistRequest messages to send to the API in text form"""
    stream = self.conversation_stream
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=self.conversation_state,
        is_new_conversation=self.is_new_conversation,
    )
    device = embedded_assistant_pb2.DeviceConfig(
        device_id=self.device_id,
        device_model_id=self.device_model_id,
    )
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=stream.sample_rate,
            volume_percentage=stream.volume_percentage,
        ),
        dialog_state_in=dialog_state,
        device_config=device,
        text_query=text_query,
    )
    # Later requests continue the conversation started by this one.
    self.is_new_conversation = False
    request = embedded_assistant_pb2.AssistRequest(config=config)
    assistant_helpers.log_assist_request_without_audio(request)
    yield request
def gen_assist_requests(self):
    """Yields: AssistRequest messages to send to the API."""
    rate = self.conversation_stream.sample_rate
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=rate,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
    )
    if self.display:
        # Request HTML screen output while a conversation is in progress.
        config.screen_out_config.screen_mode = (
            embedded_assistant_pb2.ScreenOutConfig.PLAYING)
    # Subsequent requests belong to the same conversation.
    self.is_new_conversation = False
    # Config-only request first, then audio-only requests.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    for chunk in self.conversation_stream:
        yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
def gen_assist_requests(self):
    """Yields: AssistRequest messages to send to the API."""
    audio_in = embedded_assistant_pb2.AudioInConfig(
        encoding='LINEAR16',
        sample_rate_hertz=self.sample_rate,
    )
    audio_out = embedded_assistant_pb2.AudioOutConfig(
        encoding='LINEAR16',
        sample_rate_hertz=self.sample_rate,
        volume_percentage=self.volume_percentage,
    )
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=self.conversation_state,
        is_new_conversation=self.is_new_conversation,
    )
    device = embedded_assistant_pb2.DeviceConfig(
        device_id=self.device_id,
        device_model_id=self.device_model_id,
    )
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=audio_in,
        audio_out_config=audio_out,
        dialog_state_in=dialog_state,
        device_config=device,
    )
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    # The first AssistRequest must contain the AssistConfig
    # and no audio data.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    # Stream audio chunks from the queue until listening stops or the
    # queue stays empty for a full second.
    while self.listening:
        try:
            chunk = self.audio_queue.get(timeout=1)
        except queue.Empty:
            print('no data available')
            break
        # Subsequent requests need audio data, but not config.
        yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
def gen_assist_requests(self, chunks):
    """Yields: AssistRequest messages to send to the API."""
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=1,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            # language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=True,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
    )
    # Config-only request goes first; it must not carry audio.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    # `chunks` is a callable producing audio buffers; skip empty ones.
    for buf in chunks():
        if buf:
            yield embedded_assistant_pb2.AssistRequest(audio_in=buf)
def gen_assist_requests(input_stream):
    """Yield AssistRequests: one config message, then audio chunks read
    from *input_stream* until it is exhausted."""
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=100,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=lang,
            conversation_state=b'',
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=device_id,
            device_model_id=device_model_id,
        ),
    )
    # First message carries only the configuration.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    # Then forward the recorded audio, one block at a time, until EOF.
    while True:
        chunk = input_stream.read(block_size)
        if not chunk:
            break
        yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
def gen_assist_requests(self):
    """Yields: AssistRequest messages to send to the API."""
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=b'',
    )
    # Resume a prior conversation when we have saved state.
    if self.conversation_state:
        logging.debug('Sending conversation state.')
        dialog_state.conversation_state = self.conversation_state
    rate = self.conversation_stream.sample_rate
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=rate,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=dialog_state,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
    )
    # The first AssistRequest must contain the AssistConfig
    # and no audio data.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    # Subsequent requests need audio data, but not config.
    for chunk in self.conversation_stream:
        yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
def iter_assist_requests():
    """Yield a single config-only AssistRequest carrying the echo query."""
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=self.conversation_state,
        is_new_conversation=self.is_new_conversation,
    )
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=50,
        ),
        dialog_state_in=dialog_state,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        text_query="オウム返し " + text_query,
    )
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    if self.display:
        config.screen_out_config.screen_mode = PLAYING
    request = embedded_assistant_pb2.AssistRequest(config=config)
    assistant_helpers.log_assist_request_without_audio(request)
    yield request
def _requests(self, recorder):
    """Yield the config request, then audio requests recorded live.

    Records 0.1s chunks from *recorder*, firing the start/stop callbacks
    around the recording session.
    """
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
            volume_percentage=self._volume_percentage),
        device_config=self._device_config,
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            conversation_state=self._conversation_state,
            language_code=self._language_code))
    yield embedded_assistant_pb2.AssistRequest(config=config)
    chunks = recorder.record(AUDIO_FORMAT,
                             chunk_duration_sec=0.1,
                             on_start=self._recording_started,
                             on_stop=self._recording_stopped)
    for chunk in chunks:
        yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
def iter_assist_requests():
    """Yield one config-only AssistRequest carrying the text query."""
    audio_out = embedded_assistant_pb2.AudioOutConfig(
        encoding='LINEAR16',
        sample_rate_hertz=16000,
        volume_percentage=0,
    )
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        language_code='en-US',
        conversation_state=None,
        is_new_conversation=True,
    )
    device = embedded_assistant_pb2.DeviceConfig(
        device_id='5a1b2c3d4',
        device_model_id='assistant',
    )
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=audio_out,
        dialog_state_in=dialog_state,
        device_config=device,
        text_query=text_query)
    yield embedded_assistant_pb2.AssistRequest(config=config)
def iter_requests():
    """Yield a single config-only AssistRequest for the text query."""
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding="LINEAR16",
            sample_rate_hertz=16000,
            volume_percentage=0),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language,
            # Fall back to empty state when no conversation is saved.
            conversation_state=self.conversation_state or b""),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.deviceID,
            device_model_id=self.modelID),
        text_query=query)
    yield embedded_assistant_pb2.AssistRequest(config=config)
def iter_assist_requests():
    """Yield one config-only AssistRequest carrying the text query."""
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=b'')
    # Resume the saved conversation if there is one.
    if self.conversation_state:
        dialog_state.conversation_state = self.conversation_state
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=0,
        ),
        dialog_state_in=dialog_state,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        text_query=text_query,
    )
    yield embedded_assistant_pb2.AssistRequest(config=config)
def iter_assist_requests(handler_input: HandlerInput, text_query: str):
    """Yields: AssistRequest messages to send to the API.

    This is a generator, so the previous ``-> AssistRequest`` return
    annotation was incorrect (it described the yielded item, not the
    return value) and has been removed.

    Args:
        handler_input: Alexa handler input used to derive device id,
            persisted volume and the session conversation state.
        text_query: the text request to send to the Assistant.
    """
    model_id = data.GOOGLE_ASSISTANT_API['model_id']
    device_id = util.get_device_id(handler_input)
    # TODO: hardcoded locale?
    language_code = 'it-IT'
    # TODO: hardcoded default volume?
    volume = util.get_persistent_attribute(handler_input, 'volume', default=50)
    conversation_state = util.get_session_attribute(
        handler_input, 'conversation_state')  # type: list
    # Session storage holds a list of ints; the proto needs bytes.
    conversation_state = bytes(
        conversation_state) if conversation_state is not None else None
    # No saved state means this is the start of a new conversation.
    is_new_conversation = conversation_state is None
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=data.DEFAULT_AUDIO_SAMPLE_RATE,
            volume_percentage=volume,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=language_code,
            conversation_state=conversation_state,
            is_new_conversation=is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=device_id,
            device_model_id=model_id,
        ),
        text_query=text_query)
    # Continue current conversation with later requests.
    req = embedded_assistant_pb2.AssistRequest(config=config)
    yield req
def iter_assist_requests():
    """Yield one config-only AssistRequest with MP3 audio output."""
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        # https://github.com/googlesamples/assistant-sdk-python/issues/284
        # language_code=self.language_code,
        conversation_state=self.conversation_state,
        is_new_conversation=self.is_new_conversation,
    )
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='MP3',
            sample_rate_hertz=16000,
            volume_percentage=self.volume,
        ),
        dialog_state_in=dialog_state,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        text_query=text_query,
    )
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    yield embedded_assistant_pb2.AssistRequest(config=config)
def iter_assist_requests():
    """Yield one config-only AssistRequest for the incoming request *r*."""
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=100,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=r.language,
            conversation_state=self.conversation_state,
            is_new_conversation=True,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.cfg.device_id,
            device_model_id=self.cfg.device_model_id,
        ),
        text_query=r.request)
    # Per-request screen mode overrides the configured default; the name
    # is resolved against the ScreenOutConfig enum.
    chosen_mode = r.screen_mode if r.screen_mode else self.cfg.screen_mode
    config.screen_out_config.screen_mode = getattr(
        embedded_assistant_pb2.ScreenOutConfig, chosen_mode)
    self.is_new_conversation = True
    yield embedded_assistant_pb2.AssistRequest(config=config)
def iter_assist_requests():
    """Yield one config-only AssistRequest with output settings taken
    from the ``config.writtenAssist`` configuration."""
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=b'')
    # Resume the saved conversation if there is one.
    if self.conversation_state:
        dialog_state.conversation_state = self.conversation_state
    # Audio-output parameters come from the written-assist config section.
    out_cfg = config.writtenAssist['audio_out_config']
    assist_config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding=out_cfg['encoding'],
            sample_rate_hertz=out_cfg['sample_rate_hertz'],
            volume_percentage=out_cfg['volume_percentage'],
        ),
        dialog_state_in=dialog_state,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        text_query=text_query,
    )
    request = embedded_assistant_pb2.AssistRequest(config=assist_config)
    assistant_helpers.log_assist_request_without_audio(request)
    yield request
def gen_assist_requests(self):
    """Yield the config-only AssistRequest, then one request per audio
    chunk from the conversation stream."""
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=b'')
    if self.conversation_state:
        logging.debug('Sending conversation state.')
        dialog_state.conversation_state = self.conversation_state
    stream = self.conversation_stream
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=stream.sample_rate,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=stream.sample_rate,
            volume_percentage=stream.volume_percentage,
        ),
        dialog_state_in=dialog_state,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
    )
    yield embedded_assistant_pb2.AssistRequest(config=config)
    for chunk in stream:
        yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)