def gen_assist_requests(self):
    """Yields: AssistRequest messages to send to the API."""
    global query
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ))
    # Alternative config for text queries: no audio input; the query string
    # is carried in text_query instead.
    text_config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        text_query=query,
    )
    if self.display:
        config.screen_out_config.screen_mode = PLAYING
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    # The first AssistRequest must contain the AssistConfig
    # and no audio data.
    print(query)
    if query == 'audio':
        print('audio')
        yield embedded_assistant_pb2.AssistRequest(config=config)
        for data in self.conversation_stream:
            # Subsequent requests need audio data, but not config.
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
    else:
        yield embedded_assistant_pb2.AssistRequest(config=text_config)
def _create_config_request(self):
    audio_in_config = embedded_assistant_pb2.AudioInConfig(
        encoding='LINEAR16',
        sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
    )
    audio_out_config = embedded_assistant_pb2.AudioOutConfig(
        encoding='LINEAR16',
        sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
        volume_percentage=50,
    )
    device_config = embedded_assistant_pb2.DeviceConfig(
        device_id=self.device_id,
        device_model_id=self.model_id,
    )
    dialog_state_in = embedded_assistant_pb2.DialogStateIn(
        conversation_state=self._conversation_state,
        language_code=aiy.i18n.get_language_code(),
    )
    assist_config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=audio_in_config,
        audio_out_config=audio_out_config,
        device_config=device_config,
        dialog_state_in=dialog_state_in,
    )
    return embedded_assistant_pb2.AssistRequest(config=assist_config)
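For context, a minimal sketch of how a config-only request like the one returned above is typically consumed: the bidirectional Assist RPC takes a request iterator whose first element carries the AssistConfig and whose later elements carry raw audio. The `stub`, `record_chunks`, and `DEADLINE_SECS` names below are assumptions for illustration, not part of the snippet above.

# Sketch only: `stub` is assumed to be an
# embedded_assistant_pb2_grpc.EmbeddedAssistantStub and `record_chunks` a
# hypothetical generator of LINEAR16 audio chunks.
DEADLINE_SECS = 185

def assist_once(self, stub, record_chunks):
    def requests():
        yield self._create_config_request()  # first request: config, no audio
        for chunk in record_chunks():        # later requests: audio only
            yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)

    for resp in stub.Assist(requests(), DEADLINE_SECS):
        if resp.event_type == embedded_assistant_pb2.AssistResponse.END_OF_UTTERANCE:
            break  # the Assistant detected the end of the spoken query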
def iter_log_assist_requests():
    if text_query:
        config = embedded_assistant_pb2.AssistConfig(
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=16000,
                volume_percentage=0,
            ),
            dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                language_code=self.language_code,
                conversation_state=self.conversation_state,
                is_new_conversation=self.is_new_conversation,
            ),
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            ),
            text_query=text_query,
        )
        self.is_new_conversation = False
        req = embedded_assistant_pb2.AssistRequest(config=config)
        assistant_helpers.log_assist_request_without_audio(req)
        yield req
    else:
        for c in self.gen_assist_requests():
            assistant_helpers.log_assist_request_without_audio(c)
            yield c
    logging.debug('Reached end of AssistRequest iteration.')
def gen_assist_requests2(self, text):
    # Default target language is '英語' ("English" in Japanese).
    if self.toLang == '':
        self.toLang = '英語'
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        # Japanese query meaning: 'translate "<text>" to <toLang>'.
        text_query='"' + text + '" を' + self.toLang + 'に翻訳して')
    if self.display:
        config.screen_out_config.screen_mode = PLAYING
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    # The first AssistRequest must contain the AssistConfig
    # and no audio data.
    yield embedded_assistant_pb2.AssistRequest(config=config)
def gen_assist_requests(self):
    """Yields: AssistRequest messages to send to the API."""
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ))
    if self.display:
        config.screen_out_config.screen_mode = (
            embedded_assistant_pb2.ScreenOutConfig.PLAYING)
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    # The first AssistRequest must contain the AssistConfig and no audio data.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    for data in self.conversation_stream:
        # Subsequent requests need audio data, but not config.
        yield embedded_assistant_pb2.AssistRequest(audio_in=data)
def gen_assist_requests(self):
    """Yields: AssistRequest messages to send to the API."""
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.sample_rate,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.sample_rate,
            volume_percentage=self.volume_percentage,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ))
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    # The first AssistRequest must contain the AssistConfig
    # and no audio data.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    while self.listening:
        try:
            data = self.audio_queue.get(timeout=1)
        except queue.Empty:
            print('no data available')
            break
        # Subsequent requests need audio data, but not config.
        yield embedded_assistant_pb2.AssistRequest(audio_in=data)
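The generator above drains `self.audio_queue` until `self.listening` goes false, but does not show the producer side. A minimal sketch of one way to fill that queue from a microphone, assuming the third-party `sounddevice` library (an assumption, not shown in the original):

import queue

import sounddevice as sd

audio_queue = queue.Queue()

def _audio_callback(indata, frames, time_info, status):
    # Copy each raw 16-bit PCM block into the queue the generator drains.
    audio_queue.put(bytes(indata))

# 16 kHz mono LINEAR16, matching the AudioInConfig used by the generator.
stream = sd.RawInputStream(samplerate=16000, channels=1, dtype='int16',
                           callback=_audio_callback)
stream.start()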
def gen_assist_requests(self, chunks):
    """Yields: AssistRequest messages to send to the API."""
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=1,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            # language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=True,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ))
    # The first AssistRequest must contain the AssistConfig
    # and no audio data.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    for data in chunks():
        # Subsequent requests need audio data, but not config.
        if data:
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
def gen_final_request(self, text_query, valence, action):
    # The 'repeat after me [string]' command makes the Assistant say
    # "You said [string]". Since this app only asks it to play music, the
    # hard-coded commands below stand in for a proper custom response,
    # which could be wired up given more time.
    if action == 'pause':
        text_query = 'repeat after me to pause the music, so I\'ll do that now'
    elif action == 'play':
        text_query = ('repeat after me play music with a ' + valence +
                      ' tone, so I will play ' + valence + ' music for you')
    config = embedded_assistant_pb2.AssistConfig(
        text_query=text_query,
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=True,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
    )
    yield embedded_assistant_pb2.AssistRequest(config=config)
def converseRequestGenerator(self):
    assistantConfig = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversationStream.sample_rate,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversationStream.sample_rate,
            volume_percentage=self.conversationStream.volume_percentage,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.deviceId,
            device_model_id=self.modelId,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code='en-US',
            conversation_state=self.conversationStateBytes,
            is_new_conversation=self.isNewConversation,
        ),
    )
    # The first request sends the metadata about the voice request.
    yield embedded_assistant_pb2.AssistRequest(config=assistantConfig)
    # Send the rest of the audio data.
    for audioData in self.conversationStream:
        yield embedded_assistant_pb2.AssistRequest(audio_in=audioData)
def gen_assist_requests(input_stream):
    dialog_state_in = embedded_assistant_pb2.DialogStateIn(
        language_code=lang,
        conversation_state=b''
    )
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=100,
        ),
        dialog_state_in=dialog_state_in,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=device_id,
            device_model_id=device_model_id,
        )
    )
    # Send the first AssistRequest message with configuration.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    while True:
        # Read the user request from a file.
        data = input_stream.read(block_size)
        if not data:
            break
        # Send subsequent AssistRequest messages with audio chunks.
        yield embedded_assistant_pb2.AssistRequest(audio_in=data)
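A usage sketch for the file-driven variant above; the file name, deadline, and `play` helper are illustrative assumptions, and the module-level `lang`, `device_id`, `device_model_id`, and `block_size` names are assumed to be defined elsewhere in that script.

# Hypothetical usage: replay a pre-recorded raw LINEAR16 (16 kHz, mono,
# 16-bit) file as an Assistant query.
block_size = 3200  # roughly 100 ms of audio per request
with open('query.raw', 'rb') as input_stream:
    # `assistant` is assumed to be an EmbeddedAssistantStub; 185 s is the
    # deadline used by the SDK samples.
    for resp in assistant.Assist(gen_assist_requests(input_stream), 185):
        if resp.audio_out.audio_data:
            play(resp.audio_out.audio_data)  # hypothetical playback helper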
def iter_assist_requests():
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=0,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        text_query=text_query,
    )
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    if self.display:
        config.screen_out_config.screen_mode = PLAYING
    req = embedded_assistant_pb2.AssistRequest(config=config)
    assistant_helpers.log_assist_request_without_audio(req)
    yield req
def __init__(self, language_code='en-US', volume_percentage=100):
    self._volume_percentage = volume_percentage  # Mutable state.
    self._conversation_state = None  # Mutable state.
    self._language_code = language_code
    # Load credentials and resolve device identifiers.
    credentials = auth_helpers.get_assistant_credentials()
    device_model_id, device_id = device_helpers.get_ids_for_service(credentials)
    logger.info('device_model_id: %s', device_model_id)
    logger.info('device_id: %s', device_id)
    http_request = google.auth.transport.requests.Request()
    try:
        credentials.refresh(http_request)
    except Exception as e:
        raise RuntimeError('Error loading credentials: %s' % e)
    api_endpoint = ASSISTANT_API_ENDPOINT
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logger.info('Connecting to %s', api_endpoint)
    # Create the gRPC stub and the device config reused on every request.
    self._assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(grpc_channel)
    self._device_config = embedded_assistant_pb2.DeviceConfig(
        device_model_id=device_model_id,
        device_id=device_id)
def gen_assist_requests(self):
    """Yields: AssistRequest messages to send to the API."""
    dialog_state_in = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=b''
    )
    if self.conversation_state:
        logging.debug('Sending conversation state.')
        dialog_state_in.conversation_state = self.conversation_state
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=dialog_state_in,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        )
    )
    # The first AssistRequest must contain the AssistConfig
    # and no audio data.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    for data in self.conversation_stream:
        # Subsequent requests need audio data, but not config.
        yield embedded_assistant_pb2.AssistRequest(audio_in=data)
def _iter_assist_requests(handler_input: HandlerInput,
                          text_query: str) -> AssistRequest:
    """Yields: AssistRequest messages to send to the API."""
    model_id = data.GOOGLE_ASSISTANT_API['model_id']
    device_id = skill_helpers.get_device_id(handler_input)
    locale = getattr(handler_input.request_envelope.request, 'locale', 'en-US')
    conversation_state = skill_helpers.get_session_attribute(
        handler_input, 'conversation_state')  # type: list
    is_new_conversation = conversation_state is None
    blob = bytes(conversation_state) if not is_new_conversation else None
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=data.DEFAULT_AUDIO_SAMPLE_RATE,
            volume_percentage=100,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=locale,
            conversation_state=blob,
            is_new_conversation=is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=device_id,
            device_model_id=model_id,
        ),
        text_query=text_query)
    # Continue current conversation with later requests.
    req = embedded_assistant_pb2.AssistRequest(config=config)
    yield req
def gen_assist_requests(self):
    """Yields: AssistRequest messages to send to the API."""
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        )
    )
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    # The first AssistRequest must contain the AssistConfig
    # and no audio data.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    for data in self.conversation_stream:
        # Subsequent requests need audio data, but not config.
        yield embedded_assistant_pb2.AssistRequest(audio_in=data)
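For context, a sketch of how generators like `gen_assist_requests` are driven in the SDK samples: the iterator is handed to the streaming `Assist` RPC, and the responses carry end-of-utterance events, output audio, and updated conversation state. `self.assistant` is assumed to be an `embedded_assistant_pb2_grpc.EmbeddedAssistantStub` and `self.deadline` a timeout in seconds.

# Consuming side (sketch): stream requests up, handle responses as they arrive.
for resp in self.assistant.Assist(self.gen_assist_requests(), self.deadline):
    if resp.event_type == embedded_assistant_pb2.AssistResponse.END_OF_UTTERANCE:
        self.conversation_stream.stop_recording()  # user finished speaking
    if resp.dialog_state_out.conversation_state:
        # Persist state so the next turn continues this conversation.
        self.conversation_state = resp.dialog_state_out.conversation_state
    if len(resp.audio_out.audio_data) > 0:
        self.conversation_stream.write(resp.audio_out.audio_data)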
def gen_text_assist_requests(self, text_query):
    """Yields: AssistRequest messages to send to the API in text form."""
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        text_query=text_query,
    )
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    req = embedded_assistant_pb2.AssistRequest(config=config)
    assistant_helpers.log_assist_request_without_audio(req)
    yield req
def iter_requests():
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language,
        conversation_state=self.conversation_state or b"")
    audio_config = embedded_assistant_pb2.AudioOutConfig(
        encoding="LINEAR16",
        sample_rate_hertz=16000,
        volume_percentage=0)
    device_config = embedded_assistant_pb2.DeviceConfig(
        device_id=self.deviceID,
        device_model_id=self.modelID)
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=audio_config,
        dialog_state_in=dialog_state,
        device_config=device_config,
        text_query=query)
    req = embedded_assistant_pb2.AssistRequest(config=config)
    yield req
def iter_assist_requests():
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=0,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code='en-US',
            conversation_state=None,
            is_new_conversation=True,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id='5a1b2c3d4',
            device_model_id='assistant',
        ),
        text_query=text_query)
    req = embedded_assistant_pb2.AssistRequest(config=config)
    yield req
def iter_assist_requests():
    dialog_state_in = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=b'')
    if self.conversation_state:
        dialog_state_in.conversation_state = self.conversation_state
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=0,
        ),
        dialog_state_in=dialog_state_in,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        text_query=text_query,
    )
    req = embedded_assistant_pb2.AssistRequest(config=config)
    yield req
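A sketch of reading the reply to a text query such as the one above: with `volume_percentage=0` the audible output is muted, so the useful response fields are the supplemental display text and the updated conversation state. The enclosing `assist` method is illustrative, and `self.assistant` / `self.deadline` are assumed as in the other snippets.

def assist(self):
    # Sketch of the enclosing method that consumes iter_assist_requests().
    text_response = None
    for resp in self.assistant.Assist(iter_assist_requests(), self.deadline):
        if resp.dialog_state_out.conversation_state:
            self.conversation_state = resp.dialog_state_out.conversation_state
        if resp.dialog_state_out.supplemental_display_text:
            text_response = resp.dialog_state_out.supplemental_display_text
    return text_response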
def iter_assist_requests(handler_input: HandlerInput,
                         text_query: str) -> AssistRequest:
    """Yields: AssistRequest messages to send to the API."""
    model_id = data.GOOGLE_ASSISTANT_API['model_id']
    device_id = util.get_device_id(handler_input)
    # TODO: hardcoded locale?
    language_code = 'it-IT'
    # TODO: hardcoded default volume?
    volume = util.get_persistent_attribute(handler_input, 'volume', default=50)
    conversation_state = util.get_session_attribute(
        handler_input, 'conversation_state')  # type: list
    conversation_state = bytes(
        conversation_state) if conversation_state is not None else None
    is_new_conversation = conversation_state is None
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=data.DEFAULT_AUDIO_SAMPLE_RATE,
            volume_percentage=volume,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=language_code,
            conversation_state=conversation_state,
            is_new_conversation=is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=device_id,
            device_model_id=model_id,
        ),
        text_query=text_query)
    # Continue current conversation with later requests.
    req = embedded_assistant_pb2.AssistRequest(config=config)
    yield req
def iter_assist_requests():
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='MP3',
            sample_rate_hertz=16000,
            volume_percentage=self.volume,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            # https://github.com/googlesamples/assistant-sdk-python/issues/284
            # language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        text_query=text_query,
    )
    # Continue current conversation with later requests.
    self.is_new_conversation = False
    req = embedded_assistant_pb2.AssistRequest(config=config)
    yield req
def iter_assist_requests():
    dialog_state_in = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=b'')
    if self.conversation_state:
        dialog_state_in.conversation_state = self.conversation_state
    gConfig = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding=config.writtenAssist['audio_out_config']['encoding'],
            sample_rate_hertz=config.writtenAssist['audio_out_config']['sample_rate_hertz'],
            volume_percentage=config.writtenAssist['audio_out_config']['volume_percentage'],
        ),
        dialog_state_in=dialog_state_in,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ),
        text_query=text_query,
    )
    req = embedded_assistant_pb2.AssistRequest(config=gConfig)
    assistant_helpers.log_assist_request_without_audio(req)
    yield req
def gen_assist_requests(self):
    dialog_state_in = embedded_assistant_pb2.DialogStateIn(
        language_code=self.language_code,
        conversation_state=b'')
    if self.conversation_state:
        logging.debug('Sending conversation state.')
        dialog_state_in.conversation_state = self.conversation_state
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        ),
        dialog_state_in=dialog_state_in,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        ))
    # The first AssistRequest must contain the AssistConfig and no audio data.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    for data in self.conversation_stream:
        # Subsequent requests need audio data, but not config.
        yield embedded_assistant_pb2.AssistRequest(audio_in=data)
def iter_assist_requests():
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=100,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=r.language,
            conversation_state=self.conversation_state,
            is_new_conversation=True,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=self.cfg.device_id,
            device_model_id=self.cfg.device_model_id,
        ),
        text_query=r.request)
    # Resolve the requested screen mode name to the protobuf enum value.
    screen_mode = r.screen_mode if r.screen_mode else self.cfg.screen_mode
    config.screen_out_config.screen_mode = getattr(
        embedded_assistant_pb2.ScreenOutConfig, screen_mode)
    self.is_new_conversation = True
    req = embedded_assistant_pb2.AssistRequest(config=config)
    yield req
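Since the snippet above enables screen output, a short sketch of handling the matching response field: the temp-file-plus-browser handoff mirrors what the SDK's pushtotalk sample does with HTML screen data, and `assistant` / `deadline` are assumed names standing in for the gRPC stub and its timeout.

import tempfile
import webbrowser

for resp in assistant.Assist(iter_assist_requests(), deadline):
    if resp.screen_out.data:
        # screen_out.data is rendered HTML when screen output is enabled.
        with tempfile.NamedTemporaryFile('wb', suffix='.html',
                                         delete=False) as f:
            f.write(resp.screen_out.data)
        webbrowser.open('file://' + f.name)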