def gen_assist_requests(input_stream):
    """Yield AssistRequest messages built from ``input_stream``.

    The first request carries only the AssistConfig; each later request
    carries one LINEAR16 audio chunk read from the stream.  Relies on
    module-level ``lang``, ``device_id``, ``device_model_id`` and
    ``block_size``.
    """
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=100,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=lang,
            conversation_state=b'',
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=device_id,
            device_model_id=device_model_id,
        ),
    )
    # Configuration must be the very first message on the stream.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    while True:
        chunk = input_stream.read(block_size)
        if not chunk:
            break
        # Subsequent AssistRequest messages carry audio only.
        yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
예제 #2
0
    def gen_assist_requests(self):
        """Yield AssistRequest messages for one conversation turn.

        The first request carries only the AssistConfig; every later
        request carries one audio chunk from the conversation stream.
        """
        dialog_state = embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=b'')
        if self.conversation_state:
            # Resume the previous turn of the conversation.
            logging.debug('Sending conversation state.')
            dialog_state.conversation_state = self.conversation_state
        audio_in = embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate)
        audio_out = embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage)
        device = embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id)
        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=audio_in,
            audio_out_config=audio_out,
            dialog_state_in=dialog_state,
            device_config=device)
        # The first AssistRequest must contain the config and no audio.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        # Subsequent requests need audio data, but not config.
        for data in self.conversation_stream:
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
예제 #3
0
    def _requests(self, recorder):
        """Yield the config request, then audio chunks from ``recorder``."""
        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
                volume_percentage=self._volume_percentage),
            dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                conversation_state=self._conversation_state,
                language_code=self._language_code),
            device_config=self._device_config)

        # Config must precede any audio in the request stream.
        yield embedded_assistant_pb2.AssistRequest(config=config)

        # Stream 100ms audio chunks until the recorder stops.
        for chunk in recorder.record(AUDIO_FORMAT,
                                     chunk_duration_sec=0.1,
                                     on_start=self._recording_started,
                                     on_stop=self._recording_stopped):
            yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
예제 #4
0
    def gen_assist_requests(self):
        """Yield AssistRequest messages to send to the API."""
        dialog_state = embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation)
        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage),
            dialog_state_in=dialog_state,
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id))
        if self.display:
            # Request PLAYING screen mode when a display is attached.
            config.screen_out_config.screen_mode = PLAYING
        # Later requests continue this same conversation.
        self.is_new_conversation = False
        # First request: config only, no audio data.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        # Then audio chunks only, no config.
        for data in self.conversation_stream:
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
    def gen_assist_requests(self):
        """Yields: AssistRequest messages to send to the API.

        The first request carries only the AssistConfig; every later
        request carries one audio chunk from the conversation stream.
        """
        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                language_code=self.language_code,
                conversation_state=self.conversation_state,
                is_new_conversation=self.is_new_conversation,
            ),
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            ))
        if self.display:
            # Request PLAYING screen mode when a display is attached.
            config.screen_out_config.screen_mode = embedded_assistant_pb2.ScreenOutConfig.PLAYING
        # Later requests continue the current conversation.
        self.is_new_conversation = False
        yield embedded_assistant_pb2.AssistRequest(config=config)
        for data in self.conversation_stream:
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
예제 #6
0
    def gen_assist_requests(self):
        """Yield AssistRequest messages to send to the API.

        The first request carries only the configuration; while
        ``self.listening`` is set, later requests carry audio chunks
        pulled from ``self.audio_queue``.
        """
        dialog_state = embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation)
        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.sample_rate),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.sample_rate,
                volume_percentage=self.volume_percentage),
            dialog_state_in=dialog_state,
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id))
        # Follow-up requests continue this conversation.
        self.is_new_conversation = False
        # Config must be first and must not carry audio.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        while self.listening:
            try:
                chunk = self.audio_queue.get(timeout=1)
            except queue.Empty:
                # Give up after one second without captured audio.
                print('no data available')
                break
            yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
예제 #7
0
    def gen_assist_requests(self, chunks):
        """Yield AssistRequest messages to send to the API.

        Args:
            chunks: Zero-argument callable returning an iterable of raw
                audio chunks; falsy chunks are skipped.
        """
        dialog_state = embedded_assistant_pb2.DialogStateIn(
            # language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=True)
        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=16000),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=16000,
                volume_percentage=1),
            dialog_state_in=dialog_state,
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id))
        # First request: configuration only, no audio.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        # Then forward each non-empty audio chunk.
        for chunk in chunks():
            if chunk:
                yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
    def converseRequestGenerator(self):
        """Yield the config request, then the recorded audio chunks."""
        dialogState = embedded_assistant_pb2.DialogStateIn(
            language_code='en-US',
            conversation_state=self.conversationStateBytes,
            is_new_conversation=self.isNewConversation,
        )
        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversationStream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversationStream.sample_rate,
                volume_percentage=self.conversationStream.volume_percentage,
            ),
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.deviceId,
                device_model_id=self.modelId,
            ),
            dialog_state_in=dialogState,
        )

        # The first request carries only the metadata for the voice query.
        yield embedded_assistant_pb2.AssistRequest(config=config)

        # Then stream the captured audio.
        for chunk in self.conversationStream:
            yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
예제 #9
0
    def gen_text_assist_requests(self, text_query):
        """Yield a single AssistRequest carrying ``text_query`` (no audio in)."""
        audio_out = embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=self.conversation_stream.sample_rate,
            volume_percentage=self.conversation_stream.volume_percentage,
        )
        dialog_state = embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state,
            is_new_conversation=self.is_new_conversation,
        )
        device = embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.device_model_id,
        )
        config = embedded_assistant_pb2.AssistConfig(
            audio_out_config=audio_out,
            dialog_state_in=dialog_state,
            device_config=device,
            text_query=text_query,
        )

        # Follow-up requests continue this conversation.
        self.is_new_conversation = False

        req = embedded_assistant_pb2.AssistRequest(config=config)
        assistant_helpers.log_assist_request_without_audio(req)
        yield req
예제 #10
0
 def iter_log_assist_requests():
     """Yield AssistRequest messages for this turn, logging each one.

     Reads ``text_query`` and ``self`` from the enclosing scope (closure).
     A text query produces a single config-only request; otherwise the
     audio request generator is delegated to.
     """
     if (text_query):
         # Text query: one config-only request is enough.
         config = embedded_assistant_pb2.AssistConfig(
             audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                 encoding='LINEAR16',
                 sample_rate_hertz=16000,
                 volume_percentage=0,
             ),
             dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                 language_code=self.language_code,
                 conversation_state=self.conversation_state,
                 is_new_conversation=self.is_new_conversation,
             ),
             device_config=embedded_assistant_pb2.DeviceConfig(
                 device_id=self.device_id,
                 device_model_id=self.device_model_id,
             ),
             text_query=text_query,
         )
         # Follow-up requests continue the same conversation.
         self.is_new_conversation = False
         req = embedded_assistant_pb2.AssistRequest(config=config)
         assistant_helpers.log_assist_request_without_audio(req)
         yield req
     else:
         # Voice query: delegate to the audio request generator.
         for c in self.gen_assist_requests():
             assistant_helpers.log_assist_request_without_audio(c)
             yield c
     logging.debug('Reached end of AssistRequest iteration.')
예제 #11
0
 def iter_assist_requests():
     """Yield a single config-only AssistRequest for a text query.

     Reads ``text_query`` and ``self`` from the enclosing scope (closure).
     """
     config = embedded_assistant_pb2.AssistConfig(
         audio_out_config=embedded_assistant_pb2.AudioOutConfig(
             encoding='LINEAR16',
             sample_rate_hertz=16000,
             volume_percentage=0,
         ),
         dialog_state_in=embedded_assistant_pb2.DialogStateIn(
             language_code=self.language_code,
             conversation_state=self.conversation_state,
             is_new_conversation=self.is_new_conversation,
         ),
         device_config=embedded_assistant_pb2.DeviceConfig(
             device_id=self.device_id,
             device_model_id=self.device_model_id,
         ),
         text_query=text_query,
     )
     # Continue current conversation with later requests.
     self.is_new_conversation = False
     if self.display:
         # Request PLAYING screen mode when a display is attached.
         config.screen_out_config.screen_mode = PLAYING
     req = embedded_assistant_pb2.AssistRequest(config=config)
     assistant_helpers.log_assist_request_without_audio(req)
     yield req
예제 #12
0
    def _create_config_request(self):
        """Build the initial AssistRequest that carries only configuration."""
        assist_config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
                volume_percentage=50,
            ),
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.model_id,
            ),
            dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                conversation_state=self._conversation_state,
                # Locale comes from the AIY i18n settings.
                language_code=aiy.i18n.get_language_code(),
            ),
        )
        return embedded_assistant_pb2.AssistRequest(config=assist_config)
    def gen_assist_requests2(self, text):
        """Yield one config-only AssistRequest asking for a translation.

        Args:
            text: Phrase to translate.  (Parameter was previously named
                ``str``, shadowing the builtin; renamed — positional
                callers are unaffected.)
        """
        # Default target language: 英語 (English).
        if self.toLang == '':
            self.toLang = '英語'

        config = embedded_assistant_pb2.AssistConfig(
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                language_code=self.language_code,
                conversation_state=self.conversation_state,
                is_new_conversation=self.is_new_conversation,
            ),
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            ),
            # Query text: '"<text>" を<toLang>に翻訳して'
            # ("translate '<text>' into <toLang>").
            text_query='"' + text + '" を' + self.toLang + 'に翻訳して')
        if self.display:
            config.screen_out_config.screen_mode = PLAYING
        # Continue current conversation with later requests.
        self.is_new_conversation = False
        # The first AssistRequest must contain the AssistConfig
        # and no audio data.
        yield embedded_assistant_pb2.AssistRequest(config=config)
예제 #14
0
def _iter_assist_requests(handler_input: HandlerInput,
                          text_query: str) -> AssistRequest:
    """Yields: AssistRequest messages to send to the API."""

    model_id = data.GOOGLE_ASSISTANT_API['model_id']
    device_id = skill_helpers.get_device_id(handler_input)

    # Fall back to en-US when the request carries no locale.
    locale = getattr(handler_input.request_envelope.request, 'locale', 'en-US')

    # Stored conversation state (a list of ints) marks a continuing
    # conversation; convert it back to the bytes blob the API expects.
    conversation_state = skill_helpers.get_session_attribute(
        handler_input, 'conversation_state')  # type: list
    if conversation_state is None:
        is_new_conversation, blob = True, None
    else:
        is_new_conversation, blob = False, bytes(conversation_state)

    audio_out = embedded_assistant_pb2.AudioOutConfig(
        encoding='LINEAR16',
        sample_rate_hertz=data.DEFAULT_AUDIO_SAMPLE_RATE,
        volume_percentage=100)
    dialog_state = embedded_assistant_pb2.DialogStateIn(
        language_code=locale,
        conversation_state=blob,
        is_new_conversation=is_new_conversation)
    device = embedded_assistant_pb2.DeviceConfig(
        device_id=device_id,
        device_model_id=model_id)
    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=audio_out,
        dialog_state_in=dialog_state,
        device_config=device,
        text_query=text_query)
    # A single config-only request carries the whole text query.
    yield embedded_assistant_pb2.AssistRequest(config=config)
예제 #15
0
 def gen_final_request(self, text_query, valence, action):
     """Yield one config-only AssistRequest built from ``action``/``valence``.

     Args:
         text_query: Fallback query used when ``action`` is neither
             'pause' nor 'play'.
         valence: Mood word spliced into the 'play' command text.
         action: 'pause', 'play', or anything else (keeps text_query).
     """
     # The "repeat after me [string] command makes the assistant say "You said [string]"
     # I'm only asking it to play music so I have it always saying the following hard code custom command
     # With some extra time, I could figure out the proper way to make it say a custom response
     if action == 'pause':
         text_query = 'repeat after me to pause the music, so I\'ll do that now'
     elif action == 'play':
         text_query = "repeat after me play music with a " + valence + " tone, so I will play " + valence + " music for you"
     config = embedded_assistant_pb2.AssistConfig(
         text_query=text_query,
         audio_out_config=embedded_assistant_pb2.AudioOutConfig(
             encoding='LINEAR16',
             sample_rate_hertz=self.conversation_stream.sample_rate,
             volume_percentage=self.conversation_stream.volume_percentage,
         ),
         dialog_state_in=embedded_assistant_pb2.DialogStateIn(
             language_code=self.language_code,
             conversation_state=self.conversation_state,
             # Every call starts a fresh conversation.
             is_new_conversation=True,
         ),
         device_config=embedded_assistant_pb2.DeviceConfig(
             device_id=self.device_id,
             device_model_id=self.device_model_id,
         ),
     )
     yield embedded_assistant_pb2.AssistRequest(config=config)
예제 #16
0
def log_assist_request_without_audio(assist_request):
    """Log AssistRequest fields without audio data."""
    if not logging.getLogger().isEnabledFor(logging.DEBUG):
        return
    req_copy = embedded_assistant_pb2.AssistRequest()
    req_copy.CopyFrom(assist_request)
    audio_size = len(req_copy.audio_in)
    if audio_size > 0:
        # Audio payloads are large binary blobs; log only their size.
        req_copy.ClearField('audio_in')
        logging.debug('AssistRequest: audio_in (%d bytes)', audio_size)
    else:
        logging.debug('AssistRequest: %s', req_copy)
예제 #17
0
 def gen_assist_requests(self):
     """Yields: AssistRequest messages to send to the API.

     First request: config only.  Subsequent requests: audio chunks
     read from the conversation stream.
     """
     dialog_state_in = embedded_assistant_pb2.DialogStateIn(
         language_code=self.language_code, conversation_state=b'')
     if self.conversation_state:
         # Resume the previous conversation turn.
         logging.debug('Sending conversation state.')
         dialog_state_in.conversation_state = self.conversation_state
     config = embedded_assistant_pb2.AssistConfig(
         audio_in_config=embedded_assistant_pb2.AudioInConfig(
             encoding='LINEAR16',
             sample_rate_hertz=self.conversation_stream.sample_rate,
         ),
         audio_out_config=embedded_assistant_pb2.AudioOutConfig(
             encoding='LINEAR16',
             sample_rate_hertz=self.conversation_stream.sample_rate,
             volume_percentage=self.conversation_stream.volume_percentage,
         ),
         dialog_state_in=dialog_state_in,
         device_config=embedded_assistant_pb2.DeviceConfig(
             device_id=self.device_id,
             device_model_id=self.device_model_id,
         ))
     yield embedded_assistant_pb2.AssistRequest(config=config)
     for data in self.conversation_stream:
         yield embedded_assistant_pb2.AssistRequest(audio_in=data)
예제 #18
0
File: gassist.py  Project: fredi-68/Ram
        def iter_requests():
            """Yield one config-only AssistRequest for the text ``query``.

            Closure: reads ``query`` and ``self`` from the enclosing scope.
            """
            dialog_state = embedded_assistant_pb2.DialogStateIn(
                language_code=self.language,
                conversation_state=self.conversation_state or b"")

            audio_config = embedded_assistant_pb2.AudioOutConfig(
                encoding="LINEAR16",
                sample_rate_hertz=16000,
                volume_percentage=0)
            device_config = embedded_assistant_pb2.DeviceConfig(
                device_id=self.deviceID, device_model_id=self.modelID)

            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=audio_config,
                dialog_state_in=dialog_state,
                device_config=device_config,
                text_query=query)
            req = embedded_assistant_pb2.AssistRequest(config=config)
            yield req
예제 #19
0
 def iter_assist_requests():
     """Yield one config-only AssistRequest for ``text_query`` (closure).

     NOTE(review): device_id and device_model_id are hard-coded here —
     confirm whether they should come from configuration.
     """
     config = embedded_assistant_pb2.AssistConfig(
         audio_out_config=embedded_assistant_pb2.AudioOutConfig(
             encoding='LINEAR16',
             sample_rate_hertz=16000,
             volume_percentage=0,
         ),
         dialog_state_in=embedded_assistant_pb2.DialogStateIn(
             language_code='en-US',
             conversation_state=None,
             is_new_conversation=True,
         ),
         device_config=embedded_assistant_pb2.DeviceConfig(
             device_id='5a1b2c3d4',
             device_model_id='assistant',
         ),
         text_query=text_query)
     req = embedded_assistant_pb2.AssistRequest(config=config)
     yield req
예제 #20
0
 def iter_assist_requests():
     """Yield one config-only AssistRequest for ``text_query`` (closure)."""
     dialog_state_in = embedded_assistant_pb2.DialogStateIn(
         language_code=self.language_code, conversation_state=b'')
     if self.conversation_state:
         # Resume the previous conversation turn.
         dialog_state_in.conversation_state = self.conversation_state
     config = embedded_assistant_pb2.AssistConfig(
         audio_out_config=embedded_assistant_pb2.AudioOutConfig(
             encoding='LINEAR16',
             sample_rate_hertz=16000,
             volume_percentage=0,
         ),
         dialog_state_in=dialog_state_in,
         device_config=embedded_assistant_pb2.DeviceConfig(
             device_id=self.device_id,
             device_model_id=self.device_model_id,
         ),
         text_query=text_query,
     )
     req = embedded_assistant_pb2.AssistRequest(config=config)
     yield req
def iter_assist_requests(handler_input: HandlerInput,
                         text_query: str) -> AssistRequest:
    """Yields: AssistRequest messages to send to the API.

    NOTE(review): this is a generator function, so the effective return
    type is an iterator of AssistRequest, not a single one.
    """

    model_id = data.GOOGLE_ASSISTANT_API['model_id']
    device_id = util.get_device_id(handler_input)

    # TODO: hardcoded locale?
    language_code = 'it-IT'

    # TODO: hardcoded default volume?
    volume = util.get_persistent_attribute(handler_input, 'volume', default=50)

    # Stored state (list of ints) marks a continuing conversation;
    # convert it back to the bytes blob the API expects.
    conversation_state = util.get_session_attribute(
        handler_input, 'conversation_state')  # type: list
    conversation_state = bytes(
        conversation_state) if conversation_state is not None else None
    is_new_conversation = conversation_state is None

    config = embedded_assistant_pb2.AssistConfig(
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=data.DEFAULT_AUDIO_SAMPLE_RATE,
            volume_percentage=volume,
        ),
        dialog_state_in=embedded_assistant_pb2.DialogStateIn(
            language_code=language_code,
            conversation_state=conversation_state,
            is_new_conversation=is_new_conversation,
        ),
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=device_id,
            device_model_id=model_id,
        ),
        text_query=text_query)
    # Continue current conversation with later requests.
    req = embedded_assistant_pb2.AssistRequest(config=config)
    yield req
예제 #22
0
 def iter_assist_requests():
     """Yield one config-only AssistRequest for ``text_query`` (closure).

     Audio out is requested as MP3 rather than raw LINEAR16.
     """
     config = embedded_assistant_pb2.AssistConfig(
         audio_out_config=embedded_assistant_pb2.AudioOutConfig(
             encoding='MP3',
             sample_rate_hertz=16000,
             volume_percentage=self.volume,
         ),
         dialog_state_in=embedded_assistant_pb2.DialogStateIn(
             # https://github.com/googlesamples/assistant-sdk-python/issues/284
             # language_code=self.language_code,
             conversation_state=self.conversation_state,
             is_new_conversation=self.is_new_conversation,
         ),
         device_config=embedded_assistant_pb2.DeviceConfig(
             device_id=self.device_id,
             device_model_id=self.device_model_id,
         ),
         text_query=text_query,
     )
     # Continue current conversation with later requests.
     self.is_new_conversation = False
     req = embedded_assistant_pb2.AssistRequest(config=config)
     yield req
예제 #23
0
 def iter_assist_requests():
     """Yield one config-only AssistRequest for ``text_query`` (closure).

     Audio-out settings come from ``config.writtenAssist``; note that
     ``config`` here is the external settings object from the enclosing
     scope, so the AssistConfig local is named ``gConfig`` to avoid a clash.
     """
     dialog_state_in = embedded_assistant_pb2.DialogStateIn(
         language_code=self.language_code, conversation_state=b'')
     if self.conversation_state:
         # Resume the previous conversation turn.
         dialog_state_in.conversation_state = self.conversation_state
     gConfig = embedded_assistant_pb2.AssistConfig(
         audio_out_config=embedded_assistant_pb2.AudioOutConfig(
             encoding=config.writtenAssist['audio_out_config']
             ['encoding'],
             sample_rate_hertz=config.writtenAssist['audio_out_config']
             ['sample_rate_hertz'],
             volume_percentage=config.writtenAssist['audio_out_config']
             ['volume_percentage'],
         ),
         dialog_state_in=dialog_state_in,
         device_config=embedded_assistant_pb2.DeviceConfig(
             device_id=self.device_id,
             device_model_id=self.device_model_id,
         ),
         text_query=text_query,
     )
     req = embedded_assistant_pb2.AssistRequest(config=gConfig)
     assistant_helpers.log_assist_request_without_audio(req)
     yield req
예제 #24
0
        def iter_assist_requests():
            """Yield one config-only AssistRequest for request ``r`` (closure)."""
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=100,
                ),
                dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                    language_code=r.language,
                    conversation_state=self.conversation_state,
                    is_new_conversation=True,
                ),
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.cfg.device_id,
                    device_model_id=self.cfg.device_model_id,
                ),
                text_query=r.request)

            # Screen mode from the request, falling back to configuration;
            # resolved by name against the ScreenOutConfig enum.
            screen_mode = r.screen_mode if r.screen_mode else self.cfg.screen_mode
            config.screen_out_config.screen_mode = getattr(
                embedded_assistant_pb2.ScreenOutConfig, screen_mode)
            # NOTE(review): sibling implementations set this to False to
            # continue the conversation; True here may be intentional — confirm.
            self.is_new_conversation = True
            req = embedded_assistant_pb2.AssistRequest(config=config)
            yield req
예제 #25
0
 def _create_audio_request(self, data):
     """Wrap one raw audio chunk in a follow-up AssistRequest."""
     request = embedded_assistant_pb2.AssistRequest(audio_in=data)
     return request