Example #1
    def gen_assist_requests(self):
        """Yields: AssistRequest messages to send to the API."""

        dialog_state_in = embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=b''
        )
        if self.conversation_state:
            logging.debug('Sending conversation state.')
            dialog_state_in.conversation_state = self.conversation_state
        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            dialog_state_in=dialog_state_in,
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            )
        )
        # The first AssistRequest must contain the AssistConfig
        # and no audio data.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        for data in self.conversation_stream:
            # Subsequent requests need audio data, but not config.
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
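For context, a generator like the one above is consumed by the bidirectional streaming Assist call. The following is a minimal consumer sketch written as if it lives in the same class as the generator, assuming an already-authorized grpc_channel and the google.assistant.embedded.v1alpha2 bindings; DEADLINE_SECS is an illustrative timeout, not part of the example above.

    from google.assistant.embedded.v1alpha2 import (
        embedded_assistant_pb2,
        embedded_assistant_pb2_grpc,
    )

    # grpc_channel is assumed to be an authorized channel, e.g. from
    # google.auth.transport.grpc.secure_authorized_channel().
    assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(grpc_channel)

    # Assist() is a bidirectional streaming RPC: it consumes the
    # AssistRequest generator and yields AssistResponse messages.
    for resp in assistant.Assist(self.gen_assist_requests(), DEADLINE_SECS):
        if resp.event_type == embedded_assistant_pb2.AssistResponse.END_OF_UTTERANCE:
            logging.info('End of audio request detected.')
        if len(resp.audio_out.audio_data) > 0:
            self.conversation_stream.write(resp.audio_out.audio_data)
        if resp.dialog_state_out.conversation_state:
            # Persist the opaque blob so the next turn can send it back
            # through dialog_state_in.conversation_state.
            self.conversation_state = resp.dialog_state_out.conversation_state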
Example #2
    def gen_assist_requests(self):
        """Yields: AssistRequest messages to send to the API."""

        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                language_code=self.language_code,
                conversation_state=self.conversation_state,
                is_new_conversation=self.is_new_conversation,
            ),
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            )
        )
        if self.display:
            config.screen_out_config.screen_mode = embedded_assistant_pb2.ScreenOutConfig.PLAYING
        # Continue current conversation with later requests.
        self.is_new_conversation = False
        # The first AssistRequest must contain the AssistConfig
        # and no audio data.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        for data in self.conversation_stream:
            # Subsequent requests need audio data, but not config.
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
Example #3
    def gen_assist_requests(self):
        """Yields: AssistRequest messages to send to the API."""

        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                language_code=self.language_code,
                conversation_state=self.conversation_state,
                is_new_conversation=self.is_new_conversation,
            ),
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            ))
        if self.display:
            config.screen_out_config.screen_mode = embedded_assistant_pb2.ScreenOutConfig.PLAYING
        self.is_new_conversation = False
        yield embedded_assistant_pb2.AssistRequest(config=config)
        for data in self.conversation_stream:
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
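Examples #2 and #3 set screen_mode to PLAYING, which tells the Assistant the device has a display. A hedged sketch of handling the visual response, reusing the stub from the sketch under Example #1 and assuming the v1alpha2 AssistResponse.screen_out field; the output path is illustrative:

    # When screen output is enabled, the response may include an HTML
    # rendering of the answer in screen_out.data.
    for resp in assistant.Assist(self.gen_assist_requests(), DEADLINE_SECS):
        if resp.screen_out.data:
            with open('response.html', 'wb') as f:  # illustrative path
                f.write(resp.screen_out.data)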
Example #4
    def _create_config_request(self):
        audio_in_config = embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
        )
        audio_out_config = embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
            volume_percentage=50,
        )
        device_config = embedded_assistant_pb2.DeviceConfig(
            device_id=self.device_id,
            device_model_id=self.model_id,
        )
        dialog_state_in = embedded_assistant_pb2.DialogStateIn(
            conversation_state=self._conversation_state,
            language_code=aiy.i18n.get_language_code(),
        )
        assist_config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=audio_in_config,
            audio_out_config=audio_out_config,
            device_config=device_config,
            dialog_state_in=dialog_state_in,
        )

        return embedded_assistant_pb2.AssistRequest(config=assist_config)
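Example #4 only builds the first, config-only request; the rest of the stream still has to carry the audio. A minimal sketch of the implied companion method (the name _create_audio_request is an assumption, not shown in the example):

    def _create_audio_request(self, data):
        # Every AssistRequest after the first carries only audio bytes.
        return embedded_assistant_pb2.AssistRequest(audio_in=data)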
Example #5
File: grpc.py  Project: Xiaowenbi/Face-it-
    def _requests(self, recorder):
        audio_in_config = embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16', sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ)

        audio_out_config = embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
            volume_percentage=self._volume_percentage)

        dialog_state_in = embedded_assistant_pb2.DialogStateIn(
            conversation_state=self._conversation_state,
            language_code=self._language_code)

        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=audio_in_config,
            audio_out_config=audio_out_config,
            device_config=self._device_config,
            dialog_state_in=dialog_state_in)

        yield embedded_assistant_pb2.AssistRequest(config=config)

        for chunk in recorder.record(AUDIO_FORMAT,
                                     chunk_duration_sec=0.1,
                                     on_start=self._recording_started,
                                     on_stop=self._recording_stopped):
            yield embedded_assistant_pb2.AssistRequest(audio_in=chunk)
Example #6
    def gen_assist_requests(self):
        """Yields: AssistRequest messages to send to the API."""

        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.sample_rate,
                volume_percentage=self.volume_percentage,
            ),
            dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                language_code=self.language_code,
                conversation_state=self.conversation_state,
                is_new_conversation=self.is_new_conversation,
            ),
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            ))
        # Continue current conversation with later requests.
        self.is_new_conversation = False
        # The first AssistRequest must contain the AssistConfig
        # and no audio data.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        while self.listening:
            try:
                data = self.audio_queue.get(timeout=1)
            except queue.Empty:
                print('no data available')
                break
            # Subsequent requests need audio data, but not config.
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
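Example #6 decouples audio capture from the generator through audio_queue. A minimal producer sketch for the other side of that queue, assuming the third-party sounddevice library (not used in the example) and a 16 kHz mono LINEAR16 stream; the sample rate is an assumption, since the example reads it from self.sample_rate:

    import queue

    import sounddevice as sd  # assumed capture library

    audio_queue = queue.Queue()

    def callback(indata, frames, time_info, status):
        # Push raw little-endian 16-bit PCM chunks for the generator to drain.
        audio_queue.put(bytes(indata))

    # 16 kHz, mono, 16-bit matches a LINEAR16 AudioInConfig.
    stream = sd.RawInputStream(samplerate=16000, channels=1, dtype='int16',
                               callback=callback)
    stream.start()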
Example #7
def gen_assist_requests(input_stream):
    dialog_state_in = embedded_assistant_pb2.DialogStateIn(
        language_code=lang,
        conversation_state=b''
    )
    config = embedded_assistant_pb2.AssistConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=16000,
            volume_percentage=100,
        ),
        dialog_state_in=dialog_state_in,
        device_config=embedded_assistant_pb2.DeviceConfig(
            device_id=device_id,
            device_model_id=device_model_id,
        )
    )
    # Send the first AssistRequest message with the configuration.
    yield embedded_assistant_pb2.AssistRequest(config=config)
    while True:
        # Read the user request from the file.
        data = input_stream.read(block_size)
        if not data:
            break
        # Send subsequent AssistRequest messages with audio chunks.
        yield embedded_assistant_pb2.AssistRequest(audio_in=data)
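One way to drive Example #7, a sketch assuming a headerless LINEAR16 file recorded at 16 kHz; the filename and block size are illustrative, and block_size is the free variable the generator reads:

    block_size = int(16000 * 2 * 0.1)  # ~100 ms of 16-bit mono audio

    with open('query.raw', 'rb') as input_stream:  # illustrative filename
        for request in gen_assist_requests(input_stream):
            pass  # hand each request to the streaming Assist call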
Example #8
    def gen_assist_requests(self, chunks):
        """Yields: AssistRequest messages to send to the API."""

        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=16000,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=16000,
                volume_percentage=1,
            ),
            dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                # language_code=self.language_code,
                conversation_state=self.conversation_state,
                is_new_conversation=True,
            ),
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            ))
        # The first AssistRequest must contain the AssistConfig
        # and no audio data.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        for data in chunks():
            # Subsequent requests need audio data, but not config.
            if data:
                yield embedded_assistant_pb2.AssistRequest(audio_in=data)
Example #9
    def gen_assist_requests(self):
        dialog_state_in = embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code, conversation_state=b'')
        if self.conversation_state:
            logging.debug('Sending conversation state.')
            dialog_state_in.conversation_state = self.conversation_state
        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            dialog_state_in=dialog_state_in,
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            ))
        yield embedded_assistant_pb2.AssistRequest(config=config)
        for data in self.conversation_stream:
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
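Several of the examples pass volume_percentage in AudioOutConfig; the response side can also ask the device to change it (e.g. after "set volume to 50%"). A sketch of honoring that inside the response loop shown under Example #1, assuming the v1alpha2 DialogStateOut.volume_percentage field and the conversation_stream object from Example #9:

    # A non-zero volume_percentage in dialog_state_out is a request from
    # the Assistant to change playback volume for future audio_out.
    volume = resp.dialog_state_out.volume_percentage
    if volume > 0:
        self.conversation_stream.volume_percentage = volume
        logging.info('Setting volume to %s%%', volume)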