Example #1
def gen_converse_requests():
    converse_state = None
    if conversation_state_bytes:
        logging.debug('Sending converse_state: %s',
                      conversation_state_bytes)
        converse_state = embedded_assistant_pb2.ConverseState(
            conversation_state=conversation_state_bytes,
        )
    config = embedded_assistant_pb2.ConverseConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=int(audio_sample_rate),
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=int(audio_sample_rate),
            volume_percentage=volume_percentage,
        ),
        converse_state=converse_state
    )
    # The first ConverseRequest must contain the ConverseConfig
    # and no audio data.
    yield embedded_assistant_pb2.ConverseRequest(config=config)
    for data in conversation_stream:
        # Subsequent requests need audio data, but not config.
        yield embedded_assistant_pb2.ConverseRequest(audio_in=data)
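For orientation, a generator like the one above is typically handed straight to the bidirectional Converse RPC. The sketch below is a minimal, hypothetical driver and not part of the example: it assumes the v1alpha1 bindings that expose EmbeddedAssistantStub from embedded_assistant_pb2 (as the older SDK samples did), an OAuth2 credentials object loaded elsewhere, and illustrative names such as DEADLINE_SECS.

import logging

import google.auth.transport.grpc
import google.auth.transport.requests
from google.assistant.embedded.v1alpha1 import embedded_assistant_pb2

ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
DEADLINE_SECS = 185  # illustrative per-request deadline


def converse(credentials, request_generator):
    """Streams the generated ConverseRequests and iterates the responses."""
    # Authorize a gRPC channel with the user's OAuth2 credentials.
    http_request = google.auth.transport.requests.Request()
    channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, ASSISTANT_API_ENDPOINT)
    assistant = embedded_assistant_pb2.EmbeddedAssistantStub(channel)
    for resp in assistant.Converse(request_generator, DEADLINE_SECS):
        if resp.error.code:
            logging.error('server error: %s', resp.error.message)
            break
        if resp.audio_out.audio_data:
            pass  # play back resp.audio_out.audio_data here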
Example #2
    def gen_converse_requests(self):
        """Yields: ConverseRequest messages to send to the API."""

        converse_state = None
        if self.conversation_state:
            logging.debug('Sending converse_state: %s',
                          self.conversation_state)
            converse_state = embedded_assistant_pb2.ConverseState(
                conversation_state=self.conversation_state)
        config = embedded_assistant_pb2.ConverseConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            converse_state=converse_state)
        # The first ConverseRequest must contain the ConverseConfig
        # and no audio data.
        yield embedded_assistant_pb2.ConverseRequest(config=config)
        for data in self.conversation_stream:
            # Subsequent requests need audio data, but not config.
            yield embedded_assistant_pb2.ConverseRequest(audio_in=data)
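Examples #2 and #3 rely on a conversation_stream object provided elsewhere in the sample (it wraps the sound device). Purely as a hypothetical stand-in for offline experiments, anything iterable that yields raw LINEAR16 chunks and exposes sample_rate and volume_percentage attributes would satisfy the generator above; the class below, backed by a WAV file, is such a sketch and is not part of the SDK.

import wave


class FileConversationStream:
    """Hypothetical drop-in for conversation_stream, reading from a WAV file."""

    def __init__(self, path, chunk_frames=1024, volume_percentage=50):
        self._wav = wave.open(path, 'rb')
        self._chunk_frames = chunk_frames
        self.sample_rate = self._wav.getframerate()
        self.volume_percentage = volume_percentage

    def __iter__(self):
        # Yield fixed-size chunks of raw sample data until the file is exhausted.
        while True:
            data = self._wav.readframes(self._chunk_frames)
            if not data:
                return
            yield data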
Example #3
    def gen_assist_requests(self):
        """Yields: AssistRequest messages to send to the API."""

        config = embedded_assistant_pb2.AssistConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                conversation_state=self.conversation_state,
                is_new_conversation=self.is_new_conversation,
            ),
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            ))
        if self.display:
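            # PLAYING is assumed to be an alias for
            # embedded_assistant_pb2.ScreenOutConfig.PLAYING defined elsewhere.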
            config.screen_out_config.screen_mode = PLAYING
        # Continue current conversation with later requests.
        self.is_new_conversation = False
        # The first AssistRequest must contain the AssistConfig
        # and no audio data.
        yield embedded_assistant_pb2.AssistRequest(config=config)
        for data in self.conversation_stream:
            # Subsequent requests need audio data, but not config.
            yield embedded_assistant_pb2.AssistRequest(audio_in=data)
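On the consuming side, the requests yielded by gen_assist_requests() are streamed through the v1alpha2 Assist call, and the responses carry the next conversation_state plus the synthesized audio. The loop below is only a hedged sketch of that side: assistant is assumed to be an embedded_assistant_pb2_grpc.EmbeddedAssistantStub bound to an authorized channel, deadline a timeout in seconds, and the stop_recording()/write() methods are assumed on the stream object.

from google.assistant.embedded.v1alpha2 import embedded_assistant_pb2


def run_assist_turn(assistant, request_generator, conversation_stream, deadline):
    """Streams one Assist turn and returns the updated conversation_state."""
    conversation_state = None
    for resp in assistant.Assist(request_generator, deadline):
        if resp.event_type == embedded_assistant_pb2.AssistResponse.END_OF_UTTERANCE:
            # The server heard the end of the spoken request; stop sending
            # microphone audio (stop_recording() is assumed on the stream).
            conversation_stream.stop_recording()
        if resp.dialog_state_out.conversation_state:
            # Opaque bytes to feed into the next gen_assist_requests() call.
            conversation_state = resp.dialog_state_out.conversation_state
        if resp.audio_out.audio_data:
            # Play the synthesized reply (write() is assumed on the stream).
            conversation_stream.write(resp.audio_out.audio_data)
    return conversation_state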
Example #4
def gen_converse_requests(self):
    converse_state = None
    if self.conversation_state_bytes:
        converse_state = embedded_assistant_pb2.ConverseState(
            conversation_state=self.conversation_state_bytes)
    config = embedded_assistant_pb2.ConverseConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16', sample_rate_hertz=int(self.audio_sample_rate)),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16', sample_rate_hertz=int(self.audio_sample_rate),
            volume_percentage=self.volume_percentage),
        converse_state=converse_state)
    # The first ConverseRequest carries only the config; the rest carry audio.
    yield embedded_assistant_pb2.ConverseRequest(config=config)
    for data in self.conversation_stream:
        yield embedded_assistant_pb2.ConverseRequest(audio_in=data)
Example #5
    def _create_config_request(self):
        audio_in_config = embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
        )
        audio_out_config = embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
            volume_percentage=100,
        )
        converse_config = embedded_assistant_pb2.ConverseConfig(
            audio_in_config=audio_in_config,
            audio_out_config=audio_out_config,
        )

        return embedded_assistant_pb2.ConverseRequest(config=converse_config)
def gen_converse_requests(samples,
                          sample_rate,
                          conversation_state=None,
                          volume_percentage=50):
    """Returns a generator of ConverseRequest proto messages from the
       given audio samples.

    Args:
      samples: generator of audio samples.
      sample_rate: audio data sample rate in hertz.
      conversation_state: opaque bytes describing current conversation state.
    """
    audio_in_config = embedded_assistant_pb2.AudioInConfig(
        encoding='LINEAR16',
        sample_rate_hertz=int(sample_rate),
    )
    audio_out_config = embedded_assistant_pb2.AudioOutConfig(
        encoding='LINEAR16',
        sample_rate_hertz=int(sample_rate),
        volume_percentage=volume_percentage,
    )
    state_config = None
    if conversation_state:
        logging.debug('Sending converse_state: %s', conversation_state)
        state_config = embedded_assistant_pb2.ConverseState(
            conversation_state=conversation_state)
    converse_config = embedded_assistant_pb2.ConverseConfig(
        audio_in_config=audio_in_config,
        audio_out_config=audio_out_config,
        converse_state=state_config,
    )
    # The first ConverseRequest must contain the ConverseConfig
    # and no audio data.
    yield embedded_assistant_pb2.ConverseRequest(config=converse_config)
    for data in samples:
        # Subsequent requests need audio data, but not config.
        yield embedded_assistant_pb2.ConverseRequest(audio_in=data)
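To close the loop on Example #5, one plausible way to run a single turn and carry the returned state into the next turn is sketched below; assistant is assumed to be a v1alpha1 EmbeddedAssistantStub and deadline_secs an illustrative timeout, neither of which appears in the original snippet.

def run_turn(assistant, samples, sample_rate, conversation_state, deadline_secs):
    """Runs one Converse turn and returns the state to use on the next turn."""
    requests = gen_converse_requests(samples, sample_rate,
                                     conversation_state=conversation_state)
    next_state = conversation_state
    for resp in assistant.Converse(requests, deadline_secs):
        if resp.result.conversation_state:
            # Opaque bytes the server expects back on the next request.
            next_state = resp.result.conversation_state
        if resp.audio_out.audio_data:
            pass  # play back the synthesized response audio here
    return next_state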