Example #1
def gen_converse_requests():
    converse_state = None
    if conversation_state_bytes:
        logging.debug('Sending converse_state: %s',
                      conversation_state_bytes)
        converse_state = embedded_assistant_pb2.ConverseState(
            conversation_state=conversation_state_bytes,
        )
    config = embedded_assistant_pb2.ConverseConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=int(audio_sample_rate),
        ),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=int(audio_sample_rate),
            volume_percentage=volume_percentage,
        ),
        converse_state=converse_state
    )
    # The first ConverseRequest must contain the ConverseConfig
    # and no audio data.
    yield embedded_assistant_pb2.ConverseRequest(config=config)
    for data in conversation_stream:
        # Subsequent requests need audio data, but not config.
        yield embedded_assistant_pb2.ConverseRequest(audio_in=data)
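For context, a hedged sketch of how a request generator like this is typically consumed: Converse is a bidirectional streaming RPC, so the whole generator is handed to the stub and responses are read back from the returned iterator. The channel helper and the play function below are hypothetical, and the stub class may live in embedded_assistant_pb2_grpc rather than embedded_assistant_pb2 depending on the package version.

from google.assistant.embedded.v1alpha1 import embedded_assistant_pb2

# Hypothetical helper: the real samples build an OAuth-authorized gRPC channel
# with google.auth; the details are omitted here.
channel = create_authorized_channel('embeddedassistant.googleapis.com')
assistant = embedded_assistant_pb2.EmbeddedAssistantStub(channel)

# Converse is a bidirectional streaming RPC: pass the whole request generator
# and iterate over the responses that come back.
for resp in assistant.Converse(gen_converse_requests()):
    if resp.HasField('audio_out'):
        play(resp.audio_out.audio_data)  # hypothetical playback helper
    if resp.result.conversation_state:
        # Keep the opaque state so the next turn can continue the conversation.
        conversation_state_bytes = resp.result.conversation_state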
Example #2
    def gen_converse_requests(self):
        """Yields: ConverseRequest messages to send to the API."""

        converse_state = None
        if self.conversation_state:
            logging.debug('Sending converse_state: %s',
                          self.conversation_state)
            converse_state = embedded_assistant_pb2.ConverseState(
                conversation_state=self.conversation_state)
        config = embedded_assistant_pb2.ConverseConfig(
            audio_in_config=embedded_assistant_pb2.AudioInConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
            ),
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            converse_state=converse_state)
        # The first ConverseRequest must contain the ConverseConfig
        # and no audio data.
        yield embedded_assistant_pb2.ConverseRequest(config=config)
        for data in self.conversation_stream:
            # Subsequent requests need audio data, but not config.
            yield embedded_assistant_pb2.ConverseRequest(audio_in=data)
Example #3
def gen_converse_requests(self):
    converse_state = None
    if self.conversation_state_bytes:
        converse_state = embedded_assistant_pb2.ConverseState(
            conversation_state=self.conversation_state_bytes)
    config = embedded_assistant_pb2.ConverseConfig(
        audio_in_config=embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16', sample_rate_hertz=int(self.audio_sample_rate)),
        audio_out_config=embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16', sample_rate_hertz=int(self.audio_sample_rate),
            volume_percentage=self.volume_percentage),
        converse_state=converse_state)
    # The first request carries only the config; the rest carry audio.
    yield embedded_assistant_pb2.ConverseRequest(config=config)
    for data in self.conversation_stream:
        yield embedded_assistant_pb2.ConverseRequest(audio_in=data)
Example #4
def log_converse_request_without_audio(converse_request):
    """Log ConverseRequest fields without audio data."""
    if logging.getLogger().isEnabledFor(logging.DEBUG):
        req_copy = embedded_assistant_pb2.ConverseRequest()
        req_copy.CopyFrom(converse_request)
        if len(req_copy.audio_in) > 0:
            size = len(req_copy.audio_in)
            req_copy.ClearField('audio_in')
            logging.debug('ConverseRequest: audio_in (%d bytes)', size)
            return
        logging.debug('ConverseRequest: %s', req_copy)
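A hedged sketch of how a logging helper like this can be hooked into the outgoing stream: wrap the request generator and log each ConverseRequest as it passes through. The wrapper name is illustrative, not part of the API.

def iter_logged_requests(requests):
    # Log every outgoing request (with audio payloads elided) before yielding
    # it to the gRPC stream.
    for req in requests:
        log_converse_request_without_audio(req)
        yield req

# e.g. assistant.Converse(iter_logged_requests(gen_converse_requests()))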
Example #5
def gen_converse_requests(samples,
                          sample_rate,
                          conversation_state=None,
                          volume_percentage=50):
    """Returns a generator of ConverseRequest proto messages from the
       given audio samples.

    Args:
      samples: generator of audio samples.
      sample_rate: audio data sample rate in hertz.
      conversation_state: opaque bytes describing current conversation state.
    """
    audio_in_config = embedded_assistant_pb2.AudioInConfig(
        encoding='LINEAR16',
        sample_rate_hertz=int(sample_rate),
    )
    audio_out_config = embedded_assistant_pb2.AudioOutConfig(
        encoding='LINEAR16',
        sample_rate_hertz=int(sample_rate),
        volume_percentage=volume_percentage,
    )
    state_config = None
    if conversation_state:
        logging.debug('Sending converse_state: %s', conversation_state)
        state_config = embedded_assistant_pb2.ConverseState(
            conversation_state=conversation_state)
    converse_config = embedded_assistant_pb2.ConverseConfig(
        audio_in_config=audio_in_config,
        audio_out_config=audio_out_config,
        converse_state=state_config,
    )
    # The first ConverseRequest must contain the ConverseConfig
    # and no audio data.
    yield embedded_assistant_pb2.ConverseRequest(config=converse_config)
    for data in samples:
        # Subsequent requests need audio data, but not config.
        yield embedded_assistant_pb2.ConverseRequest(audio_in=data)
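A minimal usage sketch for the generator above, assuming the audio is already available as 16-bit LINEAR16 PCM bytes; the chunking helper, the pcm_bytes name, and the chunk size are illustrative, not part of the API.

def chunked(pcm_bytes, chunk_size=3200):
    # 3200 bytes is roughly 100 ms of 16 kHz, 16-bit mono audio.
    for i in range(0, len(pcm_bytes), chunk_size):
        yield pcm_bytes[i:i + chunk_size]

# pcm_bytes: raw LINEAR16 audio captured elsewhere (illustrative).
requests = gen_converse_requests(chunked(pcm_bytes), sample_rate=16000)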
Example #6
    def _create_config_request(self):
        audio_in_config = embedded_assistant_pb2.AudioInConfig(
            encoding='LINEAR16',
            sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
        )
        audio_out_config = embedded_assistant_pb2.AudioOutConfig(
            encoding='LINEAR16',
            sample_rate_hertz=AUDIO_SAMPLE_RATE_HZ,
            volume_percentage=100,
        )
        converse_config = embedded_assistant_pb2.ConverseConfig(
            audio_in_config=audio_in_config,
            audio_out_config=audio_out_config,
        )

        return embedded_assistant_pb2.ConverseRequest(config=converse_config)
Example #7
def _create_audio_request(self, data):
    return embedded_assistant_pb2.ConverseRequest(audio_in=data)
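Examples #6 and #7 split the two request shapes into separate helpers. A sketch, assuming an iterable of audio chunks is available, of how they would typically be combined into the request stream for the Converse call; the method name _gen_requests is illustrative.

def _gen_requests(self, audio_chunks):
    # First the config-only request, then one audio request per chunk,
    # matching the ordering the Converse stream expects.
    yield self._create_config_request()
    for chunk in audio_chunks:
        yield self._create_audio_request(chunk)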