Code Example #1
    def init_sound(self):
        # Configure audio source and sink.
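        # NOTE: input_audio_file, output_audio_file and the audio_* settings
        # are assumed to be defined at module level in the original source of
        # this excerpt; they are not attributes of self here.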
        audio_device = None
        if input_audio_file:
            self._audio_source = audio_helpers.WaveSource(
                open(input_audio_file, 'rb'),
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width)
        else:
            self._audio_source = audio_device = (
                audio_device or audio_helpers.SoundDeviceStream(
                    sample_rate=audio_sample_rate,
                    sample_width=audio_sample_width,
                    block_size=audio_block_size,
                    flush_size=audio_flush_size))
        if output_audio_file:
            self._audio_sink = audio_helpers.WaveSink(
                open(output_audio_file, 'wb'),
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width)
        else:
            self._audio_sink = audio_device = (
                audio_device or audio_helpers.SoundDeviceStream(
                    sample_rate=audio_sample_rate,
                    sample_width=audio_sample_width,
                    block_size=audio_block_size,
                    flush_size=audio_flush_size))

        # Create conversation stream with the given audio source and sink.
        self._conversation_stream = audio_helpers.ConversationStream(
            source=self._audio_source,
            sink=self._audio_sink,
            iter_size=audio_iter_size,
        )
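
This variant only constructs the stream, and unlike most of the variants below it does not pass sample_width to ConversationStream (presumably a different revision of audio_helpers). As a minimal sketch of the stream's lifecycle, pieced together from Examples #5 and #8 below (send_request and response_audio are hypothetical placeholders, not names from any sample):

# Hypothetical usage sketch; assumes the audio_helpers API shown below.
conversation_stream.start_recording()      # capture the spoken request
for chunk in conversation_stream:
    # Iteration ends after stop_recording(), which the response handler
    # calls when the API signals END_OF_UTTERANCE.
    send_request(chunk)                    # placeholder for the gRPC send
conversation_stream.start_playback()       # switch the device to playback
conversation_stream.write(response_audio)  # play the assistant's reply
conversation_stream.stop_playback()
conversation_stream.close()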
Code Example #2
def main(api_endpoint, credentials, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Set up logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint,
        creds,
        ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
        grpc_channel_options=kwargs.get('grpc_channel_option'))
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
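    # audio_device is shared below so that, when neither file argument is
    # given, source and sink reuse a single SoundDeviceStream instance.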
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with SampleAssistant(conversation_stream, grpc_channel,
                         grpc_deadline) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.converse()
            return

        mic = Microphone()
        continue_conversation = False
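        # converse() returns True when the Assistant expects a follow-on
        # turn from the user (cf. DIALOG_FOLLOW_ON in Example #8).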
        while True:
            if continue_conversation or mic.wakeup('respeaker'):
                continue_conversation = assistant.converse()
Code Example #3
def main(*args, **kwargs):

    # init ROS
    rospy.init_node('assistant_robin_server')

    # get parameters
    api_endpoint = rospy.get_param(
        '~api_endpoint', 'embeddedassistant.googleapis.com'
    )  # Address of Google Assistant API service.
    credentials = rospy.get_param(
        '~credentials',
        os.path.join(click.get_app_dir(common_settings.ASSISTANT_APP_NAME),
                     common_settings.ASSISTANT_CREDENTIALS_FILENAME)
    )  # Path to read OAuth2 credentials.
    verbose = rospy.get_param('~verbose', False)  # Verbose logging.
    audio_sample_rate = rospy.get_param(
        '~audio_sample_rate', common_settings.DEFAULT_AUDIO_SAMPLE_RATE
    )  # Audio sample rate in hertz.
    audio_sample_width = rospy.get_param(
        '~audio_sample_width', common_settings.DEFAULT_AUDIO_SAMPLE_WIDTH
    )  # Audio sample width in bytes.
    audio_iter_size = rospy.get_param(
        '~audio_iter_size', common_settings.DEFAULT_AUDIO_ITER_SIZE
    )  # Size of each read during audio stream iteration in bytes.
    audio_block_size = rospy.get_param(
        '~audio_block_size', common_settings.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
    )  # Block size in bytes for each audio device read and write operation.
    audio_flush_size = rospy.get_param(
        '~audio_flush_size', common_settings.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE
    )  # Size of silence data in bytes written during flush operation.
    grpc_deadline = rospy.get_param(
        '~grpc_deadline',
        common_settings.DEFAULT_GRPC_DEADLINE)  # gRPC deadline in seconds.

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
    except Exception as e:
        rospy.logerr('Error loading credentials: %s', e)
        rospy.logerr('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint,
        creds,
        ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
        grpc_channel_options=kwargs.get('grpc_channel_option'))
    rospy.logdebug('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    audio_source = audio_device = (audio_device
                                   or audio_helpers.SoundDeviceStream(
                                       sample_rate=audio_sample_rate,
                                       sample_width=audio_sample_width,
                                       block_size=audio_block_size,
                                       flush_size=audio_flush_size))
    audio_sink = audio_device = (audio_device
                                 or audio_helpers.SoundDeviceStream(
                                     sample_rate=audio_sample_rate,
                                     sample_width=audio_sample_width,
                                     block_size=audio_block_size,
                                     flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with SampleAssistant(conversation_stream, grpc_channel,
                         grpc_deadline) as assistant:
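        # Keep a module-global reference so the ROS 'activate' service
        # callback (handle_activate) can reach the assistant.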
        global ass
        ass = assistant
        s = rospy.Service('activate', Activate, handle_activate)
        rospy.loginfo("Assistant ready.")

        rospy.spin()
Code Example #4
def main(api_endpoint, credentials, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Set up logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint,
        creds,
        ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
        grpc_channel_options=kwargs.get('grpc_channel_option'))
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with SampleAssistant(conversation_stream, grpc_channel,
                         grpc_deadline) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.converse()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.converse()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
Code Example #5
def main(api_endpoint, credentials, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Set up logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load credentials.
    try:
        creds = auth_helpers.load_credentials(
            credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run auth_helpers to initialize new OAuth2 credentials.')
        return

    # Create gRPC channel
    grpc_channel = auth_helpers.create_grpc_channel(
        api_endpoint,
        creds,
        ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
        grpc_channel_options=kwargs.get('grpc_channel_option'))
    logging.info('Connecting to %s', api_endpoint)
    # Create Google Assistant API gRPC client.
    assistant = embedded_assistant_pb2.EmbeddedAssistantStub(grpc_channel)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
    )

    # Interactive by default.
    wait_for_user_trigger = True
    # If file arguments are supplied, don't wait for user trigger.
    if input_audio_file or output_audio_file:
        wait_for_user_trigger = False

    # Stores an opaque blob provided in ConverseResponse that,
    # when provided in a follow-up ConverseRequest,
    # gives the Assistant a context marker within the current state
    # of the multi-Converse()-RPC "conversation".
    # This value, along with MicrophoneMode, supports a more natural
    # "conversation" with the Assistant.
    conversation_state_bytes = None

    # Stores the current volume percentage.
    # Note: no volume change is currently implemented in this sample.
    volume_percentage = 50

    while True:

        conversation_stream.start_recording()
        logging.info('Recording audio request.')

        # This generator yields ConverseRequest to send to the gRPC
        # Google Assistant API.
        def gen_converse_requests():
            converse_state = None
            if conversation_state_bytes:
                logging.debug('Sending converse_state: %s',
                              conversation_state_bytes)
                converse_state = embedded_assistant_pb2.ConverseState(
                    conversation_state=conversation_state_bytes)
            config = embedded_assistant_pb2.ConverseConfig(
                audio_in_config=embedded_assistant_pb2.AudioInConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=int(audio_sample_rate),
                ),
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=int(audio_sample_rate),
                    volume_percentage=volume_percentage,
                ),
                converse_state=converse_state)
            # The first ConverseRequest must contain the ConverseConfig
            # and no audio data.
            yield embedded_assistant_pb2.ConverseRequest(config=config)
            for data in conversation_stream:
                # Subsequent requests need audio data, but not config.
                yield embedded_assistant_pb2.ConverseRequest(audio_in=data)

        def iter_converse_requests():
            for c in gen_converse_requests():
                assistant_helpers.log_converse_request_without_audio(c)
                yield c
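            # The request generator is exhausted once stop_recording() is
            # called (on END_OF_UTTERANCE below), so playback can begin.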
            conversation_stream.start_playback()

        # This generator yields ConverseResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in assistant.Converse(iter_converse_requests(),
                                       grpc_deadline):
            assistant_helpers.log_converse_response_without_audio(resp)
            if resp.error.code != code_pb2.OK:
                logging.error('server error: %s', resp.error.message)
                break
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                conversation_stream.stop_recording()
            if resp.result.spoken_request_text:
                logging.info('Transcript of user request: "%s".',
                             resp.result.spoken_request_text)
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                conversation_stream.write(resp.audio_out.audio_data)
            if resp.result.spoken_response_text:
                logging.info(
                    'Transcript of TTS response '
                    '(only populated from IFTTT): "%s".',
                    resp.result.spoken_response_text)
            if resp.result.conversation_state:
                conversation_state_bytes = resp.result.conversation_state
            if resp.result.volume_percentage != 0:
                volume_percentage = resp.result.volume_percentage
                logging.info('Volume should be set to %s%%', volume_percentage)
            if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                wait_for_user_trigger = False
                logging.info('Expecting follow-on query from user.')
            elif resp.result.microphone_mode == CLOSE_MICROPHONE:
                wait_for_user_trigger = True
        logging.info('Finished playing assistant response.')
        conversation_stream.stop_playback()
        # If file arguments are supplied, end the conversation.
        if input_audio_file or output_audio_file:
            break
        if wait_for_user_trigger:
            break
    conversation_stream.close()
Code Example #6
    def begin_play(self):
        """Samples for the Google Assistant API.

        Examples:
          Run the sample with microphone input and speaker output:

            $ python -m googlesamples.assistant

          Run the sample with file input and speaker output:

            $ python -m googlesamples.assistant -i <input file>

          Run the sample with file input and output:

            $ python -m googlesamples.assistant -i <input file> -o <output file>
        """
        ue.log('Initializing Google Samples API.')
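        # NOTE: verbose, credentials, api_endpoint, the audio_* settings and
        # kwargs are assumed to be defined at module level in the original
        # source of this excerpt.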
        # Setup logging.
        logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

        # Load credentials.
        try:
            creds = auth_helpers.load_credentials(
                credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
        except Exception as e:
            ue.log_error('Error loading credentials: ' + str(e))
            ue.log_error(
                'Run auth_helpers to initialize new OAuth2 credentials.')
            return

        # Create an authorized gRPC channel.
        grpc_channel = auth_helpers.create_grpc_channel(
            api_endpoint,
            creds,
            ssl_credentials_file=kwargs.get('ssl_credentials_for_testing'),
            grpc_channel_options=kwargs.get('grpc_channel_option'))
        ue.log('Connecting to ' + str(api_endpoint))

        # Configure audio source and sink.
        audio_device = None
        if input_audio_file:
            audio_source = audio_helpers.WaveSource(
                open(input_audio_file, 'rb'),
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width)
        else:
            audio_source = audio_device = (audio_device
                                           or audio_helpers.SoundDeviceStream(
                                               sample_rate=audio_sample_rate,
                                               sample_width=audio_sample_width,
                                               block_size=audio_block_size,
                                               flush_size=audio_flush_size))
        if output_audio_file:
            audio_sink = audio_helpers.WaveSink(
                open(output_audio_file, 'wb'),
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width)
        else:
            audio_sink = audio_device = (audio_device
                                         or audio_helpers.SoundDeviceStream(
                                             sample_rate=audio_sample_rate,
                                             sample_width=audio_sample_width,
                                             block_size=audio_block_size,
                                             flush_size=audio_flush_size))
        # Create conversation stream with the given audio source and sink.
        conversation_stream = audio_helpers.ConversationStream(
            source=audio_source,
            sink=audio_sink,
            iter_size=audio_iter_size,
            sample_width=audio_sample_width,
        )
Code Example #7
    def begin_play(self):
        """Samples for the Google Assistant API.

        Examples:
          Run the sample with microphone input and speaker output:

            $ python -m googlesamples.assistant

          Run the sample with file input and speaker output:

            $ python -m googlesamples.assistant -i <input file>

          Run the sample with file input and output:

            $ python -m googlesamples.assistant -i <input file> -o <output file>
        """
        ue.log('Initializing Google Samples API.')

        # Initialize defaults
        credentials = os.path.join(
            sys.path[0], common_settings.ASSISTANT_CREDENTIALS_FILENAME)

        # Load credentials.
        try:
            creds = auth_helpers.load_credentials(
                credentials, scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
        except Exception:
            # Maybe we didn't load the credentials yet?
            # This could happen on the first run.
            creds = auth_helpers.credentials_flow_interactive(
                credentials, common_settings.ASSISTANT_OAUTH_SCOPE)
            auth_helpers.save_credentials(credentials, creds)
            try:
                creds = auth_helpers.load_credentials(
                    credentials,
                    scopes=[common_settings.ASSISTANT_OAUTH_SCOPE])
            except Exception as e:
                ue.log_error('Error loading credentials: ' + str(e))
                ue.log_error(
                    'Run auth_helpers to initialize new OAuth2 credentials.')
                return

        ue.log('Begin play done!')

        # Define endpoint
        # This might be where you can inject custom API.AI behaviors?
        api_endpoint = ASSISTANT_API_ENDPOINT

        # Create an authorized gRPC channel.
        grpc_channel = auth_helpers.create_grpc_channel(api_endpoint, creds)
        ue.log('Connecting to ' + str(api_endpoint))

        # Set up audio parameters
        audio_sample_rate = common_settings.DEFAULT_AUDIO_SAMPLE_RATE
        audio_sample_width = common_settings.DEFAULT_AUDIO_SAMPLE_WIDTH
        audio_iter_size = common_settings.DEFAULT_AUDIO_ITER_SIZE
        audio_block_size = common_settings.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE
        audio_flush_size = common_settings.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE

        # Configure audio source and sink.
        audio_device = None
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))

        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
        # Create conversation stream with the given audio source and sink.
        conversation_stream = audio_helpers.ConversationStream(
            source=audio_source,
            sink=audio_sink,
            iter_size=audio_iter_size,
            sample_width=audio_sample_width,
        )

        ue.log('Audio device: ' + str(audio_device))

        self.assistant = SampleAssistant(conversation_stream, grpc_channel,
                                         common_settings.DEFAULT_GRPC_DEADLINE)
        self.assistant.converse()
Code Example #8
    def assist(self):

        # Configure audio source and sink.
        self.audio_device = None
        self.audio_source = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))

        self.audio_sink = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))

        # Create conversation stream with the given audio source and sink.
        self.conversation_stream = audio_helpers.ConversationStream(
            source=self.audio_source,
            sink=self.audio_sink,
            iter_size=self.audio_iter_size,
            sample_width=self.audio_sample_width)
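        # If the Converse RPC fails (e.g. connection reset), the assistant
        # client is recreated and assist() is retried once below.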
        restart = False
        continue_dialog = True
        try:
            while continue_dialog:
                continue_dialog = False
                # snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
                self.conversation_stream.start_recording()
                self.logger.info('Recording audio request.')

                # This generator yields ConverseResponse proto messages
                # received from the gRPC Google Assistant API.
                for resp in self.assistant.Converse(
                        self._iter_converse_requests(), self.grpc_deadline):
                    assistant_helpers.log_converse_response_without_audio(resp)
                    if resp.error.code != code_pb2.OK:
                        self.logger.error('server error: %s',
                                          resp.error.message)
                        break
                    if resp.event_type == Assistant.END_OF_UTTERANCE:
                        self.logger.info('End of audio request detected')
                        self.conversation_stream.stop_recording()
                    if resp.result.spoken_request_text:
                        self.logger.info('Transcript of user request: "%s".',
                                         resp.result.spoken_request_text)
                        self.logger.info('Playing assistant response.')
                    if len(resp.audio_out.audio_data) > 0:
                        self.conversation_stream.write(
                            resp.audio_out.audio_data)
                    if resp.result.spoken_response_text:
                        self.logger.info(
                            'Transcript of TTS response '
                            '(only populated from IFTTT): "%s".',
                            resp.result.spoken_response_text)
                    if resp.result.conversation_state:
                        self.conversation_state_bytes = resp.result.conversation_state
                    if resp.result.volume_percentage != 0:
                        volume_percentage = resp.result.volume_percentage
                        self.logger.info('Volume should be set to %s%%',
                                         volume_percentage)
                    if resp.result.microphone_mode == self.DIALOG_FOLLOW_ON:
                        continue_dialog = True
                        self.logger.info(
                            'Expecting follow-on query from user.')
                self.logger.info('Finished playing assistant response.')
                self.conversation_stream.stop_playback()
        except Exception:
            self._create_assistant()
            self.logger.exception('Skipping because of connection reset')
            restart = True
        try:
            self.conversation_stream.close()
            if restart:
                self.assist()
        except Exception:
            self.logger.error('Failed to close conversation_stream.')