示例#1
0
	async def gassist(self, ctx, *, query):
		'''PFXgassist <query>'''
		# Send `query` to the Google Assistant; play the spoken reply in the
		# author's voice channel when possible, otherwise upload it as a file.
		await ctx.channel.trigger_typing()
		loop = self.bot.loop
		vc = True
		uploadresp = False
		player = False
		try:
			if not ctx.author.voice.channel:
				vc = False
				uploadresp = True
		except AttributeError:
			# ctx.author.voice is None when the author is not in a voice channel.
			vc = False
			uploadresp = True
		if vc:
			player = self.bot.wavelink.get_player(ctx.guild.id, cls=MusicPlayer)
			player.gassist = True
			player.current_gassist_query = query
		if player and isinstance(player.current, MusicTrack):
			# Don't interrupt music that is already playing; upload instead.
			uploadresp = True
		# NOTE(review): the sink writes WAV data but the file is named .mp3 —
		# presumably downstream playback copes with the mismatch; confirm.
		audio_sink = audio_helpers.WaveSink(
			open(f'{ctx.author.id}.mp3', 'wb'),
			sample_rate=16000,
			sample_width=2
		)
		stream = audio_helpers.ConversationStream(
			source=None,
			sink=audio_sink,
			iter_size=3200,
			sample_width=2,
		)
		# gassistant.assist blocks, so run it off the event loop.
		await loop.run_in_executor(None, func=functools.partial(gassistant.assist, query, stream))
		if os.path.exists(f'{ctx.author.id}.mp3'):
			if uploadresp:
				file = discord.File(f'{ctx.author.id}.mp3', 'gassist.mp3')
				await self.bot.loop.run_in_executor(None, func=functools.partial(self.bot.datadog.increment, 'gassist.uploaded'))
				return await ctx.send(file=file)
			# Reinvoke the play command with the response file as the track.
			alt_ctx = await copy_context_with(ctx, content=ctx.prefix + f'play {ctx.author.id}.mp3')
			await alt_ctx.command.reinvoke(alt_ctx)
			await self.bot.loop.run_in_executor(None, func=functools.partial(self.bot.datadog.increment, 'gassist.played'))
			await asyncio.sleep(3)
			track = player.current
			# Use `is None` (identity) rather than `== None` for the singleton.
			if track is None:
				await player.destroy_controller()
				await player.destroy()
				await player.disconnect()
				return
			if track.title == 'Unknown title':
				# Wait for the response to finish, then tear the player down.
				length = track.length / 1000
				await asyncio.sleep(length)
				await player.destroy_controller()
				await player.destroy()
				await player.disconnect()
示例#2
0
def handle_response(response):
    """React to a Dialogflow response.

    Emits coded messages through ``send`` depending on the matched intent
    action, and either retries the audio through the Google Assistant API
    (on "What was that?") or speaks the fulfillment text via
    simple_google_tts.
    """
    #TODO Turn into a series of case statements
    #Customize this with your own response actions
    result = response.query_result
    action = result.action
    text = result.fulfillment_text

    if action == "ask_for_hours":
        if text == "What is the professor?":
            send('2.4.1')
        else:
            send('3.3.1')

    if action == "rotate_servo":
        send('1.4.1')

    if action == "ask_for_event":
        send('3.3.1')

    if text == "What was that?":
        send('4.0.1')

        # Hand the captured audio to the Google Assistant API for a retry.
        source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
        sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
        stream = audio_helpers.ConversationStream(
            source=source,
            sink=sink,
            iter_size=audio_iter_size,
            sample_width=audio_sample_width)
        # Assistant object from pushtotalk.py drives one conversation turn.
        assistant = pushtotalk.SampleAssistant(
            lang, device_model_id, device_id, stream,
            grpc_channel, grpc_deadline, device_handler)
        assistant.assist()  # Run the assistant
        send('1.0.3')
        play_audio_file('output.wav')  # Play response
        listen()
    else:
        #TODO Run as function rather than using os.system()
        os.system('./simple_google_tts en "%s"' % text)
        send('1.0.3')
        listen()
示例#3
0
def query():
    """Run one conversation turn against the Google Assistant API.

    Loads OAuth 2.0 credentials from the google-oauthlib-tool app dir,
    opens an authorized gRPC channel, wires a WAV source/sink pair in the
    temp dir (in.wav -> out.wav), and runs a single ``converse()`` turn.
    Prints a diagnostic and returns early if credentials cannot be loaded
    or refreshed.
    """
    # Load OAuth 2.0 credentials.
    credentials = os.path.join(click.get_app_dir('google-oauthlib-tool'),
                               'credentials.json')
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        # Surface the actual failure instead of discarding it.
        print("credentials didnt work: %s" % e)
        return

    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, ASSISTANT_API_ENDPOINT)

    sample_rate_d = 16000  # Hz
    sample_width_d = 2     # bytes per sample (16-bit PCM)
    iter_size = 3200

    # Renamed from `input`/`output` to avoid shadowing the builtins.
    input_path = os.path.join(gettempdir(), "in.wav")
    output_path = os.path.join(gettempdir(), "out.wav")

    audio_source = audio_helpers.WaveSource(open(input_path, 'rb'),
                                            sample_rate=sample_rate_d,
                                            sample_width=sample_width_d)

    audio_sink = audio_helpers.WaveSink(open(output_path, 'wb'),
                                        sample_rate=sample_rate_d,
                                        sample_width=sample_width_d)

    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=iter_size,
        sample_width=sample_width_d,
    )

    with SampleAssistant(conversation_stream, grpc_channel,
                         DEFAULT_GRPC_DEADLINE) as assistant:
        # File-backed source/sink: exit after the first conversation turn.
        assistant.converse()
        return
示例#4
0
def main(api_endpoint, credentials, device_model_id, device_id, device_config,
         query, lang, display, verbose, grpc_deadline, audio_sample_rate,
         audio_sample_width, audio_iter_size, audio_block_size,
         audio_flush_size, output_audio_file, *args, **kwargs):
    """Send a single text query to the Google Assistant API.

    Loads OAuth 2.0 credentials, opens an authorized gRPC channel, attaches
    a WAV sink for any spoken response audio, and echoes the assistant's
    text reply. Returns early if credentials cannot be loaded.
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)

    audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                        sample_rate=audio_sample_rate,
                                        sample_width=audio_sample_width)

    conversation_stream = audio_helpers.ConversationStream(
        source=None,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with TextAssistant(conversation_stream, lang, device_model_id, device_id,
                       display, grpc_channel, grpc_deadline) as assistant:
        # The query itself is not an error; log it at debug level.
        logging.debug('Query: %s', query)
        response_text = assistant.assist(text_query=query)

        if response_text:
            click.echo('<@assistant> %s' % response_text)
示例#5
0
def configure_conversation_stream(input_audio_file, audio_sample_rate,
                                  audio_sample_width, audio_iter_size,
                                  audio_block_size, audio_flush_size):
    """Build a ConversationStream from a WAV file or the microphone.

    The source is a WaveSource when ``input_audio_file`` is given,
    otherwise a live SoundDeviceStream; the sink always writes to
    LOCAL_AUDIO_FILE.
    """
    if input_audio_file:
        source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        source = audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size)
    sink = audio_helpers.WaveSink(
        open(LOCAL_AUDIO_FILE, 'wb'),
        sample_rate=audio_sample_rate,
        sample_width=audio_sample_width)
    stream = audio_helpers.ConversationStream(
        source=source,
        sink=sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )
    return stream
示例#6
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        # Reuse the sound device opened for input, if any.
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve or register the device identity when not supplied explicitly.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            # Lazy %-args: the message is only formatted if emitted.
            logging.warning('Device config not found: %s', e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('com.example.commands.SearchCar')
    def search_cars(filter, param):
        # Device action handler: query the local car API and print a table.
        # NOTE: parameter names are bound by the Assistant action payload,
        # so `filter` (shadowing the builtin) must keep its name.
        print('SEARCH CARS\n' + 'FILTER: ' + filter + ', PARAM: ' + param)

        try:
            response = requests.get(
                'http://localhost:5000/api/cars?{}={}'.format(filter, param))
            data = json.loads(response.text)
            cars = data['cars']
        except Exception:
            # Catch Exception rather than a bare except so KeyboardInterrupt
            # and SystemExit still propagate; fall back to an empty listing.
            print("Problem communicating with server")
            cars = []

        print('%-2s | %-10s | %-10s | %-8s | %s | %s | %s' %
              ("ID", "Make", "Body Type", "Colour", "No. Seats", "Cost/Hour",
               "Location"))
        print(
            '---+------------+------------+----------+-----------+-----------+----------------------'
        )

        for car in cars:
            print('%-2d | %-10s | %-10s | %-8s | %-9d | $%-8d | %s' %
                  (car['id'], car['make'], car['body_type'], car['colour'],
                   car['no_seats'], car['cost_per_hour'], car['location']))

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def main(api_endpoint=ASSISTANT_API_ENDPOINT,
         credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                  'credentials.json'),
         project_id=None,
         device_model_id=None,
         device_id=None,
         device_config=os.path.join(
             click.get_app_dir('googlesamples_assistant'),
             'device_config.json'),
         lang="en_GB",
         display=True,
         verbose=False,
         input_audio_file=None,
         output_audio_file=None,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         grpc_deadline=DEFAULT_GRPC_DEADLINE,
         once=False,
         *args,
         **kwargs):
    """Run the Google Assistant sample loop with a SetVolume device handler.

    Loads OAuth 2.0 credentials, opens an authorized gRPC channel, wires
    the audio source/sink (files or sound device), registers the device
    if no id/model is available, then runs conversation turns until
    interrupted (or once, when file arguments / ``once`` are given).
    Keyword defaults make this callable with no arguments.
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # audio_device holds the shared SoundDeviceStream when the microphone
    # and/or speaker are used instead of files.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        # Reuse the sound device opened for input, if any.
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve or register the device identity when not supplied explicitly.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # Persist the registration so subsequent runs skip it.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    #|=============================================|
    #|                                             |
    #| Handle commands for Google Assistant Stuff  |
    #|                                             |
    #|=============================================|
    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.SetVolume')
    def changeVolume(volumeLevel, isPercentage):
        # Only percentage volume requests are handled; absolute levels
        # are silently ignored.
        # NOTE(review): the pactl sink name is hardware-specific —
        # presumably matches the target USB audio device; confirm.
        if (isPercentage):
            os.system(
                'pactl set-sink-volume "alsa_output.usb-Generic_USB2.0_Device_20130100ph0-00.analog-stereo" '
                + str(volumeLevel) + '%')

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                input("PRESS ENTER TO SPEAK")
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
示例#8
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, "r") as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error("Error loading credentials: %s", e)
        logging.error("Run google-oauthlib-tool to initialize "
                      "new OAuth 2.0 credentials.")
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info("Connecting to %s", api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, "rb"),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        # Reuse the sound device opened for input, if any.
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve or register the device identity when not supplied explicitly.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            # Lazy %-args: the message is only formatted if emitted.
            logging.warning('Device config not found: %s', e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:

        query = ''
        cached = []
        # Poll a local memcache for the detected person's name.
        current_detected = memcache.Client(['127.0.0.1:11211'], debug=0)
        assistant.assist(text_query='Talk to my test app')
        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        #wait_for_user_trigger = not once
        while True:

            # NOTE(review): this busy-waits on memcache with no delay —
            # consider a short sleep; left as-is to avoid changing timing.
            while True:
                name = current_detected.get('Name')
                # Truthiness test replaces `name is not ''`: identity
                # comparison with a str literal is unreliable, and a cache
                # miss returns None, which the old check let through.
                if name:
                    query = name
                    break

            click.echo('<you> %s' % query)

            #always set MODE = TRUE for text input for detection input
            #if not assistant.MODE:
            #    assistant.switch_mode()

            #first request made to Dialogflow to notify the user detected
            continue_conversation = assistant.assist(text_query=query)

            #decision fork, whether it is first detection for check in
            #or subsequent detections for action tracking
            if query not in cached:
                cached.append(query)
                continue_conversation = assistant.assist(text_query='start')
            else:
                assistant.switch_mode()
                continue_conversation = assistant.assist(text_query=None)
                assistant.switch_mode()
示例#9
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials from disk and refresh them immediately so we
    # fail fast when they are stale or missing.
    try:
        with open(credentials, "r") as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error("Error loading credentials: %s", e)
        logging.error("Run google-oauthlib-tool to initialize "
                      "new OAuth 2.0 credentials.")
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info("Connecting to %s", api_endpoint)

    # Configure audio source and sink.
    # When neither file argument is given, microphone input and speaker output
    # share a single SoundDeviceStream instance: `audio_device` is created on
    # first use and reused by the second branch.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, "rb"),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
        )
    else:
        audio_source = audio_device = audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size,
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, "wb"),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
        )
    else:
        audio_sink = audio_device = audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
            block_size=audio_block_size,
            flush_size=audio_flush_size,
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: prefer CLI options, then the stored device
    # config file, and finally register a brand-new device instance with the
    # Assistant service.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device["id"]
                device_model_id = device["model_id"]
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning("Device config not found: %s" % e)
            logging.info("Registering device")
            if not device_model_id:
                logging.error("Option --device-model-id required "
                              "when registering a device instance.")
                sys.exit(-1)
            if not project_id:
                logging.error("Option --project-id required "
                              "when registering a device instance.")
                sys.exit(-1)
            device_base_url = "https://%s/v1alpha2/projects/%s/devices" % (
                api_endpoint,
                project_id,
            )
            # A time-based UUID gives each registration a unique device id.
            device_id = str(uuid.uuid1())
            payload = {
                "id": device_id,
                "model_id": device_model_id,
                "client_type": "SDK_SERVICE",
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error("Failed to register device: %s", r.text)
                sys.exit(-1)
            logging.info("Device registered: %s", device_id)
            # Persist the registration so later runs can skip this step.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, "w") as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    # Device-action handlers invoked by the Assistant for this device id.
    @device_handler.command("action.devices.commands.OnOff")
    def onoff(on):
        # `on` is the boolean state requested by the Assistant.
        if on:
            logging.info("Turning device on")
        else:
            logging.info("Turning device off")

    @device_handler.command("com.example.commands.BlinkLight")
    def blink(speed, number):
        logging.info("Blinking device %s times." % number)
        # Map the requested speed onto a per-blink delay in seconds.
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info("Device is blinking.")
            time.sleep(delay)

    with SampleAssistant(
            lang,
            device_model_id,
            device_id,
            conversation_stream,
            display,
            grpc_channel,
            grpc_deadline,
            device_handler,
    ) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        # NOTE(review): `dots` (LED strip) and `button` are not defined in this
        # function -- presumably module-level hardware objects (e.g. DotStar
        # LEDs and a GPIO button); confirm they are initialized at import time.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                print("Press button to initiate a new request")
                dots.fill(0x00000F)  # lite blue LEDs
                dots.show()
                # Poll until the button is pressed (button.value goes False).
                while button.value:
                    time.sleep(0.1)
            # red LEDs
            dots.fill(0xFF0000)
            dots.show()
            continue_conversation = assistant.assist()
            # LEDs off
            dots.fill(0x000000)
            dots.show()

            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
    def audioRecorderCallback(self, snowboy_audio_file):
        """Run one Assistant conversation seeded with Snowboy's recording.

        Called after Snowboy detects the hotword. The first Assistant turn
        reads audio from `snowboy_audio_file`; any follow-up turns read from
        the microphone. Skips processing when the mic is disabled by the
        system or the user, and deletes the recording file when done.
        """
        # Got keyword from Snowboy, now handle the audio from the file Snowboy recorded
        rospy.loginfo(
            "SPEECH: Snowboy got keyword, Handling Audio that was recorded...")
        suggested_response = ""
        service_response = 0
        partial_result = False  # just always send final text
        phrase_heard_uppercase = ""

        # Handle case where mic disabled by system (talking, or moving servos)
        if not self.mic_system_enabled:
            rospy.loginfo("SPEECH: MIC disabled by SYSTEM.  Ignoring input")
            return

        # Handle case where mic disabled by user (including case where turning mic back on)
        if not self.mic_user_enabled:
            # mic disabled by User (don't listen)
            if self.mic_user_enable_pending:
                # User said to turn mic back on!
                rospy.loginfo("SPEECH: MIC now enabled by USER.")
                self.mic_user_enable_pending = False  # reset flag
                self.mic_user_enabled = True  # enable mic now (ignoring whatever was in the buffer)
                self.local_voice_say_text("Ok, I am listening")
            else:
                rospy.loginfo("SPEECH: MIC disabled by USER.  Ignoring input")

            return

        #=====================================================================================
        # Normal operation - first handle the audio from the file Snowboy recorded
        rospy.loginfo(self.logname + "handling audio from Snowboy...")

        audio_device = None
        read_from_file = True  # first read is file from Snowboy

        display_assistant_responses = False  # Display HTML!
        grpc_deadline = DEFAULT_GRPC_DEADLINE

        rospy.loginfo('initializing SampleAssistant...')
        # NOTE(review): unlike the upstream sample, this SampleAssistant
        # variant takes no conversation stream at construction; the stream is
        # supplied per-turn via set_conversation_stream() below.
        with SampleAssistant('en-US', self.device_model_id, self.device_id,
                             display_assistant_responses, self.grpc_channel,
                             grpc_deadline, self.device_handler) as assistant:

            # If user asked an open-ended question, handle follow up question without waiting for robot name!
            continue_conversation = True  # go through loop at least once
            while continue_conversation:
                if read_from_file:
                    audio_source = audio_helpers.WaveSource(
                        open(snowboy_audio_file, 'rb'),
                        sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
                        sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH)
                    read_from_file = False  # After handling Snowboy's initial buffer, everyting else is mic input
                else:
                    # Lazily create the mic stream and reuse it on later turns.
                    audio_source = audio_device = (
                        audio_device or audio_helpers.SoundDeviceStream(
                            sample_rate=audio_helpers.
                            DEFAULT_AUDIO_SAMPLE_RATE,
                            sample_width=audio_helpers.
                            DEFAULT_AUDIO_SAMPLE_WIDTH,
                            block_size=audio_helpers.
                            DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
                            flush_size=audio_helpers.
                            DEFAULT_AUDIO_DEVICE_FLUSH_SIZE))

                rospy.loginfo('Setting up Output device (speaker or file)...')
                send_response_to_file = False  # DAVES change this to hide Google's spoken response!
                # NOTE(review): the True branch references output_audio_file,
                # audio_sample_rate and audio_sample_width, none of which are
                # defined in this method -- it would raise NameError if
                # send_response_to_file were ever set True. Confirm before
                # enabling.
                if send_response_to_file:
                    audio_sink = audio_helpers.WaveSink(
                        open(output_audio_file, 'wb'),
                        sample_rate=audio_sample_rate,
                        sample_width=audio_sample_width)
                else:
                    # NOTE(review): unlike the source branch above, this does
                    # not reuse `audio_device` -- a new SoundDeviceStream is
                    # created on every loop iteration; confirm intended.
                    audio_sink = audio_device = (
                        audio_helpers.SoundDeviceStream(
                            sample_rate=audio_helpers.
                            DEFAULT_AUDIO_SAMPLE_RATE,
                            sample_width=audio_helpers.
                            DEFAULT_AUDIO_SAMPLE_WIDTH,
                            block_size=audio_helpers.
                            DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
                            flush_size=audio_helpers.
                            DEFAULT_AUDIO_DEVICE_FLUSH_SIZE))

                # Create conversation stream with the given audio source and sink.
                rospy.loginfo('Creating Conversation Stream...')
                conversation_stream = audio_helpers.ConversationStream(
                    source=audio_source,
                    sink=audio_sink,
                    iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
                    sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
                )

                assistant.set_conversation_stream(
                    conversation_stream
                )  # pass in the current stream (file or mic input)

                rospy.loginfo('Calling Assist...')
                assistant_response = None
                assistant_response_ascii = None
                continue_conversation, assistant_response = assistant.assist()

                rospy.loginfo('Done with conversation / response.')
                if assistant_response:
                    try:
                        # Handle Unicode
                        assistant_response_ascii = assistant_response.encode(
                            'ascii', errors='ignore')
                        rospy.loginfo('FINAL ASSISTANT RESPONSE TEXT: [%s]',
                                      assistant_response_ascii)

                        # NOTE(review): use_google_assistant_voice is not
                        # defined in this method -- presumably a module-level
                        # flag; confirm.
                        if not use_google_assistant_voice:
                            self.local_voice_say_text(assistant_response_ascii)

                    except Exception as e:
                        rospy.logwarn(
                            'Bad FINAL ASCII response from Assistant: %s', e)

        # END OF BLOCK FROM GOOGLE_CLOUD
        # NOTE(review): eye_color_default is an external name (module-level?);
        # confirm where it is defined.
        self.pub_eye_color.publish(
            eye_color_default)  # restore eye color to normal
        # Clean up the temporary recording left behind by Snowboy.
        os.remove(snowboy_audio_file)
示例#11
0
def main(api_endpoint, credentials, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials and refresh them up front.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        return

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # audio_device = None
    # if input_audio_file:
    #     audio_source = audio_helpers.WaveSource(
    #         open(input_audio_file, 'rb'),
    #         sample_rate=audio_sample_rate,
    #         sample_width=audio_sample_width
    #     )
    # else:
    #     audio_source = audio_device = (
    #         audio_device or audio_helpers.SoundDeviceStream(
    #             sample_rate=audio_sample_rate,
    #             sample_width=audio_sample_width,
    #             block_size=audio_block_size,
    #             flush_size=audio_flush_size
    #         )
    #     )
    # if output_audio_file:
    #     audio_sink = audio_helpers.WaveSink(
    #         open(output_audio_file, 'wb'),
    #         sample_rate=audio_sample_rate,
    #         sample_width=audio_sample_width
    #     )
    # else:
    #     audio_sink = audio_device = (
    #         audio_device or audio_helpers.SoundDeviceStream(
    #             sample_rate=audio_sample_rate,
    #             sample_width=audio_sample_width,
    #             block_size=audio_block_size,
    #             flush_size=audio_flush_size
    #         )
    #     )

    # Our Audio setup
    # audio_device, audio_dump = None
    # audio_source, audio_source_file = None
    # audo_sink, audio_sink_file = None

    audio_device = None

    # An input audio file is mandatory in this variant.
    # NOTE(review): the error message below is user-facing and unprofessional;
    # consider rewording. Also `exit()` is the site-module helper -- sys.exit()
    # is the conventional choice in scripts.
    if not input_audio_file:
        print("\nI am here to inform you that... \nYou done f****d  up\n")
        exit()

    # File-based source for the first (seed) request.
    audio_source_file = audio_helpers.WaveSource(
        open(input_audio_file, 'rb'),
        sample_rate=audio_sample_rate,
        sample_width=audio_sample_width)

    # Microphone source for subsequent requests; audio_device is reused below.
    audio_source = audio_device = (audio_device
                                   or audio_helpers.SoundDeviceStream(
                                       sample_rate=audio_sample_rate,
                                       sample_width=audio_sample_width,
                                       block_size=audio_block_size,
                                       flush_size=audio_flush_size))

    # NOTE(review): audio_sink_file is created but never referenced below --
    # both conversation streams sink to the sound device. Confirm whether the
    # file sink was meant to feed conversation_stream_file.
    if output_audio_file:
        audio_sink_file = audio_helpers.WaveSink(
            open(output_audio_file, "wb"),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)

    audio_sink = audio_device = (audio_device
                                 or audio_helpers.SoundDeviceStream(
                                     sample_rate=audio_sample_rate,
                                     sample_width=audio_sample_width,
                                     block_size=audio_block_size,
                                     flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    # Stream that reads the seed request from the input file.
    conversation_stream_file = audio_helpers.ConversationStream(
        source=audio_source_file,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width)

    # Stream that reads follow-up requests from the microphone.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # NOTE(review): these globals are presumably consumed elsewhere in the
    # module (e.g. to rebuild streams later); confirm the consumers.
    global auditer, audsw, audsr, audbs, audfs, iaud
    auditer = audio_iter_size
    audsw = audio_sample_width
    audsr = audio_sample_rate
    audbs = audio_block_size
    audfs = audio_flush_size
    iaud = input_audio_file

    with SampleAssistant(conversation_stream, grpc_channel, grpc_deadline,
                         conversation_stream_file) as assistant:

        # Hand the file-based stream to the assistant, then run one converse
        # round trip.
        assistant.csf = conversation_stream_file
        continue_conversation = assistant.converse()
示例#12
0
def main(api_endpoint, credentials,
         device_model_id, device_id, lang, display, verbose,
         grpc_deadline, audio_sample_rate, audio_block_size,
         audio_iter_size, audio_sample_width, audio_flush_size,
         audio_output_file, *args, **kwargs):
    """Interactive text-query client for the Google Assistant API.

    Prompts for queries on stdin in a loop, sends each one to the Assistant
    over an authorized gRPC channel, prints the text reply, and optionally
    renders the HTML reply in the system browser.
    """
    # Verbosity selects the logging level.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load and refresh the stored OAuth 2.0 user credentials; bail out early
    # when they are missing or stale.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            auth_request = google.auth.transport.requests.Request()
            credentials.refresh(auth_request)
    except Exception as error:
        logging.error('Error loading credentials: %s', error)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        return

    # Open an authorized gRPC channel to the Assistant service.
    channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, auth_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Audio always comes in from the sound device; replies go either to a
    # WAV file (when requested) or back out through the same device stream.
    device_stream = audio_helpers.SoundDeviceStream(
        sample_rate=audio_sample_rate,
        sample_width=audio_sample_width,
        block_size=audio_block_size,
        flush_size=audio_flush_size,
    )
    audio_source = device_stream
    if audio_output_file:
        audio_sink = audio_helpers.WaveSink(
            open(audio_output_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width,
        )
    else:
        audio_sink = device_stream

    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    with SampleTextAssistant(lang, device_model_id, device_id,
                             conversation_stream, display, channel,
                             grpc_deadline) as assistant:
        # REPL: read a query, echo it, send it, show the reply.
        while True:
            user_query = click.prompt('')
            click.echo('<you> %s' % user_query)
            reply_text, reply_html = assistant.assist(text_query=user_query)
            if display and reply_html:
                browser_helpers.system_browser.display(reply_html)
            if reply_text:
                click.echo('<@assistant> %s' % reply_text)
示例#13
0
def main(api_endpoint=ASSISTANT_API_ENDPOINT,
        credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json'),
        device_config=os.path.join(click.get_app_dir('googlesamples-assistant'),'device_config.json'),
        device_id=None,
        project_id=None,
        device_model_id=None,
        input_audio_file=None,
        output_audio_file=None,
        audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
        audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
        audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
        audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
        audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
        lang='en-US', display=False,
        verbose=False,
        once=False,
        grpc_deadline=DEFAULT_GRPC_DEADLINE, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # NOTE(review): connectMQTT is a project-local helper -- presumably opens
    # the MQTT connection used by the device handlers; confirm.
    connectMQTT()

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials and refresh them up front.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # Without file arguments, mic and speaker share one SoundDeviceStream:
    # `audio_device` is created on first use and reused by the second branch.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: prefer CLI options, then the stored device
    # config file, and finally register a brand-new device instance.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id,
                             device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            # A time-based UUID gives each registration a unique device id.
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # Persist the registration so later runs can skip this step.
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    # Device-action handlers invoked by the Assistant for this device id.
    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # `on` is the boolean state requested by the Assistant.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        # Map the requested speed onto a per-blink delay in seconds.
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # Keep recording voice requests using the microphone
        # and playing back assistant responses using the speaker.
        # This loops as long as assist() returns True, meaning a
        # follow-on query from the user is expected. If the once
        # flag is set, only one request is performed no matter
        # what assist() returns.
        while assistant.assist():   
            if once:    
                break
示例#14
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Drives a mode-based conversation loop: names detected by an external
    process (published through memcached under the key ``'Name'``) are fed
    to the Assistant as text queries, and first-time names are persisted to
    a MongoDB-backed daily cache.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:

        Now = datetime.datetime.now()
        DateTimeStamp = "{:%d/%m/%Y}".format(Now)
        print(DateTimeStamp)
        # Names already greeted today, loaded from the MongoDB cache.
        cached_list = cached.find_one({'ref': DateTimeStamp})['cached']
        print(cached_list)
        # Shared channel with the face/name detection process.
        current_detected = memcache.Client(['127.0.0.1:11211'], debug=0)
        assistant.assist(text_query='Talk to my test app')
        # Keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # Initialize so the check at the bottom of the loop cannot hit a
        # NameError if MODE ever takes an unexpected value.
        continue_conversation = False
        while True:
            if assistant.MODE == 0 or assistant.MODE == 1:
                query = ''
                while True:
                    name = current_detected.get('Name')
                    # memcache.get() returns None for a missing key; treat
                    # both None and '' as "no detection yet" and keep polling.
                    if name:
                        query = name
                        assistant.switch_mode(1)
                        if name not in cached_list:
                            # First sighting today: remember it and tag the
                            # query so the Assistant app can greet accordingly.
                            cached_list.append(name)
                            cached.update({'ref': DateTimeStamp},
                                          {'$push': {
                                              'cached': name
                                          }})
                            query = name + "first"
                            assistant.switch_mode(0)
                        break

                if assistant.MODE == 0:
                    click.echo('<you> %s' % query)
                    text, continue_conversation = assistant.assist(
                        text_query=query)
                elif assistant.MODE == 1:
                    click.echo('<you> %s' % query)
                    text, continue_conversation = assistant.assist(
                        text_query=query)
                    assistant.switch_mode(2)
            elif assistant.MODE == 2:
                # Voice turn: no text query, record from the microphone.
                text, continue_conversation = assistant.assist(text_query=None)
                print(text)
                if text == 'Please report activity again.':
                    print('recording again')
                    assistant.switch_mode(2)
                else:
                    print('why??')
                    assistant.switch_mode(1)

            # Re-open the Assistant app when the conversation ended.
            if not continue_conversation:
                assistant.assist(text_query='Talk to my test app')

            time.sleep(1)
示例#15
0
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config,
         lang, display, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, hotword_model, *args, **kwargs):
    """Samples for the Google Assistant API.

    Hotword-triggered variant: uses a Snowboy detector ("dalilaa") to start
    each Assistant turn, and drives a GPIO pin (``pin13``) from the OnOff and
    BrightnessAbsolute device commands.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id,
                             device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Drive the GPIO pin fully on/off to mirror the Assistant state.
        if on:
            pin13.write(1)
            logging.info('Turning device on')
        else:
            pin13.write(0)
            logging.info('Turning device off')

    @device_handler.command('action.devices.commands.BrightnessAbsolute')
    def brightnessCheck(brightness):
        # Brightness arrives as 0-100; the pin expects a 0.0-1.0 duty cycle.
        pin13.write(brightness/100)
        # Use a %s placeholder: passing the value as a bare second positional
        # argument makes logging raise a string-formatting error instead of
        # logging the brightness.
        logging.info('ok , brightness is %s', brightness)

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        import snowboydecoder

        def listening():
            # Block until the hotword fires; each detection runs one
            # Assistant turn via detectedCallback.
            detector = snowboydecoder.HotwordDetector(hotword_model, sensitivity=0.397, audio_gain=1)
            print("Say dalilaa .....or, Press Ctrl+C to exit")

            detector.start(detected_callback=detectedCallback, sleep_time=0.01)
            detector.terminate()

        def detectedCallback():
            continue_conversation = assistant.assist()
            print("Say dalilaa ...... or  Press Ctrl+C to exit")

        listening()
示例#16
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width, audio_iter_size,
         audio_block_size, audio_flush_size, grpc_deadline, once, *args,
         **kwargs):
    """Samples for the Google Assistant API.

    ZeroMQ-driven variant: binds a REP socket on tcp://127.0.0.1:5555 and
    serves requests in a loop. A 'start' message runs one Assistant turn
    using the pre-recorded file 'in.wav' as the audio source; 'stop' shuts
    the server down; any other message is rejected. The socket replies
    'done' after each completed turn.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # REP socket: strict recv/send alternation with one client at a time.
    context = zmq.Context.instance()
    socket = context.socket(zmq.REP)
    socket.bind('tcp://127.0.0.1:5555')

    # NOTE(review): everything below — stream setup, device registration,
    # handler creation — is re-executed on every incoming request, not once.
    while True:
        print('receiving socket message...')
        msg = socket.recv_string()
        if msg == 'stop':
            socket.send_string('stopping')
            context.destroy()
            break
        if msg != 'start':
            socket.send_string('invalid message')
            continue
        print('received start message')
        # Configure audio source and sink.
        audio_device = None
        # Input always comes from 'in.wav' (input_audio_file is ignored here).
        # NOTE(review): a fresh file handle is opened per request and never
        # explicitly closed — presumably WaveSource closes it; confirm.
        audio_source = audio_helpers.WaveSource(
            open('in.wav', 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
        if output_audio_file:
            audio_sink = audio_helpers.WaveSink(
                open(output_audio_file, 'wb'),
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width)
        else:
            audio_sink = audio_device = (audio_device
                                         or audio_helpers.SoundDeviceStream(
                                             sample_rate=audio_sample_rate,
                                             sample_width=audio_sample_width,
                                             block_size=audio_block_size,
                                             flush_size=audio_flush_size))
        # Create conversation stream with the given audio source and sink.
        conversation_stream = audio_helpers.ConversationStream(
            source=audio_source,
            sink=audio_sink,
            iter_size=audio_iter_size,
            sample_width=audio_sample_width,
        )

        device_handler = device_helpers.DeviceRequestHandler(device_id)

        @device_handler.command('action.devices.commands.OnOff')
        def onoff(on):
            if on:
                logging.info('Turning device on')
            else:
                logging.info('Turning device off')

        # Register the device on first use; subsequent iterations skip this
        # because device_id/device_model_id are then set.
        if not device_id or not device_model_id:
            try:
                with open(device_config) as f:
                    device = json.load(f)
                    device_id = device['id']
                    device_model_id = device['model_id']
            except Exception as e:
                logging.warning('Device config not found: %s' % e)
                logging.info('Registering device')
                if not device_model_id:
                    logging.error('Option --device-model-id required '
                                  'when registering a device instance.')
                    sys.exit(-1)
                if not project_id:
                    logging.error('Option --project-id required '
                                  'when registering a device instance.')
                    sys.exit(-1)
                device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                                   (api_endpoint, project_id))
                device_id = str(uuid.uuid1())
                # NOTE(review): unlike the sibling samples, this payload omits
                # 'client_type': 'SDK_SERVICE' — confirm whether intentional.
                payload = {'id': device_id, 'model_id': device_model_id}
                session = google.auth.transport.requests.AuthorizedSession(
                    credentials)
                r = session.post(device_base_url, data=json.dumps(payload))
                if r.status_code != 200:
                    logging.error('Failed to register device: %s', r.text)
                    sys.exit(-1)
                logging.info('Device registered: %s', device_id)
                os.makedirs(os.path.dirname(device_config), exist_ok=True)
                with open(device_config, 'w') as f:
                    json.dump(payload, f)

        with SampleAssistant(lang, device_model_id, device_id,
                             conversation_stream, grpc_channel, grpc_deadline,
                             device_handler) as assistant:
            # One file-driven turn per 'start' request.
            #if input_audio_file or output_audio_file:
            assistant.assist()
            print("accepting another request...")

        socket.send_string('done')
def main(api_endpoint=ASSISTANT_API_ENDPOINT,
         credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'),
                                  'credentials.json'),
         device_config=os.path.join(
             click.get_app_dir('googlesamples-assistant'),
             'device_config.json'),
         device_id=None,
         project_id=None,
         device_model_id=None,
         input_audio_file=None,
         output_audio_file=None,
         audio_sample_rate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
         audio_sample_width=audio_helpers.DEFAULT_AUDIO_SAMPLE_WIDTH,
         audio_block_size=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE,
         audio_flush_size=audio_helpers.DEFAULT_AUDIO_DEVICE_FLUSH_SIZE,
         audio_iter_size=audio_helpers.DEFAULT_AUDIO_ITER_SIZE,
         lang='ko-KR',
         verbose=False,
         once=False,
         grpc_deadline=DEFAULT_GRPC_DEADLINE):
    """Run the Google Assistant sample (Korean locale by default).

    OnOff device commands start an external MP3 player via a shell script
    ('on') or kill the player processes ('off').
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))

    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
            print('JCH MP3 Play')
            # Launch the player in a subprocess: Python cannot play mp3
            # directly here, so a terminal running runMP3.sh does it.
            subprocess.call(['lxterminal', '-e', './runMP3.sh'])
            print('JCH fork process is run. parent process is still running')
        else:
            logging.info('Turning device off')
            killMP3Pid()
            print('JCH turn off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    def killMP3Pid():
        """Find and SIGTERM the runMP3 wrapper and its omxplayer children.

        Scans `ps -ef` output twice: once for the shell wrapper, once for up
        to two omxplayer processes.
        """
        count = 1
        pid = -1
        cmd = ['ps', '-ef']

        # universal_newlines=True makes stdout yield str lines; without it
        # the lines are bytes and str.find('runMP3') raises TypeError.
        fd_popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    universal_newlines=True).stdout
        for line in fd_popen:
            if line.find('runMP3') != -1:
                fields = line.split()  # field 1 of ps -ef output is the PID
                pid = fields[1]
                print('bash pid:' + str(pid))
                os.kill(int(pid), signal.SIGTERM)  # or signal.SIGKILL
                break
        fd_popen.close()

        fd_popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    universal_newlines=True).stdout
        for line in fd_popen:
            if line.find('omxplayer') != -1:
                print('find')
                fields = line.split()
                pid = fields[1]
                print('pid:' + str(pid))
                os.kill(int(pid), signal.SIGTERM)
                if count == 2:  # omxplayer spawns a wrapper + player pair
                    break
                else:
                    count = count + 1
        fd_popen.close()

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:

        # Loop as long as assist() reports a follow-up turn is expected;
        # with --once, stop after a single request.
        while assistant.assist():
            if once:
                break
示例#18
0
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config,
         lang, display, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Porcupine setup
    # Hotword-engine resources: C library, acoustic model, and one keyword
    # file per wake phrase; sensitivities pair with keywords by index.
    library_path = "lib/linux/x86_64/libpv_porcupine.so" # Path to Porcupine's C library available under lib/${SYSTEM}/${MACHINE}/
    model_file_path = "lib/common/porcupine_params.pv" # It is available at lib/common/porcupine_params.pv
    keyword_file_paths = ['picovoice_linux.ppn', 'ok_google_linux_2020-04-28_v1.7.0.ppn', 'hey_google_linux_2020-04-28_v1.7.0.ppn']
    sensitivities = [0.8, 0.9, 0.9]
    porcupine = None
    pa = None
    audio_stream = None

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # File arguments take precedence; otherwise a single SoundDeviceStream is
    # shared between source and sink (audio_device is reused below).
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_device = (
            audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
            )
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: read the saved config, or register a new
    # device instance with the Assistant API and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id,
                             device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Demo handler: only logs the requested state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Demo handler: simulate blinking by sleeping between log lines.
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream, display,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        wait_for_user_trigger = not once

        # Hotword loop: read fixed-size PCM frames from the microphone and
        # feed them to Porcupine; on a keyword hit, run one assistant turn.
        try:
            porcupine = Porcupine(
                library_path,
                model_file_path,
                keyword_file_paths=keyword_file_paths,
                sensitivities=sensitivities)
            pa = pyaudio.PyAudio()
            # NOTE(review): input_device_index=2 is machine-specific — confirm
            # it matches the target hardware before deploying.
            audio_stream = pa.open(
                rate=porcupine.sample_rate,
                channels=1,
                format=pyaudio.paInt16,
                input=True,
                frames_per_buffer=porcupine.frame_length,
                input_device_index=2)
            while True:
                #print('listening')
                pcm = audio_stream.read(porcupine.frame_length)
                # Unpack little-endian 16-bit samples into ints for Porcupine.
                pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)
                #print('test')
                result = porcupine.process(pcm)
                if result >= 0:  # index of the detected keyword, -1 if none
                    print('detected keyword')
                    continue_conversation = assistant.assist()
                    wait_for_user_trigger = not continue_conversation
        except KeyboardInterrupt:
            print('stopping ...')
        finally:
            # Release hotword engine and audio resources in reverse order.
            if porcupine is not None:
                porcupine.delete()
            if audio_stream is not None:
                audio_stream.close()
            if pa is not None:
                pa.terminate()
示例#19
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API (Reebo robot variant).

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    ############################################################################3
    # Shared state used by the nested worker threads (face tracking, socket
    # I/O, query dispatch) defined later in this function.
    global updt_time, query, resp_text, mute, startmouth, TezHead, beep, faceFound, name2, onceface, facerec_en, keyboard_on

    # Face-tracking proportional gains.
    Kpx = 1
    Kpy = 1
    Ksp = 40

    ## Head X and Y angle limits
    time.sleep(5)
    Xmax = 725
    Xmin = 290
    Ymax = 550
    Ymin = 420
    keyboard_on = False
    ## Initial Head position

    Xcoor = 511
    Ycoor = 450
    Facedet = 0

    ## Time head wait turned
    touch_wait = 2

    # Timers and flags for face detection / touch handling.
    no_face_tm = time.time()
    face_det_tm = time.time()
    last_face_det_tm = time.time()
    touch_tm = 0
    touch_samp = time.time()
    qbo_touch = 0
    touch_det = False
    face_not_found_idx = 0
    mutex_wait_touch = False
    faceFound = False
    onceface = False
    dist = 100
    # Pre-recorded prompts played through the conversation stream.
    audio_response1 = '/home/pi/Reebo_Python/up.wav'
    wavep = wave.open(audio_response1, 'rb')
    audio_response2 = '/home/pi/Reebo_Python/HiTej.wav'
    wavep2 = wave.open(audio_response2, 'rb')
    facerec_en = False

    ############################################################################3
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: read the saved config, or register a new
    # device instance with the Assistant API and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            # Registration endpoint. The original string was corrupted by a
            # stray paste ('v1alphapi@raspber2'); the Assistant device API
            # path is 'v1alpha2', matching the sibling samples in this file.
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' %
                (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        """Log the requested OnOff state; no physical device is attached."""
        if not on:
            logging.info('Turning device off')
            return
        logging.info('Turning device on')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        """Blink the (virtual) device `number` times at the given speed."""
        logging.info('Blinking device %s times.' % number)
        # Note: this handler expects lowercase speed words, unlike the
        # uppercase variant used elsewhere in this file.
        delay = {"slowly": 2, "quickly": 0.5}.get(speed, 1)
        for _ in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    #~ def findquery():

    #############################   FACEREC THREAD     ##################################################33
    def facerec():
        # Recognize faces in the snapshot saved by findFace() and publish
        # the recognized names (global `name2`, plus names.txt and an
        # annotated result.png). Runs as a worker thread.
        # NOTE(review): `print name2` below is a Python 2 print statement —
        # this example only runs under Python 2.

        global name2

        # Known-face encodings pickled ahead of time by an enrollment script.
        f = open("/home/pi/Reebo_Python/face_features.pkl", 'rb')
        details = pickle.load(f)

        # Initialize some variables
        face_locations = []
        face_encodings = []
        face_names = []
        name2 = []
        unknown_picture = fr.load_image_file("/home/pi/Reebo_Python/test.jpg")

        # Grab a single frame of video
        # frame = unknown_picture

        # Resize frame of video to 1/4 size for faster face recognition processing
        # small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

        # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
        # rgb_small_frame = small_frame[:, :, ::-1]

        # Find all the faces and face encodings in the current frame of video
        face_locations = fr.face_locations(unknown_picture)
        face_encodings = fr.face_encodings(unknown_picture, face_locations)

        print("{0} persons identified".format(len(face_locations)))

        face_names = []
        for face_encoding in face_encodings:
            # 0.45 is the match tolerance (lower = stricter).
            matches = fr.compare_faces(details['encodings'], face_encoding,
                                       0.45)
            name = "Unknown"

            # If a match was found in known_face_encodings, just use the first one.
            if True in matches:
                first_match_index = matches.index(True)
                name = details["name"][first_match_index]

            face_names.append(name)

        print(face_names)
        # Strip enrollment-path artifacts from the stored names.
        for i in range(0, len(face_names)):

            name_temp = str(face_names[i]).replace('photos/', "")
            name_temp = str(name_temp).replace(']\'', "")
            name2.append(str(name_temp))
        print name2
        # Persist the recognized names for other processes to read.
        n = open("/home/pi/Reebo_Python/names.txt", 'w')
        for i in face_names:
            n.write(i + "\n")

        n.close()

        # Draw labelled boxes on the snapshot: blue for "warner", red for
        # everyone else, and save the annotated image.
        for (top, right, bottom, left), name in zip(face_locations,
                                                    face_names):
            if not name:
                continue
            if name == "warner":
                cv2.rectangle(unknown_picture, (left, top), (right, bottom),
                              (255, 0, 0), 2)
                cv2.rectangle(unknown_picture, (left, bottom - 25),
                              (right, bottom), (255, 0, 0), 1)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(unknown_picture, name, (left + 6, bottom - 6),
                            font, 0.5, (255, 255, 255), 1)
            else:
                cv2.rectangle(unknown_picture, (left, top), (right, bottom),
                              (0, 0, 255), 2)
                cv2.rectangle(unknown_picture, (left, bottom - 25),
                              (right, bottom), (0, 0, 255), 1)
                font = cv2.FONT_HERSHEY_DUPLEX
                cv2.putText(unknown_picture, name, (left + 6, bottom - 6),
                            font, 0.5, (255, 255, 255), 1)
            cv2.imwrite("/home/pi/Reebo_Python/result.png", unknown_picture)

    def findFace():
        # Face-tracking worker thread: continuously grabs webcam frames,
        # detects frontal/profile faces with Haar cascades, and steers the
        # TezHead servos to keep the face centered. Also polls the MPR121
        # capacitive touch sensor and turns the head on touch events.
        # Runs forever; broad except clauses keep the loop alive on errors.
        global name2, faceFound, onceface, facerec_en, updt_time
        found_tm = time.time()
        onceface = False
        touch_samp = time.time()
        # Servo angle limits (duplicated from main's setup).
        Xmax = 725
        Xmin = 290
        Ymax = 550
        Ymin = 420
        qbo_touch = 0

        while True:
            #print("find face " + str(time.time()))
            try:

                faceFound = False
                #    while not faceFound :
                # This variable is set to true if, on THIS loop a face has already been found
                # We search for a face three diffrent ways, and if we have found one already-
                # there is no reason to keep looking.
                #thread.start_new_thread(WaitForSpeech, ())
                #	WaitForSpeech()
                #    ServoHome()
                Cface = [0, 0]
                # Drain buffered frames so we work on the freshest image.
                t_ini = time.time()
                while time.time() - t_ini < 0.01:  # wait for present frame
                    t_ini = time.time()
                    aframe = webcam.read()[
                        1]  #print "t: " + str(time.time()-t_ini)

                # Haar cascade detection; min sizes 60x60 (frontal) / 80x80
                # (profile) trade accuracy for speed on the Pi.
                fface = frontalface.detectMultiScale(
                    aframe, 1.3, 4, (cv2.cv.CV_HAAR_DO_CANNY_PRUNING +
                                     cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT +
                                     cv2.cv.CV_HAAR_DO_ROUGH_SEARCH), (60, 60))
                pfacer = profileface.detectMultiScale(
                    aframe, 1.3, 4, (cv2.cv.CV_HAAR_DO_CANNY_PRUNING +
                                     cv2.cv.CV_HAAR_FIND_BIGGEST_OBJECT +
                                     cv2.cv.CV_HAAR_DO_ROUGH_SEARCH), (80, 80))
                if fface != ():  # if we found a frontal face...
                    for f in fface:  # f in fface is an array with a rectangle representing a face
                        faceFound = True
                        face = f

                elif pfacer != ():  # if we found a profile face...
                    for f in pfacer:
                        faceFound = True
                        face = f

                if faceFound:
                    updt_time = time.time()
                    #facerec()
                    # Save one snapshot per sighting for the facerec thread.
                    if onceface == False:
                        cv2.imwrite("/home/pi/Reebo_Python/test.jpg", aframe)

                        onceface = True
                    found_tm = time.time()
                    x, y, w, h = face
                    Cface = [
                        (w / 2 + x), (h / 2 + y)
                    ]  # we are given an x,y corner point and a width and height, we need the center
                    TezHead.SetNoseColor(4)
                    #print "face ccord: " + str(Cface[0]) + "," + str(Cface[1])
                    # Proportional steering: move half the pixel error
                    # (>>1) when the face is more than 20px off center.
                    # 160/120 are the frame center for the 320x240 capture.
                    faceOffset_X = 160 - Cface[0]
                    if (faceOffset_X > 20) | (faceOffset_X < -20):
                        time.sleep(0.002)
                        # acquire mutex
                        TezHead.SetAngleRelative(1, faceOffset_X >> 1)
                        # release mutex
                        #wait for move
                        time.sleep(0.05)
                        #print "MOVE REL X: " + str(faceOffset_X >> 1)
                    faceOffset_Y = Cface[1] - 120
                    if (faceOffset_Y > 20) | (faceOffset_Y < -20):
                        time.sleep(0.002)
                        # acquire mutex
                        TezHead.SetAngleRelative(2, faceOffset_Y >> 1)
                        # release mutex
                        #wait for move
                        time.sleep(0.05)
                # Turn off the nose LED half a second after losing the face.
                if time.time() - found_tm > 0.5:
                    TezHead.SetNoseColor(0)

            except Exception as e:
                print e
                pass
            # Poll the capacitive touch sensor; pins 1 and 11 are the two
            # touch pads of interest (right/left).
            try:
                current_touched = cap.touched()
                #last_touched = cap.touched()
                cap.set_thresholds(10, 6)
                # Check each pin's last and current state to see if it was pressed or released.
                i = 0
                for i in [1, 11]:
                    pin_bit = 1 << i
                    # Each pin is represented by a bit in the touched value.  A value of 1
                    # First check if transitioned from not touched to touched.
                    if current_touched & pin_bit:  #and not last_touched & pin_bit:
                        print('{0} touched!'.format(i))
                        qbo_touch = int(i)
            ##            # Next check if transitioned from touched to not touched.
            ##            if not current_touched & pin_bit and last_touched & pin_bit:
            ##                print('{0} released!'.format(i))
            ##        # Update last state and wait a short period before repeating.
            ##        last_touched = current_touched
            #time.sleep(0.1)
            except:
                #print sys.exc_info()
                #print "error"
                pass

            # Debounced touch response (at most every 0.5s): swing the head
            # toward the touched side, then recenter vertically.
            if (time.time() - touch_samp >
                    0.5):  # & (time.time() - last_face_det_tm > 3):
                touch_samp = time.time()
                #~ time.sleep(0.002)
                if qbo_touch in [1, 11]:
                    if qbo_touch == 1:
                        print("right")
                        TezHead.SetServo(1, Xmax - 50, 100)
                        time.sleep(0.002)
                        TezHead.SetServo(2, Ymin - 5, 100)
                        #thread.start_new_thread(WaitTouchMove, ())
                        # wait for begin touch move.
                        time.sleep(1)
                        qbo_touch = 0
                    elif qbo_touch == [2]:
                        # NOTE(review): `qbo_touch == [2]` compares an int to
                        # a list and is always False — branch is dead code.
                        #~ time.sleep(0.002)
                        TezHead.SetServo(2, Ymin - 5, 100)
                        thread.start_new_thread(WaitTouchMove, ())
                        # wait for begin touch move.
                        time.sleep(1)
                        qbo_touch = 0

                    elif qbo_touch == 11:
                        print("left")
                        TezHead.SetServo(1, Xmin + 50, 100)
                        time.sleep(0.002)
                        TezHead.SetServo(2, Ymin - 5, 100)
                        #thread.start_new_thread(WaitTouchMove, ())
                        # wait for begin touch move.
                        time.sleep(1)
                        qbo_touch = 0

    def distance():
        # Measure distance (cm) with an HC-SR04-style ultrasonic sensor:
        # pulse the trigger pin, time the echo pulse, convert via the speed
        # of sound. Uses module-level GPIO_TRIGGER / GPIO_ECHO pins.
        # NOTE(review): the busy-wait loops below have no timeout — if the
        # sensor is disconnected or the echo never returns, this hangs.
        # set Trigger to HIGH
        GPIO.output(GPIO_TRIGGER, True)

        # set Trigger after 0.01ms to LOW
        time.sleep(0.00001)
        GPIO.output(GPIO_TRIGGER, False)

        StartTime = time.time()
        StopTime = time.time()

        # save StartTime
        while GPIO.input(GPIO_ECHO) == 0:
            StartTime = time.time()

        # save time of arrival
        while GPIO.input(GPIO_ECHO) == 1:
            StopTime = time.time()

        # time difference between start and arrival
        TimeElapsed = StopTime - StartTime
        # multiply with the sonic speed (34300 cm/s)
        # and divide by 2, because there and back
        distance = (TimeElapsed * 34300) / 2

        return distance
        ##################################  SOCKET THREAD   ######################################################
    def socket_thread(conn):
        # Socket.IO worker thread: relays badge-enrollment events from the
        # backend. When the card number arrives it signals findquery()
        # through the Pipe `conn` (sends False to end keyboard entry).
        # Python 2 print statements — runs under Python 2 only.

        print 'Socket.IO Thread Started.'

        def empid_received():
            # Backend acknowledged the employee ID; prompt for card number.
            socket.emit('event-ask-cardno')
            print "ASK CARD NO"

        def cardno_received():
            # Enrollment data complete; unblock the main conversation loop.
            print "Card No received"
            conn.send(False)

        socket.on('event-empid-received', empid_received)
        socket.on('event-cardno-received', cardno_received)
        socket.wait()  # blocks this thread servicing Socket.IO events

    def findquery(parent_conn):
        # Inspect the assistant's last response text (global `resp_text`)
        # and drive follow-up actions: restart the conversation, take a
        # selfie, or run the badge-enrollment flow (photo -> employee ID ->
        # card number via the socket thread). Mutates the globals that the
        # main loop feeds into assistant.assist().
        global resp_text, mute, query, beep
        keyboard_on = False
        if resp_text == "Sorry, I can't help.":
            # Fallback reply: silently re-enter the Reebo action.
            query = "Talk to Reebo"
            mute = True
        elif resp_text == "Alright! Say Cheese!":
            print "camera"
            aframe = webcam.read()[1]
            cv2.imwrite("/home/pi/reebo-backend/selfie.jpg", aframe)
            socket.emit('event-take-selfie')
            #mute=False

        elif resp_text.startswith("Can you please smile for the camera?"):
            # Badge-enrollment flow: photo, then keyboard entry relayed by
            # the socket thread; parent_conn.recv() blocks until the card
            # number has been received.
            mute = False
            beep = False
            print "BEEP"
            time.sleep(5)
            aframe = webcam.read()[1]
            cv2.imwrite("/home/pi/reebo-backend/selfie.jpg", aframe)
            socket.emit('event-take-selfie')
            query = "Say@#$: Thank you. Please enter your employee ID and card number"
            assistant.assist()
            socket.emit('event-ask-empid')
            keyboard_on = True
            print "KEYBOARD in findquery: ", keyboard_on
            keyboard_on = parent_conn.recv()
            query = "Say@#$: Thank You. You will be granted access shortly"
            mute = False
            beep = False

    # ---- Hardware bring-up and main conversation loop (Python 2 code). ----

    # Serial port for the TezHead servo controller; override via argv[1].
    if len(sys.argv) > 1:
        port = sys.argv[1]
    else:
        port = '/dev/serial0'

    try:
        # Open serial port
        ser = serial.Serial(port,
                            baudrate=115200,
                            bytesize=serial.EIGHTBITS,
                            stopbits=serial.STOPBITS_ONE,
                            parity=serial.PARITY_NONE,
                            rtscts=False,
                            dsrdtr=False,
                            timeout=0)
        print "Open serial port sucessfully."
        print(ser.name)
    except Exception as e:
        print e
        print "Error opening serial port."
        sys.exit()

    # Capacitive touch sensor (MPR121) — optional; failures are tolerated.
    try:
        cap = MPR121.MPR121()
        time.sleep(3)
        #
        if not cap.begin():
            print('Error initializing MPR121.  Check your wiring!')
    except Exception as e:
        print(e)
        pass

    # Head controller: configure servo PID gains and move to home position.
    TezHead = TezCmd.Controller(ser)
    TezHead.SetMouth(0x110E00)

    time.sleep(1)
    #TezHead.SetPid(1, 26, 12, 16)
    TezHead.SetPid(1, 26, 2, 16)

    #TezHead.SetPid(2, 26, 12, 16)
    TezHead.SetPid(2, 26, 2, 16)
    time.sleep(1)
    TezHead.SetServo(1, Xcoor, 100)
    TezHead.SetServo(2, Ycoor, 100)
    time.sleep(1)
    TezHead.SetNoseColor(0)

    webcam = cv2.VideoCapture(
        -1)  # Get ready to start getting images from the webcam
    webcam.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH,
               320)  # I have found this to be about the highest-
    webcam.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT,
               240)  # resolution you'll want to attempt on the pi
    #webcam.set(cv2.CV_CAP_PROP_BUFFERSIZE, 2)		# frame buffer storage

    if not webcam:
        print "Error opening WebCAM"
        sys.exit(1)

    #open = False

    frontalface = cv2.CascadeClassifier(
        "/home/pi/Documents/Python projects/haarcascade_frontalface_alt2.xml"
    )  # frontal face pattern detection
    profileface = cv2.CascadeClassifier(
        "/home/pi/Documents/Python projects/haarcascade_profileface.xml"
    )  # side face pattern detection
    #parent_conn, child_conn = Pipe()

    # Worker threads: face tracking (t1), face recognition (t3, started
    # later on demand), and the Socket.IO relay.
    t1 = Thread(target=findFace)
    t1.start()
    t3 = Thread(target=facerec)
    parent_conn, child_conn = Pipe()
    socket_thd = Thread(target=socket_thread, args=(child_conn, ))
    socket_thd.start()

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        button_once = False
        #playsound('/home/pi/env/HiTej.wav')
        print "playsound"
        # Bootstrap turn: silently open the "Talk to Reebo" action.
        mute = True
        query = "Talk to Reebo"
        print query
        #################################################################################3
        #~ query,mute=findquery()
        #####################################FIND QUERY AND MUTE#####################3
        assistant.assist()
        mute = False
        query = "audio"
        time.sleep(1)
        updt_time = time.time()
        stream = conversation_stream
        # Pre-load both canned WAV prompts into memory for fast playback.
        num_frames = wavep.getnframes(
        )  # number of frames in audio response file
        resp_samples = wavep.readframes(num_frames)  # get frames from wav file
        num_frames2 = wavep2.getnframes(
        )  # number of frames in audio response file
        resp_samples2 = wavep2.readframes(
            num_frames2)  # get frames from wav file
        name = ""
        while True:
            #if wait_for_user_trigger:

            #logging.info('Press key')
            #x=raw_input()
            #~ stream.start_recording() # unelegant method to access private methods..

            # React to the previous response (selfie / enrollment flows).
            findquery(parent_conn)
            if mute == False or beep == True:
                # Play the attention "beep" prompt before listening.
                print "beep"
                stream.start_playback()
                #~ stream.stop_recording()
                stream.write(
                    resp_samples)  # write response sample to output stream
                print "HI"
                stream.stop_playback()

            assistant.assist()
            beep = False
            query = "audio"
            mute = False
            #updt_time=time.time()
            print time.time() - updt_time
            dist = distance()
            #~ if dist<50:
            #~ print dist
            #~ updt_time=time.time()
            # Idle for >10s: reset recognition state, wait for a new face,
            # then greet the visitor (by name when recognized).
            if time.time() - updt_time > 10:
                name2 = ""
                if onceface == True:
                    facerec_en = False
                    print "Thread Status", t3.is_alive()
                    if t3.is_alive():
                        # NOTE(review): Thread has no terminate(); this
                        # looks borrowed from multiprocessing — confirm.
                        t3.terminate()
                        t3.join(1)
                        print "t3 terminated"
                    print facerec_en
                    onceface = False
                print("in loop")
                query = "audio"
                dist = distance()
                print faceFound
                while faceFound == False:
                    time.sleep(0.1)
                    #print "FACE FALSE"
                #~ if dist>60:
                #~ #mute=False
                #~ updt_time=time.time()
                #~ print query
                #~ while dist>60:
                #~ dist=distance()
                #~ time.sleep(0.1)
                #~ print dist
                #~ query="Hi"
                #~ print query
                #~ assistant.assist()
                #~ print ("playback")
                #~ socket.emit('event-robot-message',"Hi! Do you want some help ?")
                print "Thread Status", t3.is_alive()
                t3 = Thread(target=facerec)
                t3.start()

                #~ query="Talk to Tej"
                #~ mute=True
                #~ assistant.assist()
                #time.sleep(3)
                # Greeting prompt + mirrored message to the backend UI.
                stream.start_playback()
                #~ stream.stop_recording()
                stream.write(
                    resp_samples2)  # write response sample to output stream
                socket.emit(
                    'event-robot-message',
                    "Hi! My Name is Reebo. I\'ll be your personal assistant for today"
                )
                stream.stop_playback()
                query = "Say:@#$: "
                if len(name2) >= 1:
                    for i in range(0, len(name2)):
                        if name2[i] != "" and name2[i] != "Unknown":
                            query = query + " Hi " + str(name2[i]) + "!"

                query = query + "What can I do for you?"
                mute = False
                print query
                assistant.assist()
                #time.sleep(0.1)
                #~ stream.start_playback()
                #~ stream.stop_recording()
                #~ stream.write(resp_samples2) # write response sample to output stream
                #~ stream.stop_playback()
                #~ #query="Talk to Tej"
                #~ mute=True
                #~ assistant.assist()

                #~ updt_time= time.time()
                query = "audio"
                mute = False
示例#20
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Hotword variant: when running with microphone/speaker (no file
    arguments), each new request is gated by a Porcupine wake-word
    detector (keyword "terminator") instead of a keyboard prompt.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    # NOTE: `credentials` is rebound from a file path to a Credentials
    # object; the token is refreshed up front so bad credentials fail fast.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # When neither file argument is given, one SoundDeviceStream is created
    # lazily (via the `audio_device or ...` idiom) and shared as both
    # source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: prefer CLI options, then the on-disk device
    # config; as a last resort register a fresh device instance via the
    # Assistant device-registration REST API and persist it to
    # `device_config` for next time.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)
    # BCM pin 12 is configured as an output (initially LOW) but is never
    # written to anywhere else in this function.
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(12, GPIO.OUT, initial=GPIO.LOW)

    # Device action handlers below are log-only stubs.
    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        logging.info('Blinking device %s times.' % number)
        # Map the spoken speed keyword to a per-blink delay in seconds.
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once

        # Start the Porcupine wake-word engine on a background thread; the
        # main loop polls its `detected` flag.
        porcupine = PorcupineDemo(library_path=pvporcupine.LIBRARY_PATH,
                                  model_path=pvporcupine.MODEL_PATH,
                                  keyword_paths=[
                                      pvporcupine.KEYWORD_PATHS[x]
                                      for x in ['terminator']
                                  ],
                                  sensitivities=[0.95],
                                  output_path=None,
                                  input_device_index=None)

        porcupine_thread = threading.Thread(target=porcupine.run)
        porcupine_thread.start()

        # NOTE(review): this local `bill` is never used; the loop below
        # calls `assistant.bill` instead -- confirm SampleAssistant exposes
        # a `bill` attribute and whether this local is dead code.
        bill = Billy()

        while True:
            if wait_for_user_trigger:
                # Busy-wait (100 ms poll) until the hotword fires.
                # NOTE(review): `porcupine.detected` is never reset in this
                # loop -- confirm PorcupineDemo clears it after detection,
                # otherwise every later turn would auto-trigger.
                while not porcupine.detected:
                    time.sleep(0.1)
                #click.pause(info='Press Enter to send a new request...')
            assistant.bill.eye_on()
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
            assistant.bill.eye_off()
示例#21
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width, audio_iter_size,
         audio_block_size, audio_flush_size, grpc_deadline, once, *args,
         **kwargs):
    """Samples for the Google Assistant API.

    Raspberry Pi GPIO variant: a push button (gBUTTON) triggers each new
    request and an LED (gLED) signals activity; several device actions
    shell out to helper scripts under /home/pi.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """

    # GPIO setup
    # sBUTTON/gBUTTON/gLED are module-level pin constants defined outside
    # this function. sBUTTON is configured here but never read in this
    # function; only gBUTTON is polled below.
    GPIO.setmode(GPIO.BCM)
    GPIO.setwarnings(False)
    GPIO.setup(sBUTTON, GPIO.IN)
    GPIO.setup(gBUTTON, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(gLED, GPIO.OUT, initial=GPIO.HIGH)

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    # NOTE: `credentials` is rebound from a file path to a Credentials
    # object; the token is refreshed up front so bad credentials fail fast.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # When neither file argument is given, one SoundDeviceStream is created
    # lazily and shared as both source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: prefer CLI options, then the on-disk device
    # config; as a last resort register a fresh device instance via the
    # Assistant device-registration REST API and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    image_dir = '/home/pi/robot/image/'

    # Take a still with raspistill into a per-day directory and return the
    # full path of the saved JPEG.
    def camera():
        now = datetime.now()
        dir_name = now.strftime('%Y%m%d')
        dir_path = image_dir + dir_name + '/'
        file_name = now.strftime('%H%M%S') + '.jpg'
        fname = dir_path + file_name
        try:
            os.mkdir(dir_path)
        except OSError:
            # Date dir may already exist; any other OSError is also
            # swallowed here.
            logging.info('Date dir already exists')
        os.system('raspistill -o ' + fname)
        return fname

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
            #Klight added
            # Pulse the LED for one second as the "on" indication.
            GPIO.output(gLED, GPIO.HIGH)
            time.sleep(1)
            GPIO.output(gLED, GPIO.LOW)
            #os.system('python /home/pi/robot/blinkt_color.py')

        else:
            logging.info('Turning device off')
            GPIO.output(gLED, GPIO.LOW)

    # Kblink
    @device_handler.command('com.acme.commands.blink_light')
    def blinker(number, lightKey):
        # Blink the LED `number` times at a fixed 0.5 s half-period;
        # `lightKey` is received but unused.
        logging.info('Blinking device %s times.' % number)
        for i in range(int(number)):
            logging.info('Device is blinking %s/%s time.' % (i, number))
            time.sleep(0.5)
            GPIO.output(gLED, GPIO.HIGH)
            time.sleep(0.5)
            GPIO.output(gLED, GPIO.LOW)

    # Kcamera
    @device_handler.command('com.acme.commands.pi_camera')
    def picamera(number, cameraKey):
        # Capture a photo and run face analysis on it via an external
        # script. NOTE: if cameraKey is falsy the LED is left HIGH.
        logging.info('Taking a %s %s times.' % (cameraKey, number))
        GPIO.output(gLED, GPIO.HIGH)
        if cameraKey:  # in ('picture', 'camera', 'photo'):
            fname = camera()
            result = os.system(
                'python3 /home/pi/AIY-projects-python/src/examples/voice/visiontalk.py face '
                + fname)  #robot/vision.py "" '+fname)
            logging.info('Image:' + fname)
            GPIO.output(gLED, GPIO.LOW)

    @device_handler.command('com.acme.commands.pi_jp')
    def pijp(number, cameraKey):
        # Same capture-and-analyze flow as picamera, bound to a second
        # command name.
        logging.info(cameraKey)
        GPIO.output(gLED, GPIO.HIGH)
        if cameraKey:  # in ('picture', 'camera', 'photo'):
            fname = camera()
            result = os.system(
                'python3 /home/pi/AIY-projects-python/src/examples/voice/visiontalk.py face '
                + fname)  #robot/vision.py "" '+fname)
            logging.info('Image:' + fname)
            GPIO.output(gLED, GPIO.LOW)

    @device_handler.command('com.acme.commands.pi_motor')
    def pimotor(number, directionKey):
        # Run the motor helper script; `number`/`directionKey` are logged
        # or ignored, not forwarded to the script.
        logging.info(directionKey)
        GPIO.output(gLED, GPIO.HIGH)
        result = os.system('python3 /home/pi/robot/motor.py')
        GPIO.output(gLED, GPIO.LOW)
        """if color.get('name') == "blue": #shoot:
          logging.info('Camera shoot!')
          GPIO.output(gLED, GPIO.HIGH)
      else:
          logging.info('Something else happened.')
          GPIO.output(gLED, GPIO.LOW)"""

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                # GPIO button added
                # Poll the button once per loop pass, blinking the LED as a
                # "ready" heartbeat (~0.2 s HIGH each iteration).
                state = GPIO.input(gBUTTON)
                logging.info("Push button to Google talk!")
                GPIO.output(gLED, GPIO.HIGH)
                time.sleep(0.2)
                GPIO.output(gLED, GPIO.LOW)
                # NOTE(review): gBUTTON uses PUD_UP, so an unpressed button
                # normally reads HIGH -- yet a truthy `state` proceeds to
                # assist() and a falsy one loops. Confirm the wiring (the
                # switch may drive the pin HIGH when pressed).
                if state:
                    pass  #continue
                else:
                    continue  #pass

                #click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
示例#22
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width, audio_iter_size,
         audio_block_size, audio_flush_size, grpc_deadline, once, *args,
         **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    # NOTE: `credentials` is rebound from a file path to a Credentials
    # object; the token is refreshed up front so bad credentials fail fast.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # When neither file argument is given, one SoundDeviceStream is created
    # lazily and shared as both source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: prefer CLI options, then the on-disk device
    # config; as a last resort register a fresh device instance via the
    # Assistant device-registration REST API and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    # NOTE(review): this sample ends after creating the device handler --
    # `conversation_stream` and `grpc_channel` are configured but no
    # conversation loop follows in this function.
    device_handler = device_helpers.DeviceRequestHandler(device_id)
示例#23
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width, audio_iter_size,
         audio_block_size, audio_flush_size, grpc_deadline, once, *args,
         **kwargs):
    """Samples for the Google Assistant API.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.acme.commands.play_kkbox')
    def play_music(songName
                   ):  # You must match the parameters from the Action Package.
        logging.info('play %s ' % songName)
        # url = 'https://widget.kkbox.com/v1/?id=4kxvr3wPWkaL9_y3o_&type=song&terr=TW&lang=TC&autoplay=true&loop=true'
        # result = subprocess.Popen(['firefox', url], stdout=subprocess.PIPE)
        # print(result.stdout)

        from kkbox_partner_sdk.auth_flow import KKBOXOAuth

        CLIENT_ID = 'cea7cb81a731b46caeb9b8c0e25abd22'
        CLIENT_SECRET = '6317f7914dcc9e1fb50d01f744b3f1fb'

        auth = KKBOXOAuth(CLIENT_ID, CLIENT_SECRET)
        token = auth.fetch_access_token_by_client_credentials()
        print(token)

        from kkbox_partner_sdk.api import KKBOXAPI

        kkboxapi = KKBOXAPI(token)

        keyword = '女武神'
        types = ['track']
        result = kkboxapi.search_fetcher.search(keyword, types)

        tracks = result['tracks']['data']
        # print('搜尋結果是:{}'.format(tracks))

        track_id = result['tracks']['data'][0]['id']
        track_info = kkboxapi.track_fetcher.fetch_track(track_id)
        url = track_info['url']
        print('歌曲資訊連結是:{}'.format(url))
        send(url)

        tickets = kkboxapi.ticket_fetcher.fetch_media_provision(track_id)
        url = tickets['url']
        print('下載位置連結是:{}'.format(url))

        print('底下是播放資訊')
        import subprocess
        subprocess.run(['ffplay', '-nodisp', '-autoexit', url])

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        once = True

        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
示例#24
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Voice-controlled robot variant: after the standard Assistant SDK setup
    (credentials, gRPC channel, audio plumbing, device registration) it runs
    a Korean-language menu loop offering a "control" mode (ultrasonic /
    tracking / light / movement commands driving a hardware module) and an
    "education" mode (story playback plus math and pronunciation quizzes,
    with results recorded through an `Education` DB helper).

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """

    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials (the `credentials` argument is a file path
    # here; the name is rebound to the Credentials object after loading).
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink. When no file argument is given, a
    # single SoundDeviceStream is shared between source and sink (the
    # `audio_device or ...` pattern creates it at most once).
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve the device identity: read it from the device config file, or
    # register a new device instance with the Assistant API and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Sample handler: just logs the requested power state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Sample handler: logs a "blink" `number` times, pacing by `speed`.
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    # Text to speech: render `text` with gTTS and play it on the speaker
    # via omxplayer, deleting the temporary MP3 afterwards.
    def tts(text, lang='ko'):
        if lang == None:
            speech = gTTS(text=text)
        else:
            speech = gTTS(text=text, lang=lang)
        speech.save('tmp.mp3')
        os.system("omxplayer tmp.mp3")
        os.remove('tmp.mp3')

    # Speech to text: run one assistant turn and return the recognized text.
    def stt(commands=None, is_respon=False):
        # Voice recognition / response. `assistant` is resolved at call time
        # from the enclosing `with SampleAssistant(...)` block below.
        continue_conversation, stt_tmp = assistant.assist(commands=commands,
                                                          is_respon=is_respon)

        # NOTE(review): this local is never read — leftover from the stock
        # sample's push-to-talk loop.
        wait_for_user_trigger = not continue_conversation

        #if once and (not continue_conversation):
        #    break

        text = stt_tmp

        return text

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied, run a single turn and exit.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # NOTE(review): assigned but unused — the menu loop below never
        # consults it (leftover from the stock sample).
        wait_for_user_trigger = not once

        # Spoken (Korean) command vocabularies the user can say:
        # select: "control" / "education"; control: "ultrasonic", "tracking",
        # "light", "command", "off"; yn: "yes" / "no";
        # move: "forward", "backward", "right", "left".
        select = ['컨트롤', '교육']
        control = ['초음파', '추적', '불빛', '명령', '꺼']
        yn = ['네', '아니']
        move = ['앞으로', '뒤로', '오른쪽', '왼쪽']

        # `first` triggers the mode-selection prompt; `more` asks whether to
        # continue control mode after an action finishes.
        first = True
        more = False

        # Hardware bundle: DC motors, servo, ultrasonic sensor, LEDs.
        module = m.mode()

        # Top-level menu loop: prompt, recognize, then dispatch to control
        # mode or education mode. Runs until interrupted.
        while True:
            if first == True:
                tts("컨트롤모드와 교육모드 중에 선택해주세요 ,.,.,.")
                first = False

            text = stt(select, is_respon=True)
            print("[INFO] answer : ", text)

            # --- Control mode -------------------------------------------
            if select[0] in text:
                print('동작 모드 ')
                tts('동작모드 입니다....   ')

                while True:

                    # After an action, ask whether to keep going; "no"
                    # returns to the top-level menu.
                    if more == True:
                        tts("동작모드를 더 하실껀가요   ")

                        text = stt(is_respon=False)

                        if yn[0] in text:
                            more = False
                            tts('다시 시작할게요   ')
                        if yn[1] in text:
                            more = False
                            first = True
                            break

                    text = stt(is_respon=False)
                    print("[INFO] answer : ", text)

                    # Ultrasonic mode: randomly pick one of two obstacle-
                    # avoidance behaviors.
                    if control[0] in text:
                        print("초음파 모드")
                        tts('초음파 모드 입니다   ')
                        sel = random.randrange(2)

                        if sel == 0:
                            print('1')
                            module.avoid()
                        else:
                            print('2')
                            module.avoid2()

                        tts("초음파 모드가 끝났어요   ")

                        more = True

                    # Tracking mode.
                    elif control[1] in text:
                        print('추적 모드')
                        tts('추적 모드 입니다   ')
                        module.tracking()

                        tts("추적 모드가 끝났어요   ")

                        more = True

                    # Light mode (servo + LED).
                    elif control[2] in text:
                        print('불빛 모드')
                        tts('블빛 모드 입니다   ')
                        module.servo_led()

                        tts("붗빛 모드가 끝났어요 ")

                        more = True

                    # Command mode: accept movement commands for up to 60s.
                    elif control[3] in text:
                        print('명령모드')
                        tts('명령 모드 입니다   ')

                        try:
                            start = time.time()
                            while True:
                                if time.time() - start > 60:
                                    break
                                text = stt(commands=move, is_respon=False)
                                print(text)
                                if move[0] in text:
                                    #if module.distance() > 80:
                                    module.go(100, 100)
                                    sleep(2)
                                    module.stop()
                                elif move[1] in text:
                                    module.back(100, 100)
                                    sleep(2)
                                    module.stop()
                                elif move[2] in text:
                                    module.spin_right(100, 100)
                                    sleep(3)
                                    module.stop()
                                elif move[3] in text:
                                    module.spin_left(100, 100)
                                    sleep(3)
                                    module.stop()

                        except KeyboardInterrupt:
                            module.stop()

                        module.stop()
                        tts("명령모드가 끝났어요    ")

                        more = True

                    # "Off": leave control mode, back to the main menu.
                    elif control[4] in text:
                        tts("끝낼게요    ")
                        first = True
                        break

            # --- Education mode -----------------------------------------
            elif select[1] in text:
                # DB settings for the session log. NOTE(review): the host,
                # user and password are hard-coded placeholders — presumably
                # meant to come from configuration; verify before deploying.
                host = '192.168.0.8'
                user = '******'
                dbname = 'Education'
                password = '******'
                ser = 1
                name = 'LEE'
                e = Education(host, user, dbname, password, ser, name)
                conn, cur = e.connection()
                e.dbclean(conn, cur)
                tts("교육모드 입니다.   ")

                count = 0
                stage = 0

                # Presence detection: start the lesson once the ultrasonic
                # sensor reports something closer than 50 (units per the
                # hardware module) for more than 100 samples.
                while True:
                    distance = module.distance()

                    if distance < 50:
                        count += 1

                    if count > 100:
                        count = 0

                        # 1) Story playback.
                        tts("반가워요 제가 동화를 들려드릴게요  ")
                        sleep(1)
                        os.system(
                            "omxplayer ~/workspace/Raspi_google_robot/stt/ka_01.mp3"
                        )

                        tts("동화가 끝났어요 재미있으셨나요  ")
                        sleep(1)

                        # Ask whether the story was fun; log each answer.
                        while True:
                            text = stt(is_respon=False)
                            e.isfun(conn, cur, text)
                            if yn[0] in text:
                                tts("고마워요 다음에 또 들려줄게요  ")
                                break
                            elif yn[1] in text:
                                tts("나중에는 더 재미있는 이야기를 들려줄게요  ")
                                break
                        e.search_isfun(conn, cur)
                        # 2) Math quiz: "what is 1 + 2?", 5 tries, -5 points
                        # per wrong answer.
                        tts("우리 같이 숫자 공부해요  ")
                        sleep(1)
                        tts("일 더하기 이는 무엇일까요  ")

                        life = 5
                        math_pt = 100
                        while True:
                            text = stt(is_respon=False)
                            print(text)

                            if '3' in text:
                                tts("정답이에요 축하해요  ")
                                break
                            elif not text:
                                pass
                            else:
                                tts("틀렸어요 다시한번 말해주세요  ")
                                life -= 1
                                math_pt -= 5
                                if life == 0:
                                    tts("코인을 다썼어요 아쉽네요 다음 기회에 또 봐요  ")
                                    break
                        e.math(conn, cur, math_pt)
                        e.search_math(conn, cur)
                        time.sleep(1)

                        # 3) Pronunciation quiz: say "orange" in English,
                        # expect the Korean transcription "오렌지".
                        tts("우리 같이 발음을 맞춰봐요  ")

                        life = 5
                        english_pt = 100

                        quiz = 'orange'

                        tts(quiz, lang=None)

                        while True:
                            text = stt(is_respon=False)
                            print(text)

                            if '오렌지' in text:
                                tts("정답 이에요")
                                break
                            elif not text:
                                pass
                            else:
                                tts("틀렸어요 다시한번 말해주세요    ")
                                life -= 1
                                english_pt -= 5
                                if life == 0:
                                    tts("아쉽네요 다음 기회에 또 봐요     ")
                                    break
                        e.english(conn, cur, english_pt)
                        e.search_english(conn, cur)
                        time.sleep(1)

                        tts("교육모드가 끝났습니다.    ")
                        e.dbclose(conn, cur)
                        first = True
                        break
            else:
                first = True
        # NOTE(review): unreachable — the menu loop above never breaks.
        m.clean()
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, verbose, input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width, audio_iter_size,
         audio_block_size, audio_flush_size, grpc_deadline, once, *args,
         **kwargs):
    """Samples for the Google Assistant API.

    Loads OAuth credentials, opens an authorized gRPC channel, wires up the
    audio source/sink, then runs assistant conversations until interrupted.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Configure logging verbosity before any other step reports progress.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials from the given file and refresh the token.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Open an authorized gRPC channel to the Assistant API endpoint.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Audio plumbing: WAV files when given, otherwise one sound-device
    # stream shared between capture and playback.
    shared_device = None

    def _sound_device():
        # Lazily build a single SoundDeviceStream for both source and sink.
        nonlocal shared_device
        if shared_device is None:
            shared_device = audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size)
        return shared_device

    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = _sound_device()

    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_sink = _sound_device()

    # Combine source and sink into a single conversation stream.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Sample handler: just logs the requested power state.
        logging.info('Turning device on' if on else 'Turning device off')

    # This variant does not self-register; both IDs must come from config.
    if not (device_id and device_model_id):
        logging.error(
            'No device_id or no device_model_id found. Please check config.py')
        sys.exit(0)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # With file arguments, run a single conversation turn and exit.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # Microphone/speaker mode: keep conversing. Unless `once` is set,
        # wait for the user to press Enter between conversations.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # A follow-up turn skips the Enter prompt.
            wait_for_user_trigger = not continue_conversation
            # With --once, stop as soon as the conversation ends.
            if once and (not continue_conversation):
                break
示例#26
0
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API with wake-word activation.

    After the standard setup (credentials, gRPC channel, audio plumbing,
    device registration) this variant records from the microphone with
    pyaudio and runs each frame through Porcupine; every time a wake word
    ('jarvis' or 'snowboy') fires it starts one assistant conversation.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials (the `credentials` argument is a file path
    # here; the name is rebound to the Credentials object after loading).
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink. When no file argument is given, a
    # single SoundDeviceStream is shared between source and sink.
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve the device identity: read it from the device config file, or
    # register a new device instance with the Assistant API and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Sample handler: just logs the requested power state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Sample handler: logs a "blink" `number` times, pacing by `speed`.
        logging.info('Blinking device %s times.' % number)
        delay = 1
        if speed == "SLOWLY":
            delay = 2
        elif speed == "QUICKLY":
            delay = 0.5
        for _ in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # Wake-word activation: record from the microphone with pyaudio and
        # feed each frame to Porcupine until a keyword fires.
        porcupine = None
        pa = None
        audio_stream = None
        try:
            porcupine = pvporcupine.create(keywords=['jarvis', 'snowboy'])
            pa = pyaudio.PyAudio()
            audio_stream = pa.open(rate=porcupine.sample_rate,
                                   channels=1,
                                   format=pyaudio.paInt16,
                                   input=True,
                                   frames_per_buffer=porcupine.frame_length)

            def get_next_audio_frame():
                # One frame of 16-bit mono PCM, unpacked to a tuple of ints
                # as Porcupine's process() expects.
                pcm = audio_stream.read(porcupine.frame_length,
                                        exception_on_overflow=False)
                pcm = struct.unpack_from("h" * porcupine.frame_length, pcm)
                return pcm

            # Runs until an exception (e.g. KeyboardInterrupt) escapes; the
            # finally block below then releases the audio resources.
            while True:
                keyword_index = porcupine.process(get_next_audio_frame())
                if keyword_index >= 0:
                    # Wake word detected: run one assistant conversation,
                    # then drain frames buffered while the assistant was
                    # talking so stale audio cannot retrigger detection.
                    assistant.assist()
                    while audio_stream.get_read_available() > 0:
                        get_next_audio_frame()

        finally:
            if porcupine is not None:
                porcupine.delete()

            if audio_stream is not None:
                audio_stream.close()

            if pa is not None:
                pa.terminate()
        # NOTE: the stock sample's push-to-talk loop that followed here was
        # unreachable (the wake-word loop above never breaks, and any
        # exception propagates past the finally block), so it was removed.
示例#27
0
def main(api_endpoint, credentials, project_id,
         device_model_id, device_id, device_config, lang, verbose,
         input_audio_file, output_audio_file,
         audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size,
         grpc_deadline, once, *args, **kwargs):
    """Samples for the Google Assistant API.

    Loads OAuth credentials, opens an authorized gRPC channel, wires up the
    audio source/sink, resolves (or registers) the device identity, then
    runs assistant conversations until interrupted.

    Examples:
      Run the sample with microphone input and speaker output:

        $ python -m googlesamples.assistant

      Run the sample with file input and speaker output:

        $ python -m googlesamples.assistant -i <input file>

      Run the sample with file input and output:

        $ python -m googlesamples.assistant -i <input file> -o <output file>
    """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials (the `credentials` argument is a file path
    # here; the name is rebound to the Credentials object after loading).
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # NOTE(review): unlike the sibling samples, this variant creates two
    # separate SoundDeviceStream objects instead of sharing one device
    # between source and sink — confirm this is intentional for the target
    # hardware before changing it.
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_source = audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
        )
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(
            open(output_audio_file, 'wb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width
        )
    else:
        audio_sink = audio_helpers.SoundDeviceStream(
                sample_rate=audio_sample_rate,
                sample_width=audio_sample_width,
                block_size=audio_block_size,
                flush_size=audio_flush_size
        )
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve the device identity: read it from the device config file, or
    # register a new device instance with the Assistant API and persist it.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info("Using device model %s and device id %s",
                             device_model_id,
                             device_id)
        except Exception as e:
            logging.warning('Device config not found: %s' % e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = (
                'https://%s/v1alpha2/projects/%s/devices' % (api_endpoint,
                                                             project_id)
            )
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials
            )
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            pathlib.Path(os.path.dirname(device_config)).mkdir(exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Sample handler: just logs the requested power state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    with SampleAssistant(lang, device_model_id, device_id,
                         conversation_stream,
                         grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            # (removed leftover debug statement: print("[joe debug]"))
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')
            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break
def main(api_endpoint, credentials, project_id, device_model_id, device_id,
         device_config, lang, display, verbose, input_audio_file,
         output_audio_file, audio_sample_rate, audio_sample_width,
         audio_iter_size, audio_block_size, audio_flush_size, grpc_deadline,
         once, *args, **kwargs):
    """Samples for the Google Assistant API.

      Examples:
        Run the sample with microphone input and speaker output:

          $ python -m googlesamples.assistant

        Run the sample with file input and speaker output:

          $ python -m googlesamples.assistant -i <input file>

        Run the sample with file input and output:

          $ python -m googlesamples.assistant -i <input file> -o <output file>
      """
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

    # Load OAuth 2.0 credentials from the JSON file produced by
    # google-oauthlib-tool, then refresh to obtain a valid access token.
    # `credentials` is deliberately rebound from path -> Credentials object.
    try:
        with open(credentials, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None,
                                                                **json.load(f))
            http_request = google.auth.transport.requests.Request()
            credentials.refresh(http_request)
    except Exception as e:
        logging.error('Error loading credentials: %s', e)
        logging.error('Run google-oauthlib-tool to initialize '
                      'new OAuth 2.0 credentials.')
        sys.exit(-1)

    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, api_endpoint)
    logging.info('Connecting to %s', api_endpoint)

    # Configure audio source and sink.
    # When both source and sink fall back to the sound device, a single
    # SoundDeviceStream instance is shared (hence the `audio_device or ...`
    # pattern, which only constructs the stream once).
    audio_device = None
    if input_audio_file:
        audio_source = audio_helpers.WaveSource(
            open(input_audio_file, 'rb'),
            sample_rate=audio_sample_rate,
            sample_width=audio_sample_width)
    else:
        audio_source = audio_device = (audio_device
                                       or audio_helpers.SoundDeviceStream(
                                           sample_rate=audio_sample_rate,
                                           sample_width=audio_sample_width,
                                           block_size=audio_block_size,
                                           flush_size=audio_flush_size))
    if output_audio_file:
        audio_sink = audio_helpers.WaveSink(open(output_audio_file, 'wb'),
                                            sample_rate=audio_sample_rate,
                                            sample_width=audio_sample_width)
    else:
        audio_sink = audio_device = (audio_device
                                     or audio_helpers.SoundDeviceStream(
                                         sample_rate=audio_sample_rate,
                                         sample_width=audio_sample_width,
                                         block_size=audio_block_size,
                                         flush_size=audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    conversation_stream = audio_helpers.ConversationStream(
        source=audio_source,
        sink=audio_sink,
        iter_size=audio_iter_size,
        sample_width=audio_sample_width,
    )

    # Resolve device identity: prefer CLI options, then the saved device
    # config file, and finally register a brand-new device instance.
    if not device_id or not device_model_id:
        try:
            with open(device_config) as f:
                device = json.load(f)
                device_id = device['id']
                device_model_id = device['model_id']
                logging.info('Using device model %s and device id %s',
                             device_model_id, device_id)
        except Exception as e:
            logging.warning('Device config not found: %s', e)
            logging.info('Registering device')
            if not device_model_id:
                logging.error('Option --device-model-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            if not project_id:
                logging.error('Option --project-id required '
                              'when registering a device instance.')
                sys.exit(-1)
            device_base_url = ('https://%s/v1alpha2/projects/%s/devices' %
                               (api_endpoint, project_id))
            device_id = str(uuid.uuid1())
            payload = {
                'id': device_id,
                'model_id': device_model_id,
                'client_type': 'SDK_SERVICE'
            }
            session = google.auth.transport.requests.AuthorizedSession(
                credentials)
            r = session.post(device_base_url, data=json.dumps(payload))
            if r.status_code != 200:
                logging.error('Failed to register device: %s', r.text)
                sys.exit(-1)
            logging.info('Device registered: %s', device_id)
            # parents=True so registration also works when intermediate
            # directories of the config path do not exist yet (the original
            # bare mkdir raised FileNotFoundError in that case).
            pathlib.Path(os.path.dirname(device_config)).mkdir(
                parents=True, exist_ok=True)
            with open(device_config, 'w') as f:
                json.dump(payload, f)

    device_handler = device_helpers.DeviceRequestHandler(device_id)

    # NOTE(review): these two services are constructed but never used in this
    # function, and both API keys are empty TODO placeholders — confirm
    # whether they are needed (e.g. by the device handlers) or can be removed.
    api_key = ""  # TODO
    custom_search = ""  # TODO
    image_search = ImageSearch(api_key, custom_search)
    youtube = YouTubeSearchService(api_key)

    @device_handler.command('action.devices.commands.OnOff')
    def onoff(on):
        # Trait handler: log the requested on/off state.
        if on:
            logging.info('Turning device on')
        else:
            logging.info('Turning device off')

    @device_handler.command('com.example.commands.BlinkLight')
    def blink(speed, number):
        # Custom device action: simulate blinking `number` times at the
        # requested speed (SLOWLY=2s, QUICKLY=0.5s, default 1s per blink).
        logging.info('Blinking device %s times.', number)
        delay = 1
        if speed == 'SLOWLY':
            delay = 2
        elif speed == 'QUICKLY':
            delay = 0.5
        for i in range(int(number)):
            logging.info('Device is blinking.')
            time.sleep(delay)

    with SampleAssistant(lang, device_model_id, device_id, conversation_stream,
                         display, grpc_channel, grpc_deadline,
                         device_handler) as assistant:
        # If file arguments are supplied:
        # exit after the first turn of the conversation.
        if input_audio_file or output_audio_file:
            assistant.assist()
            return

        # If no file arguments supplied:
        # keep recording voice requests using the microphone
        # and playing back assistant response using the speaker.
        # When the once flag is set, don't wait for a trigger. Otherwise, wait.
        wait_for_user_trigger = not once
        while True:
            if wait_for_user_trigger:
                click.pause(info='Press Enter to send a new request...')

            # After stopping the hotwordAssistant in assist() method it needs to
            # be recreated.
            hotwordAssistant = HotwordAssistant(
                device_model_id=device_model_id)
            hotwordAssistant.assist()

            continue_conversation = assistant.assist()
            # wait for user trigger if there is no follow-up turn in
            # the conversation.
            wait_for_user_trigger = not continue_conversation

            # If we only want one conversation, break.
            if once and (not continue_conversation):
                break