Code Example #1
    def assist(self, text_query):
        """Send a text request to the Assistant and playback the response.
        """
        def iter_assist_requests():
            dialog_state_in = embedded_assistant_pb2.DialogStateIn(
                language_code=self.language_code, conversation_state=b'')
            if self.conversation_state:
                dialog_state_in.conversation_state = self.conversation_state
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='MP3',
                    sample_rate_hertz=16000,
                    volume_percentage=100,
                ),
                dialog_state_in=dialog_state_in,
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.device_id,
                    device_model_id=self.device_model_id,
                ),
                text_query=text_query,
            )
            req = embedded_assistant_pb2.AssistRequest(config=config)
            assistant_helpers.log_assist_request_without_audio(req)
            yield req

        display_text = None
        response_audio_data = b''
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)

            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.audio_out.audio_data:
                response_audio_data += resp.audio_out.audio_data

            if resp.dialog_state_out.supplemental_display_text:
                display_text = resp.dialog_state_out.supplemental_display_text

        audio_url = save_and_upload(response_audio_data)

        text_response = display_text
        if text_response is None:
            logging.info('Response text is None')
            text_response = error_msg  # error_msg is not defined in this excerpt

        session_attributes = {}
        card_title = text_query
        # speech_output=text_response
        # reprompt_text=text_response
        should_end_session = False

        ssml_output = "<speak><audio src='" + audio_url + "' /></speak>"

        return build_response(
            session_attributes,
            build_speechlet_response(card_title, ssml_output, text_response,
                                     should_end_session))
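
Example #1 depends on helpers that are not shown here (`save_and_upload`, `build_speechlet_response`, `build_response`). A minimal sketch of what the two Alexa-style response builders might look like, modeled on the common Alexa custom-skill samples; the exact dictionary shapes are an assumption, not the original code:

    def build_speechlet_response(title, ssml_output, reprompt_text, should_end_session):
        # Hypothetical helper in the style of the classic Alexa custom-skill samples:
        # SSML output speech (here the <audio> wrapper built above), a simple card
        # showing the Assistant's display text, and a plain-text reprompt.
        return {
            'outputSpeech': {'type': 'SSML', 'ssml': ssml_output},
            'card': {'type': 'Simple', 'title': title,
                     'content': reprompt_text or ''},
            'reprompt': {'outputSpeech': {'type': 'PlainText',
                                          'text': reprompt_text or ''}},
            'shouldEndSession': should_end_session,
        }

    def build_response(session_attributes, speechlet_response):
        # Wrap the speechlet in the top-level envelope an Alexa skill returns.
        return {
            'version': '1.0',
            'sessionAttributes': session_attributes,
            'response': speechlet_response,
        }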
Code Example #2
    def assist(self, text_query):
        """Send a text request to the Assistant and playback the response.
        """
        def iter_assist_requests():
            dialog_state_in = embedded_assistant_pb2.DialogStateIn(
                language_code=self.language_code, conversation_state=b'')
            if self.conversation_state:
                dialog_state_in.conversation_state = self.conversation_state
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=80,
                ),
                dialog_state_in=dialog_state_in,
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.device_id,
                    device_model_id=self.device_model_id,
                ),
                text_query=text_query,
            )
            req = embedded_assistant_pb2.AssistRequest(config=config,
                                                       audio_in=None)
            assistant_helpers.log_assist_request_without_audio(req)
            yield req

        def normalize_audio(buf, volume_percentage):
            # Scale the 16-bit PCM samples by an exponential gain curve
            # (volume 0 -> gain 0.0, volume 100 -> gain 1.0).
            scale = math.pow(2, 1.0 * volume_percentage / 100) - 1
            arr = array.array('h', buf)
            for idx in range(len(arr)):
                arr[idx] = int(arr[idx] * scale)
            return arr.tobytes()  # array.tostring() was removed in Python 3.9

        def getVolume():
            # Read the 0.0-1.0 system volume persisted by the
            # google-assistant-library and convert it to a percentage.
            volume_path = ("/home/pi/.config/google-assistant-library/"
                           "assistant/volume/system")
            with open(volume_path, "r") as f:
                volume = int(float(f.readline()) * 100)
            return volume

        display_text = None
        buffer = b''
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.audio_out.audio_data:
                buffer += resp.audio_out.audio_data
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                display_text = resp.dialog_state_out.supplemental_display_text
        buffer = normalize_audio(buffer, getVolume())
        audio.play_audio(buffer)
        return display_text
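
For reference, `normalize_audio` maps the 0-100 volume setting onto a 0.0-1.0 amplitude factor with an exponential curve; a quick standalone check of that mapping:

    import math

    # Gain curve used by normalize_audio(): 2**(v / 100) - 1
    for v in (0, 25, 50, 75, 100):
        print(v, round(math.pow(2, v / 100) - 1, 3))
    # 0 -> 0.0, 25 -> 0.189, 50 -> 0.414, 75 -> 0.682, 100 -> 1.0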
Code Example #3
    def assist(self, text_query):
        """Send a text request to the Assistant and playback the response.
        """
        global s
        def iter_assist_requests():
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=50,
                ),
                dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                    language_code=self.language_code,
                    conversation_state=self.conversation_state,
                    is_new_conversation=self.is_new_conversation,
                ),
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.device_id,
                    device_model_id=self.device_model_id,
                ),
                text_query="オウム返し " + text_query,
            )
            # Continue current conversation with later requests.
            self.is_new_conversation = False
            if self.display:
                config.screen_out_config.screen_mode = PLAYING
            req = embedded_assistant_pb2.AssistRequest(config=config)
            assistant_helpers.log_assist_request_without_audio(req)
            yield req

        text_response = None
        html_response = None

        audio_bytes = 0
        if s is not None:
            s = None
        s = sd.RawStream(
            samplerate=audio_helpers.DEFAULT_AUDIO_SAMPLE_RATE,
            dtype='int16',
            channels=1,
            blocksize=audio_helpers.DEFAULT_AUDIO_DEVICE_BLOCK_SIZE)
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            audio_bytes += len(resp.audio_out.audio_data)
            s.write(resp.audio_out.audio_data)
            s.start()
            if resp.screen_out.data:
                html_response = resp.screen_out.data
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text
        return text_response, html_response
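
Example #3 streams the LINEAR16 reply into a sounddevice RawStream as it arrives. A minimal standalone sketch of that playback pattern, assuming the 16 kHz, 16-bit mono format configured in AudioOutConfig (the one-second silence buffer is only a placeholder):

    import sounddevice as sd

    pcm_bytes = b'\x00\x00' * 16000  # placeholder: 1 s of 16-bit mono silence

    stream = sd.RawOutputStream(samplerate=16000, channels=1, dtype='int16')
    stream.start()
    stream.write(pcm_bytes)   # in the example: resp.audio_out.audio_data chunks
    stream.stop()
    stream.close()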
    def assist(self):
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        #logging.info('Recording audio request.')
        msg('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')
            msg('Reached end of AssistRequest iteration.')

        done = False
        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.

        phrase = ''

        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                msg('End of audio request detected.')
                msg('Stopping recording.')
                self.conversation_stream.stop_recording()
                done = True
            if resp.speech_results and done:
                phrase = ''
                for r in resp.speech_results:
                    phrase = phrase + r.transcript
                phrase = phrase.replace(' ', '').strip()
                if phrase == 'エディターを起動' or phrase == 'エディタを起動':  # "launch the editor"
                    subprocess.Popen(['/usr/bin/leafpad'])
                    self.conversation_stream.stop_recording()
                    return True
                elif phrase == '終了':  # "exit"
                    print('**** END ****')
                    self.conversation_stream.stop_recording()
                    return False
                else:
                    phrase += '。'  # append a Japanese full stop
                    time.sleep(0.1)
                    clip.copy(phrase)
                    clipboard_text = clip.paste() 
                    pyautogui.hotkey('ctrl', 'v')
                    self.conversation_stream.stop_recording()
                    return True

        if phrase == '':
            self.conversation_stream.stop_recording()
            return True
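
The dictation branch above types the recognized phrase into the focused editor by copying it to the clipboard and synthesizing Ctrl+V with pyautogui; the example's `clip` is presumably pyperclip. A minimal sketch of that copy-and-paste trick, with the module name as an assumption:

    import time

    import pyautogui
    import pyperclip  # the example's `clip` is assumed to be this module

    phrase = 'テスト。'              # recognized text plus a Japanese full stop
    pyperclip.copy(phrase)           # place the phrase on the clipboard
    time.sleep(0.1)                  # brief pause so the clipboard settles
    pyautogui.hotkey('ctrl', 'v')    # paste into the focused window (e.g. leafpad)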
Code Example #5
    def assist(self, text):
        """Send a text request to the Assistant and playback the response.
        """
        def iter_assist_requests(text):
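            # NOTE: if gen_assist_requests(text) is itself a generator of
            # AssistRequest protos (as in the SDK text samples), this would
            # need `yield from self.gen_assist_requests(text)` instead.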
            yield self.gen_assist_requests(text)

        text_response = None
        for resp in self.assistant.Assist(iter_assist_requests(text),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text
        return text_response
Code Example #6
    def assist(self, text_query):
        """Send a text request to the Assistant and playback the response.
        """
        def iter_assist_requests():
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=0,
                ),
                dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                    language_code=self.language_code,
                    conversation_state=self.conversation_state,
                    is_new_conversation=self.is_new_conversation,
                ),
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=self.device_id,
                    device_model_id=self.device_model_id,
                ),
                text_query=text_query,
            )
            # Continue current conversation with later requests.
            self.is_new_conversation = False
            if self.display:
                config.screen_out_config.screen_mode = PLAYING
            req = embedded_assistant_pb2.AssistRequest(config=config)
            assistant_helpers.log_assist_request_without_audio(req)
            yield req

        text_response = None
        html_response = None
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.screen_out.data:
                html_response = resp.screen_out.data
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                self.conversation_state = conversation_state
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text
        return text_response, html_response
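
Example #6 returns `screen_out.data` (HTML bytes) alongside the display text; the SDK's textinput sample hands it to a browser helper. A minimal hand-rolled sketch of displaying that HTML; the temp-file approach here is an assumption, not the SDK's browser_helpers:

    import tempfile
    import webbrowser

    def show_html(html_response):
        # Write the screen_out HTML bytes to a temp file and open it locally.
        if not html_response:
            return
        with tempfile.NamedTemporaryFile('wb', suffix='.html',
                                         delete=False) as f:
            f.write(html_response)
        webbrowser.open('file://' + f.name)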
Code Example #7
    def assist(self, text_query=None, language_code='en-US'):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        text_response = None
        continue_conversation = False
        give_audio = True
        device_actions_futures = []
        self.language_code = language_code

        def iter_log_assist_requests():
            for c in self.gen_assist_requests(text_query):
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        if text_query is None:
            self.conversation_stream.start_recording()
            logger.info('Recording audio request.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)

            if text_query is None:
                if resp.event_type == self.END_OF_UTTERANCE:
                    logger.info('End of audio request detected.')
                    logger.info('Stopping recording.')
                    self.conversation_stream.stop_recording()
                if resp.speech_results:
                    logger.info(
                        'Transcript of user request: "%s".',
                        ' '.join(r.transcript for r in resp.speech_results))
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text

            if text_response is not None:
                # Suppress audio playback when the supplemental text ends
                # with the 'RecognizeMe' sentinel.
                words = text_response.split()
                if len(words) >= 3 and words[-1] == 'RecognizeMe':
                    give_audio = False

            if len(resp.audio_out.audio_data) > 0 and give_audio:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logger.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logger.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == self.DIALOG_FOLLOW_ON:
                continue_conversation = True
                logger.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == self.CLOSE_MICROPHONE:
                continue_conversation = False
            # if resp.device_action.device_request_json:
            #     device_request = json.loads(
            #         resp.device_action.device_request_json
            #     )
            #     fs = self.device_handler(device_request)
            #     if fs:
            #         device_actions_futures.extend(fs)
            # if self.display and resp.screen_out.data:
            #     system_browser = browser_helpers.system_browser
            #     system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logger.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logger.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()

        if text_response:
            logger.info(text_response)

        return continue_conversation, text_response
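
This variant returns both the follow-on flag and the display text, and accepts either a voice turn (text_query=None) or a typed query. A hypothetical driver loop showing how a caller might use it; the assistant object and its construction are assumed:

    def run_conversation(assistant):
        # Hypothetical caller: keep taking voice turns while the Assistant
        # expects a follow-on query, printing any display text it returns.
        keep_going = True
        while keep_going:
            keep_going, text = assistant.assist()           # voice turn
            # keep_going, text = assistant.assist('hello')  # or a typed turn
            if text:
                print(text)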
Code Example #8
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        subprocess.Popen(
            ["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"],
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        self.conversation_stream.start_recording()
        #Uncomment the following after starting the Kodi
        #status=mutevolstatus()
        #vollevel=status[1]
        #with open('/home/pi/.volume.json', 'w') as f:
        #json.dump(vollevel, f)
        #kodi.Application.SetVolume({"volume": 0})
        GPIO.output(5, GPIO.HIGH)
        led.ChangeDutyCycle(100)
        if ismpvplaying():
            if os.path.isfile("/home/pi/.mediavolume.json"):
                mpvsetvol = os.system(
                    "echo '" +
                    json.dumps({"command": ["set_property", "volume", "10"]}) +
                    "' | socat - /tmp/mpvsocket")
            else:
                mpvgetvol = subprocess.Popen(
                    [("echo '" +
                      json.dumps({"command": ["get_property", "volume"]}) +
                      "' | socat - /tmp/mpvsocket")],
                    shell=True,
                    stdout=subprocess.PIPE)
                output = mpvgetvol.communicate()[0]
                for currntvol in re.findall(r"[-+]?\d*\.\d+|\d+", str(output)):
                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                        json.dump(currntvol, vol)
                mpvsetvol = os.system(
                    "echo '" +
                    json.dumps({"command": ["set_property", "volume", "10"]}) +
                    "' | socat - /tmp/mpvsocket")

        logging.info('Recording audio request.')

        def iter_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                GPIO.output(5, GPIO.LOW)
                led.ChangeDutyCycle(0)
                self.conversation_stream.stop_recording()

            if resp.speech_results:
                logging.info(
                    'Transcript of user request: "%s".',
                    ' '.join(r.transcript for r in resp.speech_results))

                for r in resp.speech_results:
                    usercommand = str(r)

                if "stability: 1.0" in usercommand.lower():
                    usrcmd = str(usercommand).lower()
                    idx = usrcmd.find('stability')
                    usrcmd = usrcmd[:idx]
                    usrcmd = usrcmd.replace("stability", "", 1)
                    usrcmd = usrcmd.strip()
                    usrcmd = usrcmd.replace('transcript: "', '', 1)
                    usrcmd = usrcmd.replace('"', '', 1)
                    usrcmd = usrcmd.strip()
                    print(str(usrcmd))
                    if 'trigger'.lower() in str(usrcmd).lower():
                        Action(str(usrcmd).lower())
                        return continue_conversation
                    if 'stream'.lower() in str(usrcmd).lower():
                        os.system('pkill mpv')
                        if os.path.isfile(
                                "/home/pi/GassistPi/src/trackchange.py"):
                            os.system(
                                'rm /home/pi/GassistPi/src/trackchange.py')
                            os.system(
                                'echo "from actions import youtubeplayer\n\n" >> /home/pi/GassistPi/src/trackchange.py'
                            )
                            os.system(
                                'echo "youtubeplayer()\n" >> /home/pi/GassistPi/src/trackchange.py'
                            )
                            if 'autoplay'.lower() in str(usrcmd).lower():
                                YouTube_Autoplay(str(usrcmd).lower())
                            else:
                                YouTube_No_Autoplay(str(usrcmd).lower())
                        else:
                            os.system(
                                'echo "from actions import youtubeplayer\n\n" >> /home/pi/GassistPi/src/trackchange.py'
                            )
                            os.system(
                                'echo "youtubeplayer()\n" >> /home/pi/GassistPi/src/trackchange.py'
                            )
                            if 'autoplay'.lower() in str(usrcmd).lower():
                                YouTube_Autoplay(str(usrcmd).lower())
                            else:
                                YouTube_No_Autoplay(str(usrcmd).lower())
                        return continue_conversation
                    if 'stop'.lower() in str(usrcmd).lower():
                        stop()
                        return continue_conversation
                    if 'radio'.lower() in str(usrcmd).lower():
                        radio(str(usrcmd).lower())
                        return continue_conversation
                    if 'wireless'.lower() in str(usrcmd).lower():
                        ESP(str(usrcmd).lower())
                        return continue_conversation
                    if 'parcel'.lower() in str(usrcmd).lower():
                        track()
                        return continue_conversation
                    if 'news'.lower() in str(usrcmd).lower() or 'feed'.lower(
                    ) in str(usrcmd).lower() or 'quote'.lower() in str(
                            usrcmd).lower():
                        feed(str(usrcmd).lower())
                        return continue_conversation
                    if 'on kodi'.lower() in str(usrcmd).lower():
                        kodiactions(str(usrcmd).lower())
                        return continue_conversation
                    if 'chromecast'.lower() in str(usrcmd).lower():
                        if 'play'.lower() in str(usrcmd).lower():
                            chromecast_play_video(str(usrcmd).lower())
                        else:
                            chromecast_control(usrcmd)
                        return continue_conversation
                    if 'pause music'.lower() in str(usrcmd).lower(
                    ) or 'resume music'.lower() in str(usrcmd).lower():
                        if ismpvplaying():
                            if 'pause music'.lower() in str(usrcmd).lower():
                                playstatus = os.system("echo '" + json.dumps({
                                    "command": ["set_property", "pause", True]
                                }) + "' | socat - /tmp/mpvsocket")
                            elif 'resume music'.lower() in str(usrcmd).lower():
                                playstatus = os.system("echo '" + json.dumps({
                                    "command":
                                    ["set_property", "pause", False]
                                }) + "' | socat - /tmp/mpvsocket")
                        else:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if 'music volume'.lower() in str(usrcmd).lower():
                        if ismpvplaying():
                            if 'set'.lower() in str(usrcmd).lower(
                            ) or 'change'.lower() in str(usrcmd).lower():
                                if 'hundred'.lower() in str(usrcmd).lower(
                                ) or 'maximum' in str(usrcmd).lower():
                                    settingvollevel = 100
                                    with open('/home/pi/.mediavolume.json',
                                              'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol = os.system(
                                        "echo '" + json.dumps({
                                            "command": [
                                                "set_property", "volume",
                                                str(settingvollevel)
                                            ]
                                        }) + "' | socat - /tmp/mpvsocket")
                                elif 'zero'.lower() in str(usrcmd).lower(
                                ) or 'minimum' in str(usrcmd).lower():
                                    settingvollevel = 0
                                    with open('/home/pi/.mediavolume.json',
                                              'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol = os.system(
                                        "echo '" + json.dumps({
                                            "command": [
                                                "set_property", "volume",
                                                str(settingvollevel)
                                            ]
                                        }) + "' | socat - /tmp/mpvsocket")
                                else:
                                    for settingvollevel in re.findall(
                                            r"[-+]?\d*\.\d+|\d+", str(usrcmd)):
                                        with open('/home/pi/.mediavolume.json',
                                                  'w') as vol:
                                            json.dump(settingvollevel, vol)
                                    mpvsetvol = os.system(
                                        "echo '" + json.dumps({
                                            "command": [
                                                "set_property", "volume",
                                                str(settingvollevel)
                                            ]
                                        }) + "' | socat - /tmp/mpvsocket")
                            elif 'increase'.lower() in str(usrcmd).lower(
                            ) or 'decrease'.lower() in str(usrcmd).lower(
                            ) or 'reduce'.lower() in str(usrcmd).lower():
                                if os.path.isfile(
                                        "/home/pi/.mediavolume.json"):
                                    with open('/home/pi/.mediavolume.json',
                                              'r') as vol:
                                        oldvollevel = json.load(vol)
                                        for oldvollevel in re.findall(
                                                r'\b\d+\b', str(oldvollevel)):
                                            oldvollevel = int(oldvollevel)
                                else:
                                    mpvgetvol = subprocess.Popen(
                                        [("echo '" + json.dumps({
                                            "command":
                                            ["get_property", "volume"]
                                        }) + "' | socat - /tmp/mpvsocket")],
                                        shell=True,
                                        stdout=subprocess.PIPE)
                                    output = mpvgetvol.communicate()[0]
                                    for oldvollevel in re.findall(
                                            r"[-+]?\d*\.\d+|\d+", str(output)):
                                        oldvollevel = int(oldvollevel)

                                if 'increase'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit()
                                           for char in str(usrcmd)):
                                        for changevollevel in re.findall(
                                                r'\b\d+\b', str(usrcmd)):
                                            changevollevel = int(
                                                changevollevel)
                                    else:
                                        changevollevel = 10
                                    newvollevel = oldvollevel + changevollevel
                                    print(newvollevel)
                                    if newvollevel > 100:
                                        settingvollevel = 100
                                    elif newvollevel < 0:
                                        settingvollevel = 0
                                    else:
                                        settingvollevel = newvollevel
                                    with open('/home/pi/.mediavolume.json',
                                              'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol = os.system(
                                        "echo '" + json.dumps({
                                            "command": [
                                                "set_property", "volume",
                                                str(settingvollevel)
                                            ]
                                        }) + "' | socat - /tmp/mpvsocket")
                                if 'decrease'.lower() in str(usrcmd).lower(
                                ) or 'reduce'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit()
                                           for char in str(usrcmd)):
                                        for changevollevel in re.findall(
                                                r'\b\d+\b', str(usrcmd)):
                                            changevollevel = int(
                                                changevollevel)
                                    else:
                                        changevollevel = 10
                                    newvollevel = oldvollevel - changevollevel
                                    print(newvollevel)
                                    if newvollevel > 100:
                                        settingvollevel = 100
                                    elif newvollevel < 0:
                                        settingvollevel = 0
                                    else:
                                        settingvollevel = newvollevel
                                    with open('/home/pi/.mediavolume.json',
                                              'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol = os.system(
                                        "echo '" + json.dumps({
                                            "command": [
                                                "set_property", "volume",
                                                str(settingvollevel)
                                            ]
                                        }) + "' | socat - /tmp/mpvsocket")
                            else:
                                say("Sorry I could not help you")
                        else:
                            say("Sorry nothing is playing right now")
                        return continue_conversation

                    if 'refresh'.lower() in str(usrcmd).lower(
                    ) and 'music'.lower() in str(usrcmd).lower():
                        refreshlists()
                        return continue_conversation
                    if 'google music'.lower() in str(usrcmd).lower():
                        os.system('pkill mpv')
                        if os.path.isfile(
                                "/home/pi/GassistPi/src/trackchange.py"):
                            os.system(
                                'rm /home/pi/GassistPi/src/trackchange.py')
                            os.system(
                                'echo "from actions import play_playlist\nfrom actions import play_songs\nfrom actions import play_album\nfrom actions import play_artist\n\n" >> /home/pi/GassistPi/src/trackchange.py'
                            )
                            if 'all the songs'.lower() in str(usrcmd).lower():
                                os.system(
                                    'echo "play_songs()\n" >> /home/pi/GassistPi/src/trackchange.py'
                                )
                                say("Playing all your songs")
                                play_songs()

                            if 'playlist'.lower() in str(usrcmd).lower():
                                if 'first'.lower() in str(usrcmd).lower(
                                ) or 'one'.lower() in str(usrcmd).lower(
                                ) or '1'.lower() in str(usrcmd).lower():
                                    os.system(
                                        'echo "play_playlist(0)\n" >> /home/pi/GassistPi/src/trackchange.py'
                                    )
                                    say("Playing songs from your playlist")
                                    play_playlist(0)
                                else:
                                    say("Sorry I am unable to help")

                            if 'album'.lower() in str(usrcmd).lower():
                                if os.path.isfile(
                                        "/home/pi/.gmusicalbumplayer.json"):
                                    os.system(
                                        "rm /home/pi/.gmusicalbumplayer.json")

                                req = str(usrcmd).lower()
                                idx = (req).find('album')
                                album = req[idx:]
                                album = album.replace("'}", "", 1)
                                album = album.replace('album', '', 1)
                                if 'from'.lower() in req:
                                    album = album.replace('from', '', 1)
                                    album = album.replace(
                                        'google music', '', 1)
                                else:
                                    album = album.replace(
                                        'google music', '', 1)

                                album = album.strip()
                                print(album)
                                albumstr = ('"' + album + '"')
                                f = open(
                                    '/home/pi/GassistPi/src/trackchange.py',
                                    'a+')
                                f.write('play_album(' + albumstr + ')')
                                f.close()
                                say("Looking for songs from the album")
                                play_album(album)

                            if 'artist'.lower() in str(usrcmd).lower():
                                if os.path.isfile(
                                        "/home/pi/.gmusicartistplayer.json"):
                                    os.system(
                                        "rm /home/pi/.gmusicartistplayer.json")

                                req = str(usrcmd).lower()
                                idx = (req).find('artist')
                                artist = req[idx:]
                                artist = artist.replace("'}", "", 1)
                                artist = artist.replace('artist', '', 1)
                                if 'from'.lower() in req:
                                    artist = artist.replace('from', '', 1)
                                    artist = artist.replace(
                                        'google music', '', 1)
                                else:
                                    artist = artist.replace(
                                        'google music', '', 1)

                                artist = artist.strip()
                                print(artist)
                                artiststr = ('"' + artist + '"')
                                f = open(
                                    '/home/pi/GassistPi/src/trackchange.py',
                                    'a+')
                                f.write('play_artist(' + artiststr + ')')
                                f.close()
                                say("Looking for songs rendered by the artist")
                                play_artist(artist)
                        else:
                            os.system(
                                'echo "from actions import play_playlist\nfrom actions import play_songs\nfrom actions import play_album\nfrom actions import play_artist\n\n" >> /home/pi/GassistPi/src/trackchange.py'
                            )
                            if 'all the songs'.lower() in str(usrcmd).lower():
                                os.system(
                                    'echo "play_songs()\n" >> /home/pi/GassistPi/src/trackchange.py'
                                )
                                say("Playing all your songs")
                                play_songs()

                            if 'playlist'.lower() in str(usrcmd).lower():
                                if 'first'.lower() in str(usrcmd).lower(
                                ) or 'one'.lower() in str(usrcmd).lower(
                                ) or '1'.lower() in str(usrcmd).lower():
                                    os.system(
                                        'echo "play_playlist(0)\n" >> /home/pi/GassistPi/src/trackchange.py'
                                    )
                                    say("Playing songs from your playlist")
                                    play_playlist(0)
                                else:
                                    say("Sorry I am unable to help")

                            if 'album'.lower() in str(usrcmd).lower():
                                if os.path.isfile(
                                        "/home/pi/.gmusicalbumplayer.json"):
                                    os.system(
                                        "rm /home/pi/.gmusicalbumplayer.json")

                                req = str(usrcmd).lower()
                                idx = (req).find('album')
                                album = req[idx:]
                                album = album.replace("'}", "", 1)
                                album = album.replace('album', '', 1)
                                if 'from'.lower() in req:
                                    album = album.replace('from', '', 1)
                                    album = album.replace(
                                        'google music', '', 1)
                                else:
                                    album = album.replace(
                                        'google music', '', 1)

                                album = album.strip()
                                print(album)
                                albumstr = ('"' + album + '"')
                                f = open(
                                    '/home/pi/GassistPi/src/trackchange.py',
                                    'a+')
                                f.write('play_album(' + albumstr + ')')
                                f.close()
                                say("Looking for songs from the album")
                                play_album(album)

                            if 'artist'.lower() in str(usrcmd).lower():
                                if os.path.isfile(
                                        "/home/pi/.gmusicartistplayer.json"):
                                    os.system(
                                        "rm /home/pi/.gmusicartistplayer.json")

                                req = str(usrcmd).lower()
                                idx = (req).find('artist')
                                artist = req[idx:]
                                artist = artist.replace("'}", "", 1)
                                artist = artist.replace('artist', '', 1)
                                if 'from'.lower() in req:
                                    artist = artist.replace('from', '', 1)
                                    artist = artist.replace(
                                        'google music', '', 1)
                                else:
                                    artist = artist.replace(
                                        'google music', '', 1)

                                artist = artist.strip()
                                print(artist)
                                artiststr = ('"' + artist + '"')
                                f = open(
                                    '/home/pi/GassistPi/src/trackchange.py',
                                    'a+')
                                f.write('play_artist(' + artiststr + ')')
                                f.close()
                                say("Looking for songs rendered by the artist")
                                play_artist(artist)
                        return continue_conversation

                else:
                    continue
                GPIO.output(5, GPIO.LOW)
                GPIO.output(6, GPIO.HIGH)
                led.ChangeDutyCycle(50)
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                GPIO.output(6, GPIO.LOW)
                GPIO.output(5, GPIO.HIGH)
                led.ChangeDutyCycle(100)
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        GPIO.output(6, GPIO.LOW)
        GPIO.output(5, GPIO.LOW)
        led.ChangeDutyCycle(0)
        #Uncomment the following, after starting Kodi
        #with open('/home/pi/.volume.json', 'r') as f:
        #vollevel = json.load(f)
        #kodi.Application.SetVolume({"volume": vollevel})
        if ismpvplaying():
            if os.path.isfile("/home/pi/.mediavolume.json"):
                with open('/home/pi/.mediavolume.json', 'r') as vol:
                    oldvollevel = json.load(vol)
                print(oldvollevel)
                mpvsetvol = os.system("echo '" + json.dumps(
                    {"command": ["set_property", "volume",
                                 str(oldvollevel)]}) +
                                      "' | socat - /tmp/mpvsocket")
        self.conversation_stream.stop_playback()
        return continue_conversation
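
Example #8 drives mpv's JSON IPC interface by shelling out to `echo ... | socat - /tmp/mpvsocket`. The same call can be made directly from Python over the Unix socket; a minimal sketch, assuming mpv was started with --input-ipc-server=/tmp/mpvsocket as the example's socket path suggests:

    import json
    import socket

    def mpv_set_volume(level, sock_path='/tmp/mpvsocket'):
        # Equivalent of: echo '{"command": ["set_property", "volume", N]}'
        #                | socat - /tmp/mpvsocket
        cmd = json.dumps({"command": ["set_property", "volume", int(level)]})
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.connect(sock_path)
            s.sendall((cmd + '\n').encode('utf-8'))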
Code Example #9
    def assist(self):
        # Configure audio source and sink.
        self.audio_device = None
        self.audio_source = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))

        self.audio_sink = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))

        # Create conversation stream with the given audio source and sink.
        self.conversation_stream = audio_helpers.ConversationStream(
            source=self.audio_source,
            sink=self.audio_sink,
            iter_size=self.audio_iter_size,
            sample_width=self.audio_sample_width)
        restart = False
        continue_dialog = True
        try:
            while continue_dialog:
                continue_dialog = False
                self.conversation_stream.start_recording()
                self.logger.info('Recording audio request.')

                def iter_converse_requests():
                    for c in self.gen_converse_requests():
                        assistant_helpers.log_assist_request_without_audio(c)
                        yield c
                    self.conversation_stream.start_playback()

                # This generator yields ConverseResponse proto messages
                # received from the gRPC Google Assistant API.
                for resp in self.assistant.Converse(iter_converse_requests(),
                                                    self.grpc_deadline):
                    assistant_helpers.log_assist_response_without_audio(resp)
                    if resp.error.code != code_pb2.OK:
                        self.logger.error('server error: %s',
                                          resp.error.message)
                        break
                    if resp.event_type == END_OF_UTTERANCE:
                        self.logger.info('End of audio request detected')
                        self.conversation_stream.stop_recording()
                    if resp.result.spoken_request_text:
                        self.logger.info('Transcript of user request: "%s".',
                                         resp.result.spoken_request_text)
                        srtxt = resp.result.spoken_request_text
                        if text_run(srtxt):
                            self.logger.info(
                                'Got command from the transcript and ran it')
                            self.conversation_stream.stop_playback()
                            break
                        self.logger.info('Playing assistant response.')
                    if len(resp.audio_out.audio_data) > 0:
                        self.conversation_stream.write(
                            resp.audio_out.audio_data)
                    if resp.result.spoken_response_text:
                        self.logger.info(
                            'Transcript of TTS response '
                            '(only populated from IFTTT): "%s".',
                            resp.result.spoken_response_text)
                    if resp.result.conversation_state:
                        self.conversation_state_bytes = resp.result.conversation_state
                    if resp.result.volume_percentage != 0:
                        volume_percentage = resp.result.volume_percentage
                        self.logger.info('Volume should be set to %s%%',
                                         volume_percentage)
                    if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                        continue_dialog = True
                        self.logger.info(
                            'Expecting follow-on query from user.')
                self.logger.info('Finished playing assistant response.')
                self.conversation_stream.stop_playback()
        except Exception as e:
            snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)
            self._create_assistant()
            self.logger.exception('Skipping because of connection reset')
            restart = True
        try:
            self.conversation_stream.close()
            if restart:
                self.assist()
        except Exception:
            self.logger.error('Failed to close conversation_stream.')
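
Example #9 recovers from gRPC connection resets by rebuilding the assistant and calling assist() recursively. A hedged sketch of the same recover-and-retry idea as a flat loop; `_create_assistant()` and the Converse-based `assist()` are assumed to exist as in the example:

    import logging

    def assist_with_retry(assistant, max_retries=3):
        # Retry the conversation a bounded number of times instead of
        # recursing from inside the exception handler.
        for _ in range(max_retries):
            try:
                return assistant.assist()
            except Exception:
                logging.exception('Connection reset; recreating assistant.')
                assistant._create_assistant()
        return None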
Code Example #10
File: pushbutton.py  Project: thigas88/GassistPi
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        subprocess.Popen(["aplay", "{}/sample-audio-files/Fb.wav".format(ROOT_PATH)], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.conversation_stream.start_recording()
        if kodicontrol:
            try:
                status=mutevolstatus()
                vollevel=status[1]
                with open('{}/.volume.json'.format(USER_PATH), 'w') as f:
                    json.dump(vollevel, f)
                kodi.Application.SetVolume({"volume": 0})
            except requests.exceptions.ConnectionError:
                print("Kodi TV box not online")
        if GPIOcontrol:
            assistantindicator('listening')
        if vlcplayer.is_vlc_playing():
            if os.path.isfile("{}/.mediavolume.json".format(USER_PATH)):
                try:
                    with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                        volume = json.load(vol)
                    vlcplayer.set_vlc_volume(15)
                except json.decoder.JSONDecodeError:
                    currentvolume=vlcplayer.get_vlc_volume()
                    print(currentvolume)
                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                        json.dump(currentvolume, vol)
                    vlcplayer.set_vlc_volume(15)
            else:
                currentvolume=vlcplayer.get_vlc_volume()
                print(currentvolume)
                with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                    json.dump(currentvolume, vol)
                vlcplayer.set_vlc_volume(15)

        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info('Transcript of user request: "%s".',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))
                for r in resp.speech_results:
                    usercommand=str(r)

                if "stability: 1.0" in usercommand.lower():
                    usrcmd=str(usercommand).lower()
                    idx=usrcmd.find('stability')
                    usrcmd=usrcmd[:idx]
                    usrcmd=usrcmd.replace("stability","",1)
                    usrcmd=usrcmd.strip()
                    usrcmd=usrcmd.replace('transcript: "','',1)
                    usrcmd=usrcmd.replace('"','',1)
                    usrcmd=usrcmd.strip()
                    print(str(usrcmd))
                    if configuration['DIYHUE']['DIYHUE_Control']=='Enabled':
                        if os.path.isfile('/opt/hue-emulator/config.json'):
                            with open('/opt/hue-emulator/config.json', 'r') as config:
                                 hueconfig = json.load(config)
                            for i in range(1,len(hueconfig['lights'])+1):
                                try:
                                    if str(hueconfig['lights'][str(i)]['name']).lower() in str(usrcmd).lower():
                                        hue_control(str(usrcmd).lower(),str(i),str(hueconfig['lights_address'][str(i)]['ip']))
                                        return continue_conversation
                                except KeyError:
                                    say('Unable to help, please check your config file')
                    if configuration['Tasmota_devicelist']['Tasmota_Control']=='Enabled':
                        for num, name in enumerate(tasmota_devicelist):
                            if name.lower() in str(usrcmd).lower():
                                tasmota_control(str(usrcmd).lower(), name.lower(),tasmota_deviceip[num])
                                return continue_conversation
                    if configuration['Conversation']['Conversation_Control']=='Enabled':
                        for i in range(1,numques+1):
                            try:
                                if str(configuration['Conversation']['question'][i][0]).lower() in str(usrcmd).lower():
                                    selectedans=random.sample(configuration['Conversation']['answer'][i],1)
                                    say(selectedans[0])
                                    return continue_conversation
                            except KeyError:
                                say('Please check if the number of questions matches the number of answers')
                    if Domoticz_Device_Control==True and len(domoticz_devices['result'])>0:
                        for i in range(0,len(domoticz_devices['result'])):
                            if str(domoticz_devices['result'][i]['HardwareName']).lower() in str(usrcmd).lower():
                                domoticz_control(i,str(usrcmd).lower(),domoticz_devices['result'][i]['idx'],domoticz_devices['result'][i]['HardwareName'])
                                return continue_conversation
                    if (custom_action_keyword['Keywords']['Magic_mirror'][0]).lower() in str(usrcmd).lower():
                        try:
                            mmmcommand=str(usrcmd).lower()
                            if 'weather'.lower() in mmmcommand:
                                if 'show'.lower() in mmmcommand:
                                    mmreq_one=requests.get("http://"+mmmip+":8080/remote?action=SHOW&module=module_2_currentweather")
                                    mmreq_two=requests.get("http://"+mmmip+":8080/remote?action=SHOW&module=module_3_currentweather")
                                if 'hide'.lower() in mmmcommand:
                                    mmreq_one=requests.get("http://"+mmmip+":8080/remote?action=HIDE&module=module_2_currentweather")
                                    mmreq_two=requests.get("http://"+mmmip+":8080/remote?action=HIDE&module=module_3_currentweather")
                            if 'power off'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=SHUTDOWN")
                            if 'reboot'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=REBOOT")
                            if 'restart'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=RESTART")
                            if 'display on'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=MONITORON")
                            if 'display off'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=MONITOROFF")
                        except requests.exceptions.ConnectionError:
                            say("Magic mirror not online")
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Recipe_pushbullet'][0]).lower() in str(usrcmd).lower():
                        ingrequest=str(usrcmd).lower()
                        ingredientsidx=ingrequest.find('for')
                        ingrequest=ingrequest[ingredientsidx:]
                        ingrequest=ingrequest.replace('for',"",1)
                        ingrequest=ingrequest.replace("'}","",1)
                        ingrequest=ingrequest.strip()
                        ingrequest=ingrequest.replace(" ","%20",1)
                        getrecipe(ingrequest)
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Kickstarter_tracking'][0]).lower() in str(usrcmd).lower():
                        kickstarter_tracker(str(usrcmd).lower())
                        return continue_conversation
                    if configuration['Raspberrypi_GPIO_Control']['GPIO_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['Pi_GPIO_control'][0]).lower() in str(usrcmd).lower():
                            Action(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['YouTube']['YouTube_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['YouTube_music_stream'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            if 'autoplay'.lower() in str(usrcmd).lower():
                                YouTube_Autoplay(str(usrcmd).lower())
                            else:
                                YouTube_No_Autoplay(str(usrcmd).lower())
                            return continue_conversation
                    if (custom_action_keyword['Keywords']['Stop_music'][0]).lower() in str(usrcmd).lower():
                        stop()
                        return continue_conversation
                    if configuration['Radio_stations']['Radio_Control']=='Enabled':
                        if 'radio'.lower() in str(usrcmd).lower():
                            radio(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['ESP']['ESP_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['ESP_control'][0]).lower() in str(usrcmd).lower():
                            ESP(str(usrcmd).lower())
                            return continue_conversation

                    if (custom_action_keyword['Keywords']['Parcel_tracking'][0]).lower() in str(usrcmd).lower():
                        track()
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['RSS'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['RSS'][1]).lower() in str(usrcmd).lower():
                        feed(str(usrcmd).lower())
                        return continue_conversation
                    if kodicontrol:
                        try:
                            if (custom_action_keyword['Keywords']['Kodi_actions'][0]).lower() in str(usrcmd).lower():
                                kodiactions(str(usrcmd).lower())
                                return continue_conversation
                        except requests.exceptions.ConnectionError:
                            say("Kodi TV box not online")
                            return continue_conversation
                    # Google Assistant now comes built in with chromecast control, so custom function has been commented
                    # if 'chromecast'.lower() in str(usrcmd).lower():
                    #     if 'play'.lower() in str(usrcmd).lower():
                    #         chromecast_play_video(str(usrcmd).lower())
                    #     else:
                    #         chromecast_control(usrcmd)
                    #     return continue_conversation
                    if (custom_action_keyword['Keywords']['Pause_resume'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Pause_resume'][1]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing():
                            if (custom_action_keyword['Keywords']['Pause_resume'][0]).lower() in str(usrcmd).lower():
                                vlcplayer.pause_vlc()
                        if checkvlcpaused():
                            if (custom_action_keyword['Keywords']['Pause_resume'][1]).lower() in str(usrcmd).lower():
                                vlcplayer.play_vlc()
                        elif not vlcplayer.is_vlc_playing() and not checkvlcpaused():
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Track_change']['Next'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Next'][1]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Next'][2]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing() or checkvlcpaused():
                            vlcplayer.stop_vlc()
                            vlcplayer.change_media_next()
                        elif not vlcplayer.is_vlc_playing() and not checkvlcpaused():
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Track_change']['Previous'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Previous'][1]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Previous'][2]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing() or checkvlcpaused():
                            vlcplayer.stop_vlc()
                            vlcplayer.change_media_previous()
                        elif not vlcplayer.is_vlc_playing() and not checkvlcpaused():
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['VLC_music_volume'][0]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing() or checkvlcpaused():
                            if (custom_action_keyword['Dict']['Set']).lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Change'].lower() in str(usrcmd).lower():
                                if 'hundred'.lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Maximum'] in str(usrcmd).lower():
                                    settingvollevel=100
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                elif 'zero'.lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Minimum'] in str(usrcmd).lower():
                                    settingvollevel=0
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                else:
                                    for settingvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(usrcmd)):
                                        with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                            json.dump(settingvollevel, vol)
                                print('Setting volume to: '+str(settingvollevel))
                                vlcplayer.set_vlc_volume(int(settingvollevel))
                            elif custom_action_keyword['Dict']['Increase'].lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Decrease'].lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                if os.path.isfile("{}/.mediavolume.json".format(USER_PATH)):
                                    try:
                                        with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                                            oldvollevel = json.load(vol)
                                            for oldvollevel in re.findall(r'\b\d+\b', str(oldvollevel)):
                                                oldvollevel=int(oldvollevel)
                                    except json.decoder.JSONDecodeError:
                                        output = vlcplayer.get_vlc_volume()
                                        for oldvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(output)):
                                            oldvollevel=int(oldvollevel)
                                else:
                                    output = vlcplayer.get_vlc_volume()
                                    for oldvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(output)):
                                        oldvollevel=int(oldvollevel)
                                if custom_action_keyword['Dict']['Increase'].lower() in str(usrcmd).lower():
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel=int(changevollevel)
                                    else:
                                        changevollevel=10
                                    newvollevel= oldvollevel+ changevollevel
                                    print(newvollevel)
                                    if int(newvollevel)>100:
                                        settingvollevel=100
                                    elif int(newvollevel)<0:
                                        settingvollevel=0
                                    else:
                                        settingvollevel=newvollevel
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    print('Setting volume to: '+str(settingvollevel))
                                    vlcplayer.set_vlc_volume(int(settingvollevel))
                                if custom_action_keyword['Dict']['Decrease'].lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel=int(changevollevel)
                                    else:
                                        changevollevel=10
                                    newvollevel= oldvollevel - changevollevel
                                    print(newvollevel)
                                    if int(newvollevel)>100:
                                        settingvollevel=100
                                    elif int(newvollevel)<0:
                                        settingvollevel=0
                                    else:
                                        settingvollevel=newvollevel
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    print('Setting volume to: '+str(settingvollevel))
                                    vlcplayer.set_vlc_volume(int(settingvollevel))
                            else:
                                say("Sorry I could not help you")
                        else:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Music_index_refresh'][0]).lower() in str(usrcmd).lower() and (custom_action_keyword['Keywords']['Music_index_refresh'][1]).lower() in str(usrcmd).lower():
                        refreshlists()
                        return continue_conversation
                    if configuration['Gmusicapi']['Gmusic_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['Google_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            gmusicselect(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['Spotify']['Spotify_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['Spotify_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            spotify_playlist_select(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['Gaana']['Gaana_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['Gaana_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            gaana_playlist_select(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['Deezer']['Deezer_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['Deezer_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            deezer_playlist_select(str(usrcmd).lower())
                            return continue_conversation
                    else:
                        continue
                if GPIOcontrol:
                    assistantindicator('speaking')

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                if GPIOcontrol:
                    assistantindicator('listening')
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                if GPIOcontrol:
                    assistantindicator('off')
                if kodicontrol:
                    try:
                        with open('{}/.volume.json'.format(USER_PATH), 'r') as f:
                            vollevel = json.load(f)
                            kodi.Application.SetVolume({"volume": vollevel})
                    except requests.exceptions.ConnectionError:
                        print("Kodi TV box not online")

                if vlcplayer.is_vlc_playing():
                    with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                        oldvolume= json.load(vol)
                    vlcplayer.set_vlc_volume(int(oldvolume))
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        if GPIOcontrol:
            assistantindicator('off')
        if kodicontrol:
            try:
                with open('{}/.volume.json'.format(USER_PATH), 'r') as f:
                    vollevel = json.load(f)
                    kodi.Application.SetVolume({"volume": vollevel})
            except requests.exceptions.ConnectionError:
                print("Kodi TV box not online")

        if vlcplayer.is_vlc_playing():
            with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                oldvolume = json.load(vol)
            vlcplayer.set_vlc_volume(int(oldvolume))
        return continue_conversation
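
A note on the transcript handling above: slicing str(r) around "stability" and stripping 'transcript: "' depends on the text form of the protobuf and is easy to break. The same values are available as fields on each speech result, which コード例 #12 below reads directly. A minimal sketch, assuming only what these examples already show (each entry of resp.speech_results exposes .transcript and .stability, and stability == 1.0 marks a finalized result):

def extract_final_command(resp):
    """Return the finalized transcript in lower case, or None if not final yet."""
    usrcmd = None
    for r in resp.speech_results:
        # .transcript and .stability carry the same values the string parsing
        # above recovers from str(r); stability == 1.0 marks a final result.
        if r.stability == 1.0:
            usrcmd = r.transcript.lower().strip()
    return usrcmd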
コード例 #11
0
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        play_audio_file(resources['fb'])
        self.conversation_stream.start_recording()
        #Uncomment the following after starting the Kodi
        #status=mutevolstatus()
        #vollevel=status[1]
        #with open(os.path.expanduser('~/.volume.json'), 'w') as f:
        #json.dump(vollevel, f)
        #kodi.Application.SetVolume({"volume": 0})
        if GPIO is not None:
            GPIO.output(5, GPIO.HIGH)
            led.ChangeDutyCycle(100)
        if ismpvplaying():
            if os.path.isfile(os.path.expanduser("~/.mediavolume.json")):
                mpvsetvol = os.system(
                    "echo '" +
                    json.dumps({"command": ["set_property", "volume", "10"]}) +
                    "' | socat - /tmp/mpvsocket")
            else:
                mpvgetvol = subprocess.Popen(
                    [("echo '" +
                      json.dumps({"command": ["get_property", "volume"]}) +
                      "' | socat - /tmp/mpvsocket")],
                    shell=True,
                    stdout=subprocess.PIPE)
                output = mpvgetvol.communicate()[0]
                for currntvol in re.findall(r"[-+]?\d*\.\d+|\d+", str(output)):
                    with open(os.path.expanduser('~/.mediavolume.json'),
                              'w') as vol:
                        json.dump(currntvol, vol)
                mpvsetvol = os.system(
                    "echo '" +
                    json.dumps({"command": ["set_property", "volume", "10"]}) +
                    "' | socat - /tmp/mpvsocket")

        logging.info('Recording audio request.')

        def iter_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                if GPIO is not None:
                    GPIO.output(5, GPIO.LOW)
                    led.ChangeDutyCycle(0)
                self.conversation_stream.stop_recording()
                print('Full Speech Result ' + str(resp.speech_results))
            if resp.speech_results:
                logging.info(
                    'Transcript of user request: "%s".',
                    ' '.join(r.transcript for r in resp.speech_results))

                for r in resp.speech_results:
                    usercommand = str(r)

                if "stability: 1.0" in usercommand.lower():
                    usrcmd = str(usercommand).lower()
                    idx = usrcmd.find('stability')
                    usrcmd = usrcmd[:idx]
                    usrcmd = usrcmd.replace("stability", "", 1)
                    usrcmd = usrcmd.strip()
                    usrcmd = usrcmd.replace('transcript: "', '', 1)
                    usrcmd = usrcmd.replace('"', '', 1)
                    usrcmd = usrcmd.strip()
                    for item in tasmota_devicelist:
                        if item['friendly-name'] in str(usrcmd).lower():
                            tasmota_control(str(usrcmd).lower(), item)
                            return continue_conversation
                    with open('{}/src/diyHue/config.json'.format(ROOT_PATH),
                              'r') as config:
                        hueconfig = json.load(config)
                    for i in range(1, len(hueconfig['lights']) + 1):
                        try:
                            if str(hueconfig['lights'][str(i)]
                                   ['name']).lower() in str(usrcmd).lower():
                                assistant.stop_conversation()
                                hue_control(
                                    str(usrcmd).lower(), str(i),
                                    str(hueconfig['lights_address'][str(i)]
                                        ['ip']))
                                break
                        except KeyError:
                            say('Unable to help, please check your config file')

                    if 'magic mirror'.lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        try:
                            mmmcommand = str(usrcmd).lower()
                            if 'weather'.lower() in mmmcommand:
                                if 'show'.lower() in mmmcommand:
                                    mmreq_one = requests.get(
                                        "http://" + mmmip +
                                        ":8080/remote?action=SHOW&module=module_2_currentweather"
                                    )
                                    mmreq_two = requests.get(
                                        "http://" + mmmip +
                                        ":8080/remote?action=SHOW&module=module_3_currentweather"
                                    )
                                if 'hide'.lower() in mmmcommand:
                                    mmreq_one = requests.get(
                                        "http://" + mmmip +
                                        ":8080/remote?action=HIDE&module=module_2_currentweather"
                                    )
                                    mmreq_two = requests.get(
                                        "http://" + mmmip +
                                        ":8080/remote?action=HIDE&module=module_3_currentweather"
                                    )
                            if 'power off'.lower() in mmmcommand:
                                mmreq = requests.get(
                                    "http://" + mmmip +
                                    ":8080/remote?action=SHUTDOWN")
                            if 'reboot'.lower() in mmmcommand:
                                mmreq = requests.get(
                                    "http://" + mmmip +
                                    ":8080/remote?action=REBOOT")
                            if 'restart'.lower() in mmmcommand:
                                mmreq = requests.get(
                                    "http://" + mmmip +
                                    ":8080/remote?action=RESTART")
                            if 'display on'.lower() in mmmcommand:
                                mmreq = requests.get(
                                    "http://" + mmmip +
                                    ":8080/remote?action=MONITORON")
                            if 'display off'.lower() in mmmcommand:
                                mmreq = requests.get(
                                    "http://" + mmmip +
                                    ":8080/remote?action=MONITOROFF")
                        except requests.exceptions.ConnectionError:
                            say("Magic mirror not online")
                    if 'ingredients'.lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        ingrequest = str(usrcmd).lower()
                        ingredientsidx = ingrequest.find('for')
                        ingrequest = ingrequest[ingredientsidx:]
                        ingrequest = ingrequest.replace('for', "", 1)
                        ingrequest = ingrequest.replace("'}", "", 1)
                        ingrequest = ingrequest.strip()
                        ingrequest = ingrequest.replace(" ", "%20", 1)
                        getrecipe(ingrequest)
                    if 'kickstarter'.lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        kickstarter_tracker(str(usrcmd).lower())
                    if 'trigger'.lower() in str(usrcmd).lower():
                        Action(str(usrcmd).lower())
                        return continue_conversation
                    if 'stream'.lower() in str(usrcmd).lower():
                        os.system('pkill mpv')
                        if os.path.isfile(
                                "{}/src/trackchange.py".format(ROOT_PATH)):
                            os.system(
                                'rm {}/src/trackchange.py'.format(ROOT_PATH))
                            os.system(
                                'echo "from actions import youtubeplayer\n\n" >> {}/src/trackchange.py'
                                .format(ROOT_PATH))
                            os.system(
                                'echo "youtubeplayer()\n" >> {}/src/trackchange.py'
                                .format(ROOT_PATH))
                            if 'autoplay'.lower() in str(usrcmd).lower():
                                YouTube_Autoplay(str(usrcmd).lower())
                            else:
                                YouTube_No_Autoplay(str(usrcmd).lower())
                        else:
                            os.system(
                                'echo "from actions import youtubeplayer\n\n" >> {}/src/trackchange.py'
                                .format(ROOT_PATH))
                            os.system(
                                'echo "youtubeplayer()\n" >> {}/src/trackchange.py'
                                .format(ROOT_PATH))
                            if 'autoplay'.lower() in str(usrcmd).lower():
                                YouTube_Autoplay(str(usrcmd).lower())
                            else:
                                YouTube_No_Autoplay(str(usrcmd).lower())
                        return continue_conversation
                    if 'stop'.lower() in str(usrcmd).lower():
                        stop()
                        return continue_conversation
                    if 'radio'.lower() in str(usrcmd).lower():
                        radio(str(usrcmd).lower())
                        return continue_conversation
                    if 'wireless'.lower() in str(usrcmd).lower():
                        ESP(str(usrcmd).lower())
                        return continue_conversation
                    if 'parcel'.lower() in str(usrcmd).lower():
                        track()
                        return continue_conversation
                    if 'news'.lower() in str(usrcmd).lower() or 'feed'.lower(
                    ) in str(usrcmd).lower() or 'quote'.lower() in str(
                            usrcmd).lower():
                        feed(str(usrcmd).lower())
                        return continue_conversation
                    if 'on kodi'.lower() in str(usrcmd).lower():
                        kodiactions(str(usrcmd).lower())
                        return continue_conversation
                    if 'chromecast'.lower() in str(usrcmd).lower():
                        if 'play'.lower() in str(usrcmd).lower():
                            chromecast_play_video(str(usrcmd).lower())
                        else:
                            chromecast_control(usrcmd)
                        return continue_conversation
                    if 'pause music'.lower() in str(usrcmd).lower(
                    ) or 'resume music'.lower() in str(usrcmd).lower():
                        if ismpvplaying():
                            if 'pause music'.lower() in str(usrcmd).lower():
                                playstatus = os.system("echo '" + json.dumps({
                                    "command": ["set_property", "pause", True]
                                }) + "' | socat - /tmp/mpvsocket")
                            elif 'resume music'.lower() in str(usrcmd).lower():
                                playstatus = os.system("echo '" + json.dumps({
                                    "command":
                                    ["set_property", "pause", False]
                                }) + "' | socat - /tmp/mpvsocket")
                        else:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if 'music volume'.lower() in str(usrcmd).lower():
                        if ismpvplaying():
                            if 'set'.lower() in str(usrcmd).lower(
                            ) or 'change'.lower() in str(usrcmd).lower():
                                if 'hundred'.lower() in str(usrcmd).lower(
                                ) or 'maximum' in str(usrcmd).lower():
                                    settingvollevel = 100
                                    with open(
                                            os.path.expanduser(
                                                '~/.mediavolume.json'),
                                            'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol = os.system(
                                        "echo '" + json.dumps({
                                            "command": [
                                                "set_property", "volume",
                                                str(settingvollevel)
                                            ]
                                        }) + "' | socat - /tmp/mpvsocket")
                                elif 'zero'.lower() in str(usrcmd).lower(
                                ) or 'minimum' in str(usrcmd).lower():
                                    settingvollevel = 0
                                    with open(
                                            os.path.expanduser(
                                                '~/.mediavolume.json'),
                                            'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol = os.system(
                                        "echo '" + json.dumps({
                                            "command": [
                                                "set_property", "volume",
                                                str(settingvollevel)
                                            ]
                                        }) + "' | socat - /tmp/mpvsocket")
                                else:
                                    for settingvollevel in re.findall(
                                            r"[-+]?\d*\.\d+|\d+", str(usrcmd)):
                                        with open(
                                                os.path.expanduser(
                                                    '~/.mediavolume.json'),
                                                'w') as vol:
                                            json.dump(settingvollevel, vol)
                                    mpvsetvol = os.system(
                                        "echo '" + json.dumps({
                                            "command": [
                                                "set_property", "volume",
                                                str(settingvollevel)
                                            ]
                                        }) + "' | socat - /tmp/mpvsocket")
                            elif 'increase'.lower() in str(usrcmd).lower(
                            ) or 'decrease'.lower() in str(usrcmd).lower(
                            ) or 'reduce'.lower() in str(usrcmd).lower():
                                if os.path.isfile(
                                        os.path.expanduser(
                                            "~/.mediavolume.json")):
                                    with open(
                                            os.path.expanduser(
                                                '~/.mediavolume.json'),
                                            'r') as vol:
                                        oldvollevel = json.load(vol)
                                        for oldvollevel in re.findall(
                                                r'\b\d+\b', str(oldvollevel)):
                                            oldvollevel = int(oldvollevel)
                                else:
                                    mpvgetvol = subprocess.Popen(
                                        [("echo '" + json.dumps({
                                            "command":
                                            ["get_property", "volume"]
                                        }) + "' | socat - /tmp/mpvsocket")],
                                        shell=True,
                                        stdout=subprocess.PIPE)
                                    output = mpvgetvol.communicate()[0]
                                    for oldvollevel in re.findall(
                                            r"[-+]?\d*\.\d+|\d+", str(output)):
                                        oldvollevel = int(oldvollevel)

                                if 'increase'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit()
                                           for char in str(usrcmd)):
                                        for changevollevel in re.findall(
                                                r'\b\d+\b', str(usrcmd)):
                                            changevollevel = int(
                                                changevollevel)
                                    else:
                                        changevollevel = 10
                                    newvollevel = oldvollevel + changevollevel
                                    print(newvollevel)
                                    if newvollevel > 100:
                                        settingvollevel = 100
                                    elif newvollevel < 0:
                                        settingvollevel = 0
                                    else:
                                        settingvollevel = newvollevel
                                    with open(
                                            os.path.expanduser(
                                                '~/.mediavolume.json'),
                                            'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol = os.system(
                                        "echo '" + json.dumps({
                                            "command": [
                                                "set_property", "volume",
                                                str(settingvollevel)
                                            ]
                                        }) + "' | socat - /tmp/mpvsocket")
                                if 'decrease'.lower() in str(usrcmd).lower(
                                ) or 'reduce'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit()
                                           for char in str(usrcmd)):
                                        for changevollevel in re.findall(
                                                r'\b\d+\b', str(usrcmd)):
                                            changevollevel = int(
                                                changevollevel)
                                    else:
                                        changevollevel = 10
                                    newvollevel = oldvollevel - changevollevel
                                    print(newvollevel)
                                    if newvollevel > 100:
                                        settingvollevel = 100
                                    elif newvollevel < 0:
                                        settingvollevel = 0
                                    else:
                                        settingvollevel = newvollevel
                                    with open(
                                            os.path.expanduser(
                                                '~/.mediavolume.json'),
                                            'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol = os.system(
                                        "echo '" + json.dumps({
                                            "command": [
                                                "set_property", "volume",
                                                str(settingvollevel)
                                            ]
                                        }) + "' | socat - /tmp/mpvsocket")
                            else:
                                say("Sorry I could not help you")
                        else:
                            say("Sorry nothing is playing right now")
                        return continue_conversation

                    if 'refresh'.lower() in str(usrcmd).lower(
                    ) and 'music'.lower() in str(usrcmd).lower():
                        refreshlists()
                        return continue_conversation
                    if 'google music'.lower() in str(usrcmd).lower():
                        os.system('pkill mpv')
                        if os.path.isfile(
                                "{}/src/trackchange.py".format(ROOT_PATH)):
                            os.system(
                                'rm {}/src/trackchange.py'.format(ROOT_PATH))

                            gmusicselect(str(usrcmd).lower())
                        else:

                            gmusicselect(str(usrcmd).lower())
                        return continue_conversation

                    else:
                        continue
                if GPIO is not None:
                    GPIO.output(5, GPIO.LOW)
                    GPIO.output(6, GPIO.HIGH)
                    led.ChangeDutyCycle(50)
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                if GPIO is not None:
                    GPIO.output(6, GPIO.LOW)
                    GPIO.output(5, GPIO.HIGH)
                    led.ChangeDutyCycle(100)
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:

                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
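
The mpv handling in コード例 #11 rebuilds the same "echo '<json>' | socat - /tmp/mpvsocket" pipeline for every get_property/set_property call. A minimal sketch of a helper that factors that pipeline out, assuming the same /tmp/mpvsocket IPC socket and that socat is installed (the helper name is illustrative, not part of GassistPi):

import json
import subprocess

MPV_SOCKET = '/tmp/mpvsocket'

def mpv_ipc(*command):
    """Send one mpv JSON IPC command through socat and return the raw reply bytes."""
    payload = json.dumps({"command": list(command)})
    proc = subprocess.Popen("echo '{}' | socat - {}".format(payload, MPV_SOCKET),
                            shell=True, stdout=subprocess.PIPE)
    return proc.communicate()[0]

# Usage mirroring the calls above:
#   mpv_ipc("set_property", "volume", "10")
#   reply = mpv_ipc("get_property", "volume")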
コード例 #12
0
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        mytranscript = ' '
        mystability = 0.1
        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                for r in resp.speech_results:
                    #logging.info('Transcript of user request: "%s" %f',r.transcript,r.stability)
                    mytranscript = r.transcript
                    mystability = r.stability
            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')  # Play response
                    logging.info('My user request: "%s" %f', mytranscript,
                                 mystability)
                    if "сколько" in mytranscript.lower():
                        # Do your local voice actions here
                        logging.info('Found "сколько" in the transcript, stopping early')
                        self.conversation_stream.stop_playback()
                        return False
                self.conversation_stream.write(
                    resp.audio_out.audio_data
                )  # occurs many times while answering
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
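
コード例 #12 shows the hook point for local voice actions: watch the streaming transcript and stop playback as soon as a trigger word appears. The long if-chains in the earlier examples implement the same idea keyword by keyword; a minimal sketch of that dispatch pattern, with a placeholder map standing in for the project's own action functions:

# Hypothetical keyword-to-handler map; the handlers are assumed to exist
# elsewhere (for example the radio()/track() style actions used above).
LOCAL_ACTIONS = {
    'radio': lambda text: radio(text),
    'parcel': lambda text: track(),
}

def dispatch_local_action(transcript):
    """Run the first matching local action; return True if one handled the request."""
    text = transcript.lower()
    for keyword, handler in LOCAL_ACTIONS.items():
        if keyword in text:
            handler(text)
            return True
    return False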
コード例 #13
0
    def start(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        recognition_result = ""
        answer = ""
        device_actions_futures = []

        self.conversation_stream.start_recording()
        print('... Recording audio request ...')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            # print('Reached end of AssistRequest iteration.')

        def get_result(response):
            # type: (google.assistant.embedded.v1alpha2.embedded_assistant_pb2.AssistResponse) -> Recognition_Result
            words = ""
            for word in response.speech_results:
                words += word.transcript
            return words

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)

            if resp.speech_results:
                recognition_result = get_result(resp)
                if self.is_debug:
                    print("[Debug] %s" % recognition_result)

            if resp.dialog_state_out.supplemental_display_text:
                answer = resp.dialog_state_out.supplemental_display_text
                if self.is_debug:
                    print("[Speech] [ %s ]\n" % recognition_result)
                    print("[Answer] [ %s ]\n" % answer)
                if not self.is_answer:
                    self.conversation_stream.stop_recording()
                    return recognition_result, answer

            # End of the user's utterance
            if resp.event_type == END_OF_UTTERANCE:
                if not self.is_debug and not self.is_answer:
                    return recognition_result, answer

                # print('End of audio request detected.')
                # print('Stopping recording.')

                # if not self.is_answer:
                #    self.conversation_stream.stop_recording()
                #    return recognition_result

            # Play back the assistant's response
            if self.is_answer:
                if len(resp.audio_out.audio_data) > 0:
                    if not self.conversation_stream.playing:
                        self.conversation_stream.stop_recording()
                        self.conversation_stream.start_playback()
                        # print('Playing assistant response.')
                    # Audio playback
                    self.conversation_stream.write(resp.audio_out.audio_data)

            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                print('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                print('Setting volume to %s%%' % volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                print('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            #print('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        print('Finished playing assistant response.')
        if self.conversation_stream.playing:
            self.conversation_stream.stop_playback()
        return recognition_result, answer
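
Because コード例 #13 returns both the recognized text and the assistant's supplemental display text, the caller decides how to use them. A short usage sketch, assuming `assistant` is an already-constructed instance of the class above with its is_debug/is_answer flags configured:

query, answer = assistant.start()
if query:
    print('[User] %s' % query)
if answer:
    print('[Answer] %s' % answer)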
コード例 #14
0
File: pushbutton.py  Project: ascillato/GassistPi
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        subprocess.Popen(["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.conversation_stream.start_recording()
        #Uncomment the following after starting the Kodi
        #status=mutevolstatus()
        #vollevel=status[1]
        #with open('/home/pi/.volume.json', 'w') as f:
               #json.dump(vollevel, f)
        #kodi.Application.SetVolume({"volume": 0})
        GPIO.output(5,GPIO.HIGH)
        led.ChangeDutyCycle(100)
        if ismpvplaying():
            if os.path.isfile("/home/pi/.mediavolume.json"):
                mpvsetvol=os.system("echo '"+json.dumps({ "command": ["set_property", "volume","10"]})+"' | socat - /tmp/mpvsocket")
            else:
                mpvgetvol=subprocess.Popen([("echo '"+json.dumps({ "command": ["get_property", "volume"]})+"' | socat - /tmp/mpvsocket")],shell=True, stdout=subprocess.PIPE)
                output=mpvgetvol.communicate()[0]
                for currntvol in re.findall(r"[-+]?\d*\.\d+|\d+", str(output)):
                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                        json.dump(currntvol, vol)
                mpvsetvol=os.system("echo '"+json.dumps({ "command": ["set_property", "volume","10"]})+"' | socat - /tmp/mpvsocket")

        logging.info('Recording audio request.')

        def iter_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                GPIO.output(5,GPIO.LOW)
                led.ChangeDutyCycle(0)
                self.conversation_stream.stop_recording()

            if resp.speech_results:
                logging.info('Transcript of user request: "%s".',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))

                for r in resp.speech_results:
                    usercommand=str(r)

                if "stability: 1.0" in usercommand.lower():
                    usrcmd=str(usercommand).lower()
                    idx=usrcmd.find('stability')
                    usrcmd=usrcmd[:idx]
                    usrcmd=usrcmd.replace("stability","",1)
                    usrcmd=usrcmd.strip()
                    usrcmd=usrcmd.replace('transcript: "','',1)
                    usrcmd=usrcmd.replace('"','',1)
                    usrcmd=usrcmd.strip()
                    print(str(usrcmd))
                    if 'trigger'.lower() in str(usrcmd).lower():
                        Action(str(usrcmd).lower())
                        return continue_conversation
                    if 'stream'.lower() in str(usrcmd).lower():
                        os.system('pkill mpv')
                        if os.path.isfile("/home/pi/GassistPi/src/trackchange.py"):
                            os.system('rm /home/pi/GassistPi/src/trackchange.py')
                            os.system('echo "from actions import youtubeplayer\n\n" >> /home/pi/GassistPi/src/trackchange.py')
                            os.system('echo "youtubeplayer()\n" >> /home/pi/GassistPi/src/trackchange.py')
                            if 'autoplay'.lower() in str(usrcmd).lower():
                                YouTube_Autoplay(str(usrcmd).lower())
                            else:
                                YouTube_No_Autoplay(str(usrcmd).lower())
                        else:
                            os.system('echo "from actions import youtubeplayer\n\n" >> /home/pi/GassistPi/src/trackchange.py')
                            os.system('echo "youtubeplayer()\n" >> /home/pi/GassistPi/src/trackchange.py')
                            if 'autoplay'.lower() in str(usrcmd).lower():
                                YouTube_Autoplay(str(usrcmd).lower())
                            else:
                                YouTube_No_Autoplay(str(usrcmd).lower())
                        return continue_conversation
                    if 'stop'.lower() in str(usrcmd).lower():
                        stop()
                        return continue_conversation
                    if 'radio'.lower() in str(usrcmd).lower():
                        radio(str(usrcmd).lower())
                        return continue_conversation
                    if 'wireless'.lower() in str(usrcmd).lower():
                        ESP(str(usrcmd).lower())
                        return continue_conversation
                    if 'parcel'.lower() in str(usrcmd).lower():
                        track()
                        return continue_conversation
                    if 'news'.lower() in str(usrcmd).lower() or 'feed'.lower() in str(usrcmd).lower() or 'quote'.lower() in str(usrcmd).lower():
                        feed(str(usrcmd).lower())
                        return continue_conversation
                    if 'on kodi'.lower() in str(usrcmd).lower():
                        kodiactions(str(usrcmd).lower())
                        return continue_conversation
                    if 'chromecast'.lower() in str(usrcmd).lower():
                        if 'play'.lower() in str(usrcmd).lower():
                            chromecast_play_video(str(usrcmd).lower())
                        else:
                            chromecast_control(usrcmd)
                        return continue_conversation
                    if 'pause music'.lower() in str(usrcmd).lower() or 'resume music'.lower() in str(usrcmd).lower():
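                        # mpv is driven over its JSON IPC socket: socat writes a set_property "pause" command to /tmp/mpvsocket.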
                        if ismpvplaying():
                            if 'pause music'.lower() in str(usrcmd).lower():
                                playstatus=os.system("echo '"+json.dumps({ "command": ["set_property", "pause", True]})+"' | socat - /tmp/mpvsocket")
                            elif 'resume music'.lower() in str(usrcmd).lower():
                                playstatus=os.system("echo '"+json.dumps({ "command": ["set_property", "pause", False]})+"' | socat - /tmp/mpvsocket")
                        else:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if 'music volume'.lower() in str(usrcmd).lower():
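                        # "set"/"change" picks an absolute volume; "increase"/"decrease"/"reduce" adjusts relative to the
                        # level cached in /home/pi/.mediavolume.json (or mpv's current volume if no cache exists).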
                        if ismpvplaying():
                            if 'set'.lower() in str(usrcmd).lower() or 'change'.lower() in str(usrcmd).lower():
                                if 'hundred'.lower() in str(usrcmd).lower() or 'maximum' in str(usrcmd).lower():
                                    settingvollevel=100
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol=os.system("echo '"+json.dumps({ "command": ["set_property", "volume",str(settingvollevel)]})+"' | socat - /tmp/mpvsocket")
                                elif 'zero'.lower() in str(usrcmd).lower() or 'minimum' in str(usrcmd).lower():
                                    settingvollevel=0
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol=os.system("echo '"+json.dumps({ "command": ["set_property", "volume",str(settingvollevel)]})+"' | socat - /tmp/mpvsocket")
                                else:
                                    for settingvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(usrcmd)):
                                        with open('/home/pi/.mediavolume.json', 'w') as vol:
                                            json.dump(settingvollevel, vol)
                                    mpvsetvol=os.system("echo '"+json.dumps({ "command": ["set_property", "volume",str(settingvollevel)]})+"' | socat - /tmp/mpvsocket")
                            elif 'increase'.lower() in str(usrcmd).lower() or 'decrease'.lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                if os.path.isfile("/home/pi/.mediavolume.json"):
                                    with open('/home/pi/.mediavolume.json', 'r') as vol:
                                        oldvollevel = json.load(vol)
                                        for oldvollevel in re.findall(r'\b\d+\b', str(oldvollevel)):
                                            oldvollevel=int(oldvollevel)
                                else:
                                    mpvgetvol=subprocess.Popen([("echo '"+json.dumps({ "command": ["get_property", "volume"]})+"' | socat - /tmp/mpvsocket")],shell=True, stdout=subprocess.PIPE)
                                    output=mpvgetvol.communicate()[0]
                                    for oldvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(output)):
                                        oldvollevel=int(oldvollevel)

                                if 'increase'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel=int(changevollevel)
                                    else:
                                        changevollevel=10
                                    newvollevel= oldvollevel+ changevollevel
                                    print(newvollevel)
                                    if newvollevel>100:
                                        settingvollevel=100
                                    elif newvollevel<0:
                                        settingvollevel=0
                                    else:
                                        settingvollevel=newvollevel
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol=os.system("echo '"+json.dumps({ "command": ["set_property", "volume",str(settingvollevel)]})+"' | socat - /tmp/mpvsocket")
                                if 'decrease'.lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel=int(changevollevel)
                                    else:
                                        changevollevel=10
                                    newvollevel= oldvollevel - changevollevel
                                    print(newvollevel)
                                    if newvollevel>100:
                                        settingvollevel=100
                                    elif newvollevel<0:
                                        settingvollevel=0
                                    else:
                                        settingvollevel=newvollevel
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    mpvsetvol=os.system("echo '"+json.dumps({ "command": ["set_property", "volume",str(settingvollevel)]})+"' | socat - /tmp/mpvsocket")
                            else:
                                say("Sorry I could not help you")
                        else:
                            say("Sorry nothing is playing right now")
                        return continue_conversation

                    if 'refresh'.lower() in str(usrcmd).lower() and 'music'.lower() in str(usrcmd).lower():
                        refreshlists()
                        return continue_conversation
                    if 'google music'.lower() in str(usrcmd).lower():
                        os.system('pkill mpv')
                        if os.path.isfile("/home/pi/GassistPi/src/trackchange.py"):
                            os.system('rm /home/pi/GassistPi/src/trackchange.py')
                            os.system('echo "from actions import play_playlist\nfrom actions import play_songs\nfrom actions import play_album\nfrom actions import play_artist\n\n" >> /home/pi/GassistPi/src/trackchange.py')
                            if 'all the songs'.lower() in str(usrcmd).lower():
                                os.system('echo "play_songs()\n" >> /home/pi/GassistPi/src/trackchange.py')
                                say("Playing all your songs")
                                play_songs()

                            if 'playlist'.lower() in str(usrcmd).lower():
                                if 'first'.lower() in str(usrcmd).lower() or 'one'.lower() in str(usrcmd).lower()  or '1'.lower() in str(usrcmd).lower():
                                    os.system('echo "play_playlist(0)\n" >> /home/pi/GassistPi/src/trackchange.py')
                                    say("Playing songs from your playlist")
                                    play_playlist(0)
                                else:
                                    say("Sorry I am unable to help")

                            if 'album'.lower() in str(usrcmd).lower():
                                if os.path.isfile("/home/pi/.gmusicalbumplayer.json"):
                                    os.system("rm /home/pi/.gmusicalbumplayer.json")

                                req=str(usrcmd).lower()
                                idx=(req).find('album')
                                album=req[idx:]
                                album=album.replace("'}", "",1)
                                album = album.replace('album','',1)
                                if 'from'.lower() in req:
                                    album = album.replace('from','',1)
                                    album = album.replace('google music','',1)
                                else:
                                    album = album.replace('google music','',1)

                                album=album.strip()
                                print(album)
                                albumstr=('"'+album+'"')
                                f = open('/home/pi/GassistPi/src/trackchange.py', 'a+')
                                f.write('play_album('+albumstr+')')
                                f.close()
                                say("Looking for songs from the album")
                                play_album(album)

                            if 'artist'.lower() in str(usrcmd).lower():
                                if os.path.isfile("/home/pi/.gmusicartistplayer.json"):
                                    os.system("rm /home/pi/.gmusicartistplayer.json")

                                req=str(usrcmd).lower()
                                idx=(req).find('artist')
                                artist=req[idx:]
                                artist=artist.replace("'}", "",1)
                                artist = artist.replace('artist','',1)
                                if 'from'.lower() in req:
                                    artist = artist.replace('from','',1)
                                    artist = artist.replace('google music','',1)
                                else:
                                    artist = artist.replace('google music','',1)

                                artist=artist.strip()
                                print(artist)
                                artiststr=('"'+artist+'"')
                                f = open('/home/pi/GassistPi/src/trackchange.py', 'a+')
                                f.write('play_artist('+artiststr+')')
                                f.close()
                                say("Looking for songs rendered by the artist")
                                play_artist(artist)
                        else:
                            os.system('echo "from actions import play_playlist\nfrom actions import play_songs\nfrom actions import play_album\nfrom actions import play_artist\n\n" >> /home/pi/GassistPi/src/trackchange.py')
                            if 'all the songs'.lower() in str(usrcmd).lower():
                                os.system('echo "play_songs()\n" >> /home/pi/GassistPi/src/trackchange.py')
                                say("Playing all your songs")
                                play_songs()

                            if 'playlist'.lower() in str(usrcmd).lower():
                                if 'first'.lower() in str(usrcmd).lower() or 'one'.lower() in str(usrcmd).lower()  or '1'.lower() in str(usrcmd).lower():
                                    os.system('echo "play_playlist(0)\n" >> /home/pi/GassistPi/src/trackchange.py')
                                    say("Playing songs from your playlist")
                                    play_playlist(0)
                                else:
                                    say("Sorry I am unable to help")

                            if 'album'.lower() in str(usrcmd).lower():
                                if os.path.isfile("/home/pi/.gmusicalbumplayer.json"):
                                    os.system("rm /home/pi/.gmusicalbumplayer.json")

                                req=str(usrcmd).lower()
                                idx=(req).find('album')
                                album=req[idx:]
                                album=album.replace("'}", "",1)
                                album = album.replace('album','',1)
                                if 'from'.lower() in req:
                                    album = album.replace('from','',1)
                                    album = album.replace('google music','',1)
                                else:
                                    album = album.replace('google music','',1)

                                album=album.strip()
                                print(album)
                                albumstr=('"'+album+'"')
                                f = open('/home/pi/GassistPi/src/trackchange.py', 'a+')
                                f.write('play_album('+albumstr+')')
                                f.close()
                                say("Looking for songs from the album")
                                play_album(album)

                            if 'artist'.lower() in str(usrcmd).lower():
                                if os.path.isfile("/home/pi/.gmusicartistplayer.json"):
                                    os.system("rm /home/pi/.gmusicartistplayer.json")

                                req=str(usrcmd).lower()
                                idx=(req).find('artist')
                                artist=req[idx:]
                                artist=artist.replace("'}", "",1)
                                artist = artist.replace('artist','',1)
                                if 'from'.lower() in req:
                                    artist = artist.replace('from','',1)
                                    artist = artist.replace('google music','',1)
                                else:
                                    artist = artist.replace('google music','',1)

                                artist=artist.strip()
                                print(artist)
                                artiststr=('"'+artist+'"')
                                f = open('/home/pi/GassistPi/src/trackchange.py', 'a+')
                                f.write('play_artist('+artiststr+')')
                                f.close()
                                say("Looking for songs rendered by the artist")
                                play_artist(artist)
                        return continue_conversation

                else:
                    continue
                GPIO.output(5,GPIO.LOW)
                GPIO.output(6,GPIO.HIGH)
                led.ChangeDutyCycle(50)
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                GPIO.output(6,GPIO.LOW)
                GPIO.output(5,GPIO.HIGH)
                led.ChangeDutyCycle(100)
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        GPIO.output(6,GPIO.LOW)
        GPIO.output(5,GPIO.LOW)
        led.ChangeDutyCycle(0)
        #Uncomment the following, after starting Kodi
        #with open('/home/pi/.volume.json', 'r') as f:
               #vollevel = json.load(f)
               #kodi.Application.SetVolume({"volume": vollevel})
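        # Restore the previously saved mpv volume once the assistant has finished speaking.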
        if ismpvplaying():
            if os.path.isfile("/home/pi/.mediavolume.json"):
                with open('/home/pi/.mediavolume.json', 'r') as vol:
                    oldvollevel = json.load(vol)
                print(oldvollevel)
                mpvsetvol=os.system("echo '"+json.dumps({ "command": ["set_property", "volume",str(oldvollevel)]})+"' | socat - /tmp/mpvsocket")
        self.conversation_stream.stop_playback()
        return continue_conversation
Code example #15
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        # drain audio
        self.conversation_stream.stop_recording()
        self.conversation_stream.start_recording()

        logging.info('Recording audio request.')

        self.event_queue.put('on_listen')
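        # 'on_listen' signals the start of recording; 'on_think', 'on_speak' and 'on_idle' are pushed later
        # in the response loop for an external state handler (e.g. LEDs or a UI) to consume.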

        def iter_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.info('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                # if self.conversation_stream.recording:
                # self.conversation_stream.stop_recording()
                self.event_queue.put('on_think')
            if resp.speech_results:
                logging.info(
                    'Transcript of user request: "%s".',
                    ' '.join(r.transcript for r in resp.speech_results))
            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    # turn off capture device to playback
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    self.event_queue.put('on_speak')
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
                if not resp.dialog_state_out.volume_percentage:
                    self.event_queue.put('on_speak')
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
                time.sleep(1)  # without this delay a PortAudioError is raised
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        if self.conversation_stream.playing:
            self.conversation_stream.stop_playback()

        # capture device should always be opened when not playing
        self.conversation_stream.start_recording()
        if not continue_conversation:
            logging.info('Complete conversation.')
            self.event_queue.put('on_idle')
        else:
            logging.info('Continue conversation.')
        return continue_conversation
Code example #16
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        subprocess.Popen(["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.conversation_stream.start_recording()
        #Uncomment the following after starting the Kodi
        #status=mutevolstatus()
        #vollevel=status[1]
        #with open('/home/pi/.volume.json', 'w') as f:
               #json.dump(vollevel, f)
        #kodi.Application.SetVolume({"volume": 0})
        GPIO.output(5,GPIO.HIGH)
        led.ChangeDutyCycle(100)
        logging.info('Recording audio request.')

        def iter_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                GPIO.output(5,GPIO.LOW)
                led.ChangeDutyCycle(0)
                self.conversation_stream.stop_recording()
                print('Full Speech Result '+str(resp.speech_results))
            if resp.speech_results:
                logging.info('Transcript of user request: "%s".',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))
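                # The raw speech_results field is stringified and used directly as the command text,
                # so it still contains proto field labels such as transcript and stability.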
                usrcmd=resp.speech_results
                print(str(usrcmd))
                if 'trigger' in str(usrcmd).lower():
                    Action(str(usrcmd).lower())
                    return continue_conversation
                if 'stream'.lower() in str(usrcmd).lower():
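                    # Recover the requested track name from the stringified speech result before handing it to the YouTube streamer.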
                    streamtrack=str(usrcmd).lower()
                    idx=streamtrack.find('stability')
                    streamtrack=streamtrack[:idx]
                    streamtrack=streamtrack.replace("stability","",1)
                    streamtrack=streamtrack.strip()
                    idx=streamtrack.find('stream')
                    streamtrack=streamtrack[:idx]
                    streamtrack=streamtrack.replace("stream","",1)
                    streamtrack=("stream " + streamtrack)
                    streamtrack=streamtrack.replace('[transcript: "','',1)
                    streamtrack=streamtrack.strip()
                    print(streamtrack)
                    YouTube_No_Autoplay(streamtrack)
                    return continue_conversation
                if 'stop'.lower() in str(usrcmd).lower():
                    stop()
                    return continue_conversation
                if 'tune into'.lower() in str(usrcmd).lower():
                    radio(str(usrcmd).lower())
                    return continue_conversation
                if 'wireless'.lower() in str(usrcmd).lower():
                    ESP(str(usrcmd).lower())
                    return continue_conversation
                if 'parcel'.lower() in str(usrcmd).lower():
                    track()
                    return continue_conversation
                if 'news'.lower() in str(usrcmd).lower() or 'feed'.lower() in str(usrcmd).lower() or 'quote'.lower() in str(usrcmd).lower():
                    feed(str(usrcmd).lower())
                    return continue_conversation
                if 'on kodi'.lower() in str(usrcmd).lower():
                    kodiactions(str(usrcmd).lower())
                    return continue_conversation
                else:
                    continue
                GPIO.output(5,GPIO.LOW)
                GPIO.output(6,GPIO.HIGH)
                led.ChangeDutyCycle(50)
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                GPIO.output(6,GPIO.LOW)
                GPIO.output(5,GPIO.HIGH)
                led.ChangeDutyCycle(100)
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        GPIO.output(6,GPIO.LOW)
        GPIO.output(5,GPIO.LOW)
        led.ChangeDutyCycle(0)
        #Uncomment the following, after starting Kodi
        #with open('/home/pi/.volume.json', 'r') as f:
               #vollevel = json.load(f)
               #kodi.Application.SetVolume({"volume": vollevel})
        self.conversation_stream.stop_playback()
        return continue_conversation
Code example #17
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        if self.on_conversation_start:
            self.on_conversation_start()

        self.play_response = True
        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()

                if self.detected_speech and self.on_speech_recognized:
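                    # Hand the final transcript to the registered on_speech_recognized callback.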
                    self.on_speech_recognized(self.detected_speech)

            if resp.speech_results:
                self.detected_speech = ' '.join(
                    r.transcript.strip() for r in resp.speech_results
                    if len(r.transcript.strip())).strip()

                logging.info('Transcript of user request: "%s".',
                             self.detected_speech)
            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()

                    if self.play_response:
                        self.conversation_stream.start_playback()
                        logging.info('Playing assistant response.')

                if self.play_response and self.conversation_stream.playing:
                    self.conversation_stream.write(resp.audio_out.audio_data)
                elif self.conversation_stream.playing:
                    self.conversation_stream.stop_playback()
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage

                if self.on_volume_changed:
                    self.on_volume_changed(volume_percentage)
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

            if resp.dialog_state_out.supplemental_display_text and self.on_response:
                self.on_response(
                    resp.dialog_state_out.supplemental_display_text)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()

        if self.on_conversation_end:
            self.on_conversation_end(continue_conversation)

        return continue_conversation
Code example #18
File: pushbutton.py Project: tomarsnap/GassistPi
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        subprocess.Popen(["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.conversation_stream.start_recording()
        #Uncomment the following after starting the Kodi
        #status=mutevolstatus()
        #vollevel=status[1]
        #with open('/home/pi/.volume.json', 'w') as f:
               #json.dump(vollevel, f)
        #kodi.Application.SetVolume({"volume": 0})
        GPIO.output(5,GPIO.HIGH)
        led.ChangeDutyCycle(100)
        if vlcplayer.is_vlc_playing():
            if os.path.isfile("/home/pi/.mediavolume.json"):
                vlcplayer.set_vlc_volume(15)
            else:
                currentvolume=vlcplayer.get_vlc_volume()
                print(currentvolume)
                with open('/home/pi/.mediavolume.json', 'w') as vol:
                   json.dump(currentvolume, vol)
                vlcplayer.set_vlc_volume(15)

        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info('Transcript of user request: "%s".',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))
                for r in resp.speech_results:
                    usercommand=str(r)

                if "stability: 1.0" in usercommand.lower():
                    usrcmd=str(usercommand).lower()
                    idx=usrcmd.find('stability')
                    usrcmd=usrcmd[:idx]
                    usrcmd=usrcmd.replace("stability","",1)
                    usrcmd=usrcmd.strip()
                    usrcmd=usrcmd.replace('transcript: "','',1)
                    usrcmd=usrcmd.replace('"','',1)
                    usrcmd=usrcmd.strip()
                    print(str(usrcmd))
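                    # Match the spoken command against light names from the diyHue config and forward it
                    # to hue_control() together with the light's index and configured IP.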
                    with open('/home/pi/GassistPi/src/diyHue/config.json', 'r') as config:
                         hueconfig = json.load(config)
                    for i in range(1,len(hueconfig['lights'])+1):
                        try:
                            if str(hueconfig['lights'][str(i)]['name']).lower() in str(usrcmd).lower():
                                hue_control(str(usrcmd).lower(),str(i),str(hueconfig['lights_address'][str(i)]['ip']))
                                return continue_conversation
                        except KeyError:
                            say('Unable to help, please check your config file')
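                    # Tasmota devices from tasmota_devicelist are matched by name and passed to tasmota_control() along with their configured IP.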
                    for num, name in enumerate(tasmota_devicelist):
                        if name.lower() in str(usrcmd).lower():
                            tasmota_control(str(usrcmd).lower(), name.lower(),tasmota_deviceip[num])
                            return continue_conversation
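                    # MagicMirror commands are issued as HTTP GET requests to its remote-control endpoint on port 8080.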
                    if 'magic mirror'.lower() in str(usrcmd).lower():
                        try:
                            mmmcommand=str(usrcmd).lower()
                            if 'weather'.lower() in mmmcommand:
                                if 'show'.lower() in mmmcommand:
                                    mmreq_one=requests.get("http://"+mmmip+":8080/remote?action=SHOW&module=module_2_currentweather")
                                    mmreq_two=requests.get("http://"+mmmip+":8080/remote?action=SHOW&module=module_3_currentweather")
                                if 'hide'.lower() in mmmcommand:
                                    mmreq_one=requests.get("http://"+mmmip+":8080/remote?action=HIDE&module=module_2_currentweather")
                                    mmreq_two=requests.get("http://"+mmmip+":8080/remote?action=HIDE&module=module_3_currentweather")
                            if 'power off'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=SHUTDOWN")
                            if 'reboot'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=REBOOT")
                            if 'restart'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=RESTART")
                            if 'display on'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=MONITORON")
                            if 'display off'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=MONITOROFF")
                        except requests.exceptions.ConnectionError:
                            say("Magic mirror not online")
                        return continue_conversation
                    if 'ingredients'.lower() in str(usrcmd).lower():
                        ingrequest=str(usrcmd).lower()
                        ingredientsidx=ingrequest.find('for')
                        ingrequest=ingrequest[ingredientsidx:]
                        ingrequest=ingrequest.replace('for',"",1)
                        ingrequest=ingrequest.replace("'}","",1)
                        ingrequest=ingrequest.strip()
                        ingrequest=ingrequest.replace(" ","%20",1)
                        getrecipe(ingrequest)
                        return continue_conversation
                    if 'kickstarter'.lower() in str(usrcmd).lower():
                        kickstarter_tracker(str(usrcmd).lower())
                        return continue_conversation
                    if 'trigger'.lower() in str(usrcmd).lower():
                        Action(str(usrcmd).lower())
                        return continue_conversation
                    if 'stream'.lower() in str(usrcmd).lower():
                        vlcplayer.stop_vlc()
                        if 'autoplay'.lower() in str(usrcmd).lower():
                            YouTube_Autoplay(str(usrcmd).lower())
                        else:
                            YouTube_No_Autoplay(str(usrcmd).lower())
                        return continue_conversation
                    if 'stop'.lower() in str(usrcmd).lower():
                        stop()
                    if 'radio'.lower() in str(usrcmd).lower():
                        radio(str(usrcmd).lower())
                        return continue_conversation
                    if 'wireless'.lower() in str(usrcmd).lower():
                        ESP(str(usrcmd).lower())
                        return continue_conversation
                    if 'parcel'.lower() in str(usrcmd).lower():
                        track()
                        return continue_conversation
                    if 'news'.lower() in str(usrcmd).lower() or 'feed'.lower() in str(usrcmd).lower() or 'quote'.lower() in str(usrcmd).lower():
                        feed(str(usrcmd).lower())
                        return continue_conversation
                    if 'on kodi'.lower() in str(usrcmd).lower():
                        kodiactions(str(usrcmd).lower())
                        return continue_conversation
                    # Google Assistant now comes built in with chromecast control, so custom function has been commented
                    # if 'chromecast'.lower() in str(usrcmd).lower():
                    #     if 'play'.lower() in str(usrcmd).lower():
                    #         chromecast_play_video(str(usrcmd).lower())
                    #     else:
                    #         chromecast_control(usrcmd)
                    #     return continue_conversation
                    if 'pause music'.lower() in str(usrcmd).lower() or 'resume music'.lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing():
                            if 'pause music'.lower() in str(usrcmd).lower():
                                vlcplayer.pause_vlc()
                        if checkvlcpaused():
                            if 'resume music'.lower() in str(usrcmd).lower():
                                vlcplayer.play_vlc()
                        elif vlcplayer.is_vlc_playing()==False and checkvlcpaused()==False:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if 'music volume'.lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing()==True or checkvlcpaused()==True:
                            if 'set'.lower() in str(usrcmd).lower() or 'change'.lower() in str(usrcmd).lower():
                                if 'hundred'.lower() in str(usrcmd).lower() or 'maximum' in str(usrcmd).lower():
                                    settingvollevel=100
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                elif 'zero'.lower() in str(usrcmd).lower() or 'minimum' in str(usrcmd).lower():
                                    settingvollevel=0
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                else:
                                    for settingvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(usrcmd)):
                                        with open('/home/pi/.mediavolume.json', 'w') as vol:
                                            json.dump(settingvollevel, vol)
                                print('Setting volume to: '+str(settingvollevel))
                                vlcplayer.set_vlc_volume(int(settingvollevel))
                            elif 'increase'.lower() in str(usrcmd).lower() or 'decrease'.lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                if os.path.isfile("/home/pi/.mediavolume.json"):
                                    with open('/home/pi/.mediavolume.json', 'r') as vol:
                                        oldvollevel = json.load(vol)
                                        for oldvollevel in re.findall(r'\b\d+\b', str(oldvollevel)):
                                            oldvollevel=int(oldvollevel)
                                else:
                                    oldvollevel=vlcplayer.get_vlc_volume()
                                    for oldvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(oldvollevel)):
                                        oldvollevel=int(oldvollevel)
                                if 'increase'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel=int(changevollevel)
                                    else:
                                        changevollevel=10
                                    newvollevel= oldvollevel+ changevollevel
                                    print(newvollevel)
                                    if int(newvollevel)>100:
                                        settingvollevel=100
                                    elif int(newvollevel)<0:
                                        settingvollevel=0
                                    else:
                                        settingvollevel=newvollevel
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    print('Setting volume to: '+str(settingvollevel))
                                    vlcplayer.set_vlc_volume(int(settingvollevel))
                                if 'decrease'.lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel=int(changevollevel)
                                    else:
                                        changevollevel=10
                                    newvollevel= oldvollevel - changevollevel
                                    print(newvollevel)
                                    if int(newvollevel)>100:
                                        settingvollevel=100
                                    elif int(newvollevel)<0:
                                        settingvollevel=0
                                    else:
                                        settingvollevel=newvollevel
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    print('Setting volume to: '+str(settingvollevel))
                                    vlcplayer.set_vlc_volume(int(settingvollevel))
                            else:
                                say("Sorry I could not help you")
                        else:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if 'refresh'.lower() in str(usrcmd).lower() and 'music'.lower() in str(usrcmd).lower():
                        refreshlists()
                        return continue_conversation
                    if 'google music'.lower() in str(usrcmd).lower():
                        vlcplayer.stop_vlc()
                        gmusicselect(str(usrcmd).lower())
                        return continue_conversation
                    if 'spotify'.lower() in str(usrcmd).lower():
                        vlcplayer.stop_vlc()
                        spotify_playlist_select(str(usrcmd).lower())
                        return continue_conversation
                    else:
                        continue
                GPIO.output(5,GPIO.LOW)
                GPIO.output(6,GPIO.HIGH)
                led.ChangeDutyCycle(50)

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                GPIO.output(6,GPIO.LOW)
                GPIO.output(5,GPIO.HIGH)
                led.ChangeDutyCycle(100)
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                GPIO.output(6,GPIO.LOW)
                GPIO.output(5,GPIO.LOW)
                led.ChangeDutyCycle(0)
                #Uncomment the following after starting the Kodi
                #with open('/home/pi/.volume.json', 'r') as f:
                       #vollevel = json.load(f)
                       #kodi.Application.SetVolume({"volume": vollevel})
                if vlcplayer.is_vlc_playing():
                    with open('/home/pi/.mediavolume.json', 'r') as vol:
                        oldvolume= json.load(vol)
                    vlcplayer.set_vlc_volume(int(oldvolume))
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        GPIO.output(6,GPIO.LOW)
        GPIO.output(5,GPIO.LOW)
        led.ChangeDutyCycle(0)
        #Uncomment the following after starting the Kodi
        #with open('/home/pi/.volume.json', 'r') as f:
               #vollevel = json.load(f)
               #kodi.Application.SetVolume({"volume": vollevel})
        if vlcplayer.is_vlc_playing():
            with open('/home/pi/.mediavolume.json', 'r') as vol:
                oldvolume= json.load(vol)
            vlcplayer.set_vlc_volume(int(oldvolume))
        self.conversation_stream.stop_playback()
        return continue_conversation
Code example #19
File: assistant.py Project: 09ubberboy90/RoghecV2
    def assist(self, canvas):
        device_actions_futures = []

        # Configure audio source and sink.
        self.audio_device = None
        self.audio_source = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))

        self.audio_sink = self.audio_device = (
            self.audio_device or audio_helpers.SoundDeviceStream(
                sample_rate=self.audio_sample_rate,
                sample_width=self.audio_sample_width,
                block_size=self.audio_block_size,
                flush_size=self.audio_flush_size))

        # Create conversation stream with the given audio source and sink.
        self.conversation_stream = audio_helpers.ConversationStream(
            source=self.audio_source,
            sink=self.audio_sink,
            iter_size=self.audio_iter_size,
            sample_width=self.audio_sample_width)
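        # restart is set if the gRPC request fails; the assistant channel is then recreated and assist() is retried (see the except handler below).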
        restart = False
        continue_dialog = True
        try:
            while continue_dialog:
                continue_dialog = False
                self.conversation_stream.start_recording()
                self.logger.info('Recording audio request.')

                def iter_log_assist_requests():
                    for c in self.gen_assist_requests():
                        assistant_helpers.log_assist_request_without_audio(c)
                        yield c
                    logging.debug('Reached end of AssistRequest iteration.')

                # This generator yields AssistResponse proto messages
                # received from the gRPC Google Assistant API.
                for resp in self.assistant.Assist(iter_log_assist_requests(),
                                                  self.grpc_deadline):
                    assistant_helpers.log_assist_response_without_audio(resp)
                    if resp.event_type == END_OF_UTTERANCE:
                        logging.info('End of audio request detected.')
                        logging.info('Stopping recording.')
                        self.conversation_stream.stop_recording()
                    if resp.speech_results:
                        mess = ' '.join(r.transcript
                                        for r in resp.speech_results)
                        logging.info('Transcript of user request: "%s".', mess)
                        canvas[1]['text'] = mess
                        if self.once:
                            self.custom_command = google_control.custom_command_handler(
                                mess, canvas)
                    if len(resp.audio_out.audio_data
                           ) > 0 and not self.custom_command:
                        if not self.conversation_stream.playing:
                            self.conversation_stream.stop_recording()
                            self.conversation_stream.start_playback()
                            logging.info('Playing assistant response.')
                        self.conversation_stream.write(
                            resp.audio_out.audio_data)
                    if resp.dialog_state_out.conversation_state:
                        conversation_state = resp.dialog_state_out.conversation_state
                        logging.debug('Updating conversation state.')
                        self.conversation_state = conversation_state
                    if resp.dialog_state_out.volume_percentage != 0:
                        volume_percentage = resp.dialog_state_out.volume_percentage
                        logging.info('Setting volume to %s%%',
                                     volume_percentage)
                        self.conversation_stream.volume_percentage = volume_percentage
                    if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                        continue_dialog = True
                        logging.info('Expecting follow-on query from user.')
                    elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                        continue_dialog = False
                    if resp.device_action.device_request_json:
                        device_request = json.loads(
                            resp.device_action.device_request_json)
                        fs = self.device_handler(device_request)
                        if fs:
                            device_actions_futures.extend(fs)
                    if self.display and resp.screen_out.data and not self.custom_command:
                        system_browser = browser_helpers.system_browser
                        system_browser.display(resp.screen_out.data)
                        self.scrapper(canvas)

                self.logger.info('Finished playing assistant response.')
                self.conversation_stream.stop_playback()
        except Exception as e:
            self._create_assistant()
            self.logger.exception('Skipping because of connection reset')
            restart = True
        try:
            self.conversation_stream.close()
            if restart:
                self.assist(canvas)
        except Exception:
            self.logger.error('Failed to close conversation_stream.')
        self.once = True
Code example #20
    def assist(self):
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        #logging.info('Recording audio request.')
        msg('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')
            msg('Reached end of AssistRequest iteration.')

        def iter_log_assist_requests2(text_query):
            # Same as iter_log_assist_requests, but sends the transcribed
            # phrase back to the Assistant as a text query.
            for c in self.gen_assist_requests2(text_query):
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')
            msg('Reached end of AssistRequest iteration.')

        done = False
        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.

        phrase = ''

        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                msg('End of audio request detected.')
                msg('Stopping recording.')
                self.conversation_stream.stop_recording()
                done = True
            if resp.speech_results and done:
                # Concatenate the partial transcripts and strip spaces (Japanese input).
                text = ''.join(r.transcript for r in resp.speech_results)
                text = text.replace(' ', '')

                if phrase == '':
                    phrase = text
                    print('>> ' + phrase)
                    # Saying "終了" ("end") terminates the conversation loop.
                    if phrase == '終了':
                        self.conversation_stream.stop_recording()
                        return False
                    phrase = phrase.replace(' ', '、')

        if phrase == '':
            self.conversation_stream.stop_recording()
            return False

        resp_text = ''
        for resp in self.assistant.Assist(iter_log_assist_requests2(phrase),
                                          self.deadline):
            if resp.dialog_state_out.supplemental_display_text != '':
                resp_text = resp_text + resp.dialog_state_out.supplemental_display_text

            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    msg('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        msg('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        resp_text = resp_text.replace(' ', '')
        print('<< ' + resp_text)
        return True
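
This example transcribes the spoken query in a first Assist call and then replays it as a text query through gen_assist_requests2, which is not shown. A minimal sketch of such a text-query generator, assuming the standard AssistConfig fields and the same instance attributes used above (language_code, device_id, device_model_id, conversation_state, conversation_stream), might look like:

    def gen_assist_requests2(self, text_query):
        # Sketch only: build a single AssistRequest that carries the transcribed
        # phrase as text_query instead of streaming audio_in.
        dialog_state_in = embedded_assistant_pb2.DialogStateIn(
            language_code=self.language_code,
            conversation_state=self.conversation_state or b'')
        config = embedded_assistant_pb2.AssistConfig(
            audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                encoding='LINEAR16',
                sample_rate_hertz=self.conversation_stream.sample_rate,
                volume_percentage=self.conversation_stream.volume_percentage,
            ),
            dialog_state_in=dialog_state_in,
            device_config=embedded_assistant_pb2.DeviceConfig(
                device_id=self.device_id,
                device_model_id=self.device_model_id,
            ),
            text_query=text_query,
        )
        yield embedded_assistant_pb2.AssistRequest(config=config)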
Code example #21
0
File: pushtotalk.py  Project: shineit/platypush
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []

        self.conversation_stream.start_recording()
        logging.info('Recording audio request.')

        if self.on_conversation_start:
            self.on_conversation_start()

        def iter_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            self.conversation_stream.start_playback()

        user_request = None

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == self.END_OF_UTTERANCE:
                logging.info('End of audio request detected')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                user_request = ' '.join(r.transcript
                                        for r in resp.speech_results)

                logging.info('Transcript of user request: "%s".', user_request)
                logging.info('Playing assistant response.')
            if len(resp.audio_out.audio_data) > 0:
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == self.DIALOG_FOLLOW_ON:
                continue_conversation = True
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == self.CLOSE_MICROPHONE:
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json)
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')

        try:
            self.conversation_stream.stop_playback()
        except Exception:
            # Playback may already be stopped or the stream closed; ignore either case.
            pass

        if user_request and self.on_speech_recognized:
            self.on_speech_recognized(user_request)

        return (user_request, continue_conversation)
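
Since this variant returns a (user_request, continue_conversation) tuple rather than a bare boolean, the caller has to unpack it. A hypothetical driver loop, in which the `assistant` object and its wiring are assumed rather than taken from the example, might look like:

# Hypothetical driver: `assistant` is an instance of the class above with its
# conversation_stream, device_handler and callbacks already configured.
wait_for_user_trigger = True
while True:
    if wait_for_user_trigger:
        input('Press Enter to send a new request... ')
    user_request, follow_on = assistant.assist()
    # Listen again immediately only when the Assistant expects a follow-on query.
    wait_for_user_trigger = not follow_on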