Example #1
 def buttonsinglepress(self):
     if os.path.isfile("{}/.mute".format(USER_PATH)):
         os.system("sudo rm {}/.mute".format(USER_PATH))
         assistantindicator('unmute')
         if configuration['Wakewords']['Ok_Google'] == 'Disabled':
             self.assistant.set_mic_mute(True)
         else:
             self.assistant.set_mic_mute(False)
         # if custom_wakeword:
         #     self.t1.start()
         subprocess.Popen([
             "aplay", "{}/sample-audio-files/Mic-On.wav".format(ROOT_PATH)
         ],
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
         print("Turning on the microphone")
     else:
         open('{}/.mute'.format(USER_PATH), 'a').close()
         assistantindicator('mute')
         self.assistant.set_mic_mute(True)
         # if custom_wakeword:
         #     self.thread_end(t1)
         subprocess.Popen([
             "aplay", "{}/sample-audio-files/Mic-Off.wav".format(ROOT_PATH)
         ],
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
         print("Turning off the microphone")
 def buttonsinglepress(self):
     if os.path.isfile("{}/.mute".format(USER_PATH)):
         os.system("sudo rm {}/.mute".format(USER_PATH))
         assistantindicator('unmute')
         self.assistant.set_mic_mute(False)
         # if custom_wakeword:
         #     self.t1.start()
         subprocess.Popen(["aplay", "{}/sample-audio-files/Mic-On.wav".format(ROOT_PATH)],
                          stdin=subprocess.PIPE,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
         print("Turning on the microphone")
Example #3
File: main.py  Project: vfciot/GassistPi
 def buttonsinglepress(self):
     if os.path.isfile("{}/.mute".format(USER_PATH)):
         os.system("sudo rm {}/.mute".format(USER_PATH))
         assistantindicator('unmute')
         if configuration['Wakewords']['Ok_Google']=='Disabled':
             self.assistant.set_mic_mute(True)
             print("Mic is open, but Ok-Google is disabled")
         else:
             self.assistant.set_mic_mute(False)
         # if custom_wakeword:
         #     self.t1.start()
             print("Turning on the microphone")
     else:
         open('{}/.mute'.format(USER_PATH), 'a').close()
         assistantindicator('mute')
         self.assistant.set_mic_mute(True)
         # if custom_wakeword:
         #     self.thread_end(t1)
         print("Turning off the microphone")
 def buttonSinglePress(self):
     if os.path.isfile("/.mute"):
         os.system("sudo rm /.mute")
         assistantindicator('unmute')
         if configuration['Wakewords']['Ok_Google'] == 'Disabled':
             self.assistant.set_mic_mute(True)
         else:
             self.assistant.set_mic_mute(False)
         if gender == 'Male':
             subprocess.Popen(
                 ['aplay', "/resources/sample-audio-files/Mic-On-Male.wav"],
                 stdin=subprocess.PIPE,
                 stderr=subprocess.PIPE)
         else:
             subprocess.Popen([
                 'aplay', "/resources/sample-audio-files/Mic-On-Female.wav"
             ],
                              stdin=subprocess.PIPE,
                              stderr=subprocess.PIPE)
         print("Turning on the microphone")
     else:
         open('/.mute', 'a').close()
         assistantindicator('mute')
         self.assistant.set_mic_mute(True)
         # The original snippet replays the Mic-On sounds here as well; presumably
         # the matching Mic-Off files were intended for the mute branch.
         if gender == 'Male':
             subprocess.Popen(
                 ['aplay', "/resources/sample-audio-files/Mic-Off-Male.wav"],
                 stdin=subprocess.PIPE,
                 stderr=subprocess.PIPE)
         else:
             subprocess.Popen(
                 ['aplay', "/resources/sample-audio-files/Mic-Off-Female.wav"],
                 stdin=subprocess.PIPE,
                 stderr=subprocess.PIPE)
         print("Turning off the microphone")
Example #5
    def assist(self):
        """Send a voice request to the Assistant and playback the response.

        Returns: True if conversation should continue.
        """
        continue_conversation = False
        device_actions_futures = []
        subprocess.Popen(["aplay", "{}/sample-audio-files/Fb.wav".format(ROOT_PATH)], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.conversation_stream.start_recording()
        if kodicontrol:
            try:
                status=mutevolstatus()
                vollevel=status[1]
                with open('{}/.volume.json'.format(USER_PATH), 'w') as f:
                       json.dump(vollevel, f)
                kodi.Application.SetVolume({"volume": 0})
            except requests.exceptions.ConnectionError:
                print("Kodi TV box not online")
        if GPIOcontrol:
            assistantindicator('listening')
        if vlcplayer.is_vlc_playing():
            if os.path.isfile("{}/.mediavolume.json".format(USER_PATH)):
                try:
                    with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                        volume = json.load(vol)
                    vlcplayer.set_vlc_volume(15)
                except json.decoder.JSONDecodeError:
                    currentvolume=vlcplayer.get_vlc_volume()
                    print(currentvolume)
                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                       json.dump(currentvolume, vol)
                    vlcplayer.set_vlc_volume(15)
            else:
                currentvolume=vlcplayer.get_vlc_volume()
                print(currentvolume)
                with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                   json.dump(currentvolume, vol)
                vlcplayer.set_vlc_volume(15)

        logging.info('Recording audio request.')

        def iter_log_assist_requests():
            for c in self.gen_assist_requests():
                assistant_helpers.log_assist_request_without_audio(c)
                yield c
            logging.debug('Reached end of AssistRequest iteration.')

        # This generator yields AssistResponse proto messages
        # received from the gRPC Google Assistant API.
        for resp in self.assistant.Assist(iter_log_assist_requests(),
                                          self.deadline):
            assistant_helpers.log_assist_response_without_audio(resp)
            if resp.event_type == END_OF_UTTERANCE:
                logging.info('End of audio request detected.')
                logging.info('Stopping recording.')
                self.conversation_stream.stop_recording()
            if resp.speech_results:
                logging.info('Transcript of user request: "%s".',
                             ' '.join(r.transcript
                                      for r in resp.speech_results))
                for r in resp.speech_results:
                    usercommand=str(r)

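                # A result that reports "stability: 1.0" is the final transcript;
                # the text is recovered from the proto's string form and stripped
                # down before keyword matching.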
                if "stability: 1.0" in usercommand.lower():
                    usrcmd=str(usercommand).lower()
                    idx=usrcmd.find('stability')
                    usrcmd=usrcmd[:idx]
                    usrcmd=usrcmd.replace("stability","",1)
                    usrcmd=usrcmd.strip()
                    usrcmd=usrcmd.replace('transcript: "','',1)
                    usrcmd=usrcmd.replace('"','',1)
                    usrcmd=usrcmd.strip()
                    print(str(usrcmd))
                    if configuration['DIYHUE']['DIYHUE_Control']=='Enabled':
                        if os.path.isfile('/opt/hue-emulator/config.json'):
                            with open('/opt/hue-emulator/config.json', 'r') as config:
                                 hueconfig = json.load(config)
                            for i in range(1,len(hueconfig['lights'])+1):
                                try:
                                    if str(hueconfig['lights'][str(i)]['name']).lower() in str(usrcmd).lower():
                                        hue_control(str(usrcmd).lower(),str(i),str(hueconfig['lights_address'][str(i)]['ip']))
                                        return continue_conversation
                                except KeyError:
                                    say('Unable to help, please check your config file')
                    if configuration['Tasmota_devicelist']['Tasmota_Control']=='Enabled':
                        for num, name in enumerate(tasmota_devicelist):
                            if name.lower() in str(usrcmd).lower():
                                tasmota_control(str(usrcmd).lower(), name.lower(),tasmota_deviceip[num])
                                return continue_conversation
                    if configuration['Conversation']['Conversation_Control']=='Enabled':
                        for i in range(1,numques+1):
                            try:
                                if str(configuration['Conversation']['question'][i][0]).lower() in str(usrcmd).lower():
                                    selectedans=random.sample(configuration['Conversation']['answer'][i],1)
                                    say(selectedans[0])
                                    return continue_conversation
                            except KeyError:
                                say('Please check if the number of questions matches the number of answers')
                    if Domoticz_Device_Control==True and len(domoticz_devices['result'])>0:
                        for i in range(0,len(domoticz_devices['result'])):
                            if str(domoticz_devices['result'][i]['HardwareName']).lower() in str(usrcmd).lower():
                                domoticz_control(i,str(usrcmd).lower(),domoticz_devices['result'][i]['idx'],domoticz_devices['result'][i]['HardwareName'])
                                return continue_conversation
                    if (custom_action_keyword['Keywords']['Magic_mirror'][0]).lower() in str(usrcmd).lower():
                        try:
                            mmmcommand=str(usrcmd).lower()
                            if 'weather'.lower() in mmmcommand:
                                if 'show'.lower() in mmmcommand:
                                    mmreq_one=requests.get("http://"+mmmip+":8080/remote?action=SHOW&module=module_2_currentweather")
                                    mmreq_two=requests.get("http://"+mmmip+":8080/remote?action=SHOW&module=module_3_currentweather")
                                if 'hide'.lower() in mmmcommand:
                                    mmreq_one=requests.get("http://"+mmmip+":8080/remote?action=HIDE&module=module_2_currentweather")
                                    mmreq_two=requests.get("http://"+mmmip+":8080/remote?action=HIDE&module=module_3_currentweather")
                            if 'power off'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=SHUTDOWN")
                            if 'reboot'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=REBOOT")
                            if 'restart'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=RESTART")
                            if 'display on'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=MONITORON")
                            if 'display off'.lower() in mmmcommand:
                                mmreq=requests.get("http://"+mmmip+":8080/remote?action=MONITOROFF")
                        except requests.exceptions.ConnectionError:
                            say("Magic mirror not online")
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Recipe_pushbullet'][0]).lower() in str(usrcmd).lower():
                        ingrequest=str(usrcmd).lower()
                        ingredientsidx=ingrequest.find('for')
                        ingrequest=ingrequest[ingredientsidx:]
                        ingrequest=ingrequest.replace('for',"",1)
                        ingrequest=ingrequest.replace("'}","",1)
                        ingrequest=ingrequest.strip()
                        ingrequest=ingrequest.replace(" ","%20",1)
                        getrecipe(ingrequest)
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Kickstarter_tracking'][0]).lower() in str(usrcmd).lower():
                        kickstarter_tracker(str(usrcmd).lower())
                        return continue_conversation
                    if configuration['Raspberrypi_GPIO_Control']['GPIO_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['Pi_GPIO_control'][0]).lower() in str(usrcmd).lower():
                            Action(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['YouTube']['YouTube_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['YouTube_music_stream'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            if 'autoplay'.lower() in str(usrcmd).lower():
                                YouTube_Autoplay(str(usrcmd).lower())
                            else:
                                YouTube_No_Autoplay(str(usrcmd).lower())
                            return continue_conversation
                    if (custom_action_keyword['Keywords']['Stop_music'][0]).lower() in str(usrcmd).lower():
                        stop()
                    if configuration['Radio_stations']['Radio_Control']=='Enabled':
                        if 'radio'.lower() in str(usrcmd).lower():
                            radio(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['ESP']['ESP_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['ESP_control'][0]).lower() in str(usrcmd).lower():
                            ESP(str(usrcmd).lower())
                            return continue_conversation

                    if (custom_action_keyword['Keywords']['Parcel_tracking'][0]).lower() in str(usrcmd).lower():
                        track()
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['RSS'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['RSS'][1]).lower() in str(usrcmd).lower():
                        feed(str(usrcmd).lower())
                        return continue_conversation
                    if kodicontrol:
                        try:
                            if (custom_action_keyword['Keywords']['Kodi_actions'][0]).lower() in str(usrcmd).lower():
                                kodiactions(str(usrcmd).lower())
                        except requests.exceptions.ConnectionError:
                            say("Kodi TV box not online")
                        return continue_conversation
                    # Google Assistant now comes built in with chromecast control, so custom function has been commented
                    # if 'chromecast'.lower() in str(usrcmd).lower():
                    #     if 'play'.lower() in str(usrcmd).lower():
                    #         chromecast_play_video(str(usrcmd).lower())
                    #     else:
                    #         chromecast_control(usrcmd)
                    #     return continue_conversation
                    if (custom_action_keyword['Keywords']['Pause_resume'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Pause_resume'][1]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing():
                            if (custom_action_keyword['Keywords']['Pause_resume'][0]).lower() in str(usrcmd).lower():
                                vlcplayer.pause_vlc()
                        if checkvlcpaused():
                            if (custom_action_keyword['Keywords']['Pause_resume'][1]).lower() in str(usrcmd).lower():
                                vlcplayer.play_vlc()
                        elif vlcplayer.is_vlc_playing()==False and checkvlcpaused()==False:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Track_change']['Next'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Next'][1]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Next'][2]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing() or checkvlcpaused()==True:
                            vlcplayer.stop_vlc()
                            vlcplayer.change_media_next()
                        elif vlcplayer.is_vlc_playing()==False and checkvlcpaused()==False:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Track_change']['Previous'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Previous'][1]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Previous'][2]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing() or checkvlcpaused()==True:
                            vlcplayer.stop_vlc()
                            vlcplayer.change_media_previous()
                        elif vlcplayer.is_vlc_playing()==False and checkvlcpaused()==False:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['VLC_music_volume'][0]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing()==True or checkvlcpaused()==True:
                            if (custom_action_keyword['Dict']['Set']).lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Change'].lower() in str(usrcmd).lower():
                                if 'hundred'.lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Maximum'] in str(usrcmd).lower():
                                    settingvollevel=100
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                elif 'zero'.lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Minimum'] in str(usrcmd).lower():
                                    settingvollevel=0
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                else:
                                    for settingvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(usrcmd)):
                                        with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                            json.dump(settingvollevel, vol)
                                print('Setting volume to: '+str(settingvollevel))
                                vlcplayer.set_vlc_volume(int(settingvollevel))
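                            # Relative change: read back the last saved level (or query
                            # VLC directly), apply the spoken step (default 10) and
                            # clamp the result to 0-100 before saving and applying it.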
                            elif custom_action_keyword['Dict']['Increase'].lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Decrease'].lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                if os.path.isfile("{}/.mediavolume.json".format(USER_PATH)):
                                    try:
                                        with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                                            oldvollevel = json.load(vol)
                                            for oldvollevel in re.findall(r'\b\d+\b', str(oldvollevel)):
                                                oldvollevel=int(oldvollevel)
                                    except json.decoder.JSONDecodeError:
                                        oldvollevel=vlcplayer.get_vlc_volume()
                                        for oldvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(oldvollevel)):
                                            oldvollevel=int(oldvollevel)
                                else:
                                    oldvollevel=vlcplayer.get_vlc_volume()
                                    for oldvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(oldvollevel)):
                                        oldvollevel=int(oldvollevel)
                                if custom_action_keyword['Dict']['Increase'].lower() in str(usrcmd).lower():
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel=int(changevollevel)
                                    else:
                                        changevollevel=10
                                    newvollevel= oldvollevel+ changevollevel
                                    print(newvollevel)
                                    if int(newvollevel)>100:
                                        settingvollevel=100
                                    elif int(newvollevel)<0:
                                        settingvollevel=0
                                    else:
                                        settingvollevel=newvollevel
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    print('Setting volume to: '+str(settingvollevel))
                                    vlcplayer.set_vlc_volume(int(settingvollevel))
                                if custom_action_keyword['Dict']['Decrease'].lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel=int(changevollevel)
                                    else:
                                        changevollevel=10
                                    newvollevel= oldvollevel - changevollevel
                                    print(newvollevel)
                                    if int(newvollevel)>100:
                                        settingvollevel=100
                                    elif int(newvollevel)<0:
                                        settingvollevel=0
                                    else:
                                        settingvollevel=newvollevel
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    print('Setting volume to: '+str(settingvollevel))
                                    vlcplayer.set_vlc_volume(int(settingvollevel))
                            else:
                                say("Sorry I could not help you")
                        else:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Music_index_refresh'][0]).lower() in str(usrcmd).lower() and (custom_action_keyword['Keywords']['Music_index_refresh'][1]).lower() in str(usrcmd).lower():
                        refreshlists()
                        return continue_conversation
                    if configuration['Gmusicapi']['Gmusic_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['Google_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            gmusicselect(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['Spotify']['Spotify_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['Spotify_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            spotify_playlist_select(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['Gaana']['Gaana_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['Gaana_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            gaana_playlist_select(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['Deezer']['Deezer_Control']=='Enabled':
                        if (custom_action_keyword['Keywords']['Deezer_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            deezer_playlist_select(str(usrcmd).lower())
                            return continue_conversation
                    else:
                        continue
                if GPIOcontrol:
                    assistantindicator('speaking')

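            # Once the API starts streaming synthesized audio, stop recording,
            # switch the conversation stream to playback and write the chunks out.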
            if len(resp.audio_out.audio_data) > 0:
                if not self.conversation_stream.playing:
                    self.conversation_stream.stop_recording()
                    self.conversation_stream.start_playback()
                    logging.info('Playing assistant response.')
                self.conversation_stream.write(resp.audio_out.audio_data)
            if resp.dialog_state_out.conversation_state:
                conversation_state = resp.dialog_state_out.conversation_state
                logging.debug('Updating conversation state.')
                self.conversation_state = conversation_state
            if resp.dialog_state_out.volume_percentage != 0:
                volume_percentage = resp.dialog_state_out.volume_percentage
                logging.info('Setting volume to %s%%', volume_percentage)
                self.conversation_stream.volume_percentage = volume_percentage
            if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
                continue_conversation = True
                if GPIOcontrol:
                    assistantindicator('listening')
                logging.info('Expecting follow-on query from user.')
            elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
                if GPIOcontrol:
                    assistantindicator('off')
                if kodicontrol:
                    try:
                        with open('{}/.volume.json'.format(USER_PATH), 'r') as f:
                            vollevel = json.load(f)
                            kodi.Application.SetVolume({"volume": vollevel})
                    except requests.exceptions.ConnectionError:
                        print("Kodi TV box not online")

                if vlcplayer.is_vlc_playing():
                    with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                        oldvolume= json.load(vol)
                    vlcplayer.set_vlc_volume(int(oldvolume))
                continue_conversation = False
            if resp.device_action.device_request_json:
                device_request = json.loads(
                    resp.device_action.device_request_json
                )
                fs = self.device_handler(device_request)
                if fs:
                    device_actions_futures.extend(fs)
            if self.display and resp.screen_out.data:
                system_browser = browser_helpers.system_browser
                system_browser.display(resp.screen_out.data)

        if len(device_actions_futures):
            logging.info('Waiting for device executions to complete.')
            concurrent.futures.wait(device_actions_futures)

        logging.info('Finished playing assistant response.')
        self.conversation_stream.stop_playback()
        return continue_conversation
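assist() repeats the same media-ducking logic in several places: remember the current VLC volume in .mediavolume.json, drop to 15 while the assistant is listening, and restore it afterwards. Pulled out as helpers it reads more clearly (a sketch only; the helper names are illustrative, while USER_PATH and the vlcplayer calls are the module-level names used throughout these examples):

import json
import os

VOLUME_FILE = '{}/.mediavolume.json'.format(USER_PATH)

def duck_vlc_volume(duck_to=15):
    """Save the current VLC volume (if not saved already), then lower it."""
    if not vlcplayer.is_vlc_playing():
        return
    try:
        with open(VOLUME_FILE, 'r') as vol:
            json.load(vol)                      # a valid saved level already exists
    except (FileNotFoundError, json.decoder.JSONDecodeError):
        with open(VOLUME_FILE, 'w') as vol:
            json.dump(vlcplayer.get_vlc_volume(), vol)
    vlcplayer.set_vlc_volume(duck_to)

def restore_vlc_volume():
    """Put the VLC volume back to the level saved before the conversation."""
    if vlcplayer.is_vlc_playing() and os.path.isfile(VOLUME_FILE):
        with open(VOLUME_FILE, 'r') as vol:
            vlcplayer.set_vlc_volume(int(json.load(vol)))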
Example #6
    def process_event(self, event):
        """Pretty prints events.
        Prints all events that occur with two spaces between each new
        conversation and a single space between turns of a conversation.
        Args:
            event(event.Event): The current event to process.
        """
        print(event)
        if event.type == EventType.ON_START_FINISHED:
            self.can_start_conversation = True
            self.t2.start()
            if os.path.isfile("{}/.mute".format(USER_PATH)):
                assistantindicator('mute')
            if (configuration['Wakewords']['Ok_Google'] == 'Disabled'
                    or os.path.isfile("{}/.mute".format(USER_PATH))):
                self.assistant.set_mic_mute(True)
            if custom_wakeword:
                self.t1.start()

        if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
            self.can_start_conversation = False
            subprocess.Popen(
                ["aplay", "{}/sample-audio-files/Fb.wav".format(ROOT_PATH)],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            #Uncomment the following after starting the Kodi
            #status=mutevolstatus()
            #vollevel=status[1]
            #with open('{}/.volume.json'.format(USER_PATH), 'w') as f:
            #json.dump(vollevel, f)
            #kodi.Application.SetVolume({"volume": 0})
            assistantindicator('listening')
            if vlcplayer.is_vlc_playing():
                if os.path.isfile("{}/.mediavolume.json".format(USER_PATH)):
                    vlcplayer.set_vlc_volume(15)
                else:
                    currentvolume = vlcplayer.get_vlc_volume()
                    print(currentvolume)
                    with open('{}/.mediavolume.json'.format(USER_PATH),
                              'w') as vol:
                        json.dump(currentvolume, vol)
                    vlcplayer.set_vlc_volume(15)
            print()

        if (event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT
                or event.type == EventType.ON_NO_RESPONSE):
            self.can_start_conversation = True
            assistantindicator('off')
            #Uncomment the following after starting the Kodi
            #with open('{}/.volume.json'.format(USER_PATH), 'r') as f:
            #vollevel = json.load(f)
            #kodi.Application.SetVolume({"volume": vollevel})
            if (configuration['Wakewords']['Ok_Google'] == 'Disabled'
                    or os.path.isfile("{}/.mute".format(USER_PATH))):
                self.assistant.set_mic_mute(True)
            if os.path.isfile("{}/.mute".format(USER_PATH)):
                assistantindicator('mute')
            if vlcplayer.is_vlc_playing():
                with open('{}/.mediavolume.json'.format(USER_PATH),
                          'r') as vol:
                    oldvolume = json.load(vol)
                vlcplayer.set_vlc_volume(int(oldvolume))

        if (event.type == EventType.ON_RESPONDING_STARTED and event.args
                and not event.args['is_error_response']):
            assistantindicator('speaking')

        if event.type == EventType.ON_RESPONDING_FINISHED:
            assistantindicator('off')

        if event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
            assistantindicator('off')

        print(event)

        if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED
                and event.args and not event.args['with_follow_on_turn']):
            self.can_start_conversation = True
            assistantindicator('off')
            if (configuration['Wakewords']['Ok_Google'] == 'Disabled'
                    or os.path.isfile("{}/.mute".format(USER_PATH))):
                self.assistant.set_mic_mute(True)
            if os.path.isfile("{}/.mute".format(USER_PATH)):
                assistantindicator('mute')
            #Uncomment the following after starting the Kodi
            #with open('{}/.volume.json'.format(USER_PATH), 'r') as f:
            #vollevel = json.load(f)
            #kodi.Application.SetVolume({"volume": vollevel})
            if vlcplayer.is_vlc_playing():
                with open('{}/.mediavolume.json'.format(USER_PATH),
                          'r') as vol:
                    oldvolume = json.load(vol)
                vlcplayer.set_vlc_volume(int(oldvolume))
            print()

        if event.type == EventType.ON_DEVICE_ACTION:
            for command, params in event.actions:
                print('Do command', command, 'with params', str(params))

    def buttonsinglepress(self):
        if os.path.isfile("{}/.mute".format(USER_PATH)):
            os.system("sudo rm {}/.mute".format(USER_PATH))
            assistantindicator('unmute')
            self.assistant.set_mic_mute(False)
            # if custom_wakeword:
            #     self.t1.start()
            subprocess.Popen(["aplay", "{}/sample-audio-files/Mic-On.wav".format(ROOT_PATH)],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
            print("Turning on the microphone")
        else:
            open('{}/.mute'.format(USER_PATH), 'a').close()
            assistantindicator('mute')
            self.assistant.set_mic_mute(True)
            # if custom_wakeword:
            #     self.thread_end(t1)
            subprocess.Popen(["aplay", "{}/sample-audio-files/Mic-Off.wav".format(ROOT_PATH)],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
            print("Turning off the microphone")

    def buttondoublepress(self):
        print('Stopped')
        stop()

    def buttontriplepress(self):
        print("Create your own action for button triple press")
Example #8
    def process_event(self, event):
        """Pretty prints events.
        Prints all events that occur with two spaces between each new
        conversation and a single space between turns of a conversation.
        Args:
            event(event.Event): The current event to process.
        """
        print(event)
        print()
        if event.type == EventType.ON_MUTED_CHANGED:
            self.mutestatus = event.args["is_muted"]

        if event.type == EventType.ON_START_FINISHED:
            self.can_start_conversation = True
            if custom_wakeword:
                self.t1.start()

        if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
            subprocess.Popen(
                ["aplay", "{}/sample-audio-files/Fb.wav".format(ROOT_PATH)],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            if GPIOcontrol:
                assistantindicator('listening')
            self.can_start_conversation = False

        if (event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT
                or event.type == EventType.ON_NO_RESPONSE):
            if GPIOcontrol:
                assistantindicator('off')
            self.can_start_conversation = True

        if (event.type == EventType.ON_RESPONDING_STARTED and event.args
                and not event.args['is_error_response']):
            if GPIOcontrol:
                assistantindicator('speaking')
            print(event.args)

        if event.type == EventType.ON_RESPONDING_FINISHED:
            if GPIOcontrol:
                assistantindicator('off')
            print(event.args)

        if event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
            if GPIOcontrol:
                assistantindicator('off')
            usrcmd = event.args["text"]

        if event.type == EventType.ON_RENDER_RESPONSE:
            if GPIOcontrol:
                assistantindicator('off')
            print(event.args)

        if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED
                and event.args and not event.args['with_follow_on_turn']):
            self.can_start_conversation = True
            if GPIOcontrol:
                assistantindicator('off')

        if event.type == EventType.ON_DEVICE_ACTION:
            for command, params in event.actions:
                print('Do command', command, 'with params', str(params))
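These process_event handlers are driven by the google.assistant.library event loop. A minimal driver in the style of Google's hotword sample (a sketch; the credentials path, device model id and the MyAssistant class name are placeholders for the project's own wiring):

import json

import google.oauth2.credentials
from google.assistant.library import Assistant

CREDENTIALS = '/path/to/assistant_credentials.json'   # placeholder
DEVICE_MODEL_ID = 'my-device-model'                   # placeholder

def main():
    with open(CREDENTIALS, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f))
    with Assistant(credentials, DEVICE_MODEL_ID) as assistant:
        handler = MyAssistant(assistant)               # object exposing process_event, as above
        for event in assistant.start():                # blocks and yields events as they occur
            handler.process_event(event)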
Example #9
File: main.py  Project: vfciot/GassistPi
    def process_event(self,event):
        """Pretty prints events.
        Prints all events that occur with two spaces between each new
        conversation and a single space between turns of a conversation.
        Args:
            event(event.Event): The current event to process.
        """
        print(event)
        print()
        if event.type == EventType.ON_MUTED_CHANGED:
            self.mutestatus=event.args["is_muted"]

        if event.type == EventType.ON_START_FINISHED:
            self.can_start_conversation = True
            if GPIOcontrol:
                self.t2.start()
            if os.path.isfile("{}/.mute".format(USER_PATH)):
                assistantindicator('mute')
            if (configuration['Wakewords']['Ok_Google']=='Disabled' or os.path.isfile("{}/.mute".format(USER_PATH))):
                self.assistant.set_mic_mute(True)
            if custom_wakeword:
                self.t1.start()
            if configuration['MQTT']['MQTT_Control']=='Enabled':
                self.t3.start()
            if irreceiver!=None:
                self.t4.start()
            if configuration['ADAFRUIT_IO']['ADAFRUIT_IO_CONTROL']=='Enabled':
                self.t5.start()

        if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
            subprocess.Popen(["aplay", "{}/sample-audio-files/Fb.wav".format(ROOT_PATH)], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            self.can_start_conversation = False
            if kodicontrol:
                try:
                    status=mutevolstatus()
                    vollevel=status[1]
                    with open('{}/.volume.json'.format(USER_PATH), 'w') as f:
                           json.dump(vollevel, f)
                    kodi.Application.SetVolume({"volume": 0})
                    kodi.GUI.ShowNotification({"title": "", "message": ".....Listening.....", "image": "{}/GoogleAssistantImages/GoogleAssistantBarsTransparent.gif".format(ROOT_PATH)})
                except requests.exceptions.ConnectionError:
                    print("Kodi TV box not online")

            if GPIOcontrol:
                assistantindicator('listening')
            if vlcplayer.is_vlc_playing():
                if os.path.isfile("{}/.mediavolume.json".format(USER_PATH)):
                    try:
                        with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                            volume = json.load(vol)
                        vlcplayer.set_vlc_volume(15)
                    except json.decoder.JSONDecodeError:
                        currentvolume=vlcplayer.get_vlc_volume()
                        print(currentvolume)
                        with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                           json.dump(currentvolume, vol)
                        vlcplayer.set_vlc_volume(15)
                else:
                    currentvolume=vlcplayer.get_vlc_volume()
                    print(currentvolume)
                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                       json.dump(currentvolume, vol)
                    vlcplayer.set_vlc_volume(15)

        if (event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT or event.type == EventType.ON_NO_RESPONSE):
            self.can_start_conversation = True
            if GPIOcontrol:
                assistantindicator('off')
            if kodicontrol:
                try:
                    with open('{}/.volume.json'.format(USER_PATH), 'r') as f:
                           vollevel = json.load(f)
                           kodi.Application.SetVolume({"volume": vollevel})
                except requests.exceptions.ConnectionError:
                    print("Kodi TV box not online")

            if (configuration['Wakewords']['Ok_Google']=='Disabled' or os.path.isfile("{}/.mute".format(USER_PATH))):
                  self.assistant.set_mic_mute(True)
            if os.path.isfile("{}/.mute".format(USER_PATH)):
                if GPIOcontrol:
                    assistantindicator('mute')
            if vlcplayer.is_vlc_playing():
                with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                    oldvolume = json.load(vol)
                vlcplayer.set_vlc_volume(int(oldvolume))

        if (event.type == EventType.ON_RESPONDING_STARTED and event.args and not event.args['is_error_response']):
            if GPIOcontrol:
                assistantindicator('speaking')

        if event.type == EventType.ON_RESPONDING_FINISHED:
            if GPIOcontrol:
                assistantindicator('off')

        if event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
            if GPIOcontrol:
                assistantindicator('off')
            if self.singleresposne:
                self.assistant.stop_conversation()
                self.singledetectedresponse= event.args["text"]
            else:
                usrcmd=event.args["text"]
                self.custom_command(usrcmd)
                if kodicontrol:
                    try:
                        kodi.GUI.ShowNotification({"title": "", "message": event.args["text"], "image": "{}/GoogleAssistantImages/GoogleAssistantDotsTransparent.gif".format(ROOT_PATH)})
                    except requests.exceptions.ConnectionError:
                        print("Kodi TV box not online")

        if event.type == EventType.ON_RENDER_RESPONSE:
            if GPIOcontrol:
                assistantindicator('off')
            if kodicontrol:
                try:
                    kodi.GUI.ShowNotification({"title": "", "message": event.args["text"], "image": "{}/GoogleAssistantImages/GoogleAssistantTransparent.gif".format(ROOT_PATH),"displaytime": 20000})
                except requests.exceptions.ConnectionError:
                    print("Kodi TV box not online")

        if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED and
                event.args and not event.args['with_follow_on_turn']):
            self.can_start_conversation = True
            if GPIOcontrol:
                assistantindicator('off')
            if (configuration['Wakewords']['Ok_Google']=='Disabled' or os.path.isfile("{}/.mute".format(USER_PATH))):
                self.assistant.set_mic_mute(True)
            if os.path.isfile("{}/.mute".format(USER_PATH)):
                if GPIOcontrol:
                    assistantindicator('mute')
            if kodicontrol:
                try:
                    with open('{}/.volume.json'.format(USER_PATH), 'r') as f:
                        vollevel = json.load(f)
                        kodi.Application.SetVolume({"volume": vollevel})
                except requests.exceptions.ConnectionError:
                    print("Kodi TV box not online")

            if vlcplayer.is_vlc_playing():
                with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                    oldvolume= json.load(vol)
                vlcplayer.set_vlc_volume(int(oldvolume))

        if event.type == EventType.ON_DEVICE_ACTION:
            for command, params in event.actions:
                print('Do command', command, 'with params', str(params))
Example #10
    def process_event(self, event):
        """Pretty prints events.
        Prints all events that occur with two spaces between each new
        conversation and a single space between turns of a conversation.
        Args:
            event(event.Event): The current event to process.
        """
        print(event)
        if event.type == EventType.ON_START_FINISHED:
            self.can_start_conversation = True
            self.t2.start()
            if os.path.isfile("{}/.mute".format(USER_PATH)):
                assistantindicator('mute')
            if (configuration['Wakewords']['Ok_Google'] == 'Disabled'
                    or os.path.isfile("{}/.mute".format(USER_PATH))):
                self.assistant.set_mic_mute(True)
            if custom_wakeword:
                self.t1.start()

        if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
            self.can_start_conversation = False
            subprocess.Popen(
                ["aplay", "{}/sample-audio-files/Fb.wav".format(ROOT_PATH)],
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)
            assistantindicator('listening')
            print()

        if (event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT
                or event.type == EventType.ON_NO_RESPONSE):
            self.can_start_conversation = True
            assistantindicator('off')
            if (configuration['Wakewords']['Ok_Google'] == 'Disabled'
                    or os.path.isfile("{}/.mute".format(USER_PATH))):
                self.assistant.set_mic_mute(True)
            if os.path.isfile("{}/.mute".format(USER_PATH)):
                assistantindicator('mute')

        if (event.type == EventType.ON_RESPONDING_STARTED and event.args
                and not event.args['is_error_response']):
            assistantindicator('speaking')

        if event.type == EventType.ON_RESPONDING_FINISHED:
            assistantindicator('off')

        if event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
            assistantindicator('off')

        if event.type == EventType.ON_ASSISTANT_ERROR:
            print('here is an indication')

        print(event)

        if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED
                and event.args and not event.args['with_follow_on_turn']):
            self.can_start_conversation = True
            assistantindicator('off')
            if (configuration['Wakewords']['Ok_Google'] == 'Disabled'
                    or os.path.isfile("{}/.mute".format(USER_PATH))):
                self.assistant.set_mic_mute(True)
            if os.path.isfile("{}/.mute".format(USER_PATH)):
                assistantindicator('mute')
            print()

        if event.type == EventType.ON_DEVICE_ACTION:
            for command, params in event.actions:
                print('Do command', command, 'with params', str(params))
                if command == 'com.example.commands.AwardPoints':
                    award(params["house"], params["number"])
    def process_event(self, event):
        # Prettyprints events
        # Args: event(event.Event): The current event to process
        print(event)
        if event.type == EventType.ON_START_FINISHED:
            self.can_start_conversation = True
            if GPIOcontrol:
                self.t2.start()
            if os.path.isfile("/.mute"):
                assistantindicator('mute')
            if (configuration['Wakewords']['Ok_Google'] == 'Disabled'
                    or os.path.isfile("/.mute")):
                self.assistant.set_mic_mute(True)
            if custom_wakeword:
                self.t1.start()

        if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
            self.can_start_conversation = False
            subprocess.Popen(["aplay", "/resources/sample-audio-files/Fb.wav"],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
            if GPIOcontrol:
                assistantindicator('listening')
            print()
        if (event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT
                or event.type == EventType.ON_NO_RESPONSE):
            self.can_start_conversation = True
            if GPIOcontrol:
                assistantindicator('off')
            if (configuration['Wakewords']['Ok_Google'] == 'Disabled'
                    or os.path.isfile("/.mute")):
                self.assistant.set_mic_mute(True)
            if os.path.isfile("/.mute"):
                if GPIOcontrol:
                    assistantindicator('mute')
        if (event.type == EventType.ON_RESPONDING_STARTED and event.args
                and not event.args['is_error_response']):
            if GPIOcontrol:
                assistantindicator('speaking')
        if event.type == EventType.ON_RESPONDING_FINISHED:
            if GPIOcontrol:
                assistantindicator('off')
        if event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED:
            if GPIOcontrol:
                assistantindicator('off')
        print(event)
        if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED
                and event.args and not event.args['with_follow_on_turn']):
            self.can_start_conversation = True
            if GPIOcontrol:
                assistantindicator('off')
            if (configuration['Wakewords']['Ok_Google'] == 'Disabled'
                    or os.path.isfile("/.mute")):
                self.assistant.set_mic_mute(True)
            if os.path.isfile("/.mute"):
                if GPIOcontrol:
                    assistantindicator('mute')
            print()
        if event.type == EventType.ON_DEVICE_ACTION:
            for command, params in event.actions:
                print('Do command', command, 'with params', str(params))
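The ON_DEVICE_ACTION branches above match command names one by one; with several custom device actions a small dispatch table keeps the handler flat. A sketch, reusing the AwardPoints command and award() handler from example #10 (any further entries would be your own commands):

# Map each custom device-action command name to a handler taking the params dict.
DEVICE_ACTION_HANDLERS = {
    'com.example.commands.AwardPoints':
        lambda params: award(params["house"], params["number"]),
}

def handle_device_action(event):
    """Dispatch every (command, params) pair carried by an ON_DEVICE_ACTION event."""
    for command, params in event.actions:
        print('Do command', command, 'with params', str(params))
        handler = DEVICE_ACTION_HANDLERS.get(command)
        if handler:
            handler(params)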