def __init__(self):
    """Initialize the bot in its enabled, idle base state."""
    self.isEnabled = True
    # Action(0, callback): 0 is presumably a delay/interval before the
    # callback fires — TODO confirm against the Action class.
    self.openInven = Action(0, self.openInventory)
    self.feedBeer = Action(0, self.feedBeerToWorkers)
    # No frame captured yet; filled in by the capture loop.
    self.last_frame = None
    # State machine starts at the base state.
    self.state = State.BASE
def work():
    """Build the periodic action pool and run the scheduler loop forever.

    Sets up self-update, filesystem snapshots, log upload, change checks,
    optional DB checks, backups (with S3 access bootstrap when missing)
    and crash reporting, then sleeps until each action is due and runs it.
    """
    if not client_status.is_actual_version():
        check_update()
    actions.clear()
    if client_status.last_fs_upload:
        # Schedule the next FS snapshot relative to the last upload,
        # clamped to "now" if it is already overdue.
        # NOTE(review): datetime.utcnow() is naive; assumes
        # last_fs_upload is naive UTC as well — confirm.
        nxt = client_status.last_fs_upload + timedelta(seconds=FS_SET_PERIOD)
        till_next = max(0, total_seconds(nxt - datetime.utcnow()))
        del nxt
    else:
        till_next = 0
    actions.add(Action(24 * HOUR, check_system_info, start=2 * MIN))
    actions.add(StepAction(FS_SET_PERIOD, update_fs, start=till_next))
    actions.extend([
        Action(LOG_UPLOAD_PERIOD, upload_log),
        Action(CHANGES_CHECK_PERIOD, check_changes)
    ])
    if config.database or client_status.database:
        actions.add(Action(DB_CHECK_PERIOD, check_db, start=7 * MIN))
    if client_status.amazon:
        actions.add(Action(backup.next_date, make_backup))
    else:
        # No S3 credentials yet: obtain them once, then chain the backup.
        actions.add(
            OneTimeAction(
                5 * MIN,
                get_s3_access,
                followers=[ActionSeed(backup.next_date, make_backup)]))
    if os.path.exists(CRASH_PATH) and os.stat(CRASH_PATH).st_size > 0:
        actions.add(OneTimeAction(10 * MIN, report_crash, start=0))
    try:
        log.info('Action pool is:\n%s' % '\n'.join([str(a) for a in actions]))
    except TypeError:
        # Diagnostic path for a historical bug where the pool was not
        # iterable; reports as much detail about `actions` as possible.
        msg = ['Failed to iterate actions.']
        if isinstance(actions, ActionPool):
            msg.append('ActionPool object has %s__iter__ method.')
            msg[-1] %= '' if hasattr(actions, '__iter__') else 'no '
            if hasattr(actions, '_actions'):
                if isinstance(actions._actions, list):
                    if actions._actions:
                        f = ', '.join((a.__name__ for a in actions._funcs()))
                        msg.append('There are functions: %s.' % f)
                    else:
                        msg.append('_actions is empty list.')
                else:
                    msg.append('_actions is %s.' % type(actions._actions))
            else:
                msg.append('There is no _actions in actions.')
        else:
            msg.append('Type of actions is %s' % type(actions))
        log.error(' '.join(msg))
    log.info('Start main loop')
    # Scheduler loop: block until the nearest action is due, then run it.
    while True:
        action = actions.next()
        log.info('Next action is %s' % action)
        time.sleep(action.time_left())
        action()
def path_find(self, x, y):
    """Greedy, short-sighted step toward (x, y).

    No real path search: try to close the vertical gap first, then the
    horizontal one, stepping only onto unblocked tiles. When boxed in,
    fall back to ten turns of random movement to escape local traps.
    """
    if (self.x, self.y) == (x, y):
        return Action(ActionType.STOP)

    heading = None
    # Vertical axis first.
    if self.y > y and not self.blocked(self.x, self.y - 1):
        heading = Direction.NORTH
    elif self.y < y and not self.blocked(self.x, self.y + 1):
        heading = Direction.SOUTH
    # Horizontal axis only when the vertical move was unavailable.
    if heading is None:
        if self.x > x and not self.blocked(self.x - 1, self.y):
            heading = Direction.WEST
        elif self.x < x and not self.blocked(self.x + 1, self.y):
            heading = Direction.EAST

    if heading is not None:
        return Action(ActionType.MOVE, heading)

    # Stuck: wander randomly for a while.
    self.random_steps = 10
    self.random_action = RandomPolicy().action(self.team_id, self.observation)
    return self.random_action
def handle_keys_choose_option(key):
    """Translate a key press on the option-selection screen into an Action.

    Letter keys select the option at that alphabetical index; TAB or
    ESCAPE closes the inventory. Anything else yields None.
    """
    index = key - 97  # 97 == ord('a'); options are labelled a, b, c, ...
    if index >= 0:
        return Action(ActionType.CHOOSE_OPTION, option_index=index)
    if key in (tcod.event.K_TAB, tcod.event.K_ESCAPE):
        return Action(ActionType.TOGGLE_INVENTORY)
    return None
def handle_keys_main_menu(key):
    """Translate a main-menu key press into an Action (None if unmapped).

    a = new game, b = load game, c or ESC = leave the menu.
    """
    if key == tcod.event.K_a:
        return Action(ActionType.NEW_GAME)
    if key == tcod.event.K_b:
        return Action(ActionType.LOAD_GAME)
    if key in (tcod.event.K_c, tcod.event.K_ESCAPE):
        return Action(ActionType.ESCAPE)
    return None
def handle_keys_targeting(key):
    """Translate a key press in targeting mode into an Action.

    Movement keys move the cursor; ESC cancels; ENTER/SPACE confirms.
    Returns None for any other key.
    """
    move = handle_movement_keys(key)
    if move is not None:
        return move
    if key == tcod.event.K_ESCAPE:
        return Action(ActionType.ESCAPE)
    if key in (tcod.event.K_KP_ENTER, tcod.event.K_SPACE):
        return Action(ActionType.EXECUTE)
    return None
def handle_keys_player_turn(key) -> [Action, None]:
    """Translate a key press during the player's turn into an Action.

    Command keys map directly to action types; anything else is handed
    to the movement-key handler (which may itself return None).
    """
    commands = {
        tcod.event.K_g: ActionType.GRAB,
        tcod.event.K_a: ActionType.TOGGLE_INVENTORY,
        tcod.event.K_d: ActionType.DROP_INVENTORY_ITEM,
        tcod.event.K_PERIOD: ActionType.WAIT,
        tcod.event.K_SPACE: ActionType.INTERACT,
        tcod.event.K_e: ActionType.LOOK,
        tcod.event.K_ESCAPE: ActionType.ESCAPE,
        tcod.event.K_r: ActionType.SWAP_APPENDAGE,
    }
    if key in commands:
        return Action(commands[key])
    # Not a command key: fall through to movement handling.
    return handle_movement_keys(key)
def __init__(self, rule_context, rule_state, data, nested_rule):
    """Build a rule from parsed configuration.

    A leaf rule (no nested rule) owns the parsed actions; a wrapper rule
    owns no actions and instead parents the nested rule. An "override"
    entry in the config controls whether firing toggles an override
    state off (single-element override) or on.

    @type rule_state: RuleState
    @type rule_context: RuleContext
    @type data: matplotlib.pyparsing.ParseResults
    @type nested_rule: Rule
    @type parent_rule: Rule
    """
    self.nested_rule = nested_rule
    self.parent = None
    if self.nested_rule is None:
        # Leaf rule: one Action per parsed action; the second argument
        # marks whether actions fire unconditionally.
        self.actions = [
            Action(action, 'always_fire_actions' in data)
            for action in data['actions']
        ]
    else:
        # Wrapper rule: no own actions, just link the nested rule back.
        self.actions = []
        self.nested_rule.set_parent(self)
    self.override = "override" in data
    self.overrideOff = False
    # A single-element "override" entry means "turn override OFF";
    # any other override entry means "turn override ON".
    if self.override and len(data["override"]) == 1:
        warning = "rule '%s' has override configuration and will turn a possible override state off"
        self.overrideOff = True
        __logger__.info(warning % rule_state.rule_name)
    elif self.override:
        __logger__.info(
            "rule '%s' has override configuration and will turn a possible override state on"
            % rule_state.rule_name)
    self.rule_state = rule_state
    self.rule_context = rule_context
    self.always_fire = False
def main():
    """Start the Google Assistant and dispatch recognized voice commands.

    Loads OAuth2 credentials, plays a startup sound, then routes each
    assistant event to the custom handlers (GPIO Action, YouTube, stop).
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    # NOTE(review): expanduser() on an absolute path is a no-op here;
    # the default is effectively /home/pi/.config/... verbatim.
    parser.add_argument('--credentials', type=existing_file,
                        metavar='OAUTH2_CREDENTIALS_FILE',
                        default=os.path.join(
                            os.path.expanduser('/home/pi/.config'),
                            'google-oauthlib-tool',
                            'credentials.json'
                        ),
                        help='Path to store and read OAuth2 credentials')
    args = parser.parse_args()
    with open(args.credentials, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))
    with Assistant(credentials) as assistant:
        # Audible cue that the assistant is up.
        subprocess.Popen(
            ["aplay", "/home/pi/GassistPi/sample-audio-files/Startup.wav"],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        for event in assistant.start():
            process_event(event)
            # Keyword routing on the raw event text; stop_conversation()
            # mutes the assistant's own reply before running the handler.
            usrcmd = event.args
            if 'trigger'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                Action(str(usrcmd).lower())
            if 'play'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                YouTube(str(usrcmd).lower())
            if 'stop'.lower() in str(usrcmd).lower():
                stop()
def __init__(self, **enemy):
    """Populate an enemy from a stat-block mapping (D&D-style fields).

    All keyword arguments are required except savingThrows and
    specialTraits, which are optional.
    """
    self.name = enemy['name']
    self.size = enemy['size']
    self.type = enemy['type']
    self.alignment = enemy['alignment']
    self.AC = enemy['ac']
    self.armorType = enemy['armorType']
    self.hp = enemy['hp']
    # Ability scores.
    self.STR = enemy['STR']
    self.DEX = enemy['DEX']
    self.CON = enemy['CON']
    self.INT = enemy['INT']
    self.WIS = enemy['WIS']
    self.CHA = enemy['CHA']
    self.initiative = enemy['initiative']
    # Freshly created enemies start alive and unhurt.
    self.alive = True
    self.currentstatus = 'Healthy'
    self.actions = [Action(**i) for i in enemy['actions']]
    self.max = enemy['maxhp']
    self.enemyId = enemy['enemyId']
    self.combatId = enemy['combatId']
    self.savingThrows = enemy.get('savingThrows', None)
    self.challenge = enemy['challenge']
    self.languages = enemy['languages']
    self.senses = enemy['senses']
    self.bloodied = enemy['bloodied']
    self.damage_vulnerabilities = enemy['damage_vulnerabilities']
    self.damage_resistances = enemy['damage_resistances']
    self.damage_immunities = enemy['damage_immunities']
    self.condition_immunities = enemy['condition_immunities']
    if enemy.get('specialTraits', None):
        self.specialTraits = [
            SpecialTraits(**i) for i in enemy['specialTraits']
        ]
        # NOTE(review): actionsText is only set when specialTraits is
        # present — confirm this coupling is intended; callers reading
        # actionsText on trait-less enemies would hit AttributeError.
        self.actionsText = [ActionsText(**i) for i in enemy['actionsText']]
def set_context(received_context):
    """Store the shared context and register the screenshot action on it."""
    global context
    context = received_context
    screenshot = Action(
        "screenshot",
        take_screenshot,
        menu_name="Take screenshot",
        description="Takes a screenshot from previous app",
    )
    context.register_action(screenshot)
def handle_keys(key) -> [Action, None]:
    """Translate an arrow-key (or ESC) press into an Action, else None."""
    arrow_deltas = {
        tcod.event.K_UP: (0, -1),
        tcod.event.K_DOWN: (0, 1),
        tcod.event.K_LEFT: (-1, 0),
        tcod.event.K_RIGHT: (1, 0),
    }
    if key in arrow_deltas:
        dx, dy = arrow_deltas[key]
        return Action(ActionType.MOVEMENT, dx=dx, dy=dy)
    if key == tcod.event.K_ESCAPE:
        return Action(ActionType.ESCAPE)
    # No valid key was pressed
    return None
def add_action(self, textMessage):
    """Parse the verb out of a text message and queue a matching Action.

    Only the "fala" (speak) verb currently has a runner; other verbs are
    queued with runner=None.
    """
    verb = extract_verb(textMessage.lower())
    print(verb)
    remainder = textMessage.replace(verb, '')
    runner = TalkRunner() if verb == "fala" else None
    self.actions.append(Action(runner, remainder))
def new_message():
    """Webhook endpoint: build and run an Action from an incoming update.

    Parses the request body (presumably a Telegram update — confirm
    against the caller), evaluates the Action, and answers with a plain
    status string. A malformed JSON body still propagates, as before.
    """
    message_complete_data = json.loads(request.data)
    print(message_complete_data)
    try:
        action = Action(bot_name, telegram_uri, message_complete_data)
        action.evaluate()
        return 'Action has been executed'
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; message typo fixed.
        return 'Error executing action'
def main():
    """Start the Google Assistant (SDK device model flow) and dispatch
    recognized voice commands to the GassistPi custom handlers.

    Requires --device_model_id; optionally registers a device instance
    when --project_id is given.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--credentials', type=existing_file,
                        metavar='OAUTH2_CREDENTIALS_FILE',
                        default=os.path.join(
                            os.path.expanduser('~/.config'),
                            'google-oauthlib-tool',
                            'credentials.json'
                        ),
                        help='Path to store and read OAuth2 credentials')
    parser.add_argument('--device_model_id', type=str,
                        metavar='DEVICE_MODEL_ID', required=True,
                        help='The device model ID registered with Google.')
    parser.add_argument('--project_id', type=str,
                        metavar='PROJECT_ID', required=False,
                        help='The project ID used to register device ' +
                        'instances.')
    args = parser.parse_args()
    with open(args.credentials, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))
    with Assistant(credentials, args.device_model_id) as assistant:
        # Audible cue that the assistant is up.
        subprocess.Popen(
            ["aplay", "/home/pi/GassistPi/sample-audio-files/Startup.wav"],
            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        events = assistant.start()
        print('device_model_id:', args.device_model_id + '\n' +
              'device_id:', assistant.device_id + '\n')
        if args.project_id:
            register_device(args.project_id, credentials,
                            args.device_model_id, assistant.device_id)
        for event in events:
            process_event(event, assistant.device_id)
            # Keyword routing on the raw event text; stop_conversation()
            # mutes the assistant's own reply before running the handler.
            usrcmd = event.args
            if 'trigger'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                Action(str(usrcmd).lower())
            if 'stream'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                YouTube(str(usrcmd).lower())
            if 'stop'.lower() in str(usrcmd).lower():
                stop()
            if 'tune into'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                radio(str(usrcmd).lower())
            if 'wireless'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                ESP(str(usrcmd).lower())
            if 'parcel'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                track()
            if 'news'.lower() in str(usrcmd).lower() or \
                    'feed'.lower() in str(usrcmd).lower() or \
                    'quote'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                feed(str(usrcmd).lower())
            if 'on kodi'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                kodiactions(str(usrcmd).lower())
def prepend_actions(self, menu):
    """Prepends find specific actions to menu.

    Builds the five checkable search-option actions (case, results,
    direction, whole-word, regexp), initializes each checkbox from the
    corresponding attribute, and inserts them before the menu's first
    existing action.
    """
    specs = (
        ("Match &case", self.on_toggle_case,
         'Match case in search', self.case),
        ("Code and results", self.on_toggle_results,
         'Search also considers string representations of result objects.',
         self.results),
        ("Search &backward", self.on_toggle_up,
         'Search fore-/backwards', self.up),
        ("&Whole words", self.on_toggle_word,
         'Whole word search', self.word),
        ("Regular expression", self.on_toggle_regexp,
         'Regular expression search', self.regexp),
    )
    actions = []
    for label, handler, tip, checked in specs:
        act = Action(self, label, handler, checkable=True, statustip=tip)
        act.setChecked(checked)
        actions.append(act)
    menu.insertActions(menu.actions()[0], actions)
def get_stacked_action(self, stacked_state):
    """Given the current (stacked) game state, pick the model's action.

    Epsilon-greedy: with probability epsilon return a random action;
    otherwise return the action with the highest predicted Q-value.
    """
    # Exploration branch.
    if random.random() < self.epsilon:
        return get_random_action()
    # Exploitation branch: argmax over the network's Q-values.
    state_tensor = torch.FloatTensor(stacked_state).to(self.device)
    q_values = self.net(state_tensor)
    best_idx = torch.argmax(q_values).item()
    return Action(best_idx)
def play_guard(self, player: int, target: int, guess: Cards) -> Action:
    """Validate and build a GUARD play.

    Raises:
        IllegalActionException: when no target is given, the player
            targets themself, no guess is given, or the guess is GUARD
            (guessing a guard is illegal in Love Letter).
    """
    print('player: {0}, target: {1}, guess: {2}'.format(
        player, target, guess))
    if target == -1:
        # Typo fixed: "myst" -> "must".
        raise IllegalActionException('Target must be specified')
    if player == target:
        raise IllegalActionException('Player cannot target themself')
    if guess is None:
        # `is None` instead of `== None` (identity, not equality).
        raise IllegalActionException('Player must guess a card')
    if guess == Cards.GUARD:
        raise IllegalActionException('Player cannot guess a guard')
    return Action(Cards.GUARD, player, target, guess)
def __init__(self):
    """Initialize the fishing bot: actions, vision helpers and settings."""
    # Bot starts disabled until explicitly enabled.
    self.isEnabled = False
    # Action(0, callback): 0 is presumably a delay/interval before the
    # callback fires — TODO confirm against the Action class.
    self.reelInFishAction = Action(0, self.reelInFish)
    self.castLineAction = Action(0, self.castLine)
    self.classifier = Classifier('./ml/')
    # Template image for the space-bar prompt, loaded grayscale (flag 0).
    self.comp_img = cv.imread('resources/fishing_space_bar.jpg', 0)
    self.window = GameWindow("BLACK DESERT")
    self.colorGrabber = ColorGrabber()
    # Debug/output toggles.
    self.outputColors = False
    self.outputKeys = False
    # Rod-swap settings: disabled by default; rodCharSeq/rodSwapThresh
    # control which rod keys to press and the durability threshold.
    self.swapRods = False
    self.rodCharSeq = ''
    self.rodSwapThresh = 5
    # Loot policy: by default keep everything.
    self.collectAll = True
    self.discardBlue = False
    self.discardGreen = False
    self.collectUnknowns = False
    self.reset()
def __init__(self, type_):
    """Load the stats for enemy *type_* from data/enemy_data.json.

    Raises:
        Exception: when *type_* has no entry in the data file.
    """
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(os.path.join("data/enemy_data.json"), "r") as f:
        data = json.load(f)
    # Membership test on the dict directly; `.keys()` was redundant.
    if type_ not in data:
        raise Exception("Sorry, no enemies available")
    self.enemy = data[type_]
    self.type = type_
    # Damage is a list of two ints: damage will RANGE from the first to
    # the second. EX. CSE220 dmg ranges from 5 to 12.
    self.damage = self.enemy["damage"]
    self.hp = 100
    self.actions = [Action(action) for action in self.enemy["actions"]]
def get_action(self, state):
    """Used when playing the actual game.

    Epsilon-greedy with the terminal epsilon: either a random action, or
    the argmax of the network's Q-values over the frame-stacked state.
    """
    if random.random() < self.end_epsilon:
        return get_random_action()
    # Push the preprocessed frame into the rolling buffer, then stack
    # the whole buffer into a single batch dimension.
    frame = self.preprocess_state(state)
    self.state_buffer.append(frame)
    batch = np.stack([self.state_buffer])
    q_vals = self.net(
        torch.FloatTensor(batch).to(self.device)).to(self.device)
    best_idx = torch.argmax(q_vals).item()
    return Action(best_idx)
def action(self, team_id, observation):
    """Chose an action at random, with a bias towards moving."""
    kinds = [member for member in ActionType]
    all_directions = [member for member in Direction]
    # Heavily weighted toward MOVE (assumes ActionType order puts MOVE
    # in the middle slot — weights are [10, 100, 1]).
    kind = random.choices(kinds, weights=[10, 100, 1])[0]
    if kind == ActionType.MOVE:
        direction = random.choices(all_directions)[0]
    else:
        direction = None
    return Action(kind, direction)
def action(self, team_id, observation):
    """Greedy policy step: haul resources home, otherwise go harvest.

    While a random-escape manoeuvre is in progress, keep repeating the
    stored random action instead of re-planning.
    """
    if self.random_steps > 0:
        # super smart way of avoiding to get stuck ;)
        self.random_steps -= 1
        return self.random_action

    self.parse(observation, team_id)

    if self.resources > 0 or self.health <= 2:
        # Carrying loot (or nearly dead): head back to base.
        _, goal_x, goal_y = self.find_item("BASE")
    else:
        # Otherwise look for something to harvest.
        resource, goal_x, goal_y = self.find_item("RESOURCE")
        if resource is None:
            return Action(ActionType.STOP)
    return self.path_find(goal_x, goal_y)
def handle_movement_keys(key):
    """Translate a movement key into a MOVEMENT action, or None.

    NOTE(review): the u/j/k/h mapping is not standard vi-keys (k maps to
    +x, u to -y) — presumably intentional for this game's layout; confirm.
    """
    deltas = {
        tcod.event.K_u: (0, -1),
        tcod.event.K_j: (0, 1),
        tcod.event.K_k: (1, 0),
        tcod.event.K_h: (-1, 0),
        tcod.event.K_y: (-1, -1),
        tcod.event.K_i: (1, -1),
        tcod.event.K_n: (-1, 1),
        tcod.event.K_m: (1, 1),
    }
    if key in deltas:
        dx, dy = deltas[key]
        return Action(ActionType.MOVEMENT, dx=dx, dy=dy)
    return None
def create_new_job(self, created_actions: List[ActionToCreate]):
    """Creates a job that will be performed on the songs.

    Wraps each ActionToCreate in an Action bound to this job, wires its
    progress signals (and optional sub-action signals) to the UI update
    slots, and registers the assembled job.
    """
    created_job: List[Action] = []
    for index, created_action in enumerate(created_actions):
        action = Action(created_action, self.job_index, index)
        # Top-level progress reporting.
        action.signals.action_started.connect(self.set_action_length)
        action.signals.action_progress\
            .connect(self.update_action_progress)
        action.signals.action_finished\
            .connect(self.update_action_complete)
        # Some actions expose finer-grained sub-action progress.
        if action.subaction_signals is not None:
            action.subaction_signals.subaction_started\
                .connect(self.set_subaction_length)
            action.subaction_signals.subaction_progress\
                .connect(self.update_subaction_progress)
            action.subaction_signals.subaction_finished\
                .connect(self.update_subaction_complete)
        created_job.append(action)
    self.add_to_current_jobs(created_job)
def assist(self):
    """Send a voice request to the Assistant and playback the response.

    Returns: True if conversation should continue.

    Flow: play an earcon, record the request, stream it to the gRPC
    Assistant API, intercept high-stability transcripts to run GassistPi
    custom commands (smart-home, media, trackers), otherwise play back
    the assistant's audio reply.
    """
    continue_conversation = False
    device_actions_futures = []
    # Earcon: feedback sound before recording starts.
    subprocess.Popen(
        ["aplay", "{}/sample-audio-files/Fb.wav".format(ROOT_PATH)],
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    self.conversation_stream.start_recording()
    if kodicontrol:
        # Save Kodi's current volume and mute it while listening.
        try:
            status = mutevolstatus()
            vollevel = status[1]
            with open('{}/.volume.json'.format(USER_PATH), 'w') as f:
                json.dump(vollevel, f)
            kodi.Application.SetVolume({"volume": 0})
        except requests.exceptions.ConnectionError:
            print("Kodi TV box not online")
    if GPIOcontrol:
        assistantindicator('listening')
    if vlcplayer.is_vlc_playing():
        # Duck VLC to 15%, persisting the previous volume for restore.
        if os.path.isfile("{}/.mediavolume.json".format(USER_PATH)):
            try:
                with open('{}/.mediavolume.json'.format(USER_PATH),
                          'r') as vol:
                    volume = json.load(vol)
                vlcplayer.set_vlc_volume(15)
            except json.decoder.JSONDecodeError:
                # Saved volume unreadable: re-save the live volume.
                currentvolume = vlcplayer.get_vlc_volume()
                print(currentvolume)
                with open('{}/.mediavolume.json'.format(USER_PATH),
                          'w') as vol:
                    json.dump(currentvolume, vol)
                vlcplayer.set_vlc_volume(15)
        else:
            currentvolume = vlcplayer.get_vlc_volume()
            print(currentvolume)
            with open('{}/.mediavolume.json'.format(USER_PATH),
                      'w') as vol:
                json.dump(currentvolume, vol)
            vlcplayer.set_vlc_volume(15)
    logging.info('Recording audio request.')

    def iter_log_assist_requests():
        for c in self.gen_assist_requests():
            assistant_helpers.log_assist_request_without_audio(c)
            yield c
        logging.debug('Reached end of AssistRequest iteration.')

    # This generator yields AssistResponse proto messages
    # received from the gRPC Google Assistant API.
    for resp in self.assistant.Assist(iter_log_assist_requests(),
                                      self.deadline):
        assistant_helpers.log_assist_response_without_audio(resp)
        if resp.event_type == END_OF_UTTERANCE:
            logging.info('End of audio request detected.')
            logging.info('Stopping recording.')
            self.conversation_stream.stop_recording()
        if resp.speech_results:
            logging.info('Transcript of user request: "%s".',
                         ' '.join(r.transcript
                                  for r in resp.speech_results))
            for r in resp.speech_results:
                usercommand = str(r)
                # Only act on final (stability 1.0) transcripts; strip
                # the proto text down to the bare spoken phrase.
                if "stability: 1.0" in usercommand.lower():
                    usrcmd = str(usercommand).lower()
                    idx = usrcmd.find('stability')
                    usrcmd = usrcmd[:idx]
                    usrcmd = usrcmd.replace("stability", "", 1)
                    usrcmd = usrcmd.strip()
                    usrcmd = usrcmd.replace('transcript: "', '', 1)
                    usrcmd = usrcmd.replace('"', '', 1)
                    usrcmd = usrcmd.strip()
                    print(str(usrcmd))
                    # ---- DIY Hue lights ----
                    if configuration['DIYHUE']['DIYHUE_Control'] == 'Enabled':
                        if os.path.isfile('/opt/hue-emulator/config.json'):
                            with open('/opt/hue-emulator/config.json',
                                      'r') as config:
                                hueconfig = json.load(config)
                            for i in range(1, len(hueconfig['lights']) + 1):
                                try:
                                    if str(hueconfig['lights'][str(i)]['name']).lower() in str(usrcmd).lower():
                                        hue_control(str(usrcmd).lower(), str(i), str(hueconfig['lights_address'][str(i)]['ip']))
                                        return continue_conversation
                                        # NOTE(review): unreachable after return.
                                        break
                                # NOTE(review): `Keyerror` is an undefined
                                # name (should be KeyError) — this handler
                                # would itself raise NameError if hit.
                                except Keyerror:
                                    say('Unable to help, please check your config file')
                    # ---- Tasmota devices ----
                    if configuration['Tasmota_devicelist']['Tasmota_Control'] == 'Enabled':
                        for num, name in enumerate(tasmota_devicelist):
                            if name.lower() in str(usrcmd).lower():
                                tasmota_control(str(usrcmd).lower(),
                                                name.lower(),
                                                tasmota_deviceip[num])
                                return continue_conversation
                                # NOTE(review): unreachable after return.
                                break
                    # ---- Canned conversation pairs ----
                    if configuration['Conversation']['Conversation_Control'] == 'Enabled':
                        for i in range(1, numques + 1):
                            try:
                                if str(configuration['Conversation']['question'][i][0]).lower() in str(usrcmd).lower():
                                    selectedans = random.sample(configuration['Conversation']['answer'][i], 1)
                                    say(selectedans[0])
                                    return continue_conversation
                                    # NOTE(review): unreachable after return.
                                    break
                            # NOTE(review): `Keyerror` again — NameError if hit.
                            except Keyerror:
                                say('Please check if the number of questions matches the number of answers')
                    # ---- Domoticz devices ----
                    if Domoticz_Device_Control == True and len(domoticz_devices['result']) > 0:
                        for i in range(0, len(domoticz_devices['result'])):
                            if str(domoticz_devices['result'][i]['HardwareName']).lower() in str(usrcmd).lower():
                                domoticz_control(i, str(usrcmd).lower(),
                                                 domoticz_devices['result'][i]['idx'],
                                                 domoticz_devices['result'][i]['HardwareName'])
                                return continue_conversation
                                # NOTE(review): unreachable after return.
                                break
                    # ---- Magic Mirror remote control ----
                    if (custom_action_keyword['Keywords']['Magic_mirror'][0]).lower() in str(usrcmd).lower():
                        try:
                            mmmcommand = str(usrcmd).lower()
                            if 'weather'.lower() in mmmcommand:
                                if 'show'.lower() in mmmcommand:
                                    mmreq_one = requests.get("http://" + mmmip + ":8080/remote?action=SHOW&module=module_2_currentweather")
                                    mmreq_two = requests.get("http://" + mmmip + ":8080/remote?action=SHOW&module=module_3_currentweather")
                                if 'hide'.lower() in mmmcommand:
                                    mmreq_one = requests.get("http://" + mmmip + ":8080/remote?action=HIDE&module=module_2_currentweather")
                                    mmreq_two = requests.get("http://" + mmmip + ":8080/remote?action=HIDE&module=module_3_currentweather")
                            if 'power off'.lower() in mmmcommand:
                                mmreq = requests.get("http://" + mmmip + ":8080/remote?action=SHUTDOWN")
                            if 'reboot'.lower() in mmmcommand:
                                mmreq = requests.get("http://" + mmmip + ":8080/remote?action=REBOOT")
                            if 'restart'.lower() in mmmcommand:
                                mmreq = requests.get("http://" + mmmip + ":8080/remote?action=RESTART")
                            if 'display on'.lower() in mmmcommand:
                                mmreq = requests.get("http://" + mmmip + ":8080/remote?action=MONITORON")
                            if 'display off'.lower() in mmmcommand:
                                mmreq = requests.get("http://" + mmmip + ":8080/remote?action=MONITOROFF")
                        except requests.exceptions.ConnectionError:
                            say("Magic mirror not online")
                        return continue_conversation
                    # ---- Recipe ingredients via Pushbullet ----
                    if (custom_action_keyword['Keywords']['Recipe_pushbullet'][0]).lower() in str(usrcmd).lower():
                        # Extract the dish name after "for" and URL-encode spaces.
                        ingrequest = str(usrcmd).lower()
                        ingredientsidx = ingrequest.find('for')
                        ingrequest = ingrequest[ingredientsidx:]
                        ingrequest = ingrequest.replace('for', "", 1)
                        ingrequest = ingrequest.replace("'}", "", 1)
                        ingrequest = ingrequest.strip()
                        # NOTE(review): replace(..., 1) only encodes the
                        # first space — multi-word dishes keep raw spaces.
                        ingrequest = ingrequest.replace(" ", "%20", 1)
                        getrecipe(ingrequest)
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Kickstarter_tracking'][0]).lower() in str(usrcmd).lower():
                        kickstarter_tracker(str(usrcmd).lower())
                        return continue_conversation
                    # ---- Raspberry Pi GPIO ----
                    if configuration['Raspberrypi_GPIO_Control']['GPIO_Control'] == 'Enabled':
                        if (custom_action_keyword['Keywords']['Pi_GPIO_control'][0]).lower() in str(usrcmd).lower():
                            Action(str(usrcmd).lower())
                            return continue_conversation
                    # ---- YouTube streaming ----
                    if configuration['YouTube']['YouTube_Control'] == 'Enabled':
                        if (custom_action_keyword['Keywords']['YouTube_music_stream'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            if 'autoplay'.lower() in str(usrcmd).lower():
                                YouTube_Autoplay(str(usrcmd).lower())
                            else:
                                YouTube_No_Autoplay(str(usrcmd).lower())
                            return continue_conversation
                    if (custom_action_keyword['Keywords']['Stop_music'][0]).lower() in str(usrcmd).lower():
                        stop()
                    if configuration['Radio_stations']['Radio_Control'] == 'Enabled':
                        if 'radio'.lower() in str(usrcmd).lower():
                            radio(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['ESP']['ESP_Control'] == 'Enabled':
                        if (custom_action_keyword['Keywords']['ESP_control'][0]).lower() in str(usrcmd).lower():
                            ESP(str(usrcmd).lower())
                            return continue_conversation
                    if (custom_action_keyword['Keywords']['Parcel_tracking'][0]).lower() in str(usrcmd).lower():
                        track()
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['RSS'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['RSS'][1]).lower() in str(usrcmd).lower():
                        feed(str(usrcmd).lower())
                        return continue_conversation
                    # ---- Kodi actions ----
                    if kodicontrol:
                        try:
                            if (custom_action_keyword['Keywords']['Kodi_actions'][0]).lower() in str(usrcmd).lower():
                                kodiactions(str(usrcmd).lower())
                        except requests.exceptions.ConnectionError:
                            say("Kodi TV box not online")
                        return continue_conversation
                    # Google Assistant now comes built in with chromecast
                    # control, so custom function has been commented
                    # if 'chromecast'.lower() in str(usrcmd).lower():
                    #     if 'play'.lower() in str(usrcmd).lower():
                    #         chromecast_play_video(str(usrcmd).lower())
                    #     else:
                    #         chromecast_control(usrcmd)
                    #     return continue_conversation
                    # ---- VLC pause / resume ----
                    if (custom_action_keyword['Keywords']['Pause_resume'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Pause_resume'][1]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing():
                            if (custom_action_keyword['Keywords']['Pause_resume'][0]).lower() in str(usrcmd).lower():
                                vlcplayer.pause_vlc()
                        if checkvlcpaused():
                            if (custom_action_keyword['Keywords']['Pause_resume'][1]).lower() in str(usrcmd).lower():
                                vlcplayer.play_vlc()
                        elif vlcplayer.is_vlc_playing() == False and checkvlcpaused() == False:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    # ---- Next track ----
                    if (custom_action_keyword['Keywords']['Track_change']['Next'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Next'][1]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Next'][2]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing() or checkvlcpaused() == True:
                            vlcplayer.stop_vlc()
                            vlcplayer.change_media_next()
                        elif vlcplayer.is_vlc_playing() == False and checkvlcpaused() == False:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    # ---- Previous track ----
                    if (custom_action_keyword['Keywords']['Track_change']['Previous'][0]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Previous'][1]).lower() in str(usrcmd).lower() or (custom_action_keyword['Keywords']['Track_change']['Previous'][2]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing() or checkvlcpaused() == True:
                            vlcplayer.stop_vlc()
                            vlcplayer.change_media_previous()
                        elif vlcplayer.is_vlc_playing() == False and checkvlcpaused() == False:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    # ---- VLC volume control ----
                    if (custom_action_keyword['Keywords']['VLC_music_volume'][0]).lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing() == True or checkvlcpaused() == True:
                            if (custom_action_keyword['Dict']['Set']).lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Change'].lower() in str(usrcmd).lower():
                                # Absolute volume: "hundred"/max, "zero"/min,
                                # or the first number found in the phrase.
                                if 'hundred'.lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Maximum'] in str(usrcmd).lower():
                                    settingvollevel = 100
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                elif 'zero'.lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Minimum'] in str(usrcmd).lower():
                                    settingvollevel = 0
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                else:
                                    for settingvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(usrcmd)):
                                        with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                            json.dump(settingvollevel, vol)
                                print('Setting volume to: ' + str(settingvollevel))
                                vlcplayer.set_vlc_volume(int(settingvollevel))
                            elif custom_action_keyword['Dict']['Increase'].lower() in str(usrcmd).lower() or custom_action_keyword['Dict']['Decrease'].lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                # Relative volume: read the saved level first.
                                if os.path.isfile("{}/.mediavolume.json".format(USER_PATH)):
                                    try:
                                        with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                                            oldvollevel = json.load(vol)
                                        for oldvollevel in re.findall(r'\b\d+\b', str(oldvollevel)):
                                            oldvollevel = int(oldvollevel)
                                    except json.decoder.JSONDecodeError:
                                        # NOTE(review): missing call parens —
                                        # this binds the method object, and
                                        # `output` is undefined here; this
                                        # fallback would raise NameError.
                                        oldvollevel = vlcplayer.get_vlc_volume
                                        for oldvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(output)):
                                            oldvollevel = int(oldvollevel)
                                else:
                                    # NOTE(review): same missing-call /
                                    # undefined-`output` problem as above.
                                    oldvollevel = vlcplayer.get_vlc_volume
                                    for oldvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(output)):
                                        oldvollevel = int(oldvollevel)
                                if custom_action_keyword['Dict']['Increase'].lower() in str(usrcmd).lower():
                                    # Step is the number in the phrase, else 10.
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel = int(changevollevel)
                                    else:
                                        changevollevel = 10
                                    newvollevel = oldvollevel + changevollevel
                                    print(newvollevel)
                                    # Clamp to [0, 100].
                                    if int(newvollevel) > 100:
                                        settingvollevel = 100
                                    elif int(newvollevel) < 0:
                                        settingvollevel = 0
                                    else:
                                        settingvollevel = newvollevel
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    print('Setting volume to: ' + str(settingvollevel))
                                    vlcplayer.set_vlc_volume(int(settingvollevel))
                                if custom_action_keyword['Dict']['Decrease'].lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel = int(changevollevel)
                                    else:
                                        changevollevel = 10
                                    newvollevel = oldvollevel - changevollevel
                                    print(newvollevel)
                                    if int(newvollevel) > 100:
                                        settingvollevel = 100
                                    elif int(newvollevel) < 0:
                                        settingvollevel = 0
                                    else:
                                        settingvollevel = newvollevel
                                    with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    print('Setting volume to: ' + str(settingvollevel))
                                    vlcplayer.set_vlc_volume(int(settingvollevel))
                            else:
                                say("Sorry I could not help you")
                        else:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if (custom_action_keyword['Keywords']['Music_index_refresh'][0]).lower() in str(usrcmd).lower() and (custom_action_keyword['Keywords']['Music_index_refresh'][1]).lower() in str(usrcmd).lower():
                        refreshlists()
                        return continue_conversation
                    # ---- Streaming services ----
                    if configuration['Gmusicapi']['Gmusic_Control'] == 'Enabled':
                        if (custom_action_keyword['Keywords']['Google_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            gmusicselect(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['Spotify']['Spotify_Control'] == 'Enabled':
                        if (custom_action_keyword['Keywords']['Spotify_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            spotify_playlist_select(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['Gaana']['Gaana_Control'] == 'Enabled':
                        if (custom_action_keyword['Keywords']['Gaana_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            gaana_playlist_select(str(usrcmd).lower())
                            return continue_conversation
                    if configuration['Deezer']['Deezer_Control'] == 'Enabled':
                        if (custom_action_keyword['Keywords']['Deezer_music_streaming'][0]).lower() in str(usrcmd).lower():
                            vlcplayer.stop_vlc()
                            deezer_playlist_select(str(usrcmd).lower())
                            return continue_conversation
                else:
                    # Interim (low-stability) result: wait for the next one.
                    continue
        if GPIOcontrol:
            assistantindicator('speaking')
        # Play back the assistant's audio reply.
        if len(resp.audio_out.audio_data) > 0:
            if not self.conversation_stream.playing:
                self.conversation_stream.stop_recording()
                self.conversation_stream.start_playback()
                logging.info('Playing assistant response.')
            self.conversation_stream.write(resp.audio_out.audio_data)
        if resp.dialog_state_out.conversation_state:
            conversation_state = resp.dialog_state_out.conversation_state
            logging.debug('Updating conversation state.')
            self.conversation_state = conversation_state
        if resp.dialog_state_out.volume_percentage != 0:
            volume_percentage = resp.dialog_state_out.volume_percentage
            logging.info('Setting volume to %s%%', volume_percentage)
            self.conversation_stream.volume_percentage = volume_percentage
        if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
            continue_conversation = True
            if GPIOcontrol:
                assistantindicator('listening')
            logging.info('Expecting follow-on query from user.')
        elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
            # Conversation over: restore indicators, Kodi and VLC volume.
            if GPIOcontrol:
                assistantindicator('off')
            if kodicontrol:
                try:
                    with open('{}/.volume.json'.format(USER_PATH),
                              'r') as f:
                        vollevel = json.load(f)
                    kodi.Application.SetVolume({"volume": vollevel})
                except requests.exceptions.ConnectionError:
                    print("Kodi TV box not online")
            if vlcplayer.is_vlc_playing():
                with open('{}/.mediavolume.json'.format(USER_PATH),
                          'r') as vol:
                    oldvolume = json.load(vol)
                vlcplayer.set_vlc_volume(int(oldvolume))
            continue_conversation = False
        if resp.device_action.device_request_json:
            device_request = json.loads(
                resp.device_action.device_request_json
            )
            fs = self.device_handler(device_request)
            if fs:
                device_actions_futures.extend(fs)
        if self.display and resp.screen_out.data:
            system_browser = browser_helpers.system_browser
            system_browser.display(resp.screen_out.data)

    if len(device_actions_futures):
        logging.info('Waiting for device executions to complete.')
        concurrent.futures.wait(device_actions_futures)
    logging.info('Finished playing assistant response.')
    self.conversation_stream.stop_playback()
    return continue_conversation
    # NOTE(review): everything below is dead code — it sits after the
    # unconditional return and duplicates the CLOSE_MICROPHONE restore
    # logic above. Candidate for deletion.
    if GPIOcontrol:
        assistantindicator('off')
    if kodicontrol:
        try:
            with open('{}/.volume.json'.format(USER_PATH), 'r') as f:
                vollevel = json.load(f)
            kodi.Application.SetVolume({"volume": vollevel})
        except requests.exceptions.ConnectionError:
            print("Kodi TV box not online")
    if vlcplayer.is_vlc_playing():
        with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
            oldvolume = json.load(vol)
        vlcplayer.set_vlc_volume(int(oldvolume))
def assist(self):
    """Send a voice request to the Assistant and playback the response.

    Streams microphone audio to the gRPC Assist API while scanning final
    speech transcripts for local custom commands (diyHue lights, Tasmota,
    magic mirror, recipes, YouTube/radio/music streaming, VLC volume
    control, ...). A matching command is executed locally and the method
    returns early instead of letting the Assistant answer. GPIO pin 5 is
    the "listening" indicator, pin 6 the "speaking" indicator.

    Returns:
        True if conversation should continue (follow-on query expected).
    """
    continue_conversation = False
    device_actions_futures = []
    # Audible feedback so the user knows recording has started.
    subprocess.Popen(["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"],
                     stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE)
    self.conversation_stream.start_recording()
    #Uncomment the following after starting the Kodi
    #status=mutevolstatus()
    #vollevel=status[1]
    #with open('/home/pi/.volume.json', 'w') as f:
        #json.dump(vollevel, f)
    #kodi.Application.SetVolume({"volume": 0})
    GPIO.output(5, GPIO.HIGH)   # listening indicator on
    led.ChangeDutyCycle(100)
    if vlcplayer.is_vlc_playing():
        # Duck running VLC playback to 15% while listening; save the current
        # volume first (once) so it can be restored when the mic closes.
        if os.path.isfile("/home/pi/.mediavolume.json"):
            vlcplayer.set_vlc_volume(15)
        else:
            currentvolume = vlcplayer.get_vlc_volume()
            print(currentvolume)
            with open('/home/pi/.mediavolume.json', 'w') as vol:
                json.dump(currentvolume, vol)
            vlcplayer.set_vlc_volume(15)
    logging.info('Recording audio request.')

    def iter_log_assist_requests():
        for c in self.gen_assist_requests():
            assistant_helpers.log_assist_request_without_audio(c)
            yield c
        logging.debug('Reached end of AssistRequest iteration.')

    # This generator yields AssistResponse proto messages
    # received from the gRPC Google Assistant API.
    for resp in self.assistant.Assist(iter_log_assist_requests(),
                                      self.deadline):
        assistant_helpers.log_assist_response_without_audio(resp)
        if resp.event_type == END_OF_UTTERANCE:
            logging.info('End of audio request detected.')
            logging.info('Stopping recording.')
            self.conversation_stream.stop_recording()
        if resp.speech_results:
            logging.info('Transcript of user request: "%s".',
                         ' '.join(r.transcript for r in resp.speech_results))
            for r in resp.speech_results:
                usercommand = str(r)
                # Only act on final transcripts (stability 1.0); strip the
                # proto text representation down to the bare spoken command.
                if "stability: 1.0" in usercommand.lower():
                    usrcmd = str(usercommand).lower()
                    idx = usrcmd.find('stability')
                    usrcmd = usrcmd[:idx]
                    usrcmd = usrcmd.replace("stability", "", 1)
                    usrcmd = usrcmd.strip()
                    usrcmd = usrcmd.replace('transcript: "', '', 1)
                    usrcmd = usrcmd.replace('"', '', 1)
                    usrcmd = usrcmd.strip()
                    print(str(usrcmd))
                    with open('/home/pi/GassistPi/src/diyHue/config.json', 'r') as config:
                        hueconfig = json.load(config)
                    # diyHue light names are keyed by 1-based string indices.
                    for i in range(1, len(hueconfig['lights']) + 1):
                        try:
                            if str(hueconfig['lights'][str(i)]['name']).lower() in str(usrcmd).lower():
                                hue_control(str(usrcmd).lower(), str(i),
                                            str(hueconfig['lights_address'][str(i)]['ip']))
                                return continue_conversation
                        # BUGFIX: was 'except Keyerror' — an undefined name that
                        # would itself raise NameError instead of being caught.
                        except KeyError:
                            say('Unable to help, please check your config file')
                    for num, name in enumerate(tasmota_devicelist):
                        if name.lower() in str(usrcmd).lower():
                            tasmota_control(str(usrcmd).lower(), name.lower(),
                                            tasmota_deviceip[num])
                            return continue_conversation
                    if 'magic mirror'.lower() in str(usrcmd).lower():
                        try:
                            mmmcommand = str(usrcmd).lower()
                            if 'weather'.lower() in mmmcommand:
                                if 'show'.lower() in mmmcommand:
                                    mmreq_one = requests.get("http://" + mmmip + ":8080/remote?action=SHOW&module=module_2_currentweather")
                                    mmreq_two = requests.get("http://" + mmmip + ":8080/remote?action=SHOW&module=module_3_currentweather")
                                if 'hide'.lower() in mmmcommand:
                                    mmreq_one = requests.get("http://" + mmmip + ":8080/remote?action=HIDE&module=module_2_currentweather")
                                    mmreq_two = requests.get("http://" + mmmip + ":8080/remote?action=HIDE&module=module_3_currentweather")
                            if 'power off'.lower() in mmmcommand:
                                mmreq = requests.get("http://" + mmmip + ":8080/remote?action=SHUTDOWN")
                            if 'reboot'.lower() in mmmcommand:
                                mmreq = requests.get("http://" + mmmip + ":8080/remote?action=REBOOT")
                            if 'restart'.lower() in mmmcommand:
                                mmreq = requests.get("http://" + mmmip + ":8080/remote?action=RESTART")
                            if 'display on'.lower() in mmmcommand:
                                mmreq = requests.get("http://" + mmmip + ":8080/remote?action=MONITORON")
                            if 'display off'.lower() in mmmcommand:
                                mmreq = requests.get("http://" + mmmip + ":8080/remote?action=MONITOROFF")
                        except requests.exceptions.ConnectionError:
                            say("Magic mirror not online")
                        return continue_conversation
                    if 'ingredients'.lower() in str(usrcmd).lower():
                        # Extract the dish name after "for" and URL-encode the
                        # first space for the recipe lookup.
                        ingrequest = str(usrcmd).lower()
                        ingredientsidx = ingrequest.find('for')
                        ingrequest = ingrequest[ingredientsidx:]
                        ingrequest = ingrequest.replace('for', "", 1)
                        ingrequest = ingrequest.replace("'}", "", 1)
                        ingrequest = ingrequest.strip()
                        ingrequest = ingrequest.replace(" ", "%20", 1)
                        getrecipe(ingrequest)
                        return continue_conversation
                    if 'kickstarter'.lower() in str(usrcmd).lower():
                        kickstarter_tracker(str(usrcmd).lower())
                        return continue_conversation
                    if 'trigger'.lower() in str(usrcmd).lower():
                        Action(str(usrcmd).lower())
                        return continue_conversation
                    if 'stream'.lower() in str(usrcmd).lower():
                        vlcplayer.stop_vlc()
                        if 'autoplay'.lower() in str(usrcmd).lower():
                            YouTube_Autoplay(str(usrcmd).lower())
                        else:
                            YouTube_No_Autoplay(str(usrcmd).lower())
                        return continue_conversation
                    # NOTE(review): 'stop' deliberately falls through (no
                    # return) so e.g. "stop radio" also reaches the radio branch.
                    if 'stop'.lower() in str(usrcmd).lower():
                        stop()
                    if 'radio'.lower() in str(usrcmd).lower():
                        radio(str(usrcmd).lower())
                        return continue_conversation
                    if 'wireless'.lower() in str(usrcmd).lower():
                        ESP(str(usrcmd).lower())
                        return continue_conversation
                    if 'parcel'.lower() in str(usrcmd).lower():
                        track()
                        return continue_conversation
                    if 'news'.lower() in str(usrcmd).lower() or 'feed'.lower() in str(usrcmd).lower() or 'quote'.lower() in str(usrcmd).lower():
                        feed(str(usrcmd).lower())
                        return continue_conversation
                    if 'on kodi'.lower() in str(usrcmd).lower():
                        kodiactions(str(usrcmd).lower())
                        return continue_conversation
                    # Google Assistant now comes built in with chromecast control,
                    # so custom function has been commented
                    # if 'chromecast'.lower() in str(usrcmd).lower():
                    #     if 'play'.lower() in str(usrcmd).lower():
                    #         chromecast_play_video(str(usrcmd).lower())
                    #     else:
                    #         chromecast_control(usrcmd)
                    #     return continue_conversation
                    if 'pause music'.lower() in str(usrcmd).lower() or 'resume music'.lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing():
                            if 'pause music'.lower() in str(usrcmd).lower():
                                vlcplayer.pause_vlc()
                        if checkvlcpaused():
                            if 'resume music'.lower() in str(usrcmd).lower():
                                vlcplayer.play_vlc()
                        elif vlcplayer.is_vlc_playing() == False and checkvlcpaused() == False:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if 'music volume'.lower() in str(usrcmd).lower():
                        if vlcplayer.is_vlc_playing() == True or checkvlcpaused() == True:
                            if 'set'.lower() in str(usrcmd).lower() or 'change'.lower() in str(usrcmd).lower():
                                if 'hundred'.lower() in str(usrcmd).lower() or 'maximum' in str(usrcmd).lower():
                                    settingvollevel = 100
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                elif 'zero'.lower() in str(usrcmd).lower() or 'minimum' in str(usrcmd).lower():
                                    settingvollevel = 0
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                else:
                                    # Takes the last number spoken as the target level.
                                    for settingvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(usrcmd)):
                                        with open('/home/pi/.mediavolume.json', 'w') as vol:
                                            json.dump(settingvollevel, vol)
                                print('Setting volume to: ' + str(settingvollevel))
                                vlcplayer.set_vlc_volume(int(settingvollevel))
                            elif 'increase'.lower() in str(usrcmd).lower() or 'decrease'.lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                if os.path.isfile("/home/pi/.mediavolume.json"):
                                    with open('/home/pi/.mediavolume.json', 'r') as vol:
                                        oldvollevel = json.load(vol)
                                    for oldvollevel in re.findall(r'\b\d+\b', str(oldvollevel)):
                                        oldvollevel = int(oldvollevel)
                                else:
                                    # BUGFIX: was 'vlcplayer.get_vlc_volume' (missing
                                    # call parens — bound the function object).
                                    oldvollevel = vlcplayer.get_vlc_volume()
                                    # BUGFIX: scanned undefined name 'output'
                                    # (NameError); scan the fetched level instead.
                                    for oldvollevel in re.findall(r"[-+]?\d*\.\d+|\d+", str(oldvollevel)):
                                        oldvollevel = int(oldvollevel)
                                if 'increase'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel = int(changevollevel)
                                    else:
                                        changevollevel = 10   # default step when no number spoken
                                    newvollevel = oldvollevel + changevollevel
                                    print(newvollevel)
                                    if int(newvollevel) > 100:
                                        settingvollevel = 100   # BUGFIX: was '==' (no-op comparison)
                                    elif int(newvollevel) < 0:
                                        settingvollevel = 0     # BUGFIX: was '==' (no-op comparison)
                                    else:
                                        settingvollevel = newvollevel
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    print('Setting volume to: ' + str(settingvollevel))
                                    vlcplayer.set_vlc_volume(int(settingvollevel))
                                if 'decrease'.lower() in str(usrcmd).lower() or 'reduce'.lower() in str(usrcmd).lower():
                                    if any(char.isdigit() for char in str(usrcmd)):
                                        for changevollevel in re.findall(r'\b\d+\b', str(usrcmd)):
                                            changevollevel = int(changevollevel)
                                    else:
                                        changevollevel = 10
                                    newvollevel = oldvollevel - changevollevel
                                    print(newvollevel)
                                    if int(newvollevel) > 100:
                                        settingvollevel = 100   # BUGFIX: was '=='
                                    elif int(newvollevel) < 0:
                                        settingvollevel = 0     # BUGFIX: was '=='
                                    else:
                                        settingvollevel = newvollevel
                                    with open('/home/pi/.mediavolume.json', 'w') as vol:
                                        json.dump(settingvollevel, vol)
                                    print('Setting volume to: ' + str(settingvollevel))
                                    vlcplayer.set_vlc_volume(int(settingvollevel))
                            else:
                                say("Sorry I could not help you")
                        else:
                            say("Sorry nothing is playing right now")
                        return continue_conversation
                    if 'refresh'.lower() in str(usrcmd).lower() and 'music'.lower() in str(usrcmd).lower():
                        refreshlists()
                        return continue_conversation
                    if 'google music'.lower() in str(usrcmd).lower():
                        vlcplayer.stop_vlc()
                        gmusicselect(str(usrcmd).lower())
                        return continue_conversation
                    if 'spotify'.lower() in str(usrcmd).lower():
                        vlcplayer.stop_vlc()
                        spotify_playlist_select(str(usrcmd).lower())
                        return continue_conversation
                else:
                    # Interim (unstable) transcript — wait for the final one.
                    continue
        GPIO.output(5, GPIO.LOW)
        GPIO.output(6, GPIO.HIGH)   # speaking indicator on
        led.ChangeDutyCycle(50)
        if len(resp.audio_out.audio_data) > 0:
            if not self.conversation_stream.playing:
                self.conversation_stream.stop_recording()
                self.conversation_stream.start_playback()
                logging.info('Playing assistant response.')
            self.conversation_stream.write(resp.audio_out.audio_data)
        if resp.dialog_state_out.conversation_state:
            conversation_state = resp.dialog_state_out.conversation_state
            logging.debug('Updating conversation state.')
            self.conversation_state = conversation_state
        if resp.dialog_state_out.volume_percentage != 0:
            volume_percentage = resp.dialog_state_out.volume_percentage
            logging.info('Setting volume to %s%%', volume_percentage)
            self.conversation_stream.volume_percentage = volume_percentage
        if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
            continue_conversation = True
            GPIO.output(6, GPIO.LOW)
            GPIO.output(5, GPIO.HIGH)
            led.ChangeDutyCycle(100)
            logging.info('Expecting follow-on query from user.')
        elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
            GPIO.output(6, GPIO.LOW)
            GPIO.output(5, GPIO.LOW)
            led.ChangeDutyCycle(0)
            #Uncomment the following after starting the Kodi
            #with open('/home/pi/.volume.json', 'r') as f:
                #vollevel = json.load(f)
                #kodi.Application.SetVolume({"volume": vollevel})
            if vlcplayer.is_vlc_playing():
                # Restore the VLC volume saved before listening started.
                with open('/home/pi/.mediavolume.json', 'r') as vol:
                    oldvolume = json.load(vol)
                vlcplayer.set_vlc_volume(int(oldvolume))
            continue_conversation = False
        if resp.device_action.device_request_json:
            device_request = json.loads(resp.device_action.device_request_json)
            fs = self.device_handler(device_request)
            if fs:
                device_actions_futures.extend(fs)
        if self.display and resp.screen_out.data:
            system_browser = browser_helpers.system_browser
            system_browser.display(resp.screen_out.data)
    if len(device_actions_futures):
        logging.info('Waiting for device executions to complete.')
        concurrent.futures.wait(device_actions_futures)
    logging.info('Finished playing assistant response.')
    self.conversation_stream.stop_playback()
    return continue_conversation
    # NOTE(review): the lines below were present after the unconditional
    # return in the original and are unreachable; kept verbatim for fidelity.
    GPIO.output(6, GPIO.LOW)
    GPIO.output(5, GPIO.LOW)
    led.ChangeDutyCycle(0)
    #Uncomment the following after starting the Kodi
    #with open('/home/pi/.volume.json', 'r') as f:
        #vollevel = json.load(f)
        #kodi.Application.SetVolume({"volume": vollevel})
    if vlcplayer.is_vlc_playing():
        with open('/home/pi/.mediavolume.json', 'r') as vol:
            oldvolume = json.load(vol)
        vlcplayer.set_vlc_volume(int(oldvolume))
def assist(self):
    """Send a voice request to the Assistant and playback the response.

    Simpler variant: matches the raw ``resp.speech_results`` text against a
    small set of custom keywords (trigger/stream/stop/radio/ESP/parcel/
    news/kodi) and runs the matching helper locally, otherwise plays the
    Assistant's spoken answer. GPIO pin 5 = listening LED, pin 6 = speaking.

    Returns: True if conversation should continue.
    """
    continue_conversation = False
    device_actions_futures = []
    # Audible feedback that recording has started.
    subprocess.Popen(["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"],
                     stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                     stderr=subprocess.PIPE)
    self.conversation_stream.start_recording()
    #Uncomment the following after starting the Kodi
    #status=mutevolstatus()
    #vollevel=status[1]
    #with open('/home/pi/.volume.json', 'w') as f:
        #json.dump(vollevel, f)
    #kodi.Application.SetVolume({"volume": 0})
    GPIO.output(5, GPIO.HIGH)
    led.ChangeDutyCycle(100)
    logging.info('Recording audio request.')

    def iter_assist_requests():
        for c in self.gen_assist_requests():
            assistant_helpers.log_assist_request_without_audio(c)
            yield c
        # Playback starts once the request stream is exhausted.
        self.conversation_stream.start_playback()

    # This generator yields AssistResponse proto messages
    # received from the gRPC Google Assistant API.
    for resp in self.assistant.Assist(iter_assist_requests(),
                                      self.deadline):
        assistant_helpers.log_assist_response_without_audio(resp)
        if resp.event_type == END_OF_UTTERANCE:
            logging.info('End of audio request detected')
            GPIO.output(5, GPIO.LOW)
            led.ChangeDutyCycle(0)
            self.conversation_stream.stop_recording()
            print('Full Speech Result ' + str(resp.speech_results))
        if resp.speech_results:
            logging.info('Transcript of user request: "%s".',
                         ' '.join(r.transcript for r in resp.speech_results))
            # NOTE(review): usrcmd is the whole speech_results container,
            # stringified below for keyword matching — not a clean transcript.
            usrcmd = resp.speech_results
            print(str(usrcmd))
            if 'trigger' in str(usrcmd).lower():
                Action(str(usrcmd).lower())
                return continue_conversation
            if 'stream'.lower() in str(usrcmd).lower():
                # Strip proto wrapper text down to the requested track name.
                track = str(usrcmd).lower()
                idx = track.find('stability')
                track = track[:idx]
                track = track.replace("stability", "", 1)
                track = track.strip()
                idx = track.find('stream')
                # NOTE(review): this truncates AT 'stream' (track[:idx]), so the
                # following replace of "stream" finds nothing — verify intent.
                track = track[:idx]
                track = track.replace("stream", "", 1)
                track = track.replace("", "", 1)   # no-op replace, kept as-is
                track = ("stream " + track)
                track = track.replace('[transcript: "', '', 1)
                track = track.strip()
                print(track)
                YouTube_No_Autoplay(track)
                return continue_conversation
            if 'stop'.lower() in str(usrcmd).lower():
                stop()
                return continue_conversation
            if 'tune into'.lower() in str(usrcmd).lower():
                radio(str(usrcmd).lower())
                return continue_conversation
            if 'wireless'.lower() in str(usrcmd).lower():
                ESP(str(usrcmd).lower())
                return continue_conversation
            if 'parcel'.lower() in str(usrcmd).lower():
                track()
                return continue_conversation
            if 'news'.lower() in str(usrcmd).lower() or 'feed'.lower() in str(usrcmd).lower() or 'quote'.lower() in str(usrcmd).lower():
                feed(str(usrcmd).lower())
                return continue_conversation
            if 'on kodi'.lower() in str(usrcmd).lower():
                kodiactions(str(usrcmd).lower())
                return continue_conversation
            else:
                # No custom keyword matched — skip to the next response.
                continue
        GPIO.output(5, GPIO.LOW)
        GPIO.output(6, GPIO.HIGH)   # speaking indicator
        led.ChangeDutyCycle(50)
        logging.info('Playing assistant response.')
        if len(resp.audio_out.audio_data) > 0:
            self.conversation_stream.write(resp.audio_out.audio_data)
        if resp.dialog_state_out.conversation_state:
            conversation_state = resp.dialog_state_out.conversation_state
            logging.debug('Updating conversation state.')
            self.conversation_state = conversation_state
        if resp.dialog_state_out.volume_percentage != 0:
            volume_percentage = resp.dialog_state_out.volume_percentage
            logging.info('Setting volume to %s%%', volume_percentage)
            self.conversation_stream.volume_percentage = volume_percentage
        if resp.dialog_state_out.microphone_mode == DIALOG_FOLLOW_ON:
            continue_conversation = True
            GPIO.output(6, GPIO.LOW)
            GPIO.output(5, GPIO.HIGH)
            led.ChangeDutyCycle(100)
            logging.info('Expecting follow-on query from user.')
        elif resp.dialog_state_out.microphone_mode == CLOSE_MICROPHONE:
            continue_conversation = False
        if resp.device_action.device_request_json:
            device_request = json.loads(resp.device_action.device_request_json)
            fs = self.device_handler(device_request)
            if fs:
                device_actions_futures.extend(fs)
    if len(device_actions_futures):
        logging.info('Waiting for device executions to complete.')
        concurrent.futures.wait(device_actions_futures)
    logging.info('Finished playing assistant response.')
    GPIO.output(6, GPIO.LOW)
    GPIO.output(5, GPIO.LOW)
    led.ChangeDutyCycle(0)
    #Uncomment the following, after starting Kodi
    #with open('/home/pi/.volume.json', 'r') as f:
        #vollevel = json.load(f)
        #kodi.Application.SetVolume({"volume": vollevel})
    self.conversation_stream.stop_playback()
    return continue_conversation
def assist(self):
    """Run one conversation turn against the legacy Converse gRPC API.

    Rebuilds the audio device / conversation stream on every call, loops
    while follow-on queries are expected, dispatches a few custom keywords
    locally, and on any exception recreates the assistant client and
    restarts itself recursively after closing the stream.
    """
    # Configure audio source and sink.
    self.audio_device = None
    # NOTE(review): because audio_device starts as None, source and sink end
    # up sharing one SoundDeviceStream — the second 'or' sees a truthy device.
    self.audio_source = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))
    self.audio_sink = self.audio_device = (
        self.audio_device or audio_helpers.SoundDeviceStream(
            sample_rate=self.audio_sample_rate,
            sample_width=self.audio_sample_width,
            block_size=self.audio_block_size,
            flush_size=self.audio_flush_size))
    # Create conversation stream with the given audio source and sink.
    self.conversation_stream = audio_helpers.ConversationStream(
        source=self.audio_source,
        sink=self.audio_sink,
        iter_size=self.audio_iter_size,
        sample_width=self.audio_sample_width)
    restart = False
    continue_conversation = True
    try:
        while continue_conversation:
            continue_conversation = False
            # Audible feedback that recording has started.
            subprocess.Popen(["aplay", "/home/pi/GassistPi/sample-audio-files/Fb.wav"],
                             stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
            self.conversation_stream.start_recording()
            GPIO.output(5, GPIO.HIGH)   # listening indicator
            led.ChangeDutyCycle(100)
            self.logger.info('Recording audio request.')

            def iter_converse_requests():
                for c in self.gen_converse_requests():
                    assistant_helpers.log_converse_request_without_audio(c)
                    yield c
                self.conversation_stream.start_playback()

            # This generator yields ConverseResponse proto messages
            # received from the gRPC Google Assistant API.
            for resp in self.assistant.Converse(iter_converse_requests(),
                                                self.grpc_deadline):
                assistant_helpers.log_converse_response_without_audio(resp)
                if resp.error.code != code_pb2.OK:
                    self.logger.error('server error: %s', resp.error.message)
                    break
                if resp.event_type == END_OF_UTTERANCE:
                    self.logger.info('End of audio request detected')
                    GPIO.output(5, GPIO.LOW)
                    led.ChangeDutyCycle(0)
                    self.conversation_stream.stop_recording()
                if resp.result.spoken_request_text:
                    usrcmd = resp.result.spoken_request_text
                    if 'trigger' in str(usrcmd).lower():
                        Action(str(usrcmd).lower())
                        return continue_conversation
                    if 'play'.lower() in str(usrcmd).lower():
                        YouTube(str(usrcmd).lower())
                        return continue_conversation
                    if 'stop'.lower() in str(usrcmd).lower():
                        stop()
                        return continue_conversation
                    if 'tune into'.lower() in str(usrcmd).lower():
                        radio(str(usrcmd).lower())
                        return continue_conversation
                    if 'wireless'.lower() in str(usrcmd).lower():
                        ESP(str(usrcmd).lower())
                        return continue_conversation
                    else:
                        # No custom keyword matched — next response.
                        continue
                    # NOTE(review): unreachable after the if/else chain above
                    # (every path returns or continues); kept verbatim.
                    self.logger.info('Transcript of user request: "%s".',
                                     resp.result.spoken_request_text)
                GPIO.output(5, GPIO.LOW)
                GPIO.output(6, GPIO.HIGH)   # speaking indicator
                led.ChangeDutyCycle(50)
                self.logger.info('Playing assistant response.')
                if len(resp.audio_out.audio_data) > 0:
                    self.conversation_stream.write(
                        resp.audio_out.audio_data)
                if resp.result.spoken_response_text:
                    self.logger.info(
                        'Transcript of TTS response '
                        '(only populated from IFTTT): "%s".',
                        resp.result.spoken_response_text)
                if resp.result.conversation_state:
                    self.conversation_state_bytes = resp.result.conversation_state
                if resp.result.volume_percentage != 0:
                    # Volume change is logged only; not applied to the stream here.
                    volume_percentage = resp.result.volume_percentage
                    self.logger.info('Volume should be set to %s%%',
                                     volume_percentage)
                if resp.result.microphone_mode == DIALOG_FOLLOW_ON:
                    continue_conversation = True
                    GPIO.output(6, GPIO.LOW)
                    GPIO.output(5, GPIO.HIGH)
                    led.ChangeDutyCycle(100)
                    self.logger.info(
                        'Expecting follow-on query from user.')
            self.logger.info('Finished playing assistant response.')
            GPIO.output(6, GPIO.LOW)
            GPIO.output(5, GPIO.LOW)
            led.ChangeDutyCycle(0)
            self.conversation_stream.stop_playback()
    except Exception as e:
        # Recreate the gRPC client and flag a recursive restart below.
        self._create_assistant()
        self.logger.exception('Skipping because of connection reset')
        restart = True
    try:
        self.conversation_stream.close()
        if restart:
            self.assist()
    except Exception:
        self.logger.error('Failed to close conversation_stream.')
def main(self):
    """Entry point for the library-based Assistant: parse CLI options, load
    OAuth2 credentials, (re-)register the device if needed, then loop over
    assistant events dispatching custom keyword actions (diyHue, Tasmota,
    Domoticz, magic mirror, recipes, music streaming, VLC volume, ...).

    Side effects: reads/writes device-config and volume JSON files, drives
    local hardware and network services; blocks on the event loop.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--device-model-id', '--device_model_id', type=str,
                        metavar='DEVICE_MODEL_ID', required=False,
                        help='the device model ID registered with Google')
    parser.add_argument('--project-id', '--project_id', type=str,
                        metavar='PROJECT_ID', required=False,
                        help='the project ID used to register this device')
    parser.add_argument('--device-config', type=str,
                        metavar='DEVICE_CONFIG_FILE',
                        default=os.path.join(
                            os.path.expanduser('~/.config'),
                            'googlesamples-assistant',
                            'device_config_library.json'),
                        help='path to store and read device configuration')
    parser.add_argument('--credentials', type=existing_file,
                        metavar='OAUTH2_CREDENTIALS_FILE',
                        default=os.path.join(
                            os.path.expanduser('~/.config'),
                            'google-oauthlib-tool',
                            'credentials.json'),
                        help='path to store and read OAuth2 credentials')
    parser.add_argument('-v', '--version', action='version',
                        version='%(prog)s ' + Assistant.__version_str__())
    args = parser.parse_args()
    with open(args.credentials, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))
    device_model_id = None
    last_device_id = None
    try:
        with open(args.device_config) as f:
            device_config = json.load(f)
            device_model_id = device_config['model_id']
            last_device_id = device_config.get('last_device_id', None)
    except FileNotFoundError:
        pass
    if not args.device_model_id and not device_model_id:
        raise Exception('Missing --device-model-id option')
    # Re-register if "device_model_id" is given by the user and it differs
    # from what we previously registered with.
    should_register = (args.device_model_id and
                       args.device_model_id != device_model_id)
    device_model_id = args.device_model_id or device_model_id
    with Assistant(credentials, device_model_id) as assistant:
        self.assistant = assistant
        # Startup chime.
        subprocess.Popen([
            "aplay", "{}/sample-audio-files/Startup.wav".format(ROOT_PATH)
        ], stdin=subprocess.PIPE, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        events = assistant.start()
        device_id = assistant.device_id
        print('device_model_id:', device_model_id)
        print('device_id:', device_id + '\n')
        # Re-register if "device_id" is different from the last "device_id":
        if should_register or (device_id != last_device_id):
            if args.project_id:
                register_device(args.project_id, credentials,
                                device_model_id, device_id)
                pathlib.Path(os.path.dirname(
                    args.device_config)).mkdir(exist_ok=True)
                with open(args.device_config, 'w') as f:
                    json.dump(
                        {
                            'last_device_id': device_id,
                            'model_id': device_model_id,
                        }, f)
            else:
                print(WARNING_NOT_REGISTERED)
        for event in events:
            self.process_event(event)
            usrcmd = event.args
            with open('{}/src/diyHue/config.json'.format(ROOT_PATH), 'r') as config:
                hueconfig = json.load(config)
            # diyHue lights are keyed by 1-based string indices.
            for i in range(1, len(hueconfig['lights']) + 1):
                try:
                    if str(hueconfig['lights'][str(i)]
                           ['name']).lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        hue_control(
                            str(usrcmd).lower(), str(i),
                            str(hueconfig['lights_address'][str(i)]['ip']))
                        break
                # BUGFIX: was 'except Keyerror' — an undefined name that would
                # itself raise NameError instead of catching the KeyError.
                except KeyError:
                    say('Unable to help, please check your config file')
            for num, name in enumerate(tasmota_devicelist):
                if name.lower() in str(usrcmd).lower():
                    assistant.stop_conversation()
                    tasmota_control(
                        str(usrcmd).lower(), name.lower(),
                        tasmota_deviceip[num], tasmota_deviceportid[num])
                    break
            # Canned question/answer pairs from the YAML configuration.
            for i in range(1, numques + 1):
                try:
                    if str(configuration['Conversation']['question'][i]
                           [0]).lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        selectedans = random.sample(
                            configuration['Conversation']['answer'][i], 1)
                        say(selectedans[0])
                        break
                # BUGFIX: was 'except Keyerror' (undefined name).
                except KeyError:
                    say('Please check if the number of questions matches the number of answers'
                        )
            if Domoticz_Device_Control == True and len(
                    domoticz_devices['result']) > 0:
                for i in range(0, len(domoticz_devices['result'])):
                    if str(
                            domoticz_devices['result'][i]
                            ['HardwareName']).lower() in str(usrcmd).lower():
                        assistant.stop_conversation()
                        domoticz_control(
                            i, str(usrcmd).lower(),
                            domoticz_devices['result'][i]['idx'],
                            domoticz_devices['result'][i]['HardwareName'])
                        break
            if (custom_action_keyword['Keywords']['Magic_mirror'][0]
                ).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                try:
                    mmmcommand = str(usrcmd).lower()
                    if 'weather'.lower() in mmmcommand:
                        if 'show'.lower() in mmmcommand:
                            mmreq_one = requests.get(
                                "http://" + mmmip +
                                ":8080/remote?action=SHOW&module=module_2_currentweather"
                            )
                            mmreq_two = requests.get(
                                "http://" + mmmip +
                                ":8080/remote?action=SHOW&module=module_3_currentweather"
                            )
                        if 'hide'.lower() in mmmcommand:
                            mmreq_one = requests.get(
                                "http://" + mmmip +
                                ":8080/remote?action=HIDE&module=module_2_currentweather"
                            )
                            mmreq_two = requests.get(
                                "http://" + mmmip +
                                ":8080/remote?action=HIDE&module=module_3_currentweather"
                            )
                    if 'power off'.lower() in mmmcommand:
                        mmreq = requests.get(
                            "http://" + mmmip + ":8080/remote?action=SHUTDOWN")
                    if 'reboot'.lower() in mmmcommand:
                        mmreq = requests.get("http://" + mmmip +
                                             ":8080/remote?action=REBOOT")
                    if 'restart'.lower() in mmmcommand:
                        mmreq = requests.get("http://" + mmmip +
                                             ":8080/remote?action=RESTART")
                    if 'display on'.lower() in mmmcommand:
                        mmreq = requests.get(
                            "http://" + mmmip + ":8080/remote?action=MONITORON")
                    if 'display off'.lower() in mmmcommand:
                        mmreq = requests.get(
                            "http://" + mmmip + ":8080/remote?action=MONITOROFF")
                except requests.exceptions.ConnectionError:
                    say("Magic mirror not online")
            if (custom_action_keyword['Keywords']['Recipe_pushbullet'][0]
                ).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                # Extract the dish name after "for"; URL-encode the first space.
                ingrequest = str(usrcmd).lower()
                ingredientsidx = ingrequest.find('for')
                ingrequest = ingrequest[ingredientsidx:]
                ingrequest = ingrequest.replace('for', "", 1)
                ingrequest = ingrequest.replace("'}", "", 1)
                ingrequest = ingrequest.strip()
                ingrequest = ingrequest.replace(" ", "%20", 1)
                getrecipe(ingrequest)
            if (custom_action_keyword['Keywords']['Kickstarter_tracking']
                [0]).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                kickstarter_tracker(str(usrcmd).lower())
            if (custom_action_keyword['Keywords']['Pi_GPIO_control'][0]
                ).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                Action(str(usrcmd).lower())
            if (custom_action_keyword['Keywords']['YouTube_music_stream']
                [0]).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                vlcplayer.stop_vlc()
                if 'autoplay'.lower() in str(usrcmd).lower():
                    YouTube_Autoplay(str(usrcmd).lower())
                else:
                    YouTube_No_Autoplay(str(usrcmd).lower())
            if (custom_action_keyword['Keywords']['Stop_music'][0]
                ).lower() in str(usrcmd).lower():
                stop()
            if 'radio'.lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                radio(str(usrcmd).lower())
            if (custom_action_keyword['Keywords']['ESP_control'][0]
                ).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                ESP(str(usrcmd).lower())
            if (custom_action_keyword['Keywords']['Parcel_tracking'][0]
                ).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                track()
            if (custom_action_keyword['Keywords']['RSS'][0]
                ).lower() in str(usrcmd).lower() or (
                    custom_action_keyword['Keywords']['RSS'][1]
                ).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                feed(str(usrcmd).lower())
            if (custom_action_keyword['Keywords']['Kodi_actions'][0]
                ).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                kodiactions(str(usrcmd).lower())
            # Google Assistant now comes built in with chromecast control, so custom function has been commented
            # if 'chromecast'.lower() in str(usrcmd).lower():
            #     assistant.stop_conversation()
            #     if 'play'.lower() in str(usrcmd).lower():
            #         chromecast_play_video(str(usrcmd).lower())
            #     else:
            #         chromecast_control(usrcmd)
            if (custom_action_keyword['Keywords']['Pause_resume'][0]
                ).lower() in str(usrcmd).lower() or (
                    custom_action_keyword['Keywords']['Pause_resume'][1]
                ).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                if vlcplayer.is_vlc_playing():
                    if (custom_action_keyword['Keywords']['Pause_resume']
                        [0]).lower() in str(usrcmd).lower():
                        vlcplayer.pause_vlc()
                if checkvlcpaused():
                    if (custom_action_keyword['Keywords']['Pause_resume']
                        [1]).lower() in str(usrcmd).lower():
                        vlcplayer.play_vlc()
                elif vlcplayer.is_vlc_playing(
                ) == False and checkvlcpaused() == False:
                    say("Sorry nothing is playing right now")
            if (custom_action_keyword['Keywords']['Track_change']['Next']
                [0]).lower() in str(usrcmd).lower() or (
                    custom_action_keyword['Keywords']['Track_change']
                    ['Next'][1]).lower() in str(usrcmd).lower() or (
                        custom_action_keyword['Keywords']['Track_change']
                        ['Next'][2]).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                if vlcplayer.is_vlc_playing() or checkvlcpaused() == True:
                    vlcplayer.stop_vlc()
                    vlcplayer.change_media_next()
                elif vlcplayer.is_vlc_playing(
                ) == False and checkvlcpaused() == False:
                    say("Sorry nothing is playing right now")
            if (custom_action_keyword['Keywords']['Track_change']
                ['Previous'][0]).lower() in str(usrcmd).lower() or (
                    custom_action_keyword['Keywords']['Track_change']
                    ['Previous'][1]).lower() in str(usrcmd).lower() or (
                        custom_action_keyword['Keywords']['Track_change']
                        ['Previous'][2]).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                if vlcplayer.is_vlc_playing() or checkvlcpaused() == True:
                    vlcplayer.stop_vlc()
                    vlcplayer.change_media_previous()
                elif vlcplayer.is_vlc_playing(
                ) == False and checkvlcpaused() == False:
                    say("Sorry nothing is playing right now")
            if (custom_action_keyword['Keywords']['VLC_music_volume'][0]
                ).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                if vlcplayer.is_vlc_playing() == True or checkvlcpaused(
                ) == True:
                    if 'set'.lower() in str(usrcmd).lower(
                    ) or 'change'.lower() in str(usrcmd).lower():
                        if 'hundred'.lower() in str(usrcmd).lower(
                        ) or 'maximum' in str(usrcmd).lower():
                            settingvollevel = 100
                            with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                json.dump(settingvollevel, vol)
                        elif 'zero'.lower() in str(usrcmd).lower(
                        ) or 'minimum' in str(usrcmd).lower():
                            settingvollevel = 0
                            with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                json.dump(settingvollevel, vol)
                        else:
                            # Takes the last number spoken as the target level.
                            for settingvollevel in re.findall(
                                    r"[-+]?\d*\.\d+|\d+", str(usrcmd)):
                                with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                    json.dump(settingvollevel, vol)
                        print('Setting volume to: ' + str(settingvollevel))
                        vlcplayer.set_vlc_volume(int(settingvollevel))
                    elif 'increase'.lower() in str(usrcmd).lower(
                    ) or 'decrease'.lower() in str(usrcmd).lower(
                    ) or 'reduce'.lower() in str(usrcmd).lower():
                        if os.path.isfile(
                                "{}/.mediavolume.json".format(USER_PATH)):
                            with open('{}/.mediavolume.json'.format(USER_PATH), 'r') as vol:
                                oldvollevel = json.load(vol)
                            for oldvollevel in re.findall(
                                    r'\b\d+\b', str(oldvollevel)):
                                oldvollevel = int(oldvollevel)
                        else:
                            # BUGFIX: was 'vlcplayer.get_vlc_volume' (missing
                            # call parens — bound the function object).
                            oldvollevel = vlcplayer.get_vlc_volume()
                            # BUGFIX: scanned undefined name 'output'
                            # (NameError); scan the fetched level instead.
                            for oldvollevel in re.findall(
                                    r"[-+]?\d*\.\d+|\d+", str(oldvollevel)):
                                oldvollevel = int(oldvollevel)
                        if 'increase'.lower() in str(usrcmd).lower():
                            if any(char.isdigit() for char in str(usrcmd)):
                                for changevollevel in re.findall(
                                        r'\b\d+\b', str(usrcmd)):
                                    changevollevel = int(changevollevel)
                            else:
                                changevollevel = 10   # default step
                            newvollevel = oldvollevel + changevollevel
                            print(newvollevel)
                            if int(newvollevel) > 100:
                                settingvollevel = 100   # BUGFIX: was '==' (no-op)
                            elif int(newvollevel) < 0:
                                settingvollevel = 0     # BUGFIX: was '==' (no-op)
                            else:
                                settingvollevel = newvollevel
                            with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                json.dump(settingvollevel, vol)
                            print('Setting volume to: ' + str(settingvollevel))
                            vlcplayer.set_vlc_volume(int(settingvollevel))
                        if 'decrease'.lower() in str(usrcmd).lower(
                        ) or 'reduce'.lower() in str(usrcmd).lower():
                            if any(char.isdigit() for char in str(usrcmd)):
                                for changevollevel in re.findall(
                                        r'\b\d+\b', str(usrcmd)):
                                    changevollevel = int(changevollevel)
                            else:
                                changevollevel = 10
                            newvollevel = oldvollevel - changevollevel
                            print(newvollevel)
                            if int(newvollevel) > 100:
                                settingvollevel = 100   # BUGFIX: was '=='
                            elif int(newvollevel) < 0:
                                settingvollevel = 0     # BUGFIX: was '=='
                            else:
                                settingvollevel = newvollevel
                            with open('{}/.mediavolume.json'.format(USER_PATH), 'w') as vol:
                                json.dump(settingvollevel, vol)
                            print('Setting volume to: ' + str(settingvollevel))
                            vlcplayer.set_vlc_volume(int(settingvollevel))
                    else:
                        say("Sorry I could not help you")
                else:
                    say("Sorry nothing is playing right now")
            if (custom_action_keyword['Keywords']['Music_index_refresh'][0]
                ).lower() in str(usrcmd).lower() and (
                    custom_action_keyword['Keywords']
                    ['Music_index_refresh'][1]
                ).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                refreshlists()
            if (custom_action_keyword['Keywords']['Google_music_streaming']
                [0]).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                vlcplayer.stop_vlc()
                gmusicselect(str(usrcmd).lower())
            if (custom_action_keyword['Keywords']
                ['Spotify_music_streaming'][0]
                ).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                vlcplayer.stop_vlc()
                spotify_playlist_select(str(usrcmd).lower())
            if (custom_action_keyword['Keywords']['Gaana_music_streaming']
                [0]).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                vlcplayer.stop_vlc()
                gaana_playlist_select(str(usrcmd).lower())
            if (custom_action_keyword['Keywords']['Deezer_music_streaming']
                [0]).lower() in str(usrcmd).lower():
                assistant.stop_conversation()
                vlcplayer.stop_vlc()
                deezer_playlist_select(str(usrcmd).lower())
    # Clean up the hotword detector once the event loop exits.
    if custom_wakeword:
        self.detector.terminate()