import copy
import croniter
import logging
import salt.loader
import salt_more

from datetime import datetime, timedelta

# NOTE: 'fromisoformat' is assumed to be provided by common_util alongside the other helpers
from common_util import dict_get, dict_find, dict_filter, fromisoformat
from messaging import EventDrivenMessageProcessor, keyword_resolve


log = logging.getLogger(__name__)

# Message processor
edmp = EventDrivenMessageProcessor("reactor")

context = {
    "cache.get": lambda *args, **kwargs: dict_get(context.get("cache", None), *args, **kwargs),
    "cache.find": lambda *args, **kwargs: dict_find(context.get("cache", {}).values(), *args, **kwargs),
}


@edmp.register_hook()
def module_handler(name, *args, **kwargs):
    """
    Calls a Salt execution module from within the minion process.
    """

    return __salt__["minionutil.run_job"](name, *args, **kwargs)
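
# Illustrative only (hypothetical call): invoking the hook runs the named execution
# module function as a minion job via 'minionutil.run_job', e.g.:
#
#   module_handler("power.sleep_timer", add="inactivity", period=600)
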

def sleep_timer(enable=None, period=1800, add=None, clear=None, refresh=None, **kwargs):
    """
    Set up a sleep timer to schedule power off upon inactivity.

    NOTE: Do not access pillar data in this function as it will not be available when called from engines (separate processes).

    Optional arguments:
      - add (str): Add a timer with the given name.
      - clear (str): Clear sleep timer(s) matching the given name. Use '*' to clear all.
      - enable (bool): Enable or disable timer. __DEPRECATED__: Use 'add' or 'clear' instead.
      - period (int): Timer period in seconds before performing sleep. Default is '1800'.
      - reason (str): Reason code that tells why we decided to sleep. Default is 'unknown'.
    """

    reason = kwargs.setdefault("reason", "unknown")

    if enable != None:
        log.warning("Using deprecated argument 'enable' - use 'add' or 'clear' instead")

    # Helper function to get all scheduled sleep timers
    def timers():
        res = __salt__["schedule.list"](return_yaml=False)

        ret = {
            k: dict(v,
                _stamp=datetime.utcnow().isoformat(),
                job_args=v.pop("args", []),
                job_kwargs=v.pop("kwargs", {}))
            for k, v in res.iteritems() if k.startswith("_sleep_timer")
        }

        return ret

    # Load configuration file if present
    config = {}
    try:
        config = __salt__["fileutil.load_yaml"]("/opt/autopi/power/sleep_timer.yml")
    except:
        log.exception("Failed to load sleep timer configuration file")

    # Clear timer(s) if requested
    if clear != None or enable == False:

        # Clear matching timers
        for name in timers():
            if clear not in [None, "*"]:
                if "_sleep_timer/{:}".format(clear) != name:
                    continue

            res = __salt__["schedule.delete"](name)

            # Trigger a cleared event
            __salt__["minionutil.trigger_event"]("system/{:}/cleared".format(name.lstrip("_")), data={"reason": reason})

    # Add timer if requested
    if add != None or enable == True:
        name = "_sleep_timer/{:}".format(add or reason)

        # Always try to delete existing timer
        res = __salt__["schedule.delete"](name)

        # Prepare keyword arguments
        kwargs = salt_more.clean_kwargs(kwargs)  # Clean up unwanted entries
        kwargs["confirm"] = True  # Ensure confirm is set

        now = datetime.utcnow()
        expiry = now + timedelta(seconds=period)

        # Add fresh timer
        res = __salt__["schedule.add"](name,
            function="power.sleep",
            job_kwargs=kwargs,
            seconds=period,
            maxrunning=1,
            return_job=False,  # Do not return info to master upon job completion
            persist=False,  # Do not persist schedule (actually this is useless because all schedules might be persisted when modified later on)
            metadata={
                "created": now.isoformat(),
                "expires": expiry.isoformat(),
                "transient": True,  # Enforce schedule is never persisted on disk and thereby not surviving minion restarts (see patch 'salt/utils/schedule.py.patch')
                "revision": 2
            })

        if res.get("result", False):

            # Ensure the newly added timer is also adjusted against suppress schedules below
            if refresh == None:
                refresh = add or reason

            # Trigger an added event
            __salt__["minionutil.trigger_event"]("system/{:}/added".format(name.lstrip("_")),
                data={"reason": reason} if not name.endswith("/{:}".format(reason)) else {})

            # Broadcast notification to all terminals
            try:
                __salt__["cmd.run"]("wall -n \"\nATTENTION ({:}):\n\nSleep timer '{:}' added which is scheduled to trigger at {:}.\nRun command 'autopi power.sleep_timer' to list active sleep timers.\n\n(Press ENTER to continue)\"".format(now, name[name.rindex("/") + 1:], expiry))
            except:
                log.exception("Failed to broadcast sleep timer added notification")

        else:
            log.error("Failed to add sleep timer '{:}': {:}".format(name, res))

    # Refresh timer(s) if requested
    if refresh != None:
        boot_delay = dict_get(config, "suppress", "boot_delay", default=60)
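        # Expected layout of /opt/autopi/power/sleep_timer.yml, shown as an illustrative
        # sketch only (inferred from the lookups in this function, not from documentation):
        #
        #   suppress:
        #     boot_delay: 60            # Extra seconds reserved for booting
        #     schedule:                 # Each entry is "<cron expression>|<duration in seconds>"
        #       - "0 2 * * *|3600"      # e.g. suppress sleep for one hour starting at 02:00 every day
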
        # Loop through all matching sleep timers
        for name, schedule in timers().iteritems():
            if refresh not in [None, "*"]:
                if "_sleep_timer/{:}".format(refresh) != name:
                    continue

            # Adjust according to suppress schedules
            for entry in dict_get(config, "suppress", "schedule", default=[]):
                try:
                    if "|" not in entry:
                        raise ValueError("No pipe sign separator found in schedule entry")

                    expression, duration = entry.split("|")

                    # Generate suppress start and end times
                    expiry = fromisoformat(schedule["metadata"]["expires"])

                    # NOTE: If a datetime is given to croniter which exactly matches the expression, the same datetime will be returned for both get_prev and get_next.
                    for suppress_start in [croniter.croniter(expression.strip(), expiry).get_prev(datetime),
                                           croniter.croniter(expression.strip(), expiry).get_next(datetime)]:
                        suppress_end = suppress_start + timedelta(seconds=int(duration.strip()))

                        # Calculate sleep start and end times
                        sleep_start = expiry
                        sleep_end = expiry + timedelta(seconds=schedule["job_kwargs"].get("interval", 86400) + boot_delay)  # Also add time for booting

                        # Proceed if we have an overlap
                        if sleep_start < suppress_end and sleep_end > suppress_start:
                            log.info("Sleep timer '{:}' sleep period from {:} to {:} overlaps with sleep suppress period from {:} to {:}".format(name, sleep_start, sleep_end, suppress_start, suppress_end))

                            now = datetime.utcnow()

                            # Is it possible to reduce sleeping time?
                            if schedule["job_kwargs"].get("interval", 0) > 0 \
                                and sleep_start < suppress_start \
                                and (suppress_start - sleep_start).total_seconds() > boot_delay:
                                state = "reduced"

                                old_interval = schedule["job_kwargs"]["interval"]
                                new_interval = int((suppress_start - sleep_start).total_seconds() - boot_delay)  # Subtract time for booting from the sleeping time

                                log.warning("Reducing sleeping time of sleep timer '{:}' from {:} to {:} seconds due to suppress schedule: {:}".format(name, old_interval, new_interval, entry))

                                # Set reduced sleeping time
                                schedule["job_kwargs"]["interval"] = new_interval

                                # Also update fire time to match the originally scheduled expiry
                                schedule["seconds"] = (expiry - now).total_seconds()

                            # Or must sleep be postponed?
                            else:
                                state = "postponed"

                                old_period = schedule["seconds"]
                                new_period = (suppress_end - now).total_seconds()

                                log.warning("Postponing sleep timer '{:}' from {:} to {:} seconds due to suppress schedule: {:}".format(name, old_period, new_period, entry))

                                # Set postponed fire time
                                schedule["seconds"] = new_period

                            # Calculate expiry time (may be unchanged)
                            expiry = now + timedelta(seconds=schedule["seconds"])

                            # Update metadata
                            schedule["metadata"]["updated"] = now.isoformat()
                            schedule["metadata"]["expires"] = expiry.isoformat()

                            # Modify existing timer
                            res = __salt__["schedule.modify"](**schedule)
                            if res.get("result", False):

                                # Trigger a modified event
                                __salt__["minionutil.trigger_event"]("system/{:}/{:}".format(name.lstrip("_"), state))

                                # Broadcast notification to all terminals
                                try:
                                    __salt__["cmd.run"]("wall -n \"\nATTENTION ({:}):\n\nSleep timer '{:}' has been {:} due to sleep suppress rule and is scheduled to trigger at {:}.\nRun command 'autopi power.sleep_timer' to list active sleep timers.\n\n(Press ENTER to continue)\"".format(now, name[name.rindex("/") + 1:], state, expiry))
                                except:
                                    log.exception("Failed to broadcast sleep timer modified notification")

                            else:
                                log.error("Failed to modify sleep timer '{:}': {:}".format(name, res))

                        elif log.isEnabledFor(logging.DEBUG):
                            log.debug("Sleep timer '{:}' does not overlap with suppress schedule: {:}".format(name, entry))

                except:
                    log.exception("Failed to process suppress schedule for sleep timer '{:}': {:}".format(name, entry))

    # Return all existing timer(s)
    return timers()
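
# Usage sketch (illustrative only - the argument values below are examples, while the
# 'autopi power.sleep_timer' command itself is the one referenced in the broadcast
# notifications above):
#
#   autopi power.sleep_timer                                # List active sleep timers
#   autopi power.sleep_timer add=inactivity period=600 reason=inactivity
#   autopi power.sleep_timer clear='*'                      # Clear all sleep timers
#   autopi power.sleep_timer refresh='*'                    # Re-apply suppress schedules to all timers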