async def init_admin_stats(self):
    """Create the admin sensor entities used to track callback/thread stats.

    Numeric counters start at 0; the two timestamp sensors start at the
    Unix epoch so "never happened yet" is representable.
    """
    epoch = utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0))
    counters = (
        "sensor.callbacks_total_fired",
        "sensor.callbacks_average_fired",
        "sensor.callbacks_total_executed",
        "sensor.callbacks_average_executed",
        "sensor.threads_current_busy",
        "sensor.threads_max_busy",
    )
    for sensor in counters:
        await self.add_entity("admin", sensor, 0)
    for sensor in ("sensor.threads_max_busy_time", "sensor.threads_last_action_time"):
        await self.add_entity("admin", sensor, epoch)
async def add_entity(self, namespace, entity, state, attributes=None):
    """Register a new entity in *namespace* and announce it.

    No-op if the entity already exists. Otherwise stores the initial
    state record (last_changed pinned to the epoch) and schedules an
    ``__AD_ENTITY_ADDED`` event on the AD loop.
    """
    if await self.entity_exists(namespace, entity):
        return

    # Non-dict attributes (including None) are treated as "no attributes"
    attrs = dict(attributes) if isinstance(attributes, dict) else {}

    record = {
        "entity_id": entity,
        "state": state,
        "last_changed": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
        "attributes": attrs,
    }
    self.state[namespace][entity] = record

    event = {
        "event_type": "__AD_ENTITY_ADDED",
        "data": {"entity_id": entity, "state": record},
    }
    # Scheduled, not awaited, so entity creation can't deadlock on event processing
    self.AD.loop.create_task(self.AD.events.process_event(namespace, event))
async def add_thread(self, silent=False, pinthread=False, id=None):
    """Create and start a worker thread.

    :param silent: if True, suppress the "Adding thread" log line.
    :param pinthread: if True (and this is a new thread), count it as a
        pinned thread.
    :param id: when given, re-create the thread with this existing id
        (its admin entity already exists and is just reset to idle);
        when None, allocate the next id and register a new admin entity.
    """
    tid = self.thread_count if id is None else id
    if silent is False:
        self.logger.info("Adding thread %s", tid)

    name = "thread-{}".format(tid)
    # name/daemon passed to the constructor: Thread.setName() is deprecated
    # (since Python 3.10) and daemon threads never block interpreter shutdown.
    t = threading.Thread(target=self.worker, name=name, daemon=True)

    if id is None:
        # Brand-new thread: create its admin entity and work queue, then start it
        await self.add_entity(
            "admin",
            "thread.{}".format(name),
            "idle",
            {
                "q": 0,
                "is_alive": True,
                "time_called": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
            },
        )
        self.threads[name] = {"queue": Queue(maxsize=0)}
        t.start()
        self.thread_count += 1
        if pinthread is True:
            self.pin_threads += 1
    else:
        # Replacing a dead thread: the admin entity exists, just reset it
        await self.set_state(
            "_threading",
            "admin",
            "thread.{}".format(name),
            state="idle",
            is_alive=True,
        )

    self.threads[name]["thread"] = t
async def create_initial_threads(self):
    """Create the worker thread pool at startup.

    Thread count comes from the ``total_threads`` config option when
    given, otherwise one thread per active app. Honours the
    ``pin_apps``/``pin_threads`` options and finally registers the
    ``thread.async`` admin entity that tracks work done on the event
    loop itself.

    :raises ValueError: if ``pin_threads`` is negative or greater than
        ``total_threads``.
    """
    kwargs = self.kwargs

    # "threads" is the legacy directive; it no longer controls the pool size
    if "threads" in kwargs:
        self.logger.warning(
            "Threads directive is deprecated apps - will be pinned. Use total_threads if you want to unpin your apps"
        )

    if "total_threads" in kwargs:
        # Explicit pool size disables automatic pinning
        self.total_threads = kwargs["total_threads"]
        self.auto_pin = False
    else:
        # Default: one worker thread per active app
        apps = await self.AD.app_management.check_config(True, False)
        self.total_threads = int(apps["active"])

    self.pin_apps = True
    utils.process_arg(self, "pin_apps", kwargs)

    if self.pin_apps is True:
        self.pin_threads = self.total_threads
    else:
        self.auto_pin = False
        self.pin_threads = 0
        # Unpinned with no explicit size: fall back to a fixed pool of 10
        if "total_threads" not in kwargs:
            self.total_threads = 10

    utils.process_arg(self, "pin_threads", kwargs, int=True)

    if self.pin_threads > self.total_threads:
        raise ValueError("pin_threads cannot be > total_threads")

    if self.pin_threads < 0:
        raise ValueError("pin_threads cannot be < 0")

    self.logger.info(
        "Starting Apps with %s workers and %s pins",
        self.total_threads,
        self.pin_threads,
    )

    # Unpinned work starts being round-robined after the pinned block
    self.next_thread = self.pin_threads

    self.thread_count = 0
    for i in range(self.total_threads):
        await self.add_thread(True)

    # Add thread object to track async
    await self.add_entity(
        "admin",
        "thread.async",
        "idle",
        {
            "q": 0,
            "is_alive": True,
            "time_called": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
            "pinned_apps": [],
        },
    )
async def set_state(self, name, namespace, entity, **kwargs):
    """Set an entity's state in *namespace*.

    If the namespace's plugin exposes ``set_plugin_state``, the change
    is sent to the plugin (the authoritative state update is expected
    to come back via a plugin event); otherwise the state is stored
    locally and a ``state_changed`` event is scheduled.

    :param name: name of the caller (used only for logging).
    :param kwargs: state/attribute values; ``_silent=True`` suppresses
        the "Entity created" log line.
    :return: the new state record.
    """
    self.logger.debug("set_state(): %s, %s", entity, kwargs)
    # Snapshot the previous record (deep copy so the event payload can't
    # alias the live store); a synthetic empty record for new entities
    if entity in self.state[namespace]:
        old_state = deepcopy(self.state[namespace][entity])
    else:
        old_state = {"state": None, "attributes": {}}

    new_state = self.parse_state(entity, namespace, **kwargs)
    new_state["last_changed"] = utils.dt_to_str(
        (await self.AD.sched.get_now()).replace(microsecond=0), self.AD.tz)
    self.logger.debug("Old state: %s", old_state)
    self.logger.debug("New state: %s", new_state)

    if not await self.AD.state.entity_exists(namespace, entity):
        if not ("_silent" in kwargs and kwargs["_silent"] is True):
            self.logger.info("%s: Entity %s created in namespace: %s",
                             name, entity, namespace)

    # Fire the plugin's state update if it has one
    plugin = await self.AD.plugins.get_plugin_object(namespace)

    if hasattr(plugin, "set_plugin_state"):
        # We assume that the state change will come back to us via the plugin
        self.logger.debug("sending event to plugin")
        result = await plugin.set_plugin_state(
            namespace, entity, state=new_state["state"],
            attributes=new_state["attributes"])
        if result is not None:
            # Plugin returned a state record immediately - store it now
            if "entity_id" in result:
                result.pop("entity_id")
            self.state[namespace][entity] = self.parse_state(
                entity, namespace, **result)
    else:
        # Set the state locally
        self.state[namespace][entity] = new_state
        # Fire the event locally
        self.logger.debug("sending event locally")
        data = {
            "event_type": "state_changed",
            "data": {
                "entity_id": entity,
                "new_state": new_state,
                "old_state": old_state
            },
        }
        #
        # Schedule this rather than awaiting to avoid locking ourselves out
        #
        # await self.AD.events.process_event(namespace, data)
        self.AD.loop.create_task(
            self.AD.events.process_event(namespace, data))

    return new_state
async def update_thread_info(self, thread_id, callback, app, type, uuid):
    """Record a worker-thread transition in the admin sensors.

    Called when a thread starts a callback or returns to ``"idle"``.
    Updates the busy-thread gauges, per-app/per-callback counters, the
    high-water-mark sensors and the per-thread admin entity.

    :param callback: callback name, or the literal string ``"idle"``
        when the thread has just finished one.
    :param type: callback type (e.g. used to build the
        ``{type}_callback.{uuid}`` admin entity name).
    """
    self.logger.debug("Update thread info: %s", thread_id)
    if self.AD.log_thread_actions:
        if callback == "idle":
            self.diag.info(
                "%s done", thread_id)
        else:
            self.diag.info(
                "%s calling %s callback %s", thread_id, type, callback)

    now = await self.AD.sched.get_now()
    if callback == "idle":
        # Thread finished a callback: warn if it overran the threshold,
        # then decrement busy count and bump the "executed" counters.
        start = utils.str_to_dt(await self.get_state(
            "_threading", "admin", "thread.{}".format(thread_id),
            attribute="time_called"))
        if self.AD.sched.realtime is True and (
                now - start).total_seconds() >= self.AD.thread_duration_warning_threshold:
            self.logger.warning(
                "callback %s has now completed",
                await self.get_state("_threading", "admin",
                                     "thread.{}".format(thread_id)))
        await self.add_to_state("_threading", "admin",
                                "sensor.threads_current_busy", -1)
        await self.add_to_attr("_threading", "admin",
                               "app.{}".format(app), "callbacks", 1)
        await self.add_to_attr("_threading", "admin",
                               "{}_callback.{}".format(type, uuid),
                               "executed", 1)
        await self.add_to_state("_threading", "admin",
                                "sensor.callbacks_total_executed", 1)
        self.current_callbacks_executed += 1
    else:
        # Thread picked up a callback: increment busy count
        await self.add_to_state("_threading", "admin",
                                "sensor.threads_current_busy", 1)
        self.current_callbacks_fired += 1

    # Track the busy high-water mark and when it was reached
    current_busy = await self.get_state("_threading", "admin",
                                        "sensor.threads_current_busy")
    max_busy = await self.get_state("_threading", "admin",
                                    "sensor.threads_max_busy")
    if current_busy > max_busy:
        await self.set_state("_threading", "admin",
                             "sensor.threads_max_busy", state=current_busy)
        await self.set_state(
            "_threading", "admin", "sensor.threads_max_busy_time",
            state=utils.dt_to_str(
                (await self.AD.sched.get_now()).replace(microsecond=0),
                self.AD.tz))

    await self.set_state(
        "_threading", "admin", "sensor.threads_last_action_time",
        state=utils.dt_to_str(
            (await self.AD.sched.get_now()).replace(microsecond=0),
            self.AD.tz))

    # Update thread info
    await self.set_state(
        "_threading",
        "admin",
        "thread.{}".format(thread_id),
        q=self.threads[thread_id]["queue"].qsize(),
        state=callback,
        time_called=utils.dt_to_str(now.replace(microsecond=0), self.AD.tz),
        is_alive=self.threads[thread_id]["thread"].is_alive(),
        pinned_apps=await self.get_pinned_apps(thread_id)
    )
    await self.set_state("_threading", "admin",
                         "app.{}".format(app), state=callback)
async def add_entity(self, namespace, entity, state, attributes=None):
    """Create an entity in *namespace* and fire ``__AD_ENTITY_ADDED``.

    :param state: the entity's initial state value.
    :param attributes: optional attribute dict; copied so later changes
        to the caller's dict (or to the stored record) cannot leak
        through to the other side.
    """
    # Defensive copy: the original aliased the caller's dict directly,
    # so mutating stored attributes mutated the caller's object too.
    attrs = {} if attributes is None else dict(attributes)

    state = {
        "state": state,
        "last_changed": utils.dt_to_str(datetime.datetime(1970, 1, 1, 0, 0, 0, 0)),
        "attributes": attrs,
    }
    self.state[namespace][entity] = state

    data = {
        "event_type": "__AD_ENTITY_ADDED",
        "data": {
            "entity_id": entity,
            "state": state,
        },
    }
    await self.AD.events.process_event(namespace, data)
async def get_history_api(self, **kwargs):
    """Build a Home Assistant ``/api/history/period`` URL from kwargs.

    Recognised kwargs: ``entity_id``, ``days``, ``start_time`` and
    ``end_time`` (time values may be ISO strings or ``datetime``
    objects; datetimes are localized to the configured timezone).

    :return: the fully-formed history API URL as a string.
    :raises ValueError: if a time value is neither a string nor a
        ``datetime``.
    """
    # Entity filter becomes a query fragment; "" means "no filter"
    if "entity_id" in kwargs and kwargs["entity_id"] != "":
        filter_entity_id = "?filter_entity_id={}".format(
            kwargs["entity_id"])
    else:
        filter_entity_id = ""

    # "" doubles as the "not supplied" sentinel for both time values
    start_time = ""
    end_time = ""

    if "days" in kwargs:
        days = kwargs["days"]
        # clamp to a minimum look-back of one day
        if days - 1 < 0:
            days = 1
    else:
        days = 1

    if "start_time" in kwargs:
        if isinstance(kwargs["start_time"], str):
            start_time = utils.str_to_dt(
                kwargs["start_time"]).replace(microsecond=0)
        elif isinstance(kwargs["start_time"], datetime.datetime):
            start_time = self.AD.tz.localize(
                kwargs["start_time"]).replace(microsecond=0)
        else:
            raise ValueError("Invalid type for start time")

    if "end_time" in kwargs:
        if isinstance(kwargs["end_time"], str):
            end_time = utils.str_to_dt(
                kwargs["end_time"]).replace(microsecond=0)
        elif isinstance(kwargs["end_time"], datetime.datetime):
            end_time = self.AD.tz.localize(
                kwargs["end_time"]).replace(microsecond=0)
        else:
            raise ValueError("Invalid type for end time")

    # if both are declared, it can't process entity_id
    if start_time != "" and end_time != "":
        filter_entity_id = ""

    # if starttime is not declared and entity_id is declared, and days specified
    elif (filter_entity_id != "" and start_time == "") and "days" in kwargs:
        start_time = (await self.AD.sched.get_now()).replace(
            microsecond=0) - datetime.timedelta(days=days)

    # if starttime is declared and entity_id is not declared, and days specified
    elif filter_entity_id == "" and start_time != "" and end_time == "" and "days" in kwargs:
        end_time = start_time + datetime.timedelta(days=days)

    # if endtime is declared and entity_id is not declared, and days specified
    elif filter_entity_id == "" and end_time != "" and start_time == "" and "days" in kwargs:
        start_time = end_time - datetime.timedelta(days=days)

    if start_time != "":
        timestamp = "/{}".format(
            utils.dt_to_str(start_time.replace(microsecond=0), self.AD.tz))

        if filter_entity_id != "":
            # if entity_id is specified, end_time cannot be used
            end_time = ""

        if end_time != "":
            # URL-quote: dt_to_str output contains ':' and '+' characters
            end_time = "?end_time={}".format(
                quote(
                    utils.dt_to_str(end_time.replace(microsecond=0),
                                    self.AD.tz)))

    # if no start_time is specified, other parameters are invalid
    else:
        timestamp = ""
        end_time = ""

    return "{}/api/history/period{}{}{}".format(self.config["ha_url"],
                                                timestamp,
                                                filter_entity_id,
                                                end_time)
async def insert_schedule(self, name, aware_dt, callback, repeat, type_, **kwargs):
    """Insert a new entry into the scheduler for app *name*.

    :param name: name of the owning app.
    :param aware_dt: timezone-aware datetime at which the entry fires.
    :param callback: function to run, or None for a cancellation entry.
    :param repeat: whether the entry re-arms itself after firing.
    :param type_: schedule type (e.g. a sun-related type).
    :param kwargs: may include ``pin``, ``pin_thread``, ``interval``
        and offset-related options consumed by ``get_offset()``.
    :return: an opaque handle usable to cancel the entry.
    """
    # aware_dt will include a timezone of some sort - convert to utc timezone
    utc = aware_dt.astimezone(pytz.utc)

    # Round to nearest second
    utc = self.my_dt_round(utc, base=1)

    # Explicit pin arguments override the app's configured pinning
    if "pin" in kwargs:
        pin_app = kwargs["pin"]
    else:
        pin_app = self.AD.app_management.objects[name]["pin_app"]

    if "pin_thread" in kwargs:
        pin_thread = kwargs["pin_thread"]
        # requesting a specific thread implies the app is pinned
        pin_app = True
    else:
        pin_thread = self.AD.app_management.objects[name]["pin_thread"]

    if name not in self.schedule:
        self.schedule[name] = {}

    handle = uuid.uuid4().hex
    # any offset requested via kwargs (e.g. sun offsets / jitter)
    c_offset = self.get_offset({"kwargs": kwargs})
    ts = utc + timedelta(seconds=c_offset)
    interval = kwargs.get("interval", 0)

    self.schedule[name][handle] = {
        "name": name,
        "id": self.AD.app_management.objects[name]["id"],
        "callback": callback,
        "timestamp": ts,
        "interval": interval,
        "basetime": utc,
        "repeat": repeat,
        "offset": c_offset,
        "type": type_,
        "pin_app": pin_app,
        "pin_thread": pin_thread,
        "kwargs": kwargs,
    }

    if callback is None:
        function_name = "cancel_callback"
    else:
        function_name = callback.__name__

    # Mirror the entry as an admin entity for the admin interface
    await self.AD.state.add_entity(
        "admin",
        "scheduler_callback.{}".format(handle),
        "active",
        {
            "app": name,
            "execution_time": utils.dt_to_str(ts.replace(microsecond=0), self.AD.tz),
            "repeat": str(datetime.timedelta(seconds=interval)),
            "function": function_name,
            "pinned": pin_app,
            "pinned_thread": pin_thread,
            "fired": 0,
            "executed": 0,
            "kwargs": kwargs,
        },
    )

    # verbose_log(conf.logger, "INFO", conf.schedule[name][handle])

    if self.active is True:
        # NOTE(review): kick() appears to wake the scheduler loop so the
        # new entry is considered immediately - confirm against kick()
        await self.kick()

    return handle
async def exec_schedule(self, name, args, uuid_):
    """Execute a due scheduler entry for app *name*.

    Dispatches the entry's callback to a worker (state-style dispatch
    for "duration" entries created from state callbacks, timeout
    cleanup for state/event/log timeout entries, scheduler dispatch
    otherwise), then re-arms repeating entries or deletes one-shot
    ones. On any error the entry is logged and removed so a bad entry
    cannot wedge the scheduler.

    :param args: the schedule entry dict (see insert_schedule).
    :param uuid_: the entry's handle / admin-entity id.
    """
    try:
        # Call function
        if "__entity" in args["kwargs"]:
            #
            # it's a "duration" entry
            #
            # first remove the duration parameter
            if args["kwargs"].get("__duration"):
                del args["kwargs"]["__duration"]
            executed = await self.AD.threading.dispatch_worker(
                name,
                {
                    "id": uuid_,
                    "name": name,
                    "objectid": self.AD.app_management.objects[name]["id"],
                    "type": "state",
                    "function": args["callback"],
                    "attribute": args["kwargs"]["__attribute"],
                    "entity": args["kwargs"]["__entity"],
                    "new_state": args["kwargs"]["__new_state"],
                    "old_state": args["kwargs"]["__old_state"],
                    "pin_app": args["pin_app"],
                    "pin_thread": args["pin_thread"],
                    "kwargs": args["kwargs"],
                },
            )

            if executed is True:
                # oneshot state callbacks are removed once they have run
                remove = args["kwargs"].get("oneshot", False)
                if remove is True:
                    await self.AD.state.cancel_state_callback(
                        args["kwargs"]["__handle"], name)

                if "__timeout" in args["kwargs"] and self.timer_running(
                    name, args["kwargs"]["__timeout"]
                ):  # meaning there is a timeout for this callback
                    await self.cancel_timer(
                        name, args["kwargs"]["__timeout"]
                    )  # cancel it as no more needed

        elif "__state_handle" in args["kwargs"]:
            #
            # It's a state timeout entry - just delete the callback
            #
            await self.AD.state.cancel_state_callback(
                args["kwargs"]["__state_handle"], name)
        elif "__event_handle" in args["kwargs"]:
            #
            # It's an event timeout entry - just delete the callback
            #
            await self.AD.events.cancel_event_callback(
                name, args["kwargs"]["__event_handle"])
        elif "__log_handle" in args["kwargs"]:
            #
            # It's a log timeout entry - just delete the callback
            #
            await self.AD.logging.cancel_log_callback(
                name, args["kwargs"]["__log_handle"])
        else:
            #
            # A regular callback
            #
            await self.AD.threading.dispatch_worker(
                name,
                {
                    "id": uuid_,
                    "name": name,
                    "objectid": self.AD.app_management.objects[name]["id"],
                    "type": "scheduler",
                    "function": args["callback"],
                    "pin_app": args["pin_app"],
                    "pin_thread": args["pin_thread"],
                    "kwargs": args["kwargs"],
                },
            )
        # If it is a repeating entry, rewrite with new timestamp
        if args["repeat"]:
            if args["type"] == "next_rising" or args[
                    "type"] == "next_setting":
                # sun-based entries recompute from the next sun event
                c_offset = self.get_offset(args)
                args["timestamp"] = self.sun(args["type"], c_offset)
                args["offset"] = c_offset
            else:
                # Not sunrise or sunset so just increment
                # the timestamp with the repeat interval
                args["basetime"] += timedelta(seconds=args["interval"])
                args["timestamp"] = args["basetime"] + timedelta(
                    seconds=self.get_offset(args))
            # Update entity
            await self.AD.state.set_state(
                "_scheduler",
                "admin",
                "scheduler_callback.{}".format(uuid_),
                execution_time=utils.dt_to_str(
                    args["timestamp"].replace(microsecond=0), self.AD.tz),
            )
        else:
            # Otherwise just delete
            await self.AD.state.remove_entity(
                "admin", "scheduler_callback.{}".format(uuid_))
            del self.schedule[name][uuid_]

    except Exception:
        # Log the failure and delete the entry so it cannot fire again
        error_logger = logging.getLogger("Error.{}".format(name))
        error_logger.warning("-" * 60)
        error_logger.warning(
            "Unexpected error during exec_schedule() for App: %s", name)
        error_logger.warning("Args: %s", args)
        error_logger.warning("-" * 60)
        error_logger.warning(traceback.format_exc())
        error_logger.warning("-" * 60)
        if self.AD.logging.separate_error_log() is True:
            self.logger.warning("Logged an error to %s",
                                self.AD.logging.get_filename("error_log"))
        error_logger.warning("Scheduler entry has been deleted")
        error_logger.warning("-" * 60)

        await self.AD.state.remove_entity(
            "admin", "scheduler_callback.{}".format(uuid_))
        del self.schedule[name][uuid_]
async def loop(self):
    """The main utility loop. Loops until stop() is called, checks for file changes, overdue threads, thread starvation, and schedules regular state refreshes.

    Startup sequence: admin stats, worker threads, web server, plugin
    wait, per-namespace service registration, scheduler task, app
    init, then the periodic housekeeping loop. On exit, apps and the
    web server are shut down.
    """
    #
    # Setup
    #

    await self.AD.threading.init_admin_stats()

    await self.AD.threading.create_initial_threads()
    await self.AD.app_management.init_admin_stats()

    #
    # Start the web server
    #

    if self.AD.http is not None:
        await self.AD.http.start_server()

    #
    # Wait for all plugins to initialize
    #

    await self.AD.plugins.wait_for_plugins()

    if not self.stopping:
        # Create timer loop
        self.logger.debug("Starting timer loop")

        for ns in await self.AD.state.list_namespaces():
            #
            # Register state services
            #
            # only default, rules or it belongs to a local plugin. Don't allow for admin/appdaemon/global namespaces
            if ns in [
                    "default", "rules"
            ] or ns in self.AD.plugins.plugin_objs or ns in self.AD.namespaces:
                self.AD.services.register_service(
                    ns, "state", "add_namespace", self.AD.state.state_services)
                self.AD.services.register_service(
                    ns, "state", "add_entity", self.AD.state.state_services)
                self.AD.services.register_service(
                    ns, "state", "set", self.AD.state.state_services)
                self.AD.services.register_service(
                    ns, "state", "remove_namespace", self.AD.state.state_services)
                self.AD.services.register_service(
                    ns, "state", "remove_entity", self.AD.state.state_services)

            #
            # Register fire_event services
            #
            self.AD.services.register_service(
                ns, "event", "fire", self.AD.events.event_services)

        #
        # Register run_sequence service
        #
        self.AD.services.register_service(
            "rules", "sequence", "run", self.AD.sequences.run_sequence_service)
        self.AD.services.register_service(
            "rules", "sequence", "cancel", self.AD.sequences.run_sequence_service)

        #
        # Register production_mode service
        #
        self.AD.services.register_service("admin", "production_mode", "set",
                                          self.production_mode_service)

        #
        # Register logging services
        #
        self.AD.services.register_service("admin", "logs", "get_admin",
                                          self.AD.logging.manage_services)

        #
        # Start the scheduler
        #
        self.AD.loop.create_task(self.AD.sched.loop())

        if self.AD.apps is True:
            self.logger.debug("Reading Apps")

            await self.AD.app_management.check_app_updates(mode="init")

            self.logger.info("App initialization complete")

        #
        # Fire APPD Started Event
        #
        await self.AD.events.process_event("global", {
            "event_type": "appd_started",
            "data": {}
        })

        self.booted = await self.AD.sched.get_now()

        await self.AD.state.add_entity("admin", "sensor.appdaemon_version",
                                       utils.__version__)
        await self.AD.state.add_entity("admin", "sensor.appdaemon_uptime",
                                       str(datetime.timedelta(0)))
        await self.AD.state.add_entity(
            "admin",
            "sensor.appdaemon_booted",
            utils.dt_to_str(
                (await self.AD.sched.get_now()).replace(microsecond=0),
                self.AD.tz),
        )

        warning_step = 0
        warning_iterations = 0

        # Start the loop proper

        while not self.stopping:
            # s1/e1 bracket check_app_updates(); reset each pass so the
            # reported duration is 0 when the check is skipped
            s1 = 0
            e1 = 0

            start_time = datetime.datetime.now().timestamp()

            try:
                if self.AD.apps is True:
                    if self.AD.production_mode is False:
                        # Check to see if config has changed
                        s1 = datetime.datetime.now().timestamp()

                        await self.AD.app_management.check_app_updates()

                        e1 = datetime.datetime.now().timestamp()

                # Call me suspicious, but lets update state from the plugins periodically
                await self.AD.plugins.update_plugin_state()

                # Check for thread starvation
                (
                    warning_step,
                    warning_iterations,
                ) = await self.AD.threading.check_q_size(
                    warning_step, warning_iterations)

                # Check for any overdue threads
                await self.AD.threading.check_overdue_and_dead_threads()

                # Save any hybrid namespaces
                self.AD.state.save_hybrid_namespaces()

                # Run utility for each plugin
                self.AD.plugins.run_plugin_utility()

                # Update uptime sensor
                uptime = (await self.AD.sched.get_now()).replace(
                    microsecond=0) - self.booted.replace(microsecond=0)

                await self.AD.state.set_state(
                    "_utility",
                    "admin",
                    "sensor.appdaemon_uptime",
                    state=str(uptime),
                )

            except Exception:
                self.logger.warning("-" * 60)
                self.logger.warning("Unexpected error during utility()")
                self.logger.warning("-" * 60)
                self.logger.warning(traceback.format_exc())
                self.logger.warning("-" * 60)

            end_time = datetime.datetime.now().timestamp()

            # durations truncated to whole milliseconds
            loop_duration = (int(
                (end_time - start_time) * 1000) / 1000) * 1000
            check_app_updates_duration = (int(
                (e1 - s1) * 1000) / 1000) * 1000

            self.logger.debug(
                "Util loop compute time: %sms, check_config()=%sms, other=%sms",
                loop_duration,
                check_app_updates_duration,
                loop_duration - check_app_updates_duration,
            )
            if self.AD.sched.realtime is True and loop_duration > (
                    self.AD.max_utility_skew * 1000):
                self.logger.warning(
                    "Excessive time spent in utility loop: %sms, %sms in check_app_updates(), %sms in other",
                    loop_duration,
                    check_app_updates_duration,
                    loop_duration - check_app_updates_duration,
                )
                if self.AD.check_app_updates_profile is True:
                    self.logger.info(
                        "Profile information for Utility Loop")
                    self.logger.info(self.AD.app_management.
                                     check_app_updates_profile_stats)

            await asyncio.sleep(self.AD.utility_delay)

    #
    # Shutting down now
    #

    #
    # Stop apps
    #
    if self.AD.app_management is not None:
        await self.AD.app_management.terminate()

    #
    # Shutdown webserver
    #
    if self.AD.http is not None:
        await self.AD.http.stop_server()
async def exec_schedule(self, name, args, uuid_):
    """Execute a due scheduler entry for app *name*.

    Dispatches the entry's callback to a worker thread, then re-arms
    repeating entries or deletes one-shot ones. Any error is logged
    and the offending entry is deleted so it cannot wedge the
    scheduler.

    :param args: the schedule entry dict (see insert_schedule).
    :param uuid_: the entry's handle / admin-entity id.
    """
    try:
        if "inactive" in args:
            return
        # Call function
        if "__entity" in args["kwargs"]:
            # Duration-style entry created from a state callback:
            # dispatch with the original state-change context.
            # NOTE(review): kwargs is passed by reference here but
            # deep-copied in the scheduler branch below - confirm the
            # asymmetry is intentional before changing it.
            await self.AD.threading.dispatch_worker(
                name, {
                    "id": uuid_,
                    "name": name,
                    "objectid": self.AD.app_management.objects[name]["id"],
                    "type": "state",
                    "function": args["callback"],
                    "attribute": args["kwargs"]["__attribute"],
                    "entity": args["kwargs"]["__entity"],
                    "new_state": args["kwargs"]["__new_state"],
                    "old_state": args["kwargs"]["__old_state"],
                    "pin_app": args["pin_app"],
                    "pin_thread": args["pin_thread"],
                    "kwargs": args["kwargs"],
                })
        else:
            await self.AD.threading.dispatch_worker(
                name, {
                    "id": uuid_,
                    "name": name,
                    "objectid": self.AD.app_management.objects[name]["id"],
                    "type": "scheduler",
                    "function": args["callback"],
                    "pin_app": args["pin_app"],
                    "pin_thread": args["pin_thread"],
                    "kwargs": deepcopy(args["kwargs"]),
                })
        # If it is a repeating entry, rewrite with new timestamp
        if args["repeat"]:
            if args["type"] == "next_rising" or args[
                    "type"] == "next_setting":
                # sun-based entries recompute from the next sun event
                c_offset = self.get_offset(args)
                args["timestamp"] = self.sun(args["type"], c_offset)
                args["offset"] = c_offset
            else:
                # Not sunrise or sunset so just increment
                # the timestamp with the repeat interval
                args["basetime"] += timedelta(seconds=args["interval"])
                args["timestamp"] = args["basetime"] + timedelta(
                    seconds=self.get_offset(args))
            # Update entity
            await self.AD.state.set_state(
                "_scheduler",
                "admin",
                "scheduler_callback.{}".format(uuid_),
                execution_time=utils.dt_to_str(
                    args["timestamp"].replace(microsecond=0), self.AD.tz))
        else:
            # Otherwise just delete
            await self.AD.state.remove_entity(
                "admin", "scheduler_callback.{}".format(uuid_))
            del self.schedule[name][uuid_]
    except Exception:
        # Narrowed from a bare "except:", which also caught SystemExit,
        # KeyboardInterrupt and asyncio.CancelledError and then deleted
        # the schedule entry as a side effect.
        error_logger = logging.getLogger("Error.{}".format(name))
        error_logger.warning('-' * 60)
        error_logger.warning(
            "Unexpected error during exec_schedule() for App: %s", name)
        error_logger.warning("Args: %s", args)
        error_logger.warning('-' * 60)
        error_logger.warning(traceback.format_exc())
        error_logger.warning('-' * 60)
        if self.AD.logging.separate_error_log() is True:
            self.logger.warning("Logged an error to %s",
                                self.AD.logging.get_filename("error_log"))
        error_logger.warning("Scheduler entry has been deleted")
        error_logger.warning('-' * 60)

        await self.AD.state.remove_entity(
            "admin", "scheduler_callback.{}".format(uuid_))
        del self.schedule[name][uuid_]
async def loop(self):
    """ The main utility loop. Loops until stop() is called, checks for file changes, overdue threads, thread starvation, and schedules regular state refreshes

    Startup sequence: admin stats, worker threads, plugin wait,
    metadata check (may trigger shutdown), scheduler creation, app
    init, then the periodic housekeeping loop. Apps are terminated on
    exit.
    """
    #
    # Setup
    #

    await self.AD.threading.init_admin_stats()
    await self.AD.threading.create_initial_threads()

    #
    # Wait for all plugins to initialize
    #
    await self.AD.plugins.wait_for_plugins()

    # Check if we need to bail due to missing metadata
    if self.AD.plugins.required_meta_check() is False:
        if self.AD.stop_function is not None:
            self.AD.stop_function()
        else:
            self.stop()

    if not self.stopping:
        #
        # All plugins are loaded and we have initial state
        # We also have metadata so we can initialise the scheduler
        #
        self.AD.sched = scheduler.Scheduler(self.AD)

        # Create timer loop
        self.logger.debug("Starting timer loop")
        self.AD.loop.create_task(self.AD.sched.loop())

        if self.AD.apps is True:
            self.logger.debug("Reading Apps")
            await self.AD.app_management.check_app_updates()
            self.logger.info("App initialization complete")

        #
        # Fire APPD Started Event
        #
        await self.AD.events.process_event("global", {
            "event_type": "appd_started",
            "data": {}
        })

        self.booted = await self.AD.sched.get_now()
        await self.AD.state.add_entity("admin", "sensor.appdaemon_version",
                                       utils.__version__)
        await self.AD.state.add_entity("admin", "sensor.appdaemon_uptime",
                                       str(datetime.timedelta(0)))
        await self.AD.state.add_entity(
            "admin", "sensor.appdaemon_booted",
            utils.dt_to_str(
                (await self.AD.sched.get_now()).replace(microsecond=0),
                self.AD.tz))

        warning_step = 0
        warning_iterations = 0

        # Start the loop proper
        while not self.stopping:
            start_time = datetime.datetime.now().timestamp()

            # Reset per iteration (previously initialized once before the
            # loop) so check_app_updates_duration reads 0 whenever
            # check_app_updates() is skipped, instead of reusing stale
            # timings from an earlier pass.
            s1 = 0
            e1 = 0

            try:
                if self.AD.apps is True:
                    if self.AD.production_mode is False:
                        # Check to see if config has changed
                        s1 = datetime.datetime.now().timestamp()
                        await self.AD.app_management.check_app_updates()
                        e1 = datetime.datetime.now().timestamp()

                # Call me suspicious, but lets update state from the plugins periodically
                await self.AD.plugins.update_plugin_state()

                # Check for thread starvation
                warning_step, warning_iterations = await self.AD.threading.check_q_size(
                    warning_step, warning_iterations)

                # Check for any overdue threads
                await self.AD.threading.check_overdue_and_dead_threads()

                # Save any hybrid namespaces
                self.AD.state.save_hybrid_namespaces()

                # Run utility for each plugin
                self.AD.plugins.run_plugin_utility()

                # Update uptime sensor
                uptime = (await self.AD.sched.get_now()).replace(
                    microsecond=0) - self.booted.replace(microsecond=0)

                await self.AD.state.set_state("_utility", "admin",
                                              "sensor.appdaemon_uptime",
                                              state=str(uptime))

            except Exception:
                # Narrowed from a bare "except:", which also swallowed
                # SystemExit, KeyboardInterrupt and asyncio.CancelledError.
                self.logger.warning('-' * 60)
                self.logger.warning("Unexpected error during utility()")
                self.logger.warning('-' * 60)
                self.logger.warning(traceback.format_exc())
                self.logger.warning('-' * 60)

            end_time = datetime.datetime.now().timestamp()

            # durations truncated to whole milliseconds
            loop_duration = (int(
                (end_time - start_time) * 1000) / 1000) * 1000
            check_app_updates_duration = (int(
                (e1 - s1) * 1000) / 1000) * 1000

            self.logger.debug(
                "Util loop compute time: %sms, check_config()=%sms, other=%sms",
                loop_duration, check_app_updates_duration,
                loop_duration - check_app_updates_duration)

            if self.AD.sched.realtime is True and loop_duration > (
                    self.AD.max_utility_skew * 1000):
                self.logger.warning(
                    "Excessive time spent in utility loop: %sms, %sms in check_app_updates(), %sms in other",
                    loop_duration, check_app_updates_duration,
                    loop_duration - check_app_updates_duration)
                if self.AD.check_app_updates_profile is True:
                    self.logger.info("Profile information for Utility Loop")
                    self.logger.info(self.AD.app_management.
                                     check_app_updates_profile_stats)

            await asyncio.sleep(self.AD.utility_delay)

    #
    # Shutting down now - stop apps
    #
    if self.AD.app_management is not None:
        await self.AD.app_management.terminate()
async def get_history_api(self, **kwargs):
    """Build a Home Assistant ``/api/history/period`` URL.

    Accepted kwargs:
        entity_id: restrict history to this entity.
        days: look back this many days from now (mutually exclusive
            with ``start_time``).
        start_time: str or datetime marking the window start.
        end_time: str or datetime marking the window end (requires
            ``start_time`` or ``days``); defaults to "now".

    :return: the fully-formed history API URL.
    :raises ValueError: on conflicting time arguments or a time value
        of the wrong type.
    """
    query = {}
    kwargkeys = set(kwargs.keys())

    if {"days", "start_time"} <= kwargkeys:
        raise ValueError(
            f'Can not have both days and start time. days: {kwargs["days"]} -- start_time: {kwargs["start_time"]}'
        )
    if "end_time" in kwargkeys and {"start_time", "days"}.isdisjoint(kwargkeys):
        raise ValueError("Can not have end_time without start_time or days")

    entity_id = kwargs.get("entity_id", "").strip()
    days = max(0, kwargs.get("days", 0))

    def as_datetime(args, key):
        """Normalize args[key] to an aware datetime (None if absent)."""
        if key in args:
            if isinstance(args[key], str):
                # BUG FIX: was "args(key)" - calling the dict like a
                # function, raising TypeError for every string time value.
                return utils.str_to_dt(args[key]).replace(microsecond=0)
            elif isinstance(args[key], datetime.datetime):
                return self.AD.tz.localize(args[key]).replace(microsecond=0)
            else:
                raise ValueError(f"Invalid type for {key}")

    start_time = as_datetime(kwargs, "start_time")
    end_time = as_datetime(kwargs, "end_time")

    # end_time default - now
    now = (await self.AD.sched.get_now()).replace(microsecond=0)
    end_time = end_time if end_time else now

    # Days: Calculate start_time (now-days) and end_time (now)
    if days:
        now = (await self.AD.sched.get_now()).replace(microsecond=0)
        start_time = now - datetime.timedelta(days=days)
        end_time = now

    # Build the url
    # /api/history/period/<start_time>?filter_entity_id=<entity_id>&end_time=<end_time>
    apiurl = f'{self.config["ha_url"]}/api/history/period'

    if start_time:
        apiurl += "/" + utils.dt_to_str(start_time.replace(microsecond=0),
                                        self.AD.tz)

    if entity_id or end_time:
        if entity_id:
            query["filter_entity_id"] = entity_id
        if end_time:
            # BUG FIX: the raw datetime was previously urlencoded via
            # str(), producing a format unlike start_time's; serialize it
            # with dt_to_str the same way as start_time.
            query["end_time"] = utils.dt_to_str(
                end_time.replace(microsecond=0), self.AD.tz)
        apiurl += f"?{urlencode(query)}"

    return apiurl
async def update_thread_info(self, thread_id, callback, app, type, uuid, silent):
    """Record a worker-thread transition in the admin sensors.

    Called when a thread (or the pseudo-thread ``"async"``) starts a
    callback or returns to ``"idle"``. Updates busy gauges, per-app and
    per-callback counters, the high-water-mark sensors and the
    per-thread admin entity.

    :param callback: callback name, or the literal ``"idle"`` when the
        thread has just finished one.
    :param type: callback type, used in the ``{type}_callback.{uuid}``
        admin entity name.
    :param silent: if True, skip all bookkeeping entirely.
    """
    self.logger.debug("Update thread info: %s", thread_id)
    if silent is True:
        return

    if self.AD.log_thread_actions:
        if callback == "idle":
            self.diag.info("%s done", thread_id)
        else:
            self.diag.info("%s calling %s callback %s", thread_id, type,
                           callback)

    appinfo = self.AD.app_management.get_app_info(app)
    if appinfo is None:  # app possibly terminated
        return

    appentity = "{}.{}".format(appinfo["type"], app)

    now = await self.AD.sched.get_now()
    if callback == "idle":
        # Thread finished a callback: warn if it overran the threshold,
        # then decrement busy count and bump the "executed" counters.
        start = utils.str_to_dt(
            await self.get_state("_threading", "admin",
                                 "thread.{}".format(thread_id),
                                 attribute="time_called",)
        )
        if (
            self.AD.sched.realtime is True
            and (now - start).total_seconds() >= self.AD.thread_duration_warning_threshold
        ):
            self.logger.warning(
                "callback %s has now completed",
                await self.get_state("_threading", "admin",
                                     "thread.{}".format(thread_id)),
            )
        await self.add_to_state("_threading", "admin",
                                "sensor.threads_current_busy", -1)
        await self.add_to_attr("_threading", "admin", appentity,
                               "totalcallbacks", 1)
        await self.add_to_attr("_threading", "admin", appentity,
                               "instancecallbacks", 1)
        await self.add_to_attr(
            "_threading", "admin", "{}_callback.{}".format(type, uuid),
            "executed", 1,
        )
        await self.add_to_state("_threading", "admin",
                                "sensor.callbacks_total_executed", 1)
        self.current_callbacks_executed += 1
    else:
        # Thread picked up a callback: increment busy count
        await self.add_to_state("_threading", "admin",
                                "sensor.threads_current_busy", 1)
        self.current_callbacks_fired += 1

    # Track the busy high-water mark and when it was reached
    current_busy = await self.get_state("_threading", "admin",
                                        "sensor.threads_current_busy")
    max_busy = await self.get_state("_threading", "admin",
                                    "sensor.threads_max_busy")
    if current_busy > max_busy:
        await self.set_state("_threading", "admin",
                             "sensor.threads_max_busy", state=current_busy)
        await self.set_state(
            "_threading",
            "admin",
            "sensor.threads_max_busy_time",
            state=utils.dt_to_str(
                (await self.AD.sched.get_now()).replace(microsecond=0),
                self.AD.tz),
        )

    await self.set_state(
        "_threading",
        "admin",
        "sensor.threads_last_action_time",
        state=utils.dt_to_str(
            (await self.AD.sched.get_now()).replace(microsecond=0),
            self.AD.tz),
    )

    # Update thread info
    if thread_id == "async":
        # The async pseudo-thread has no queue or Thread object
        await self.set_state(
            "_threading",
            "admin",
            "thread.{}".format(thread_id),
            q=0,
            state=callback,
            time_called=utils.dt_to_str(now.replace(microsecond=0),
                                        self.AD.tz),
            is_alive=True,
            pinned_apps=[],
        )
    else:
        await self.set_state(
            "_threading",
            "admin",
            "thread.{}".format(thread_id),
            q=self.threads[thread_id]["queue"].qsize(),
            state=callback,
            time_called=utils.dt_to_str(now.replace(microsecond=0),
                                        self.AD.tz),
            is_alive=self.threads[thread_id]["thread"].is_alive(),
            pinned_apps=await self.get_pinned_apps(thread_id),
        )
    await self.set_state("_threading", "admin", appentity, state=callback)
async def call_plugin_service(self, namespace, domain, service, data):
    """Call a Home Assistant service via the plugin's REST API.

    Args:
        namespace: Namespace whose plugin object supplies the HA connection config.
        domain: Service domain; "database" builds a /api/history/period query,
            "template" posts to /api/template, anything else posts to
            /api/services/<domain>/<service>.
        service: Service name within the domain.
        data: Service payload dict; a plain string is treated as an entity_id.

    Returns:
        Parsed JSON response (or raw text for "template"), or None on any
        HTTP error, timeout, disconnect, or unexpected exception.
    """
    self.logger.debug(
        "call_plugin_service() namespace=%s domain=%s service=%s data=%s", namespace, domain, service, data)
    #
    # If data is a string just assume it's an entity_id
    #
    if isinstance(data, str):
        data = {"entity_id": data}

    config = (await self.AD.plugins.get_plugin_object(namespace)).config
    # Prefer long-lived token auth; fall back to the legacy ha_key header
    if "token" in config:
        headers = {'Authorization': "Bearer {}".format(config["token"])}
    elif "ha_key" in config:
        headers = {'x-ha-access': config["ha_key"]}
    else:
        headers = {}

    if domain == "database":
        # Build a /api/history/period URL from entity_id / days / start_time / end_time
        if "entity_id" in data and data["entity_id"] != "":
            filter_entity_id = "?filter_entity_id={}".format(data["entity_id"])
        else:
            filter_entity_id = ""

        start_time = ""
        end_time = ""
        if "days" in data:
            days = data["days"]
            if days - 1 < 0:  # clamp non-positive day counts to 1
                days = 1
        else:
            days = 1

        if "start_time" in data:
            if isinstance(data["start_time"], str):
                start_time = utils.str_to_dt(data["start_time"]).replace(microsecond=0)
            elif isinstance(data["start_time"], datetime.datetime):
                start_time = self.AD.tz.localize(data["start_time"]).replace(microsecond=0)
            else:
                raise ValueError("Invalid type for start time")

        if "end_time" in data:
            if isinstance(data["end_time"], str):
                end_time = utils.str_to_dt(data["end_time"]).replace(microsecond=0)
            elif isinstance(data["end_time"], datetime.datetime):
                end_time = self.AD.tz.localize(data["end_time"]).replace(microsecond=0)
            else:
                raise ValueError("Invalid type for end time")

        # if both are declared, it can't process entity_id
        if start_time != "" and end_time != "":
            filter_entity_id = ""
        # if starttime is not declared and entity_id is declared, and days specified
        elif (filter_entity_id != "" and start_time == "") and "days" in data:
            start_time = (await self.AD.sched.get_now()).replace(microsecond=0) - datetime.timedelta(days=days)
        # if starttime is declared and entity_id is not declared, and days specified
        elif filter_entity_id == "" and start_time != "" and end_time == "" and "days" in data:
            end_time = start_time + datetime.timedelta(days=days)
        # if endtime is declared and entity_id is not declared, and days specified
        elif filter_entity_id == "" and end_time != "" and start_time == "" and "days" in data:
            start_time = end_time - datetime.timedelta(days=days)

        if start_time != "":
            timestamp = "/{}".format(utils.dt_to_str(start_time.replace(microsecond=0), self.AD.tz))
            if filter_entity_id != "":
                # if entity_id is specified, end_time cannot be used
                end_time = ""
            if end_time != "":
                end_time = "?end_time={}".format(
                    quote(utils.dt_to_str(end_time.replace(microsecond=0), self.AD.tz)))
        # if no start_time is specified, other parameters are invalid
        else:
            timestamp = ""
            end_time = ""

        api_url = "{}/api/history/period{}{}{}".format(config["ha_url"], timestamp, filter_entity_id, end_time)
    elif domain == "template":
        api_url = "{}/api/template".format(config["ha_url"])
    else:
        api_url = "{}/api/services/{}/{}".format(config["ha_url"], domain, service)

    try:
        # History queries are GETs; everything else is a POST with the payload
        if domain == "database":
            r = await self.session.get(api_url, headers=headers, verify_ssl=self.cert_verify)
        else:
            r = await self.session.post(api_url, headers=headers, json=data, verify_ssl=self.cert_verify)
        if r.status == 200 or r.status == 201:
            if domain == "template":
                result = await r.text()
            else:
                result = await r.json()
        else:
            self.logger.warning(
                "Error calling Home Assistant service %s/%s/%s", namespace, domain, service)
            txt = await r.text()
            self.logger.warning("Code: %s, error: %s", r.status, txt)
            result = None
        return result
    except (asyncio.TimeoutError, asyncio.CancelledError):
        self.logger.warning("Timeout in call_service(%s/%s/%s, %s)", namespace, domain, service, data)
    except aiohttp.client_exceptions.ServerDisconnectedError:
        self.logger.warning("HASS Disconnected unexpectedly during call_service()")
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are not
    # swallowed; CancelledError is already handled explicitly above.
    except Exception:
        self.logger.warning('-' * 60)
        self.logger.warning("Unexpected error during call_plugin_service()")
        self.logger.warning("Service: %s.%s.%s Arguments: %s", namespace, domain, service, data)
        self.logger.warning('-' * 60)
        self.logger.warning(traceback.format_exc())
        self.logger.warning('-' * 60)
    return None
async def get_history_api(self, **kwargs):
    """Build the /api/history/period URL for a HA history query.

    This replaces the original method for get_history_api. Simplified version;
    note - this is NOT fully tested.

    Keyword Args:
        entity_id: str (optional).
        days: int (optional). Number of days of data to get, ending now.
            Can NOT be combined with start_time.
        start_time: date str or datetime (optional).
        end_time: date str or datetime (optional) - defaults to now; requires
            start_time or days.

    Returns:
        The fully-built history API URL as a string.

    Raises:
        ValueError: on conflicting/missing kwargs or a bad start/end_time type.
    """
    query = {}
    entity_id = None
    days = None
    start_time = None
    end_time = None

    kwargkeys = set(kwargs.keys())
    if {"days", "start_time"} <= kwargkeys:
        raise ValueError(
            f'Can not have both days and start time. days: {kwargs["days"]} -- start_time: {kwargs["start_time"]}'
        )
    if "end_time" in kwargkeys and {"start_time", "days"}.isdisjoint(kwargkeys):
        raise ValueError("Can not have end_time without start_time or days")

    entity_id = kwargs.get("entity_id", "").strip()
    days = max(0, kwargs.get("days", 0))

    def as_datetime(args, key):
        # Coerce args[key] to an aware datetime; returns None when key is absent.
        # BUG FIX: the original called `args(key)` (dict is not callable) in both
        # branches, raising TypeError whenever start_time/end_time was supplied.
        if key in args:
            if isinstance(args[key], str):
                return utils.str_to_dt(args[key]).replace(microsecond=0)
            elif isinstance(args[key], datetime.datetime):
                return self.AD.tz.localize(args[key]).replace(microsecond=0)
            else:
                raise ValueError(f"Invalid type for {key}")

    start_time = as_datetime(kwargs, "start_time")
    end_time = as_datetime(kwargs, "end_time")

    # end_time default - now
    now = (await self.AD.sched.get_now()).replace(microsecond=0)
    end_time = end_time if end_time else now

    # Days: Calculate start_time (now-days) and end_time (now)
    if days:
        now = (await self.AD.sched.get_now()).replace(microsecond=0)
        start_time = now - datetime.timedelta(days=days)
        end_time = now

    # Build the url
    # /api/history/period/<start_time>?filter_entity_id=<entity_id>&end_time=<end_time>
    apiurl = f'{self.config["ha_url"]}/api/history/period'
    if start_time:
        apiurl += "/" + utils.dt_to_str(start_time.replace(microsecond=0), self.AD.tz)
    if entity_id or end_time:
        if entity_id:
            query["filter_entity_id"] = entity_id
        if end_time:
            # BUG FIX: serialize the datetime the same way as start_time; the
            # original urlencoded the raw datetime object (str() form), which
            # does not match the format used elsewhere for HA timestamps.
            query["end_time"] = utils.dt_to_str(end_time.replace(microsecond=0), self.AD.tz)
        apiurl += f'?{urlencode(query)}'

    # Log both the legacy and the new URL for comparison while this is in review
    orig_api_url = (await self.get_history_api_orig(**kwargs))
    self.logger.debug(f'HASSPLUGIN - ORIG method: {orig_api_url}')
    self.logger.debug(f'HASSPLUGIN - NEW method: {apiurl}')
    return apiurl