def save(self):
    """Save the tasks in ServerConfig."""
    for task_id, (date, callback, args, kwargs) in self.tasks.items():
        if task_id in self.to_save:
            continue
        if getattr(callback, "__self__", None):
            # bound method: store it as a (instance, method name) pair
            callback = (callback.__self__, callback.__name__)
        # args and kwargs were vetted earlier; the callback must pickle too
        try:
            dbserialize(callback)
        except (TypeError, AttributeError):
            raise ValueError("the specified callback {} cannot be pickled. "
                             "It must be a top-level function in a module or an "
                             "instance method.".format(callback))
        self.to_save[task_id] = dbserialize((date, callback, args, kwargs))
    ServerConfig.objects.conf("delayed_tasks", self.to_save)
def save(self):
    """Save the tasks in ServerConfig."""
    for task_id, task_data in self.tasks.items():
        if task_id in self.to_save:
            # already serialized
            continue
        date, callback, args, kwargs = task_data
        if getattr(callback, "__self__", None):
            # `callback` is a bound instance method - pickle as (obj, name)
            callback = (callback.__self__, callback.__name__)
        # callback must be picklable; args and kwargs were already checked
        safe_callback = None
        try:
            dbserialize(callback)
        except (TypeError, AttributeError):
            raise ValueError(
                "the specified callback {} cannot be pickled. "
                "It must be a top-level function in a module or an "
                "instance method.".format(callback))
        else:
            safe_callback = callback
        self.to_save[task_id] = dbserialize((date, safe_callback, args, kwargs))
    ServerConfig.objects.conf("delayed_tasks", self.to_save)
def add(self, obj, fieldname, callback, idstring="", persistent=False, **kwargs):
    """
    Start monitoring a field or Attribute on an entity.

    A database field must be given with its full db_* name; any other
    name is treated as an Attribute (so `db_key`, not just `key`).

    Args:
        obj (Typeclassed Entity): The entity on which to monitor a
            field or Attribute.
        fieldname (str): Name of field (db_*) or Attribute to monitor.
        callback (callable): A callable on the form `callable(**kwargs),
            where kwargs holds keys fieldname and obj.
        idstring (str, optional): An id to separate this monitor from
            other monitors of the same field and object.
        persistent (bool, optional): If False, the monitor will survive
            a server reload but not a cold restart. This is default.

    Keyword Args:
        session (Session): If this keyword is given, the monitorhandler
            will correctly analyze it and remove the monitor if after a
            reload/reboot the session is no longer valid.
        any (any): Any other kwargs are passed on to the callback. All
            kwargs must be picklable!

    """
    if not fieldname.startswith("db_") or not hasattr(obj, fieldname):
        # not a database field - treat as an Attribute and track its
        # db_value field instead
        obj = obj.attributes.get(fieldname, return_obj=True)
        if not obj:
            return
        fieldname = "db_value"

    # the monitor definition must serialize cleanly or it is rejected
    try:
        if not inspect.isfunction(callback):
            raise TypeError("callback is not a function.")
        dbserialize((obj, fieldname, callback, idstring, persistent, kwargs))
    except Exception:
        logger.log_trace(
            "Invalid monitor definition: \n"
            " (%s, %s, %s, %s, %s, %s)"
            % (obj, fieldname, callback, idstring, persistent, kwargs))
    else:
        self.monitors[obj][fieldname][idstring] = (callback, persistent, kwargs)
def add(self, timedelay, callback, *args, **kwargs):
    """Add a new persistent task in the configuration.

    Args:
        timedelay (int or float): time in seconds before calling the callback.
        callback (function or instance method): the callback itself
        any (any): any additional positional arguments to send to the callback

    Keyword Args:
        persistent (bool, optional): persist the task (store it).
        any (any): additional keyword arguments to send to the callback

    """
    # `persistent` is a control flag, not an argument for the callback
    persistent = kwargs.pop("persistent", False)
    comp_time = datetime.now() + timedelta(seconds=timedelay)

    # pick the lowest unused task id
    task_id = 1
    while task_id in self.tasks:
        task_id += 1

    # keep only picklable positional arguments
    safe_args = []
    for arg in args:
        try:
            dbserialize(arg)
        except (TypeError, AttributeError):
            log_err("The positional argument {} cannot be "
                    "pickled and will not be present in the arguments "
                    "fed to the callback {}".format(arg, callback))
            continue
        safe_args.append(arg)

    # keep only picklable keyword arguments
    safe_kwargs = {}
    for key, value in kwargs.items():
        try:
            dbserialize(value)
        except (TypeError, AttributeError):
            log_err("The {} keyword argument {} cannot be "
                    "pickled and will not be present in the arguments "
                    "fed to the callback {}".format(key, value, callback))
            continue
        safe_kwargs[key] = value

    self.tasks[task_id] = (comp_time, callback, safe_args, safe_kwargs)
    self.save()
    # the deferred fires do_task, which looks up the stored task by id
    return deferLater(reactor, timedelay, self.do_task, task_id)
def add(self, timedelay, callback, *args, **kwargs):
    """Add a new persistent task in the configuration.

    Args:
        timedelay (int or float): time in seconds before calling the callback.
        callback (function or instance method): the callback itself
        any (any): any additional positional arguments to send to the callback

    Kwargs:
        persistent (bool, optional): persist the task (store it).
        any (any): additional keyword arguments to send to the callback

    """
    persistent = kwargs.get("persistent", False)
    if persistent:
        # control flag only - must not reach the callback
        del kwargs["persistent"]
    due = datetime.now() + timedelta(seconds=timedelay)

    # find a free task_id
    next_id = 1
    while next_id in self.tasks:
        next_id += 1

    # filter out anything that cannot be pickled, logging each drop
    safe_args = []
    for pos_arg in args:
        try:
            dbserialize(pos_arg)
        except (TypeError, AttributeError):
            logger.log_err("The positional argument {} cannot be "
                           "pickled and will not be present in the arguments "
                           "fed to the callback {}".format(pos_arg, callback))
            continue
        safe_args.append(pos_arg)
    safe_kwargs = {}
    for keyword, value in kwargs.items():
        try:
            dbserialize(value)
        except (TypeError, AttributeError):
            logger.log_err("The {} keyword argument {} cannot be "
                           "pickled and will not be present in the arguments "
                           "fed to the callback {}".format(keyword, value, callback))
            continue
        safe_kwargs[keyword] = value

    self.tasks[next_id] = (due, callback, safe_args, safe_kwargs)
    self.save()
    # schedule do_task to run the stored task after the delay
    return task.deferLater(reactor, timedelay, self.do_task, next_id)
def save(self):
    """
    Save ticker_storage as a serialized string into a temporary
    ServerConf field. Whereas saving is done on the fly, if called by
    server when it shuts down, the current timer of each ticker will be
    saved so it can start over from that point.
    """
    if self.ticker_storage:
        # get the current times so the tickers can be restarted with a delay later
        start_delays = dict(
            (interval, ticker.task.next_call_time())
            for interval, ticker in self.ticker_pool.tickers.items())
        # remove any subscriptions that lost its object in the interim
        # NOTE(review): the filter uses store_key[1] as a method name
        # (hasattr check) while the loop below uses store_key[1] as the
        # interval - one of these indexings looks inconsistent; confirm
        # the store_key tuple layout against where it is built.
        to_save = {
            store_key: (args, kwargs)
            for store_key, (args, kwargs) in self.ticker_storage.items()
            if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk)
                 and hasattr(kwargs["_obj"], store_key[1])) or  # a valid method with existing obj
                store_key[2])  # a path given
        }
        # update the timers for the tickers
        for store_key, (args, kwargs) in to_save.items():
            interval = store_key[1]
            # this is a mutable, so it's updated in-place in ticker_storage
            kwargs["_start_delay"] = start_delays.get(interval, None)
        ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save))
    else:
        # make sure we have nothing lingering in the database
        ServerConfig.objects.conf(key=self.save_name, delete=True)
def save(self):
    """
    Save ticker_storage as a serialized string into a temporary
    ServerConf field. Whereas saving is done on the fly, if called by
    server when it shuts down, the current timer of each ticker will be
    saved so it can start over from that point.
    """
    if self.ticker_storage:
        # get the current times so the tickers can be restarted with a delay later
        start_delays = dict((interval, ticker.task.next_call_time())
                            for interval, ticker in self.ticker_pool.tickers.items())
        # remove any subscriptions that lost its object in the interim
        # NOTE(review): store_key[1] is used here as a method name for
        # the hasattr check, but as the interval in the loop below - the
        # two usages look inconsistent; verify the store_key layout.
        to_save = {store_key: (args, kwargs)
                   for store_key, (args, kwargs) in self.ticker_storage.items()
                   if ((store_key[1] and ("_obj" in kwargs and kwargs["_obj"].pk)
                        and hasattr(kwargs["_obj"], store_key[1])) or  # a valid method with existing obj
                       store_key[2])}  # a path given
        # update the timers for the tickers
        for store_key, (args, kwargs) in to_save.items():
            interval = store_key[1]
            # this is a mutable, so it's updated in-place in ticker_storage
            kwargs["_start_delay"] = start_delays.get(interval, None)
        ServerConfig.objects.conf(key=self.save_name, value=dbserialize(to_save))
    else:
        # make sure we have nothing lingering in the database
        ServerConfig.objects.conf(key=self.save_name, delete=True)
def add(self, obj, fieldname, callback, idstring="", persistent=False, **kwargs):
    """
    Attach a monitor to a field or Attribute on a given entity.

    Database fields must be named with the full db_* prefix; any other
    name is looked up as an Attribute (so `db_key`, not just `key`).

    Args:
        obj (Typeclassed Entity): The entity on which to monitor a
            field or Attribute.
        fieldname (str): Name of field (db_*) or Attribute to monitor.
        callback (callable): A callable on the form `callable(**kwargs),
            where kwargs holds keys fieldname and obj.
        idstring (str, optional): An id to separate this monitor from
            other monitors of the same field and object.
        persistent (bool, optional): If False, the monitor will survive
            a server reload but not a cold restart. This is default.

    Kwargs:
        session (Session): If this keyword is given, the monitorhandler
            will correctly analyze it and remove the monitor if after a
            reload/reboot the session is no longer valid.
        any (any): Any other kwargs are passed on to the callback and
            must all be picklable.

    """
    if not fieldname.startswith("db_") or not hasattr(obj, fieldname):
        # an Attribute rather than a field - monitor its db_value instead
        attrobj = obj.attributes.get(fieldname, return_obj=True)
        if not attrobj:
            return
        obj, fieldname = attrobj, "db_value"

    # reject definitions that can't be serialized (or non-function callbacks)
    try:
        if not inspect.isfunction(callback):
            raise TypeError("callback is not a function.")
        dbserialize((obj, fieldname, callback, idstring, persistent, kwargs))
    except Exception:
        logger.log_trace("Invalid monitor definition: \n"
                         " (%s, %s, %s, %s, %s, %s)"
                         % (obj, fieldname, callback, idstring, persistent, kwargs))
        return
    self.monitors[obj][fieldname][idstring] = (callback, persistent, kwargs)
def set_task(self, seconds, obj, callback_name):
    """
    Set and schedule a task to run.

    Args:
        seconds (int, float): the delay in seconds from now.
        obj (Object): the typeclassed object connected to the event.
        callback_name (str): the callback's name.

    Notes:
        This method allows to schedule a "persistent" task.
        'utils.delay' is called, but a copy of the task is kept in
        the event handler, and when the script restarts (after reload),
        the deferred delay is called again.

        The dictionary of locals is frozen and will be available
        again when the task runs.  This feature, however, is limited
        by the database: all data cannot be saved.  Lambda functions,
        class methods, objects inside an instance and so on will
        not be kept in the locals dictionary.

    """
    now = datetime.now()
    delta = timedelta(seconds=seconds)

    # choose a free task_id (lowest positive integer not in use)
    used_ids = list(self.db.tasks.keys())
    task_id = 1
    while task_id in used_ids:
        task_id += 1

    # collect and freeze current locals, silently dropping anything
    # that cannot be serialized (lambdas, bound methods, ...).
    # `frozen_locals` avoids shadowing the `locals` builtin; the except
    # clause also catches AttributeError, which dbserialize can raise
    # for unpicklable objects (matching the other handlers in this file).
    frozen_locals = {}
    for key, value in self.ndb.current_locals.items():
        try:
            dbserialize(value)
        except (TypeError, AttributeError):
            continue
        else:
            frozen_locals[key] = value

    self.db.tasks[task_id] = (now + delta, obj, callback_name, frozen_locals)
    delay(seconds, complete_task, task_id)
def save(self):
    """
    Handles saving of the OOBHandler data when the server reloads.
    Called from the Server process.
    """
    # ticker data is saved by the parent tickerhandler
    super(OOBHandler, self).save()
    # persist (or clear) the extra oob monitor store
    key = self.oob_save_name
    if self.ticker_storage:
        ServerConfig.objects.conf(key=key,
                                  value=dbserialize(self.oob_monitor_storage))
    else:
        # nothing to keep - make sure no stale entry lingers in the database
        ServerConfig.objects.conf(key=key, delete=True)
def save(self):
    """
    Store our monitors to the database. This is called by the server
    process.

    Since dbserialize can't handle defaultdicts, we convert to an
    intermediary save format ((obj,fieldname, idstring, callback, kwargs), ...)
    """
    savedata = []
    if self.monitors:
        for obj in self.monitors:
            for fieldname in self.monitors[obj]:
                # .items() / __name__ replace the Python-2-only
                # .iteritems() / .func_name (removed in Python 3);
                # matches the sibling implementation in this file.
                for idstring, (callback, persistent, kwargs) in \
                        self.monitors[obj][fieldname].items():
                    # store the callback as an importable dotted path
                    path = "%s.%s" % (callback.__module__, callback.__name__)
                    savedata.append((obj, fieldname, idstring, path, persistent, kwargs))
    savedata = dbserialize(savedata)
    ServerConfig.objects.conf(key=self.savekey, value=savedata)
def save(self):
    """
    Serialize ticker_storage into a temporary ServerConf field.
    Saving also happens on the fly; when called at server shutdown the
    current countdown of each ticker is stored so it can resume from
    that point after a restart.
    """
    if not self.ticker_storage:
        # nothing stored - clear any lingering database entry
        ServerConfig.objects.conf(key=self.save_name, delete=True)
        return
    # remaining time per interval, used to restart tickers with a delay
    start_delays = {interval: ticker.task.next_call_time()
                    for interval, ticker in self.ticker_pool.tickers.items()}
    # update the timers for the tickers
    for store_key, (args, kwargs) in self.ticker_storage.items():
        interval = store_key[1]
        # kwargs is mutable, so this updates ticker_storage in-place
        kwargs["_start_delay"] = start_delays.get(interval, None)
    ServerConfig.objects.conf(key=self.save_name,
                              value=dbserialize(self.ticker_storage))
def save(self):
    """Save the tasks in ServerConfig.

    Only persistent tasks that have not already been serialized are
    added; the accumulated mapping is then written to the
    "delayed_tasks" ServerConfig entry.
    """
    for task_id, (date, callback, args, kwargs, persistent, _) in self.tasks.items():
        if task_id in self.to_save:
            # already serialized earlier
            continue
        if not persistent:
            # non-persistent tasks are never stored
            continue
        if getattr(callback, "__self__", None):
            # `callback` is an instance method - store as (instance, name)
            obj = callback.__self__
            name = callback.__name__
            callback = (obj, name)
        # callback, args and kwargs were verified picklable in add();
        # (removed a dead `safe_callback = None` leftover here)
        self.to_save[task_id] = dbserialize((date, callback, args, kwargs))
    ServerConfig.objects.conf("delayed_tasks", self.to_save)
def save(self):
    """
    Store our monitors to the database (called by the server process).

    dbserialize cannot handle defaultdicts, so the nested monitor
    mapping is flattened into an intermediary save format:
    ((obj, fieldname, idstring, path, persistent, kwargs), ...)
    """
    savedata = []
    if self.monitors:
        for obj, fieldmap in self.monitors.items():
            for fieldname, idmap in fieldmap.items():
                for idstring, (callback, persistent, kwargs) in idmap.items():
                    # store the callback as an importable dotted path
                    path = "%s.%s" % (callback.__module__, callback.__name__)
                    savedata.append(
                        (obj, fieldname, idstring, path, persistent, kwargs))
    ServerConfig.objects.conf(key=self.savekey, value=dbserialize(savedata))
def save(self):
    """
    Persist ticker_storage as a serialized string in a temporary
    ServerConf field. Saving is also done on the fly; when called at
    server shutdown, the remaining time of every ticker is recorded so
    each one can start over from the same point.
    """
    if self.ticker_storage:
        # remaining time per interval; used as restart delays
        delays = {interval: ticker.task.next_call_time()
                  for interval, ticker in self.ticker_pool.tickers.items()}
        for store_key, (args, kwargs) in self.ticker_storage.items():
            # kwargs is mutable - mutating it updates ticker_storage in-place
            kwargs["_start_delay"] = delays.get(store_key[1], None)
        ServerConfig.objects.conf(key=self.save_name,
                                  value=dbserialize(self.ticker_storage))
    else:
        # make sure we have nothing lingering in the database
        ServerConfig.objects.conf(key=self.save_name, delete=True)
def add(self, timedelay, callback, *args, **kwargs):
    """Add a new task.

    If the persistent kwarg is truthy:
    The callback, args and values for kwarg will be serialized. Type
    and attribute errors during the serialization will be logged,
    but will not throw exceptions.
    For persistent tasks do not use memory references in the callback
    function or arguments. After a restart those memory references are
    no longer accurate.

    Args:
        timedelay (int or float): time in seconds before calling the callback.
        callback (function or instance method): the callback itself
        *args: positional arguments to pass to callback.
        **kwargs: keyword arguments to pass to callback.
            - persistent (bool, optional): persist the task (stores it).
              Persistent key and value is removed from kwargs; it will
              not be passed to callback.

    Returns:
        TaskHandlerTask: An object to represent a task, or False if the
            task completed before its deferred could be recorded.

    Raises:
        ValueError: if a persistent callback cannot be pickled.

    """
    # completion time; only used on persistent tasks after a restart
    comp_time = datetime.now() + timedelta(seconds=timedelay)

    # get an open task id
    task_id = 1
    while task_id in self.tasks:
        task_id += 1

    # `persistent` is a control flag, never passed to the callback
    persistent = kwargs.pop("persistent", False)
    if persistent:
        safe_args = []
        safe_kwargs = {}
        # an unsaveable callback aborts immediately
        # (fixed: reconstructed the garbled message string and removed
        # the unreachable `return` that followed this raise)
        try:
            dbserialize(callback)
        except (TypeError, AttributeError, PickleError):
            raise ValueError(
                "the specified callback {} cannot be pickled. "
                "It must be a top-level function in a module or an "
                "instance method.".format(callback))
        # drop unpicklable arguments, logging each removal
        for arg in args:
            try:
                dbserialize(arg)
            except (TypeError, AttributeError, PickleError):
                log_err("The positional argument {} cannot be "
                        "pickled and will not be present in the arguments "
                        "fed to the callback {}".format(arg, callback))
            else:
                safe_args.append(arg)
        for key, value in kwargs.items():
            try:
                dbserialize(value)
            except (TypeError, AttributeError, PickleError):
                log_err("The {} keyword argument {} cannot be "
                        "pickled and will not be present in the arguments "
                        "fed to the callback {}".format(key, value, callback))
            else:
                safe_kwargs[key] = value
        self.tasks[task_id] = (comp_time, callback, safe_args, safe_kwargs,
                               persistent, None)
        self.save()
    else:
        # non-persistent task: no serialization check needed
        self.tasks[task_id] = (comp_time, callback, args, kwargs,
                               persistent, None)

    # defer the task
    d = deferLater(self.clock, timedelay, self.do_task, task_id)
    d.addErrback(handle_error)
    # some tasks may complete before the deferred can be added
    if task_id not in self.tasks:
        # the task already completed
        return False
    # record the deferred on the stored task
    # (renamed the local from `task` to avoid shadowing the twisted
    # `task` module name used elsewhere in this file)
    entry = list(self.tasks[task_id])
    entry[4] = persistent
    entry[5] = d
    self.tasks[task_id] = entry
    if self.stale_timeout > 0:
        self.clean_stale_tasks()
    return TaskHandlerTask(task_id)