def distribute_message(self, msgobj, online=True):
    """
    Send a message to all subscribers of this channel.

    Args:
        msgobj (Msg or TempMsg): The message to distribute.
        online (bool): If True, only send to subscribers that are
            currently online (optimized for very large sends).

    Notes:
        Mention-highlighting is per-receiver: senders and players with
        highlight_all_mentions set get every subscriber's character name
        highlighted; everyone else only their own. If msgobj.keep_log is
        set the raw message is also appended to the channel log file.
    """
    if online:
        subs = self.subscriptions.online()
    else:
        subs = self.subscriptions.all()
    # Hoisted out of the loop: the full name list is loop-invariant, so
    # don't rebuild it once per receiver.
    all_names = [sub.char_ob.key for sub in subs if sub.char_ob]
    for entity in subs:
        if entity in msgobj.senders or entity.player_ob.db.highlight_all_mentions:
            names = all_names
        else:
            names = [entity.char_ob.key] if entity.char_ob else []
        message = self.__format_mentions(msgobj.message, names)
        self.send_msg(message, entity, msgobj.senders)
    if msgobj.keep_log:
        # log to file
        logger.log_file(
            msgobj.message,
            self.attributes.get("log_file") or "channel_%s.log" % self.key,
        )
def distribute_message(self, msgobj, online=False):
    """
    Send a message to every listener subscribed to this channel.

    Args:
        msgobj (Msg or TempMsg): Message to distribute.
        online (bool): Only send to receivers who are actually online
            (not currently used).

    Notes:
        Muted subscribers are skipped. This is also where file logging
        happens, if enabled on the message.
    """
    # Walk every account/object connected to this channel.
    for receiver in self.subscriptions.all():
        # Muted receivers get nothing.
        if receiver in self.mutelist:
            continue
        try:
            # The from_channel option lets a custom player.msg() treat
            # channel receives differently from ordinary sends.
            receiver.msg(
                msgobj.message,
                from_obj=msgobj.senders,
                options={"from_channel": self.id},
            )
        except AttributeError as err:
            logger.log_trace("%s\nCannot send msg to '%s'." % (err, receiver))
    if not msgobj.keep_log:
        return
    # Channel logging was requested - append to the configured file.
    logname = self.attributes.get("log_file") or "channel_%s.log" % self.key
    logger.log_file(msgobj.message, logname)
def _destroy_instance(self, instance_id):
    """
    Tear down an instance: delete its exits and rooms, detach it from
    its creator, and drop its ledger entry.

    Args:
        instance_id: Key identifying the instance on the ledger.

    Notes:
        Any characters or objects left behind in the instance are sent
        to their default home by room deletion, so orphans are not
        critical. A missing instance_id is logged to
        instance_errors.log and reported to the owner.
    """
    # Make sure the instance_id exists on the ledger.
    if instance_id not in self.ledger.db.instances:  # idiomatic `not in`
        err_msg = f"destroy_instance could not find instance_id: {instance_id}"
        logger.log_file(err_msg, filename='instance_errors.log')
        self.owner.msg(
            '|rCRITICAL ERROR! destroy_instance could not find the instance_id!|n'
        )
        return
    # Delete all exits first so rooms are not left with dangling links.
    # Iterate over a snapshot: the ledger list is mutated in the loop.
    exits_list = list(self.ledger.db.instances[instance_id]['exits'])
    for exit_obj in exits_list:  # renamed: don't shadow builtin exit()
        self.ledger.db.instances[instance_id]['exits'].remove(exit_obj)
        exit_obj.delete()
    if len(self.ledger.db.instances[instance_id]['exits']) == 0:
        del self.ledger.db.instances[instance_id]['exits']
    # Delete all rooms.
    rooms_list = list(self.ledger.db.instances[instance_id]['rooms'])
    for room in rooms_list:
        self.ledger.db.instances[instance_id]['rooms'].remove(room)
        room.delete()
    if len(self.ledger.db.instances[instance_id]['rooms']) == 0:
        del self.ledger.db.instances[instance_id]['rooms']
    # Remove the instance from the creator object.
    creator = self.ledger.db.instances[instance_id]['creator']
    del creator.db.instances[instance_id]
    # Exits and Rooms are deleted, remove the instance from the ledger.
    del self.ledger.db.instances[instance_id]
def wodbconnect(self, cmd, data):
    """
    Debug handler for a websocket OOB request: logs the raw payload and
    caller location, queries all player usernames from the database and
    sends the raw result back over the line.

    Args:
        cmd (str): Command name, logged as a marker.
        data: Raw request payload; only logged (the json.loads parse is
            commented out).

    Raises:
        Exception: Re-raises anything raised while logging the payload.
    """
    try:
        # cmdname, args, kwargs = json.loads(data)
        logger.log_infomsg(str(data))
    except Exception:
        # BUG FIX: was a bare `log_trace` (NameError at runtime) - the
        # rest of this code uses the logger module's function.
        logger.log_trace("Websocket malformed OOB request: %s" % data)
        raise
    # self.sessionhandler.data_in(self,"oob", oob=(cmd , args))

    def _frame_info():
        # Caller's filename/function/line, for the debug log entries
        # (deduplicates the twice-repeated sys._getframe chain).
        frame = sys._getframe(1)
        return "filename:" + frame.f_code.co_filename + \
            " func: " + frame.f_code.co_name + \
            " line:" + str(frame.f_lineno)

    aaa = _frame_info()
    # NOTE(review): original mixed `self.log_zk` and bare `log_zk`;
    # unified on the module-level log_zk helper - confirm intent.
    log_zk(aaa + " ;mark:{" + cmd + "}")
    logger.log_file(aaa)
    cursor = connection.cursor()  # obtain a database cursor
    cursor.execute("select username from players_playerdb;")
    # names = [row[0] for row in cursor.fetchall()]
    raw = cursor.fetchall()
    jsonresult = json.dumps(raw)
    aaa = _frame_info()
    log_zk(aaa + " ;raw:{" + str(jsonresult) + "}")
    self.sendLine(str(raw))
def obj_enter_trash(self, object):
    """
    Flag an object entering the trash for delayed deletion.

    The object is stamped with a 'deletion_time' attribute set 30 days
    in the future (epoch seconds) and the event is logged to
    trash_bin.log.

    Args:
        object: The object that just entered the trash container.
    """
    grace_period = 2_592_000  # seconds = 30 days
    deletion_time = time.time() + grace_period
    object.attributes.add('deletion_time', deletion_time)
    logger.log_file(
        f"{object.name} pending deletion at {deletion_time} epoch time.",
        filename='trash_bin.log',
    )
def empty_trash(self):
    """
    Delete every object in the trash whose grace period has expired.

    Objects flagged by obj_enter_trash carry a 'deletion_time'
    attribute (epoch seconds); any such object whose time has passed is
    deleted and the deletion logged to trash_bin.log.
    """
    now = time.time()
    # BUG FIX: iterate over a snapshot - deleting an object mutates
    # self.owner.contents while it is being iterated.
    for obj in list(self.owner.contents):
        if not obj.attributes.has('deletion_time'):
            continue
        deletion_time = obj.attributes.get('deletion_time')
        obj_name = obj.name  # captured before delete() invalidates obj
        if now >= deletion_time:
            deleted = obj.delete()
            if deleted:
                log_str = f"{obj_name} has been destroyed!"
                logger.log_file(log_str, filename='trash_bin.log')
def _create_log_string(self):
    """
    Build a human-readable summary of this instance and record it.

    The summary is stored under the instance's entry on both the ledger
    and the owner, and appended to instances.log.
    """
    room_kind = 'random' if self.randomize_room_type else self.room_type
    header_lines = [
        "======================================================",
        f"Instance Type: {room_kind}",
        f"Creation Time: {self.creation_time}",
        f"Creator: {self.owner.name}",
    ]
    detail_lines = [
        f"Epoch Creation: {self.epoch_creation}",
        f"Epoch Expiration: {self.epoch_expiration}",
        f"Room Count: {len(self.rooms_list)}",
    ]
    # Blank line between the header and detail sections.
    log_str = "\n".join(header_lines) + "\n\n" + "\n".join(detail_lines) + "\n"
    self.ledger.db.instances[self.instance_id]['log_str'] = log_str
    self.owner.db.instances[self.instance_id]['log_str'] = log_str
    logger.log_file(log_str, filename='instances.log')
def to_file(data):
    """
    Writes dictionaries of data generated by an AuditedServerSession to
    files in JSON format, bucketed by date.

    Uses Evennia's native logger and writes to the default log directory
    (~/yourgame/server/logs/ or settings.LOG_DIR)

    Args:
        data (dict): Parsed session transmission data.
    """
    # Drop the non-serializable object references; their timestamp
    # decides which daily bucket the record lands in.
    objects = data.pop("objects")
    bucket = objects["time"].strftime("%Y-%m-%d")
    # Serialize the remainder and append it to that day's audit log.
    log_file(json.dumps(data), filename="audit_%s.log" % bucket)
def to_file(data):
    """
    Writes dictionaries of data generated by an AuditedServerSession to
    files in JSON format, bucketed by date.

    Uses Evennia's native logger and writes to the default log directory
    (~/yourgame/server/logs/ or settings.LOG_DIR)

    Args:
        data (dict): Parsed session transmission data.
    """
    # The 'objects' entry is not JSON-serializable; pop it and use its
    # timestamp to pick the daily log bucket before writing.
    day_key = data.pop('objects')['time'].strftime('%Y-%m-%d')
    log_file(json.dumps(data), filename="audit_%s.log" % day_key)
def enter_instance(self):
    """
    Register the owner as an occupant of the instance just entered.

    The instance is identified by the 'instance_id' tag category on the
    owner's current location. A missing tag is logged to
    instance_errors.log and reported to the owner as a critical error.
    """
    # Determine the currently occupied room's instance_id.
    # Hoisted: the original fetched the tag twice.
    instance_id = self.owner.location.tags.get(category='instance_id')
    if instance_id:
        # Add character to instance occupant list, creating it lazily.
        if self.ledger.db.instances[instance_id].get('occupants') is None:
            self.ledger.db.instances[instance_id]['occupants'] = []
        self.ledger.db.instances[instance_id]['occupants'].append(
            self.owner)
    else:
        # This instance_id acquisition should NOT fail. If it does, something went wrong.
        # (was an f-string with no placeholders)
        err_msg = "enter_instance could not find instance_id!"
        logger.log_file(err_msg, filename='instance_errors.log')
        self.owner.msg(
            '|rCRITICAL ERROR! enter_instance could not find the instance_id!|n'
        )
        return
def exit_instance(self, source_location):
    """
    Remove the owner from the occupant list of the instance just left,
    destroying the instance once the last occupant is gone.

    Args:
        source_location: The room the owner just departed from.

    Notes:
        A missing 'instance_id' tag is logged to instance_errors.log
        and reported to the owner as a critical error.
    """
    # This is where the instance cleanup is triggered.
    # Hoisted: the original fetched the tag twice.
    instance_id = source_location.tags.get(category='instance_id')
    if instance_id:
        self.ledger.db.instances[instance_id]['occupants'].remove(
            self.owner)
        # Last one out triggers the teardown.
        if len(self.ledger.db.instances[instance_id]['occupants']) == 0:
            self._destroy_instance(instance_id)
    else:
        # This instance_id acquisition should NOT fail. If it does, something went wrong.
        # (was an f-string with no placeholders)
        err_msg = "exit_instance could not find instance_id!"
        logger.log_file(err_msg, filename='instance_errors.log')
        self.owner.msg(
            '|rCRITICAL ERROR! exit_instance could not find the instance_id!|n'
        )
        return
def _parse_lockstring(self, storage_lockstring):
    """
    Helper function. This is normally only called when the
    lockstring is cached and does preliminary checking.

    Locks are stored as a string on the form
        atype:[NOT] lock()[[ AND|OR [NOT] lock()[...]];atype...

    Args:
        storage_lockstring (str): The lockstring to parse.

    Returns:
        dict: Maps each access_type to a tuple
            (evalstring, tuple of (func, args, kwargs), raw_lockstring).

    Raises:
        LockException: If any lock definition has syntax errors or
            references an unavailable lock function.
    """
    locks = {}
    if not storage_lockstring:
        return locks
    duplicates = 0
    elist = []  # errors
    wlist = []  # warnings
    # Each ';'-separated segment defines the lock for one access type.
    for raw_lockstring in storage_lockstring.split(';'):
        if not raw_lockstring:
            continue
        lock_funcs = []
        try:
            access_type, rhs = (part.strip()
                                for part in raw_lockstring.split(':', 1))
        except ValueError:
            # No ':' separator - malformed segment; log trace and bail.
            logger.log_trace()
            return locks
        # parse the lock functions and separators
        funclist = _RE_FUNCS.findall(rhs)
        evalstring = rhs
        # Lowercase AND/OR/NOT so the expression can later be eval'd as
        # a Python boolean expression.
        for pattern in ('AND', 'OR', 'NOT'):
            evalstring = re.sub(r"\b%s\b" % pattern, pattern.lower(),
                                evalstring)
        nfuncs = len(funclist)
        for funcstring in funclist:
            # Split "name(arg1, kw=val)" into name and argument string,
            # stripping the trailing ')'.
            funcname, rest = (part.strip().strip(')')
                              for part in funcstring.split('(', 1))
            func = _LOCKFUNCS.get(funcname, None)
            if not callable(func):
                elist.append(
                    _("Lock: lock-function '%s' is not available.")
                    % funcstring)
                continue
            # Positional args are comma-separated parts without '=',
            # keyword args those with it.
            args = list(arg.strip() for arg in rest.split(',')
                        if arg and '=' not in arg)
            kwargs = dict([
                arg.split('=', 1) for arg in rest.split(',')
                if arg and '=' in arg
            ])
            lock_funcs.append((func, args, kwargs))
            # Replace the call text with a '%s' placeholder for the
            # syntax test below.
            evalstring = evalstring.replace(funcstring, '%s')
        if len(lock_funcs) < nfuncs:
            # At least one lock-function was unavailable (error noted).
            continue
        try:
            # purge the eval string of any superfluous items, then test it
            evalstring = " ".join(_RE_OK.findall(evalstring))
            eval(evalstring % tuple(True for func in funclist), {}, {})
        except Exception:
            elist.append(
                _("Lock: definition '%s' has syntax errors.")
                % raw_lockstring)
            continue
        if access_type in locks:
            # A later definition silently overrides; only warn.
            duplicates += 1
            wlist.append(
                _(
                    "LockHandler on %(obj)s: access type '%(access_type)s' changed from '%(source)s' to '%(goal)s' "
                    % {
                        "obj": self.obj,
                        "access_type": access_type,
                        "source": locks[access_type][2],
                        "goal": raw_lockstring
                    }))
        locks[access_type] = (evalstring,
                              tuple(lock_funcs), raw_lockstring)
    if wlist and WARNING_LOG:
        # a warning text was set, it's not an error, so only report
        logger.log_file("\n".join(wlist), WARNING_LOG)
    if elist:
        # an error text was set, raise exception.
        raise LockException("\n".join(elist))
    # return the gathered locks in an easily executable form
    return locks
def log_zk(text):
    """Append *text* to the wolfzk.log debug log file."""
    logger.log_file(text, "wolfzk.log")
def _parse_lockstring(self, storage_lockstring):
    """
    Helper function. This is normally only called when the lockstring
    is cached and does preliminary checking.

    Locks are stored as a string on the form
        atype:[NOT] lock()[[ AND|OR [NOT] lock()[...]];atype...

    Args:
        storage_lockstring (str): The lockstring to parse.

    Returns:
        dict: Maps each access_type to a tuple
            (evalstring, tuple of (func, args, kwargs), raw_lockstring).

    Raises:
        LockException: If any lock definition has syntax errors or
            references an unavailable lock function.
    """
    locks = {}
    if not storage_lockstring:
        return locks
    duplicates = 0
    elist = []  # errors
    wlist = []  # warnings
    # Each ';'-separated segment defines the lock for one access type.
    for raw_lockstring in storage_lockstring.split(";"):
        if not raw_lockstring:
            continue
        lock_funcs = []
        try:
            access_type, rhs = (part.strip() for part in raw_lockstring.split(":", 1))
        except ValueError:
            # No ':' separator - malformed segment; log trace and bail.
            logger.log_trace()
            return locks

        # parse the lock functions and separators
        funclist = _RE_FUNCS.findall(rhs)
        evalstring = rhs
        # Lowercase AND/OR/NOT so the string can be eval'd as Python.
        for pattern in ("AND", "OR", "NOT"):
            evalstring = re.sub(r"\b%s\b" % pattern, pattern.lower(), evalstring)
        nfuncs = len(funclist)
        for funcstring in funclist:
            funcname, rest = (part.strip().strip(")") for part in funcstring.split("(", 1))
            func = _LOCKFUNCS.get(funcname, None)
            if not callable(func):
                elist.append(_("Lock: lock-function '%s' is not available.") % funcstring)
                continue
            # Idiom fix: `"=" not in arg` (was `not "=" in arg`).
            args = list(arg.strip() for arg in rest.split(",") if arg and "=" not in arg)
            kwargs = dict([arg.split("=", 1) for arg in rest.split(",") if arg and "=" in arg])
            lock_funcs.append((func, args, kwargs))
            # Replace the call text with a '%s' placeholder for the
            # syntax test below.
            evalstring = evalstring.replace(funcstring, "%s")
        if len(lock_funcs) < nfuncs:
            # At least one lock-function was unavailable (error noted).
            continue
        try:
            # purge the eval string of any superfluous items, then test it
            evalstring = " ".join(_RE_OK.findall(evalstring))
            eval(evalstring % tuple(True for func in funclist), {}, {})
        except Exception:
            elist.append(_("Lock: definition '%s' has syntax errors.") % raw_lockstring)
            continue
        if access_type in locks:
            # A later definition silently overrides; only warn.
            duplicates += 1
            wlist.append(
                _(
                    "LockHandler on %(obj)s: access type '%(access_type)s' changed from '%(source)s' to '%(goal)s' "
                    % {
                        "obj": self.obj,
                        "access_type": access_type,
                        "source": locks[access_type][2],
                        "goal": raw_lockstring,
                    }
                )
            )
        locks[access_type] = (evalstring, tuple(lock_funcs), raw_lockstring)
    # BUG FIX: also require WARNING_LOG to be set before logging -
    # the original `if wlist:` would attempt to write to a None/empty
    # filename when no warning log is configured.
    if wlist and WARNING_LOG:
        # a warning text was set, it's not an error, so only report
        logger.log_file("\n".join(wlist), WARNING_LOG)
    if elist:
        # an error text was set, raise exception.
        raise LockException("\n".join(elist))
    # return the gathered locks in an easily executable form
    return locks
def log_mask(self, wearer):
    """Record a player putting on a mask, to keep track of shenanigans."""
    entry = f"{wearer} ({wearer.id}) put on {self} ({self.id})"
    log_file(entry, "player_masks.log")
def black_hole(self, object):
    """
    Destroy *object* immediately, logging the deletion to
    black_hole.log when it succeeds.
    """
    # Capture the name first - it is gone after delete().
    doomed_name = object.name
    if object.delete():
        logger.log_file(f"{doomed_name} has been destroyed!",
                        filename='black_hole.log')
def _parse_conditional_string(storage_conditionstring):
    """
    Parse a raw condition string into an executable condition dict.

    Conditions are stored as a string on the form
        atype:[NOT] cond()[[ AND|OR [NOT] cond()[...]];atype...

    Args:
        storage_conditionstring (str): The condition string to parse.

    Returns:
        dict: Maps each access_type to a tuple
            (evalstring, tuple of (func, args, kwargs), raw_condition).

    Raises:
        ConditionalException: If any condition definition has syntax
            errors or references an unavailable condition function.
    """
    conditions = {}
    if not storage_conditionstring:
        return conditions
    duplicates = 0
    elist = []  # errors
    wlist = []  # warnings
    # Each ';'-separated segment defines one access type's condition.
    for raw_condition in storage_conditionstring.split(';'):
        if not raw_condition:
            continue
        condition_funcs = []
        try:
            access_type, rhs = (part.strip()
                                for part in raw_condition.split(':', 1))
        except ValueError:
            # No ':' separator - malformed segment; log trace and bail.
            logger.log_trace()
            return conditions
        # parse the lock functions and separators
        funclist = _RE_FUNCS.findall(rhs)
        evalstring = rhs
        # Lowercase AND/OR/NOT so the expression can later be eval'd as
        # a Python boolean expression.
        for pattern in ('AND', 'OR', 'NOT'):
            evalstring = re.sub(r"\b%s\b" % pattern, pattern.lower(),
                                evalstring)
        nfuncs = len(funclist)
        for funcstring in funclist:
            # Split "name(arg1, kw=val)" into name and argument string,
            # stripping the trailing ')'.
            funcname, rest = (part.strip().strip(')')
                              for part in funcstring.split('(', 1))
            func = _MAGIC_CONDITION_FUNCS.get(funcname, None)
            if not callable(func):
                elist.append(
                    _("Condition: magic condition-function '%s' is not available."
                      ) % funcstring)
                continue
            # Positional args are comma-separated parts without '=',
            # keyword args those with it.
            args = list(arg.strip() for arg in rest.split(',')
                        if arg and '=' not in arg)
            kwargs = dict([
                arg.split('=', 1) for arg in rest.split(',')
                if arg and '=' in arg
            ])
            condition_funcs.append((func, args, kwargs))
            # Replace the call text with a '%s' placeholder for the
            # syntax test below.
            evalstring = evalstring.replace(funcstring, '%s')
        if len(condition_funcs) < nfuncs:
            # At least one condition-function was unavailable.
            continue
        try:
            # purge the eval string of any superfluous items, then test it
            evalstring = " ".join(_RE_OK.findall(evalstring))
            eval(evalstring % tuple(True for func in funclist), {}, {})
        except Exception:
            elist.append(
                _("Condition: definition '%s' has syntax errors.")
                % raw_condition)
            continue
        if access_type in conditions:
            # A later definition silently overrides; only warn.
            duplicates += 1
            wlist.append(
                _(
                    "ConditionalHandler: access type '%(access_type)s' changed from "
                    "'%(source)s' to '%(goal)s' " % {
                        "access_type": access_type,
                        "source": conditions[access_type][2],
                        "goal": raw_condition
                    }))
        conditions[access_type] = (evalstring, tuple(condition_funcs),
                                   raw_condition)
    if wlist and WARNING_LOG:
        # a warning text was set, it's not an error, so only report
        logger.log_file("\n".join(wlist), WARNING_LOG)
    if elist:
        # an error text was set, raise exception.
        raise ConditionalException("\n".join(elist))
    # return the gathered locks in an easily executable form
    return conditions