def __init__(self, data):
    if 'name' not in data:
        raise TraitException("Required key not found in trait data: 'name'")
    if 'type' not in data:
        raise TraitException("Required key not found in trait data: 'type'")
    self._type = data['type']
    if 'base' not in data:
        data['base'] = 0
    if 'mod' not in data:
        data['mod'] = 0
    if 'extra' not in data:
        data['extra'] = {}
    if 'min' not in data:
        data['min'] = 0 if self._type == 'gauge' else None
    if 'max' not in data:
        data['max'] = 'base' if self._type == 'gauge' else None
    self._data = data
    self._keys = ('name', 'type', 'base', 'mod', 'current', 'min', 'max', 'extra')
    self._locked = True
    if not isinstance(data, _SaverDict):
        logger.log_warn(
            'Non-persistent {} class loaded.'.format(type(self).__name__))
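
# A minimal sketch of the trait data this constructor expects. The key names
# come from the checks above; the concrete values are illustrative assumptions.
example_gauge_trait = {
    'name': 'Health',   # required
    'type': 'gauge',    # required; 'gauge' traits get min/max defaults
    'base': 10,         # optional, defaults to 0
    'mod': 0,           # optional, defaults to 0
    'extra': {},        # optional, defaults to {}
    # 'min' defaults to 0 and 'max' defaults to 'base' for gauge-type traits
}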
def at_db_location_postsave(self, new):
    """
    This is called automatically after the location field was saved, no
    matter how. It checks for a variable _safe_contents_update to know if
    the save was triggered via the location handler (which updates the
    contents cache) or not.

    Args:
        new (bool): Set if this location has not yet been saved before.

    """
    if not hasattr(self, "_safe_contents_update"):
        # changed/set outside of the location handler
        if new:
            # if new, there is no previous location to worry about
            if self.db_location:
                self.db_location.contents_cache.add(self)
        else:
            # Since we cannot know at this point what old_location was, we
            # trigger a full-on contents_cache update here.
            logger.log_warn(
                "db_location direct save triggered contents_cache.init() for all objects!")
            for o in self.__dbclass__.get_all_cached_instances():
                o.contents_cache.init()
def at_repeat(self):
    """
    This gets called every self.obj.db.idle_interval seconds.
    """
    if self.obj.location:
        # Random percentage chance of an idle message proccing
        remaining_chance = random.random()
        # Iterate over the room and its contents in random order
        possible_idle_objs = self.obj.location.contents[:]
        possible_idle_objs.append(self.obj.location)
        for obj in random.sample(possible_idle_objs, len(possible_idle_objs)):
            if (obj != self.obj and obj.db.idle
                    and obj.access(self.obj, "view")
                    and obj.access(self.obj, "idle", default=True)):
                # Randomly pick a line
                for idle_time, idle_line in random.sample(obj.db.idle, len(obj.db.idle)):
                    # If it's not a valid number, just always display it
                    if idle_time <= 0:
                        idle_time = IDLE_INTERVAL
                    # Following the expected value, convert this to a
                    # probability per IDLE_INTERVAL seconds
                    remaining_chance -= IDLE_INTERVAL / idle_time
                    # Only one object at a time can display its idle message,
                    # to avoid spam
                    if remaining_chance <= 0:
                        # Note that we only message the player, so idle messages
                        # are NOT broadcast to everyone. This means players with
                        # a higher perception skill, for instance, can
                        # artificially boost their chance of seeing idle messages.
                        self.obj.msg(idle_line)
                        return
    else:
        logger.log_warn(
            "IdleScript on object {} still running without a location".format(self.obj.id))
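
# Illustration of the expected-value conversion used above. IDLE_INTERVAL = 5 is
# an assumption here; the real value comes from the module's configuration. A
# line meant to appear "about every 60 seconds on average" gets probability
# 5/60 per tick, so over twelve 5-second ticks it procs roughly once on average.
IDLE_INTERVAL = 5
idle_time = 60
per_tick_probability = IDLE_INTERVAL / idle_time                         # ~0.083
expected_procs_per_minute = per_tick_probability * (60 / IDLE_INTERVAL)  # ~1.0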
def quest(accessing_obj, accessed_obj, *args, **kwargs):
    """
    Checks to see if we have encountered a quest, or matches the exact
    stage of a quest.
    """
    # Invalid lock
    if len(args) == 0:
        logger.log_warn("Invalid quest lock on {} accessed by {}.".format(
            accessed_obj, accessing_obj))
        return False
    # Just check if we have seen the quest before
    quest_name = args[0]
    if len(args) == 1:
        return accessing_obj.quest_status(quest_name) is not None
    compare = kwargs.get('compare', 'eq')
    # Note that QUEST_COMPLETE is 0, so to check for completed quests, lock on 0
    required_quest_status = args[1]
    quest_status = accessing_obj.quest_status(quest_name)
    # Convert to a float-able value for comparisons
    if quest_status is None:
        quest_status = -1
    return CF_MAPPING.get(compare, CF_MAPPING['default'])(
        quest_status, required_quest_status)
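
# Hypothetical lock strings using the quest() lock function above, assuming it
# is registered under the name "quest" in the game's lock functions. The quest
# names and stage numbers are illustrative only.
#
#   view:quest(lost_heirloom)           - the accessor has seen the quest at all
#   traverse:quest(lost_heirloom, 2)    - quest is at stage 2 (compare defaults to 'eq')
#   get:quest(lost_heirloom, 0)         - QUEST_COMPLETE is 0, so this checks completion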
def __init__(self, data):
    if 'name' not in data:
        raise CurrencyException("Required key not found in currency data: 'name'")
    if 'value' not in data:
        raise CurrencyException("Required key not found in currency data: 'value'")
    self._value = data['value']
    if 'amount' not in data:
        data['amount'] = 0
    self._data = data
    self._keys = {'name', 'value', 'amount'}
    self._locked = True
    if not isinstance(data, _SaverDict):
        logger.log_warn('Non-persistent {} class loaded.'.format(type(self).__name__))
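
# A minimal sketch of the currency data this constructor expects. The key names
# come from the checks above; the concrete values are illustrative assumptions.
example_currency = {
    'name': 'gold',   # required
    'value': 100,     # required; relative worth of one unit
    'amount': 0,      # optional, defaults to 0
}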
def func(self):
    """
    Lists out your quests.
    """
    table = evtable.EvTable("Quest", "Description", border="cells", maxwidth=80)
    for quest, quest_status in self.caller.db.quests.items():
        try:
            quest_module = mod_import(QUEST_DIR + quest)
        except Exception as e:
            logger.log_err("Failed to import module {}: {}".format(
                QUEST_DIR + quest, e))
            continue
        # Quest already completed
        if not quest_status:
            continue
        # If we're missing the quest name then we skip displaying it
        quest_name = getattr(quest_module, QUEST_NAME_CONST, None)
        if not quest_name:
            continue
        # Skip the quest if its description for this stage is missing
        quest_descs = getattr(quest_module, QUEST_DESC_CONST, None)
        if not quest_descs or len(quest_descs) < quest_status + 1:
            logger.log_warn(
                "Quest {} missing description for progression {}.".format(
                    quest, quest_status))
            continue
        table.add_row(quest_name, quest_descs[quest_status])
    if table.nrows < 2:
        self.caller.msg("No active quests.")
    else:
        output = "|wActive Quests|n\n{}".format(table)
        self.caller.msg(output)
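
# Sketch of a quest module this command could display, assuming
# QUEST_NAME_CONST = "QUEST_NAME" and QUEST_DESC_CONST = "QUEST_DESCS" (the real
# constant names live in the game's quest settings). Index 0 is never shown,
# since a status of 0 (QUEST_COMPLETE) is skipped above.
QUEST_NAME = "The Lost Heirloom"
QUEST_DESCS = [
    "Complete.",                                # status 0 (QUEST_COMPLETE)
    "Ask around town about the missing ring.",  # status 1
    "Search the riverbank for the ring.",       # status 2
]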
def __init__(self, data): if not "name" in data: raise TraitException("Required key not found in trait data: 'name'") if not "type" in data: raise TraitException("Required key not found in trait data: 'type'") self._type = data["type"] if not "base" in data: data["base"] = 0 if not "mod" in data: data["mod"] = 0 if not "extra" in data: data["extra"] = {} if "min" not in data: data["min"] = 0 if self._type == "gauge" else None if "max" not in data: data["max"] = "base" if self._type == "gauge" else None self._data = data self._keys = ("name", "type", "base", "mod", "current", "min", "max", "extra") self._locked = True if not isinstance(data, _SaverDict): logger.log_warn("Non-persistent {} class loaded.".format(type(self).__name__))
def conditional_flush(max_rmem, force=False):
    """
    Flush the cache if the estimated memory usage exceeds `max_rmem`.

    The flusher has a timeout to avoid flushing over and over in particular
    situations (this means that for some setups the memory usage will exceed
    the requirement and a server with more memory is probably required for
    the given game).

    Args:
        max_rmem (int): memory-usage estimation threshold after which the
            cache is flushed.
        force (bool, optional): forces a flush, regardless of timeout.
            Defaults to `False`.

    """
    global LAST_FLUSH

    def mem2cachesize(desired_rmem):
        """
        Estimate the size of the idmapper cache based on the memory desired.
        This is used to optionally cap the cache size.

        desired_rmem - memory in MB (minimum 50MB)

        The formula is empirically estimated from usage tests (Linux) and is

            Ncache = (RMEM - 35.0) / 0.0157

        where RMEM is given in MB and Ncache is the size of the cache for
        this memory usage. VMEM tends to be about 100MB higher than RMEM
        for large memory usage.
        """
        vmem = max(desired_rmem, 50.0)
        Ncache = int(abs(float(vmem) - 35.0) / 0.0157)
        return Ncache

    if not max_rmem:
        # auto-flush is disabled
        return

    now = time.time()
    if not LAST_FLUSH:
        # server is just starting
        LAST_FLUSH = now
        return

    if ((now - LAST_FLUSH) < AUTO_FLUSH_MIN_INTERVAL) and not force:
        # too soon after last flush.
        logger.log_warn(
            "Warning: Idmapper flush called more than once in %s min interval. "
            "Check memory usage." % (AUTO_FLUSH_MIN_INTERVAL / 60.0))
        return

    if os.name == "nt":
        # we can't look for mem info in Windows at the moment
        return

    # check actual memory usage
    Ncache_max = mem2cachesize(max_rmem)
    Ncache, _ = cache_size()
    actual_rmem = float(
        os.popen('ps -p %d -o %s | tail -1' % (os.getpid(), "rss")).read()) / 1000.0  # resident memory

    if Ncache >= Ncache_max and actual_rmem > max_rmem * 0.9:
        # flush cache when the number of objects in cache is big enough and our
        # actual memory use is within 10% of our set max
        flush_cache()
        LAST_FLUSH = now
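
# Worked example of the empirical formula above (the 500 MB budget is
# illustrative). For a 500 MB resident-memory cap,
#   Ncache_max = (500 - 35.0) / 0.0157 ≈ 29617 cached objects,
# so the cache is only flushed once it holds roughly that many objects AND the
# measured RSS exceeds 90% of the 500 MB cap.
print(int((500 - 35.0) / 0.0157))   # -> 29617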
def func(self):
    if "all" in self.switches:
        idle_objs = search.search_object_by_tag(IDLE_TAG, TAG_CATEGORY_BUILDING)
        if len(idle_objs) == 0:
            self.caller.msg("No objects with idle lines exist.")
            return
        table = evtable.EvTable("Obj #", "Object", "Loc #", "Location")
        for obj in idle_objs:
            if obj.location:
                location_dbref = obj.location.dbref
                location_key = obj.location.key
            else:
                location_dbref = "N/A"
                location_key = "No Location"
            table.add_row(obj.dbref, obj.key, location_dbref, location_key)
        output = "|wObjects with idle lines by location:|n\n{}".format(table)
        self.caller.msg(output)
        return

    if not self.args:
        if not self.caller.location:
            self.caller.msg("No location to search for idle objects.")
            return
        idle_objs = []
        # The location may also have idle lines
        if self.caller.location.db.idle:
            idle_objs.append(self.caller.location)
        for obj in self.caller.location.contents:
            if obj.db.idle:
                idle_objs.append(obj)
        if len(idle_objs) == 0:
            self.caller.msg(
                "No objects with idle lines are present in {}.".format(
                    self.caller.location.name))
            return
        output = "Objects with idle lines in this room:"
        for obj in idle_objs:
            output += "\n(#{}) {}".format(obj.id, obj.name)
        self.caller.msg(output)
        return

    target = self.caller.search(self.lhs)
    if not target:
        return

    # Clear all idle messages from an object
    if "clear" in self.switches:
        if target.db.idle:
            del target.db.idle
            self.caller.msg("All idle lines cleared from {}.".format(target.name))
        else:
            self.caller.msg("{} had no idle lines to clear.".format(target.name))
        return

    # Delete specified message
    if "del" in self.switches:
        if not self.rhs:
            self.caller.msg("Usage: @idle/del <objname> = <idle id>")
            return
        try:
            idle_id = int(self.rhs)
        except ValueError:
            self.caller.msg("Usage: @idle/del <objname> = <idle id>")
            return
        except Exception as e:
            logger.log_warn(
                "Unexpected exception casting idle_id on object {}: {}".format(
                    target.id, e))
            return
        idle_list = target.db.idle
        if not idle_list:
            self.caller.msg("{} has no idle lines.".format(target.name))
            return
        if idle_id >= len(idle_list):
            self.caller.msg("Idle list's largest index is {}.".format(
                len(idle_list) - 1))
            return
        # Remove the nth element
        _, idle_line = target.db.idle.pop(idle_id)
        self.caller.msg("Removed from {}: {}".format(target.name, idle_line))
        return

    # Check idle messages on an object
    if not self.rhs:
        idle_list = target.db.idle
        if not idle_list:
            self.caller.msg("No idle messages on {}.".format(target.name))
            return
        # ID only looks good up to 99, but it will still function after that
        table = evtable.EvTable("ID", "Avg Sec", "Message")
        for i, (idle_time, idle_line) in enumerate(idle_list):
            table.add_row(i, idle_time, idle_line)
        output = "|wIdle messages on {}:|n\n{}".format(target.name, table)
        self.caller.msg(output)
        return

    # Add new idle line
    rhs_split = self.rhs.split(",", 1)
    if len(rhs_split) != 2:
        self.caller.msg("Usage: @idle <objname> = <avg seconds>, <idle message>")
        return
    idle_time, idle_line = rhs_split
    try:
        idle_time = int(idle_time)
    except ValueError:
        self.caller.msg("Usage: @idle <objname> = <avg seconds>, <idle message>")
        return
    except Exception as e:
        logger.log_warn(
            "Unexpected exception casting idle_time on object {}: {}".format(
                target.id, e))
        return
    idle_line = idle_line.strip()
    if not target.db.idle:
        target.db.idle = [(idle_time, idle_line)]
    else:
        target.db.idle.append((idle_time, idle_line))
    target.tags.add(IDLE_TAG, TAG_CATEGORY_BUILDING)
    self.caller.msg("Added new idle message to {}.".format(target.name))
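
# Example invocations of the @idle command above. The object name and values
# are illustrative; the switch names come from the branches in func().
#
#   @idle/all                                    - list every object with idle lines
#   @idle                                        - list idle objects in the current room
#   @idle fountain                               - show the idle messages on "fountain"
#   @idle fountain = 60, Water burbles softly.   - add a line shown roughly every 60s
#   @idle/del fountain = 0                       - delete the idle line with ID 0
#   @idle/clear fountain                         - remove all idle lines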
def send_templated_mail(template_name, email_context, recipients, sender=None,
                        bcc=None, fail_silently=False, files=None):
    """
    send_templated_mail() is a wrapper around Django's e-mail routines that
    allows us to easily send multipart (text/plain & text/html) e-mails using
    templates that are stored in the database. This lets the admin provide
    both a text and an HTML template for each message.

    template_name is the slug of the template to use for this message (see
        models.EmailTemplate)

    email_context is a dictionary to be used when rendering the template

    recipients can be either a string, eg '*****@*****.**', or a list of
        strings.

    sender should contain a string, eg 'My Site <*****@*****.**>'. If you
        leave it blank, it'll use settings.DEFAULT_FROM_EMAIL as a fallback.

    bcc is an optional list of addresses that will receive this message as a
        blind carbon copy.

    fail_silently is passed to Django's mail routine. Set to 'True' to ignore
        any errors at send time.

    files can be a list of tuples. Each tuple should contain a filename to
        attach along with the File object to be read. files can be blank.
    """
    from django.conf import settings
    from django.core.mail import EmailMultiAlternatives
    from django.template import engines
    from django.utils.safestring import mark_safe

    from web.helpdesk.models import EmailTemplate
    from web.helpdesk.settings import HELPDESK_EMAIL_SUBJECT_TEMPLATE

    import os

    context = dict(email_context)

    if hasattr(context['queue'], 'locale'):
        locale = getattr(context['queue'], 'locale', '')
    else:
        locale = context['queue'].get('locale', 'en')
    if not locale:
        locale = 'en'

    t = None
    try:
        t = EmailTemplate.objects.get(template_name__iexact=template_name, locale=locale)
    except EmailTemplate.DoesNotExist:
        pass

    if not t:
        try:
            t = EmailTemplate.objects.get(template_name__iexact=template_name, locale__isnull=True)
        except EmailTemplate.DoesNotExist:
            from evennia.utils.logger import log_warn
            log_warn('template "%s" does not exist, no mail sent' % template_name)
            return  # just ignore if template doesn't exist

    if not sender:
        sender = settings.DEFAULT_FROM_EMAIL

    footer_file = os.path.join('helpdesk', locale, 'email_text_footer.txt')

    text_part = engines['django'].from_string(
        "%s{%% include '%s' %%}" % (t.plain_text, footer_file)).render(context)

    email_html_base_file = os.path.join('helpdesk', locale, 'email_html_base.html')

    # keep new lines in html emails
    if 'comment' in context:
        html_txt = context['comment']
        html_txt = html_txt.replace('\r\n', '<br>')
        context['comment'] = mark_safe(html_txt)

    html_part = engines['django'].from_string(
        "{%% extends '%s' %%}{%% block title %%}%s{%% endblock %%}"
        "{%% block content %%}%s{%% endblock %%}" % (
            email_html_base_file, t.heading, t.html)).render(context)

    subject_part = engines['django'].from_string(
        HELPDESK_EMAIL_SUBJECT_TEMPLATE % {"subject": t.subject}).render(context)

    if isinstance(recipients, str):
        # a plain string may hold one address or a comma-separated list
        recipients = recipients.split(',')
    elif not isinstance(recipients, list):
        recipients = [recipients]

    msg = EmailMultiAlternatives(
        subject_part.replace('\n', '').replace('\r', ''),
        text_part, sender, recipients, bcc=bcc)
    msg.attach_alternative(html_part, "text/html")

    if files:
        for attachment in files:
            file_to_attach = attachment[1]
            file_to_attach.open()
            msg.attach(filename=attachment[0], content=file_to_attach.read())
            file_to_attach.close()

    return msg.send(fail_silently)
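
# A hypothetical call to send_templated_mail(). It assumes a configured Django
# project with the helpdesk app installed and a 'newticket_cc' EmailTemplate in
# the database; the template slug, context keys and addresses are illustrative.
send_templated_mail(
    template_name='newticket_cc',
    email_context={
        'queue': {'locale': 'en'},                      # dict queue -> locale via .get()
        'comment': 'First line\r\nSecond line',         # newlines become <br> in the HTML part
    },
    recipients='user@example.com, admin@example.com',   # comma-separated string is split
    fail_silently=True,
)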