print myconfig.take('db.uri') You can even pass a cast function, i.e. print myconfig.take('auth.expiration', cast=int) Once the value has been fetched (and casted) it won't change until the process is restarted (or reload=True is passed). """ import os import json from gluon._compat import thread, configparser from gluon.globals import current locker = thread.allocate_lock() def AppConfig(*args, **vars): locker.acquire() reload_ = vars.pop('reload', False) try: instance_name = 'AppConfig_' + current.request.application if reload_ or not hasattr(AppConfig, instance_name): setattr(AppConfig, instance_name, AppConfigLoader(*args, **vars)) return getattr(AppConfig, instance_name).settings finally: locker.release()
| Copyrighted by Massimo Di Pierro <*****@*****.**>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

Functions required to execute app components
--------------------------------------------

Note: FOR INTERNAL USE ONLY
"""

from os import stat

from gluon.fileutils import read_file
from gluon._compat import thread

# Module-level cache of filtered file contents, keyed by caller-chosen key.
cfs = {}  # for speed-up
cfs_lock = thread.allocate_lock()  # and thread safety


def getcfs(key, filename, filter=None):
    """
    Caches the *filtered* file `filename` with `key` until the file is
    modified.

    Args:
        key(str): the cache key
        filename: the file to cache
        filter: is the function used for filtering. Normally `filename` is a
            .py file and `filter` is a function that bytecode compiles the
            file. In this way the bytecode compiled file is cached.
            (Default = None)

            This is used on Google App Engine since pyc files
            cannot be saved.
class CacheInRam(CacheAbstract):
    """
    Ram based caching

    This is implemented as a global (per process, shared by all threads)
    dictionary.
    A mutex-lock mechanism avoids conflicts.
    """

    # Process-wide lock protecting meta_storage/stats and each app's storage.
    locker = thread.allocate_lock()
    # Per-application storage dictionaries, shared by every instance.
    meta_storage = {}
    # Per-application counters: {'hit_total': ..., 'misses': ...}.
    stats = {}

    def __init__(self, request=None):
        # Real setup is deferred to initialize() so construction stays cheap.
        self.initialized = False
        self.request = request
        # An OrderedDict lets remove_oldest_entries() evict in insertion
        # order under memory pressure; a plain dict is enough otherwise.
        self.storage = OrderedDict() if HAVE_PSUTIL else {}
        self.app = request.application if request else ''

    def initialize(self):
        """Bind this instance to the shared per-application storage/stats.

        Idempotent; safe to call before every operation.
        """
        if self.initialized:
            return
        self.initialized = True
        self.locker.acquire()
        # try/finally so an unexpected exception cannot leave the
        # process-wide lock held forever.
        try:
            if self.app not in self.meta_storage:
                self.storage = self.meta_storage[self.app] = \
                    OrderedDict() if HAVE_PSUTIL else {}
                self.stats[self.app] = {'hit_total': 0, 'misses': 0}
            else:
                self.storage = self.meta_storage[self.app]
        finally:
            self.locker.release()

    def clear(self, regex=None):
        """Remove cached items: all of them, or only keys matching `regex`."""
        self.initialize()
        self.locker.acquire()
        try:
            storage = self.storage
            if regex is None:
                storage.clear()
            else:
                # _clear() and re.matching can raise on a bad pattern;
                # the finally below guarantees the lock is released.
                self._clear(storage, regex)
            if self.app not in self.stats:
                self.stats[self.app] = {'hit_total': 0, 'misses': 0}
        finally:
            self.locker.release()

    def __call__(self, key, f,
                 time_expire=DEFAULT_TIME_EXPIRE,
                 destroyer=None):
        """Return the value cached under `key`, computing it via `f` if stale.

        Attention! cache.ram does not copy the cached object.
        It just stores a reference to it. Turns out that deepcopying the
        object has some problems:

        - it would break backward compatibility
        - it would be limiting because people may want to cache live objects
        - it would make storage and retrieval slow

        Anyway, you can deepcopy explicitly in the function generating the
        value to be cached.

        A `time_expire` of None means "never expires"; `f=None` means
        "delete the entry".  `destroyer`, when given, is called on values
        being discarded.
        """
        self.initialize()
        dt = time_expire
        now = time.time()
        self.locker.acquire()
        try:
            item = self.storage.get(key, None)
            # f=None is the "forget this key" protocol.
            if item and f is None:
                del self.storage[key]
                if destroyer:
                    destroyer(item[1])
            # hit_total counts every access, not only cache hits.
            self.stats[self.app]['hit_total'] += 1
        finally:
            self.locker.release()
        if f is None:
            return None
        if item and (dt is None or item[0] > now - dt):
            # Cached value is still fresh.
            return item[1]
        elif item and (item[0] < now - dt) and destroyer:
            # Stale entry is about to be replaced: let the caller dispose
            # of the old value first.
            destroyer(item[1])
        # Deliberately compute outside the lock so a slow f() does not
        # block other threads' cache operations.
        value = f()
        self.locker.acquire()
        try:
            self.storage[key] = (now, value)
            self.stats[self.app]['misses'] += 1
            # Occasionally (p=0.10) evict the oldest entries when a RAM
            # budget is configured.
            # NOTE(review): self.max_ram_utilization is not set in this
            # class — presumably assigned elsewhere; confirm.
            if HAVE_PSUTIL and self.max_ram_utilization is not None \
                    and random.random() < 0.10:
                remove_oldest_entries(self.storage,
                                      percentage=self.max_ram_utilization)
        finally:
            self.locker.release()
        return value

    def increment(self, key, value=1):
        """Add `value` to the entry under `key` (creating it if missing)
        and return the new total; the entry's timestamp is refreshed."""
        self.initialize()
        self.locker.acquire()
        # try/finally replaces the original manual release-and-`raise e`,
        # which released the lock only on BaseException and re-raised with
        # a truncated traceback.
        try:
            if key in self.storage:
                value = self.storage[key][1] + value
            self.storage[key] = (time.time(), value)
        finally:
            self.locker.release()
        return value