def dbm_cache_store(srv, dbmfile, filename, mtime, val):
    """Write a compiled code object into the dbm cache.

    The entry is keyed on *filename* and records the source *mtime* so
    that dbm_cache_get() can later detect a stale entry.
    """
    dbm_type = dbm_cache_type(dbmfile)

    # NOTE: locking rationale (see http://issues.apache.org/jira/browse/MODPYTHON-69):
    # mod_python < 3.2 used "pspcache" as the lock key, which hashes onto
    # one of the 31 non-reserved mutexes and could collide with a session's
    # mutex in the same request, deadlocking (confirmed by testing).  Using
    # index 0 with a None key avoids the hash entirely.  Index 0 is shared
    # with DbmSession's dbm-file locking, but neither holds it for the
    # duration of a request, so the two cannot deadlock each other.
    _apache._global_lock(srv, None, 0)
    try:
        db = dbm_type.open(dbmfile, 'c')
        db[filename] = "%d %s" % (mtime, code2str(val))
    finally:
        # Best-effort close: db may be unbound if open() itself failed.
        try:
            db.close()
        except:
            pass
        _apache._global_unlock(srv, None, 0)
def lock_file(self):
    """Acquire the per-session lock for direct dbm file manipulation.

    When session locking is enabled (self._lock = 1), BaseSession has
    already taken the lock and set self._locked, so this is a no-op;
    otherwise the lock is acquired here explicitly.
    """
    if self._locked:
        return
    _apache._global_lock(self._req.server, self._sid)
    self._locked = 1
def dbm_cleanup(data):
    """Cleanup callback: purge expired session records from a dbm file.

    data is a (filename, server) tuple.  Walks every record with the
    bsddb-style first()/next() cursor API, collects the keys of sessions
    whose _accessed time exceeds their _timeout (or that lack those
    fields entirely), then deletes them in a second pass so the cursor
    is never invalidated mid-walk.
    """
    filename, server = data
    # Mutex 0 with a None key serializes all access to the session dbm
    # file (same convention as the other dbm users in this module).
    _apache._global_lock(server, None, 0)
    db = dbm.open(filename, 'c')
    try:
        old = []
        s = db.first()
        while 1:
            key, val = s
            session = loads(val)
            try:
                if (time.time() - session["_accessed"]) > session["_timeout"]:
                    old.append(key)
            except KeyError:
                # Record without bookkeeping fields is unusable -- drop it.
                old.append(key)
            try:
                # BUGFIX: the original called next(db), an incorrect 2to3
                # rewrite of db.next() -- dbm cursor objects are not
                # iterators, so next(db) raises TypeError (uncaught)
                # instead of advancing to the next record.
                s = db.next()
            except KeyError:
                # Cursor exhausted -- end of the walk.
                break
        for key in old:
            try:
                del db[key]
            except Exception:
                # Best-effort delete; another process may have removed it.
                pass
    finally:
        db.close()
        _apache._global_unlock(server, None, 0)
def do_save(self, dict):
    """Persist the session dictionary into the dbm file under this sid."""
    # Global mutex 0 guards the shared dbm file for the whole write.
    _apache._global_lock(self._req.server, None, 0)
    db = self._get_dbm()
    try:
        db[self._sid.encode()] = dumps(dict)
    finally:
        db.close()
        _apache._global_unlock(self._req.server, None, 0)
def do_delete(self):
    """Remove this session's record from the dbm file, if it exists."""
    _apache._global_lock(self._req.server, None, 0)
    db = self._get_dbm()
    try:
        try:
            del db[self._sid.encode()]
        except KeyError:
            # Already gone -- deletion is idempotent.
            pass
    finally:
        db.close()
        _apache._global_unlock(self._req.server, None, 0)
def global_lock(req):
    """Test handler for _apache's global mutex API.

    Acquires global mutex index 1, holds it for one second, releases it,
    and reports success.  Two concurrent requests to this handler should
    therefore serialize, which is what the test harness checks for.
    """
    import _apache
    _apache._global_lock(req.server, 1)
    # Hold the lock long enough for an overlapping request to block on it.
    time.sleep(1)
    _apache._global_unlock(req.server, 1)
    req.write("test ok")
    return apache.OK
def do_load(self):
    """Fetch and unpickle this session's dict from the dbm file.

    Returns None when no record exists for the session id.  The global
    mutex is released and the dbm handle closed on every exit path.
    """
    _apache._global_lock(self._req.server, None, 0)
    db = self._get_dbm()
    try:
        key = self._sid.encode()
        if key not in db:
            return None
        return loads(db[key])
    finally:
        db.close()
        _apache._global_unlock(self._req.server, None, 0)
def do_load(self):
    """Fetch and unpickle this session's dict from the dbm file.

    Returns None when the session id has no record.  Lock and dbm
    handle are released on every exit path via the finally clause.
    """
    _apache._global_lock(self._req.server, None, 0)
    db = self._get_dbm()
    try:
        if not db.has_key(self._sid):
            return None
        return cPickle.loads(db[self._sid])
    finally:
        db.close()
        _apache._global_unlock(self._req.server, None, 0)
def dbm_cache_get(srv, dbmfile, filename, mtime):
    """Look up a cached code object for *filename* in the dbm cache.

    Returns the decoded code object when a cache entry exists and its
    stored mtime matches *mtime*; returns None when there is no entry
    or (implicitly) when the entry is stale.  Entries are stored as
    "<mtime> <encoded-code>" by dbm_cache_store().
    """
    dbm_type = dbm_cache_type(dbmfile)

    # Same locking scheme as dbm_cache_store(): global mutex index 0,
    # None key, to avoid hash collisions with session mutexes.
    _apache._global_lock(srv, None, 0)
    try:
        db = dbm_type.open(dbmfile, 'c')
        try:
            try:
                entry = db[filename]
            except KeyError:
                # Nothing cached for this file.
                return None
            stamp, encoded = entry.split(" ", 1)
            if long(stamp) == mtime:
                return str2code(encoded)
            # Stale entry: fall through and return None implicitly.
        finally:
            # Best-effort close; db may be unbound if open() failed.
            try:
                db.close()
            except:
                pass
    finally:
        _apache._global_unlock(srv, None, 0)
def lock(self):
    """Take the per-session global mutex when session locking is enabled.

    Also registers a request cleanup so the mutex is released even if
    the handler never calls unlock() explicitly.
    """
    if not self._lock:
        return
    _apache._global_lock(self._req.server, self._sid)
    self._locked = 1
    self._req.register_cleanup(unlock_session_cleanup, self)