def __getitem__(self, item):
    """Fetch *item*, caching the hit; on a miss, fall back stepwise.

    A successful lookup records the key/value pair in ``last_key`` /
    ``last_value``.  A failed lookup returns ``self.default`` when no
    prior hit exists or when ``self.cmp(item, last_key)`` holds;
    otherwise the most recently cached value is returned.
    """
    try:
        value = IterableUserDict.__getitem__(self, item)
    except KeyError:
        # NOTE(review): self.cmp's semantics are not visible here —
        # presumably an ordering test between keys; confirm at the call
        # sites before relying on this fallback behavior.
        if self.last_key is None or self.cmp(item, self.last_key):
            return self.default
        return self.last_value
    # Remember the last successful lookup for the miss path above.
    self.last_key = item
    self.last_value = value
    return value
def __getitem__(self, key): useEnv = self.useEnv if key == "MAT_PKG_HOME": useEnv = False if useEnv and os.environ.has_key(key): v = os.environ[key] if (not self.sourceAndVerboseStatus.has_key(key)) or \ (self.sourceAndVerboseStatus[key][0] != "<shell environment>"): self.sourceAndVerboseStatus[key] = [ "<shell environment>", False ] else: v = IterableUserDict.__getitem__(self, key) # If we've gotten this far, getitem hasn't thrown KeyError, which means # that the key HAS to have an entry in sourceAndVerboseStatus. # But let's check. if not self.sourceAndVerboseStatus.has_key(key): self.sourceAndVerboseStatus[key] = ["<unknown source>", False] if self.verboseConfig and (not self.sourceAndVerboseStatus[key][1]): self.sourceAndVerboseStatus[key][1] = True print >> sys.stderr, "[Read value of config var %s from %s]" % ( key, self.sourceAndVerboseStatus[key][0]) return v
def __getitem__(self, pkg_id):
    """Retrieve an ISessionData

        >>> sdc = PersistentSessionDataContainer()
        >>> sdc.timeout = 60
        >>> sdc.resolution = 3
        >>> sdc['clientid'] = sd = SessionData()

    To ensure stale data is removed, we can wind back the clock using
    undocumented means...

        >>> sd.lastAccessTime = sd.lastAccessTime - 64
        >>> sdc._v_last_sweep = sdc._v_last_sweep - 4

    Now the data should be garbage collected

        >>> sdc['clientid']
        Traceback (most recent call last):
        [...]
        KeyError: 'clientid'

    Ensure lastAccessTime on the ISessionData is being updated
    occasionally. The ISessionDataContainer maintains this whenever
    the ISessionData is set or retrieved.

    lastAccessTime on the ISessionData is set when it is added
    to the ISessionDataContainer

        >>> sdc['client_id'] = sd = SessionData()
        >>> sd.lastAccessTime > 0
        True

    lastAccessTime is also updated whenever the ISessionData
    is retrieved through the ISessionDataContainer, at most
    once every 'resolution' seconds.

        >>> then = sd.lastAccessTime = sd.lastAccessTime - 4
        >>> now = sdc['client_id'].lastAccessTime
        >>> now > then
        True
        >>> time.sleep(1)
        >>> now == sdc['client_id'].lastAccessTime
        True

    Ensure lastAccessTime is not modified and no garbage collection
    occurs when timeout == 0. We test this by faking a stale
    ISessionData object.

        >>> sdc.timeout = 0
        >>> sd.lastAccessTime = sd.lastAccessTime - 5000
        >>> lastAccessTime = sd.lastAccessTime
        >>> sdc['client_id'].lastAccessTime == lastAccessTime
        True

    Next, we test session expiration functionality beyond transactions.

        >>> import transaction
        >>> from ZODB.DB import DB
        >>> from ZODB.DemoStorage import DemoStorage
        >>> sdc = PersistentSessionDataContainer()
        >>> sdc.timeout = 60
        >>> sdc.resolution = 3
        >>> db = DB(DemoStorage('test_storage'))
        >>> c = db.open()
        >>> c.root()['sdc'] = sdc
        >>> sdc['pkg_id'] = sd = SessionData()
        >>> sd['name'] = 'bob'
        >>> transaction.commit()

    Access immediately. the data should be accessible.

        >>> c.root()['sdc']['pkg_id']['name']
        'bob'

    Change the clock time and stale the session data.

        >>> sdc = c.root()['sdc']
        >>> sd = sdc['pkg_id']
        >>> sd.lastAccessTime = sd.lastAccessTime - 64
        >>> sdc._v_last_sweep = sdc._v_last_sweep - 4
        >>> transaction.commit()

    The data should be garbage collected.

        >>> c.root()['sdc']['pkg_id']['name']
        Traceback (most recent call last):
        [...]
        KeyError: 'pkg_id'

    Then abort transaction and access the same data again.
    The previous GC was cancelled, but deadline is over.
    The data should be garbage collected again.

        >>> transaction.abort()
        >>> c.root()['sdc']['pkg_id']['name']
        Traceback (most recent call last):
        [...]
        KeyError: 'pkg_id'
    """
    # A timeout of zero disables both expiry and access-time tracking:
    # just delegate to the plain mapping lookup.
    if self.timeout == 0:
        return IterableUserDict.__getitem__(self, pkg_id)
    now = time.time()
    # TODO: When scheduler exists, sweeping should be done by
    # a scheduled job since we are currently busy handling a
    # request and may end up doing simultaneous sweeps

    # If transaction is aborted after sweep. _v_last_sweep keep
    # incorrect sweep time. So when self.data is ghost, revert the time
    # to the previous _v_last_sweep time(_v_old_sweep).
    # (_p_state < 0 means the persistent object is a ghost — its state
    # was invalidated, e.g. by transaction.abort().)
    if self.data._p_state < 0:
        try:
            self._v_last_sweep = self._v_old_sweep
            del self._v_old_sweep
        except AttributeError:
            # No saved sweep time yet; nothing to revert.
            pass
    # Sweep stale entries at most once per 'resolution' seconds,
    # remembering the pre-sweep timestamp so an abort can roll it back.
    if self._v_last_sweep + self.resolution < now:
        self.sweep()
        if getattr(self, '_v_old_sweep', None) is None:
            self._v_old_sweep = self._v_last_sweep
        self._v_last_sweep = now
    rv = IterableUserDict.__getitem__(self, pkg_id)
    # Only update lastAccessTime once every few minutes, rather than
    # every hit, to avoid ZODB bloat and conflicts
    if rv.lastAccessTime + self.resolution < now:
        rv.lastAccessTime = int(now)
    return rv
def __getitem__(self, spoke):
    """Look up *spoke*, falling back to the instance default when absent."""
    try:
        value = IterableUserDict.__getitem__(self, spoke)
    except KeyError:
        value = self._default
    return value
def __getitem__(self, pkg_id):
    """Retrieve an `zope.session.interfaces.ISessionData`

        >>> sdc = PersistentSessionDataContainer()
        >>> sdc.timeout = 60
        >>> sdc.resolution = 3
        >>> sdc['clientid'] = sd = SessionData()

    To ensure stale data is removed, we can wind back the clock using
    undocumented means...

        >>> sd.setLastAccessTime(sd.getLastAccessTime() - 64)
        >>> sdc._v_last_sweep = sdc._v_last_sweep - 4

    Now the data should be garbage collected

        >>> sdc['clientid']
        Traceback (most recent call last):
        [...]
        KeyError: 'clientid'

    Can you disable the automatic removal of stale data.

        >>> sdc.disable_implicit_sweeps = True
        >>> sdc['stale'] = stale = SessionData()

    Now we try the same method of winding back the clock.

        >>> stale.setLastAccessTime(sd.getLastAccessTime() - 64)
        >>> sdc._v_last_sweep = sdc._v_last_sweep - 4

    But the data is not automatically removed.

        >>> sdc['stale'] is stale
        True

    We can manually remove stale data by calling sweep() if stale
    data isn't being automatically removed.

        >>> stale.setLastAccessTime(sd.getLastAccessTime() - 64)
        >>> sdc.sweep()
        >>> sdc['stale']
        Traceback (most recent call last):
        [...]
        KeyError: 'stale'

    Now we turn automatic removal back on.

        >>> sdc.disable_implicit_sweeps = False

    Ensure the ``lastAccessTime`` on the `.ISessionData` is being
    updated occasionally. The `.ISessionDataContainer` maintains
    this whenever the `.ISessionData` is set or retrieved.

    ``lastAccessTime`` on the ``ISessionData`` is set when it is
    added to the ``ISessionDataContainer``

        >>> sdc['client_id'] = sd = SessionData()
        >>> sd.getLastAccessTime() > 0
        True

    The ``lastAccessTime`` is also updated whenever the
    ``ISessionData`` is retrieved through the
    ``ISessionDataContainer``, at most once every ``resolution``
    seconds.

        >>> then = sd.getLastAccessTime() - 4
        >>> sd.setLastAccessTime(then)
        >>> now = sdc['client_id'].getLastAccessTime()
        >>> now > then
        True
        >>> time.sleep(1)
        >>> now == sdc['client_id'].getLastAccessTime()
        True

    Ensure the ``lastAccessTime`` is not modified and no garbage
    collection occurs when timeout == 0. We test this by faking a
    stale ``ISessionData`` object.

        >>> sdc.timeout = 0
        >>> sd.setLastAccessTime(sd.getLastAccessTime() - 5000)
        >>> lastAccessTime = sd.getLastAccessTime()
        >>> sdc['client_id'].getLastAccessTime() == lastAccessTime
        True

    Next, we test session expiration functionality beyond transactions.

        >>> import transaction
        >>> from ZODB.DB import DB
        >>> from ZODB.DemoStorage import DemoStorage
        >>> sdc = PersistentSessionDataContainer()
        >>> sdc.timeout = 60
        >>> sdc.resolution = 3
        >>> db = DB(DemoStorage('test_storage'))
        >>> c = db.open()
        >>> c.root()['sdc'] = sdc
        >>> sdc['pkg_id'] = sd = SessionData()
        >>> sd['name'] = 'bob'
        >>> transaction.commit()

    Access immediately. the data should be accessible.

        >>> c.root()['sdc']['pkg_id']['name']
        'bob'

    Change the clock time and stale the session data.

        >>> sdc = c.root()['sdc']
        >>> sd = sdc['pkg_id']
        >>> sd.setLastAccessTime(sd.getLastAccessTime() - 64)
        >>> sdc._v_last_sweep = sdc._v_last_sweep - 4
        >>> transaction.commit()

    The data should be garbage collected.

        >>> c.root()['sdc']['pkg_id']['name']
        Traceback (most recent call last):
        [...]
        KeyError: 'pkg_id'

    Then abort transaction and access the same data again.
    The previous GC was cancelled, but deadline is over.
    The data should be garbage collected again.

        >>> transaction.abort()
        >>> c.root()['sdc']['pkg_id']['name']
        Traceback (most recent call last):
        [...]
        KeyError: 'pkg_id'

    Cleanup:

        >>> transaction.abort()
        >>> c.close()
    """
    # A timeout of zero disables both expiry and access-time tracking:
    # just delegate to the plain mapping lookup.
    if self.timeout == 0:
        return UserDict.__getitem__(self, pkg_id)
    now = time.time()
    # TODO: When scheduler exists, sweeping should be done by
    # a scheduled job since we are currently busy handling a
    # request and may end up doing simultaneous sweeps

    # If transaction is aborted after sweep. _v_last_sweep keep
    # incorrect sweep time. So when self.data is ghost, revert the time
    # to the previous _v_last_sweep time(_v_old_sweep).
    # (_p_state < 0 means the persistent object is a ghost — its state
    # was invalidated, e.g. by transaction.abort().)
    if self.data._p_state < 0:
        try:
            self._v_last_sweep = self._v_old_sweep
            del self._v_old_sweep
        except AttributeError:
            # No saved sweep time yet; nothing to revert.
            pass
    # Sweep stale entries at most once per 'resolution' seconds, unless
    # implicit sweeping is disabled; remember the pre-sweep timestamp so
    # an abort can roll it back.
    if (self._v_last_sweep + self.resolution < now
            and not self.disable_implicit_sweeps):
        self.sweep()
        if getattr(self, '_v_old_sweep', None) is None:
            self._v_old_sweep = self._v_last_sweep
        self._v_last_sweep = now
    rv = UserDict.__getitem__(self, pkg_id)
    # Only update the lastAccessTime once every few minutes, rather than
    # every hit, to avoid ZODB bloat and conflicts
    if rv.getLastAccessTime() + self.resolution < now:
        rv.setLastAccessTime(int(now))
    return rv