def __init__(self, default_ttl=None, expire_interval=60, touch_on_access=False,
             max_items=None, recent_items=RECENT_SIZE):
    """Create a cache map and schedule its periodic expiration sweep.

    @param default_ttl: time to live when using __setitem__ rather than set.
    @param expire_interval: time between removals of expired items in
        seconds.  Otherwise, expired items are removed lazily.
    @param touch_on_access: refresh item expire time by ttl when item is
        accessed.
    @param max_items: maximum size of cache. (see replacement algorithm above)
    """
    # Expiration times.  Multiple items can share the same expiration
    # time, but any given key appears at most once in the CacheMap.
    self._exp = IndexedMultiMap()
    self._data = {}
    self._ttl = default_ttl
    self._touch = touch_on_access
    self._max_items = max_items
    self._expire_interval = expire_interval
    # Recency is tracked only for bounded caches; the recency set itself
    # is capped at min(recent_items, max_items).
    if max_items is None:
        self._recent = None
    else:
        self._recent = _BoundedCacheSet(int(min(recent_items, max_items)))
    reactor.callLater(self._expire_interval, self._expire)
def test_injectLogger2(): injectLogger(log_file = "your.log", verbose=False, capture_output=True) print "hello world" def foo(): reactor.stop() zuul = dana reactor.callLater(0, foo)
def test_injectLogger2(): injectLogger(log_file="your.log", verbose=False, capture_output=True) print "hello world" def foo(): reactor.stop() zuul = dana reactor.callLater(0, foo)
def test_rate_limited_logger():
    """Verify RateLimitedLogger drops entries beyond the (1, 1) token bucket."""
    injectLogger(verbose=True)
    log = RateLimitedLogger(logging.getLogger("myapp"), 1, 1)
    log.info("should be printed.")
    log.info("should not be printed")       # should trigger the "discard" message.
    log.info("also should not be printed")  # discard message must not repeat.
    # Let the bucket refill, then logging should work again.
    df = Deferred()
    reactor.callLater(3, df.callback, True)
    like_yield(df)
    log.info("should also be printed")
    reactor.stop()
def test_injectLogger(): injectLogger(log_file = "your.log", use_syslog=False, verbose=True) logger = logging.getLogger("myapp") logger.warning("You are awesome") print 'stdout!' print >>sys.stderr, 'stderr!' from twisted.internet import reactor from twisted.python import failure def foo(): reactor.stop() zuul = dana reactor.callLater(0, foo)
def sendQuery(self):
    """Send the next queued query, or arm an idle timeout when none remain."""
    # Nothing queued and nothing in flight: start the idle-disconnect timer.
    if not (self.factory.queries or self.current_queries):
        assert not self.timeout_call
        self.timeout_call = reactor.callLater(self.timeout,
                                              self.transport.loseConnection)
        return
    # There is activity, so cancel any pending idle timeout.
    if self.timeout_call and self.timeout_call.active():
        self.timeout_call.cancel()
        self.timeout_call = None
    if not self.factory.queries:
        return
    query = self.factory.queries.pop(0)
    if pipeline_debug:
        self.log('sending', query.method)
    self.current_queries.append(query)
    # Emit the HTTP POST and its headers, then the EBRPC payload.
    self.sendCommand('POST', query.path)
    self.sendHeader('User-Agent', 'BTL/EBRPC 1.0')
    self.sendHeader('Host', query.host)
    self.sendHeader('Accept-encoding', 'gzip')
    self.sendHeader('Connection', 'Keep-Alive')
    self.sendHeader('Content-type', 'application/octet-stream')
    self.sendHeader('Content-length', str(len(query.payload)))
    #if query.user:
    #    auth = '%s:%s' % (query.user, query.password)
    #    auth = auth.encode('base64').strip()
    #    self.sendHeader('Authorization', 'Basic %s' % (auth,))
    self.endHeaders()
    self.transport.write(query.payload)
def __init__(self, logger, rate_limit, max_burst, log_all_level=CRITICAL):
    """Wrap a logger with (sigma, rho) token-bucket rate limiting.

    @param logger: logging.Logger object that this class wraps.
    @param rate_limit: maximum number of log entries per second.
    @param max_burst: maximum number of log entries that can be printed in
        a burst.  max_burst is the sigma in a (sigma, rho) token bucket.
    @param log_all_level: log all entries with level >= log_all_level.
        Such entries are still counted against the rate limit.
    """
    self.logger = logger
    self.rate_limit = rate_limit
    self.max_burst = max_burst
    # Whether we already logged that entries are being dropped.
    self.logged_discard = False
    # The bucket starts full.
    self.tokens = self.max_burst
    self.log_all_above_level = log_all_level
    reactor.callLater(1, self._increment_tokens)
    reactor.callLater(5, self._log_clear)
def timeout_yield(orig_df, timeout=None):
    """like_yield with a timeout.  Passed timeout is in seconds.

    If timeout is None then uses the default default_yield_timeout.
    If the function f eventually completes (i.e., its deferred gets called)
    after having already timed out then the result is tossed.

    timeout defaults to None rather than default_yield_timeout so that the
    default can be changed after importing timeout_yield by changing
    default_yield_timeout.

    WARNING: It is left to the caller to free up any state that might be
    held by the hung deferred.
    """
    assert isinstance(orig_df, defer.Deferred)
    if timeout is None:
        timeout = default_yield_timeout
    df = defer.Deferred()
    timer = reactor.callLater(timeout, defer.timeout, df)

    # Relay results only while the timer is still pending; once it has
    # fired, the result is tossed.
    def _relay_ok(result):
        if timer.active():
            df.callback(result)

    def _relay_err(fail):
        if timer.active():
            df.errback(fail)

    orig_df.addCallbacks(_relay_ok, _relay_err)
    try:
        return like_yield(df)
    finally:
        if timer.active():
            timer.cancel()
def __init__(self, logger, rate_limit, max_burst, log_all_level=CRITICAL):
    """Initialize a rate-limited wrapper around *logger*.

    @param logger: logging.Logger object that this class wraps.
    @param rate_limit: maximum number of log entries per second.
    @param max_burst: maximum number of log entries that can be printed in
        a burst.  max_burst is the sigma in a (sigma, rho) token bucket.
    @param log_all_level: log all entries with level >= log_all_level.
        Such entries are still counted against the rate limit.
    """
    self.logger = logger
    self.rate_limit = rate_limit
    self.max_burst = max_burst
    self.logged_discard = False  # logged that we are dropping entries?
    self.tokens = self.max_burst  # token bucket starts full
    self.log_all_above_level = log_all_level
    # Start the periodic refill and the periodic discard-flag reset.
    reactor.callLater(1, self._increment_tokens)
    reactor.callLater(5, self._log_clear)
def timeout_yield(orig_df, timeout=None):
    """like_yield with a timeout.  Passed timeout is in seconds.

    If timeout is None then uses the default default_yield_timeout.
    If the function f eventually completes (i.e., its deferred gets called)
    after having already timed out then the result is tossed.

    timeout is set to None rather than default_yield_timeout so that the
    default can be changed after import timeout_yield by changing
    default_yield_timeout.

    WARNING: It is left to the caller to free up any state that might be
    held by the hung deferred.
    """
    assert isinstance(orig_df, defer.Deferred)
    df = defer.Deferred()
    timeout = default_yield_timeout if timeout is None else timeout
    pending = reactor.callLater(timeout, defer.timeout, df)

    def forward_success(value):
        # Drop the result if the timeout has already fired.
        if pending.active():
            df.callback(value)

    def forward_failure(value):
        if pending.active():
            df.errback(value)

    orig_df.addCallbacks(forward_success, forward_failure)
    try:
        result = like_yield(df)
    finally:
        if pending.active():
            pending.cancel()
    return result
def listen_forever(self, doneflag=None):
    """Main event processing loop for RawServer.

    RawServer listens until the doneFlag is set by some other thread.
    The doneFlag tells all threads to clean-up and then exit.

    @param doneflag: DeferredEvent signalling shutdown; a fresh one is
        created when not provided.
    @raise Exception: if called more than once per reactor.
    """
    if not doneflag:
        doneflag = DeferredEvent()
    assert isinstance(doneflag, DeferredEvent)
    self.doneflag = doneflag
    if not self.associated:
        self.associate_thread()
    if self.listened:
        # BUG FIX: the exception was previously constructed but never
        # raised, so a second call went completely undetected.
        raise Exception(
            _("listen_forever() should only be called once per reactor."))
    if main_thread == thread.get_ident() and not self.sigint_installed:
        self.install_sigint_handler()
    if is_iocpreactor and main_thread == thread.get_ident():
        # Keep the IOCP reactor cycling so it stays responsive.
        def pulse():
            self.add_task(1, pulse)
        pulse()
    reactor.callLater(0, self.doneflag.addCallback, self._safestop)
    self.listened = True
    reactor.suggestThreadPoolSize(3)
    if profile:
        self.prof.enable()
    if noSignals:
        reactor.run(installSignalHandlers=False)
    else:
        reactor.run()
    if profile:
        self.prof.disable()
        st = Stats(self.prof.getstats())
        st.sort()
        # BUG FIX: close the profile output file instead of leaking the
        # file handle.
        f = open(prof_file_name, 'wb')
        try:
            st.dump(file=f)
        finally:
            f.close()
def listen_forever(self, doneflag=None):
    """Main event processing loop for RawServer.

    RawServer listens until the doneFlag is set by some other thread.
    The doneFlag tells all threads to clean-up and then exit.

    @param doneflag: DeferredEvent signalling shutdown; a fresh one is
        created when not provided.
    @raise Exception: if called more than once per reactor.
    """
    if not doneflag:
        doneflag = DeferredEvent()
    assert isinstance(doneflag, DeferredEvent)
    self.doneflag = doneflag
    if not self.associated:
        self.associate_thread()
    if self.listened:
        # BUG FIX: previously the Exception was created and discarded
        # without being raised, silently allowing repeated calls.
        raise Exception(
            _("listen_forever() should only be called once per reactor."))
    if main_thread == thread.get_ident() and not self.sigint_installed:
        self.install_sigint_handler()
    if is_iocpreactor and main_thread == thread.get_ident():
        # Periodic no-op task keeps the IOCP reactor responsive.
        def pulse():
            self.add_task(1, pulse)
        pulse()
    reactor.callLater(0, self.doneflag.addCallback, self._safestop)
    self.listened = True
    reactor.suggestThreadPoolSize(3)
    if profile:
        self.prof.enable()
    if noSignals:
        reactor.run(installSignalHandlers=False)
    else:
        reactor.run()
    if profile:
        self.prof.disable()
        st = Stats(self.prof.getstats())
        st.sort()
        # BUG FIX: ensure the profile dump file is closed.
        f = open(prof_file_name, 'wb')
        try:
            st.dump(file=f)
        finally:
            f.close()
def __init__(self, default_ttl=None, expire_interval=60, touch_on_access=False,
             max_items=None, recent_items=RECENT_SIZE):
    """Set up cache state and start the recurring expiration task.

    @param default_ttl: time to live when using __setitem__ rather than set.
    @param expire_interval: time between removals of expired items in
        seconds.  Otherwise, expired items are removed lazily.
    @param touch_on_access: refresh item expire time by ttl when item is
        accessed.
    @param max_items: maximum size of cache. (see replacement algorithm above)
    """
    # Maps expiration time -> key.  Expiration times may repeat; keys do not.
    self._exp = IndexedMultiMap()
    self._data = {}
    self._ttl = default_ttl
    self._touch = touch_on_access
    self._max_items = max_items
    self._expire_interval = expire_interval
    self._recent = (_BoundedCacheSet(int(min(recent_items, max_items)))
                    if max_items is not None else None)
    reactor.callLater(self._expire_interval, self._expire)
assert 10 not in c ### # test expirations and for memory leaks. # Watch memory consumption (e.g., using top) and see if it grows. if LEAK_TEST: c = CacheMap(default_ttl=TTL, expire_interval=EPSILON) i = 0 while True: for x in xrange(100): i += 1 if i % 20 == 0: print len(c) c[i] = K() if i % 5 == 0: try: l = len(c) del c[i] assert len(c) == l - 1 except KeyError: pass # allow time for expirations. df = Deferred() reactor.callLater(TTL + EPSILON, df.callback, None) yield df df.getResult() reactor.callLater(0, run) reactor.run()
def connectionLost(self, reason):
    # Defer the real teardown to the next reactor iteration so it runs
    # outside the protocol's connectionLost call stack.
    reactor.callLater(0, self.post_connectionLost, reason)
def _increment_tokens(self):
    """Refill the token bucket once per second, capped at max_burst."""
    self.tokens = min(self.tokens + self.rate_limit, self.max_burst)
    reactor.callLater(1, self._increment_tokens)
def done(r): print 'client got:', r assert r == [1002] reactor.callLater(0.5, reactor.stop)
def wait(n):
    """Return a Deferred that fires with 0 after n seconds."""
    result = Deferred()
    reactor.callLater(n, result.callback, 0)
    return result
def _expire(self):
    # Sweep out expired entries now, then reschedule the next sweep.
    self._expire2()
    reactor.callLater(self._expire_interval, self._expire)
injectLogger(log_file = "your.log", use_syslog=False, verbose=True) logger = logging.getLogger("myapp") logger.warning("You are awesome") print 'stdout!' print >>sys.stderr, 'stderr!' from twisted.internet import reactor from twisted.python import failure def foo(): reactor.stop() zuul = dana reactor.callLater(0, foo) def test_injectLogger2(): injectLogger(log_file = "your.log", verbose=False, capture_output=True) print "hello world" def foo(): reactor.stop() zuul = dana reactor.callLater(0, foo) #test_injectLogger() test_injectLogger2() #reactor.callLater(0, test_rate_limited_logger) reactor.run()
### # test expirations and for memory leaks. # Watch memory consumption (e.g., using top) and see if it grows. if LEAK_TEST: c = CacheMap(default_ttl=TTL,expire_interval=EPSILON) i = 0 while True: for x in xrange(100): i += 1 if i % 20 == 0: print len(c) c[i] = K() if i % 5 == 0: try: l = len(c) del c[i] assert len(c) == l-1 except KeyError: pass # allow time for expirations. df = Deferred() reactor.callLater(TTL+EPSILON,df.callback,None) yield df df.getResult() reactor.callLater(0,run) reactor.run()
def _run(): TTL = 1 SET_TTL = 2 # TTL used when explicitly setting TTL using "def set." EXPIRE_INTERVAL = .3 EPSILON = .5 ### # BoundedCacheSet correctness tests. c = _BoundedCacheSet(2) c.add(10) assert 10 in c c.add(15) assert 15 in c c.add(16) assert 16 in c assert 10 not in c assert 15 in c c.remove(15) assert 15 not in c try: c.remove(23) assert False except KeyError: pass ### # basic CacheMap correctness tests. c = CacheMap(default_ttl=TTL,expire_interval=EPSILON) class K(object): def __init__(self): self.x = range(10000) class V(object): def __init__(self): self.x = range(10000) k = K() v = V() t = time() c.set(k, v, SET_TTL) assert len(c) == 1 assert c.num_unexpired() == 1 assert c._exp.begin().key() < t + SET_TTL + EPSILON and \ c._exp.begin().key() > t + SET_TTL - EPSILON, \ "First item in c._exp should have expiration time that is close to the " \ "current time + SET_TTL which is %s, but the expiration time is %s." \ % (t+SET_TTL, c._exp.begin().key()) assert c.has_key(k) assert not c.has_key( "blah" ) assert c[k] == v c._expire2() # should not expire anything because little time has passed. assert len(c) == 1 assert c.num_unexpired() == 1 try: y = c[10] assert False, "should've raised KeyError." except KeyError: pass v2 = V() c[k] = v2 assert c._exp.begin().key() < t + SET_TTL + EPSILON and \ c._exp.begin().key() > t + SET_TTL - EPSILON, \ "First item in c._exp should have expiration time that is close to the " \ "current time + SET_TTL, but the expiration time is %s." % c._exp.begin().key() assert not c[k] == v assert c[k] == v2 assert len(c) == 1 assert c.num_unexpired() == 1 k2 = K() t = time() c[k2] = v2 assert c._exp.begin().key() < t + TTL + EPSILON and \ c._exp.begin().key() > t + TTL - EPSILON, \ "First item in c._exp should have expiration time that is close to the " \ "current time + TTL, but the expiration time is %s." % c._exp.begin().key() assert c[k2] == v2 assert not c[k] == v # shouldn't be a problem with two items having the same value. 
assert len(c) == 2 assert c.num_unexpired() == 2 # wait long enough for the cache entries to expire. df = Deferred() reactor.callLater(SET_TTL+EPSILON, df.callback, None) yield df df.getResult() assert c.num_unexpired() == 0, "Should have expired all entries, but there are %d " \ "unexpired items and %d items in c._data. " % (c.num_unexpired(), len(c._data)) assert len(c) == 0 assert len(c._exp) == 0 assert len(c._data) == 0 assert k not in c assert k2 not in c # basic correctness of bounded-size cache map. c = CacheMap(default_ttl=TTL,expire_interval=1000,max_items = 2) c[k] = v assert len(c) == 1 assert c[k] == v c[k2] = v2 assert len(c) == 2 assert c[k2] == v2 c[10] = 15 assert len(c) == 2 assert c[10] == 15 assert c[k2] == v2 # order from most recent access is now [(k2,v2), (10,15), (k,v)]. try: a = c[k] assert False, "when cache with size bound of 2 exceeded 2 elements, " \ "the oldest should've been removed." except KeyError: pass c[56] = 1 # order from most recent access ... assert len(c) == 2 assert 56 in c assert 10 not in c ### # test expirations and for memory leaks. # Watch memory consumption (e.g., using top) and see if it grows. if LEAK_TEST: c = CacheMap(default_ttl=TTL,expire_interval=EPSILON) i = 0 while True: for x in xrange(100): i += 1 if i % 20 == 0: print len(c) c[i] = K() if i % 5 == 0: try: l = len(c) del c[i] assert len(c) == l-1 except KeyError: pass # allow time for expirations. df = Deferred() reactor.callLater(TTL+EPSILON,df.callback,None) yield df df.getResult()
def _run(): TTL = 1 SET_TTL = 2 # TTL used when explicitly setting TTL using "def set." EXPIRE_INTERVAL = .3 EPSILON = .5 ### # BoundedCacheSet correctness tests. c = _BoundedCacheSet(2) c.add(10) assert 10 in c c.add(15) assert 15 in c c.add(16) assert 16 in c assert 10 not in c assert 15 in c c.remove(15) assert 15 not in c try: c.remove(23) assert False except KeyError: pass ### # basic CacheMap correctness tests. c = CacheMap(default_ttl=TTL, expire_interval=EPSILON) class K(object): def __init__(self): self.x = range(10000) class V(object): def __init__(self): self.x = range(10000) k = K() v = V() t = time() c.set(k, v, SET_TTL) assert len(c) == 1 assert c.num_unexpired() == 1 assert c._exp.begin().key() < t + SET_TTL + EPSILON and \ c._exp.begin().key() > t + SET_TTL - EPSILON, \ "First item in c._exp should have expiration time that is close to the " \ "current time + SET_TTL which is %s, but the expiration time is %s." \ % (t+SET_TTL, c._exp.begin().key()) assert c.has_key(k) assert not c.has_key("blah") assert c[k] == v c._expire2( ) # should not expire anything because little time has passed. assert len(c) == 1 assert c.num_unexpired() == 1 try: y = c[10] assert False, "should've raised KeyError." except KeyError: pass v2 = V() c[k] = v2 assert c._exp.begin().key() < t + SET_TTL + EPSILON and \ c._exp.begin().key() > t + SET_TTL - EPSILON, \ "First item in c._exp should have expiration time that is close to the " \ "current time + SET_TTL, but the expiration time is %s." % c._exp.begin().key() assert not c[k] == v assert c[k] == v2 assert len(c) == 1 assert c.num_unexpired() == 1 k2 = K() t = time() c[k2] = v2 assert c._exp.begin().key() < t + TTL + EPSILON and \ c._exp.begin().key() > t + TTL - EPSILON, \ "First item in c._exp should have expiration time that is close to the " \ "current time + TTL, but the expiration time is %s." 
% c._exp.begin().key() assert c[k2] == v2 assert not c[ k] == v # shouldn't be a problem with two items having the same value. assert len(c) == 2 assert c.num_unexpired() == 2 # wait long enough for the cache entries to expire. df = Deferred() reactor.callLater(SET_TTL + EPSILON, df.callback, None) yield df df.getResult() assert c.num_unexpired() == 0, "Should have expired all entries, but there are %d " \ "unexpired items and %d items in c._data. " % (c.num_unexpired(), len(c._data)) assert len(c) == 0 assert len(c._exp) == 0 assert len(c._data) == 0 assert k not in c assert k2 not in c # basic correctness of bounded-size cache map. c = CacheMap(default_ttl=TTL, expire_interval=1000, max_items=2) c[k] = v assert len(c) == 1 assert c[k] == v c[k2] = v2 assert len(c) == 2 assert c[k2] == v2 c[10] = 15 assert len(c) == 2 assert c[10] == 15 assert c[ k2] == v2 # order from most recent access is now [(k2,v2), (10,15), (k,v)]. try: a = c[k] assert False, "when cache with size bound of 2 exceeded 2 elements, " \ "the oldest should've been removed." except KeyError: pass c[56] = 1 # order from most recent access ... assert len(c) == 2 assert 56 in c assert 10 not in c ### # test expirations and for memory leaks. # Watch memory consumption (e.g., using top) and see if it grows. if LEAK_TEST: c = CacheMap(default_ttl=TTL, expire_interval=EPSILON) i = 0 while True: for x in xrange(100): i += 1 if i % 20 == 0: print len(c) c[i] = K() if i % 5 == 0: try: l = len(c) del c[i] assert len(c) == l - 1 except KeyError: pass # allow time for expirations. df = Deferred() reactor.callLater(TTL + EPSILON, df.callback, None) yield df df.getResult()