def test_verify_json_for_server(self):
    """Check that Keyring.verify_json_for_server rejects unsigned JSON,
    accepts correctly-signed JSON, and follows the logcontext rules
    (resets to the sentinel while blocked, restores on completion).
    """
    kr = keyring.Keyring(self.hs)
    # create a signing key for "server9" and store its verify key
    key1 = signedjson.key.generate_signing_key(1)
    yield self.hs.datastore.store_server_verify_key(
        "server9", "", time.time() * 1000, signedjson.key.get_verify_key(key1)
    )
    json1 = {}
    signedjson.sign.sign_json(json1, "server9", key1)

    sentinel_context = LoggingContext.current_context()

    with LoggingContext("one") as context_one:
        context_one.request = "one"

        # an unsigned object should fail with SynapseError
        defer = kr.verify_json_for_server("server9", {})
        try:
            yield defer
            self.fail("should fail on unsigned json")
        except SynapseError:
            pass
        # the failure should have left us in our logcontext
        self.assertIs(LoggingContext.current_context(), context_one)

        # a signed object should succeed; while the deferred is pending
        # the logcontext must have been reset to the sentinel
        defer = kr.verify_json_for_server("server9", json1)
        self.assertFalse(defer.called)
        self.assertIs(LoggingContext.current_context(), sentinel_context)
        yield defer
        # ... and restored once the deferred completes
        self.assertIs(LoggingContext.current_context(), context_one)
def setUp(orig):
    """Wrapper around the test's original setUp.

    NOTE(review): `self`, `level` and `around` are free variables from the
    enclosing (not visible here) decorator scope — confirm against caller.
    """
    # enable debugging of delayed calls - this means that we get a
    # traceback when a unit test exits leaving things on the reactor.
    twisted.internet.base.DelayedCall.debug = True

    # if we're not starting in the sentinel logcontext, then to be honest
    # all future bets are off.
    if LoggingContext.current_context() is not LoggingContext.sentinel:
        self.fail(
            "Test starting with non-sentinel logging context %s" % (
                LoggingContext.current_context(),
            )
        )

    # remember the log level so the tearDown hook can restore it
    old_level = logging.getLogger().level

    if level is not None and old_level != level:
        @around(self)
        def tearDown(orig):
            # restore the original log level after the test completes
            ret = orig()
            logging.getLogger().setLevel(old_level)
            return ret

        logging.getLogger().setLevel(level)

    return orig()
def test_verify_json_for_server(self):
    """Check that Keyring.verify_json_for_server rejects unsigned JSON,
    accepts correctly-signed JSON, and follows the logcontext rules.
    """
    kr = keyring.Keyring(self.hs)
    # create a signing key for "server9" and store its verify key
    key1 = signedjson.key.generate_signing_key(1)
    yield self.hs.datastore.store_server_verify_key(
        "server9", "", time.time() * 1000,
        signedjson.key.get_verify_key(key1),
    )
    json1 = {}
    signedjson.sign.sign_json(json1, "server9", key1)

    sentinel_context = LoggingContext.current_context()

    with LoggingContext("one") as context_one:
        context_one.request = "one"

        # an unsigned object should fail with SynapseError
        defer = kr.verify_json_for_server("server9", {})
        try:
            yield defer
            self.fail("should fail on unsigned json")
        except SynapseError:
            pass
        # the failure should have left us in our logcontext
        self.assertIs(LoggingContext.current_context(), context_one)

        # a signed object should succeed; while pending, the logcontext
        # must have been reset to the sentinel
        defer = kr.verify_json_for_server("server9", json1)
        self.assertFalse(defer.called)
        self.assertIs(LoggingContext.current_context(), sentinel_context)
        yield defer
        # ... and restored once the deferred completes
        self.assertIs(LoggingContext.current_context(), context_one)
def setUp(orig):
    """Wrapper around the test's original setUp.

    NOTE(review): `self`, `level` and `around` are free variables from the
    enclosing (not visible here) decorator scope — confirm against caller.
    """
    # enable debugging of delayed calls - this means that we get a
    # traceback when a unit test exits leaving things on the reactor.
    twisted.internet.base.DelayedCall.debug = True

    # if we're not starting in the sentinel logcontext, then to be honest
    # all future bets are off.
    if LoggingContext.current_context() is not LoggingContext.sentinel:
        self.fail(
            "Test starting with non-sentinel logging context %s"
            % (LoggingContext.current_context(),)
        )

    # remember the log level so the tearDown hook can restore it
    old_level = logging.getLogger().level

    if level is not None and old_level != level:
        @around(self)
        def tearDown(orig):
            # restore the original log level after the test completes
            ret = orig()
            logging.getLogger().setLevel(old_level)
            return ret

        logging.getLogger().setLevel(level)

    return orig()
def assert_sentinel_context(self):
    """Fail the test unless the current logging context is the sentinel."""
    current = LoggingContext.current_context()
    if current == LoggingContext.sentinel:
        return
    self.fail("Expected sentinel context but got %s" % (current,))
def errback(res, deferred_name):
    """Pass-through errback that asserts it runs in `context_one`.

    NOTE: `self` and `context_one` are free variables from the enclosing
    test scope.
    """
    current = LoggingContext.current_context()
    self.assertIs(
        current,
        context_one,
        "errback %s run in unexpected logcontext %s" % (deferred_name, current),
    )
    return res
def check_ctx(r):
    """Callback/errback asserting that `f` left the logcontext unchanged.

    `f` and `start_context` are free variables from the enclosing scope.
    """
    current = LoggingContext.current_context()
    if current == start_context:
        return r
    outcome = "Failure" if isinstance(r, Failure) else "Success"
    err = "%s completion of %s changed context from %s to %s" % (
        outcome,
        f,
        start_context,
        current,
    )
    # print as well as raise: errbacks can get swallowed, so make sure the
    # problem is visible on stderr
    print(err, file=sys.stderr)
    raise Exception(err)
def test_make_deferred_yieldable_with_chained_deferreds(self):
    """make_deferred_yieldable must reset the logcontext while a chained
    deferred is pending and restore it on completion."""
    sentinel_context = LoggingContext.current_context()

    with LoggingContext() as context_one:
        context_one.request = "one"

        d1 = logcontext.make_deferred_yieldable(_chained_deferred_function())
        # make sure that the context was reset by make_deferred_yieldable
        self.assertIs(LoggingContext.current_context(), sentinel_context)

        yield d1

        # now it should be restored
        self._check_test_key("one")
def _wait_for_sync_for_user(self, sync_config, since_token, timeout, full_state):
    """Run a sync for the user, either immediately or after waiting for
    new events via the notifier.

    Returns (via defer.returnValue): the sync result.
    """
    # tag the logcontext so the request metrics distinguish sync types
    context = LoggingContext.current_context()
    if context:
        if since_token is None:
            context.tag = "initial_sync"
        elif full_state:
            context.tag = "full_state_sync"
        else:
            context.tag = "incremental_sync"

    if timeout == 0 or since_token is None or full_state:
        # we are going to return immediately, so don't bother calling
        # notifier.wait_for_events.
        result = yield self.current_sync_for_user(
            sync_config, since_token, full_state=full_state,
        )
        defer.returnValue(result)
    else:
        def current_sync_callback(before_token, after_token):
            # the notifier supplies the tokens, but we sync from since_token
            return self.current_sync_for_user(sync_config, since_token)

        result = yield self.notifier.wait_for_events(
            sync_config.user.to_string(), timeout, current_sync_callback,
            from_token=since_token,
        )
        defer.returnValue(result)
def get_perspectives(**kwargs):
    """Stubbed perspectives fetch: asserts it runs in request context "11",
    then waits for `persp_deferred` (a free variable from the test scope)
    outside of the logcontext before returning `persp_resp`.
    """
    self.assertEquals(
        LoggingContext.current_context().request, "11",
    )
    # yield with the logcontext reset, per the logcontext rules
    with logcontext.PreserveLoggingContext():
        yield persp_deferred
    defer.returnValue(persp_resp)
def wait_for_sync_for_user(self, sync_config, since_token=None, timeout=0,
                           full_state=False):
    """Get the sync for a client if we have new data for it now. Otherwise
    wait for new data to arrive on the server. If the timeout expires, then
    return an empty sync result.
    Returns:
        A Deferred SyncResult.
    """
    # tag the logcontext so the request metrics distinguish sync types
    context = LoggingContext.current_context()
    if context:
        if since_token is None:
            context.tag = "initial_sync"
        elif full_state:
            context.tag = "full_state_sync"
        else:
            context.tag = "incremental_sync"

    if timeout == 0 or since_token is None or full_state:
        # we are going to return immediately, so don't bother calling
        # notifier.wait_for_events.
        result = yield self.current_sync_for_user(
            sync_config, since_token, full_state=full_state,
        )
        defer.returnValue(result)
    else:
        def current_sync_callback(before_token, after_token):
            # the notifier supplies the tokens, but we sync from since_token
            return self.current_sync_for_user(sync_config, since_token)

        result = yield self.notifier.wait_for_events(
            sync_config.user.to_string(), timeout, current_sync_callback,
            from_token=since_token,
        )
        defer.returnValue(result)
def do_lookup():
    """Resolve `service_name` inside a logcontext, asserting the logcontext
    rules: reset to sentinel while pending, restored on completion.

    `resolver` and `service_name` are free variables from the test scope.
    """
    with LoggingContext("one") as ctx:
        resolve_d = resolver.resolve_service(service_name)
        self.assertNoResult(resolve_d)

        # should have reset to the sentinel context
        self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)

        result = yield resolve_d

        # should have restored our context
        self.assertIs(LoggingContext.current_context(), ctx)

        defer.returnValue(result)
def runInteraction(self, desc, func, *args, **kwargs):
    """Wraps the .runInteraction() method on the underlying db_pool."""
    current_context = LoggingContext.current_context()

    start_time = time.time() * 1000

    # callbacks to run (in the caller's context) after the transaction commits
    after_callbacks = []

    def inner_func(conn, *args, **kwargs):
        # runs in a db_pool worker thread, so needs its own logcontext
        with LoggingContext("runInteraction") as context:
            # record how long the job waited in the pool's queue
            sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)

            if self.database_engine.is_connection_closed(conn):
                logger.debug("Reconnecting closed database connection")
                conn.reconnect()

            # carry the request's identity into the worker's logcontext
            current_context.copy_to(context)
            return self._new_transaction(
                conn, desc, after_callbacks, current_context,
                func, *args, **kwargs
            )

    # reset the logcontext while we block on the thread pool
    with PreserveLoggingContext():
        result = yield self._db_pool.runWithConnection(
            inner_func, *args, **kwargs
        )

    for after_callback, after_args in after_callbacks:
        after_callback(*after_args)
    defer.returnValue(result)
def stop(self, clock, request):
    """Record response metrics for a finished request.

    NOTE(review): if `context` is falsy we still fall through to
    context.get_resource_usage() below, which would raise — presumably the
    caller guarantees a context; confirm.
    """
    context = LoggingContext.current_context()

    tag = ""
    if context:
        tag = context.tag
        if context != self.start_context:
            # the context changed under us; metrics would be misattributed
            logger.warn("Context have unexpectedly changed %r, %r",
                        context, self.start_context)
            return

    response_count.inc(request.method, self.name, tag)

    response_timer.inc_by(
        clock.time_msec() - self.start, request.method, self.name, tag
    )

    ru_utime, ru_stime = context.get_resource_usage()

    response_ru_utime.inc_by(ru_utime, request.method, self.name, tag)
    response_ru_stime.inc_by(ru_stime, request.method, self.name, tag)
    response_db_txn_count.inc_by(
        context.db_txn_count, request.method, self.name, tag
    )
    # durations are in ms on the context; metrics are in seconds
    response_db_txn_duration.inc_by(
        context.db_txn_duration_ms / 1000., request.method, self.name, tag
    )
    response_db_sched_duration.inc_by(
        context.db_sched_duration_ms / 1000., request.method, self.name, tag
    )
async def testfunc():
    """Check that awaiting a Clock.sleep resets the logcontext to the
    sentinel while pending, and restores it afterwards."""
    self._check_test_key("one")
    d = Clock(reactor).sleep(0)
    # while the deferred is pending we should be in the sentinel context
    self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
    await d
    # and our context should be restored after the await
    self._check_test_key("one")
def finished_processing(self):
    """Log a summary line for the completed request, including CPU and
    database resource usage pulled from the current logcontext.
    """
    try:
        context = LoggingContext.current_context()
        ru_utime, ru_stime = context.get_resource_usage()
        db_txn_count = context.db_txn_count
        db_txn_duration = context.db_txn_duration
    except Exception:
        # FIX: was a bare `except:`, which also swallowed BaseExceptions
        # such as KeyboardInterrupt/SystemExit. Fall back to zeros when
        # the logcontext (or its usage info) is unavailable.
        ru_utime, ru_stime = (0, 0)
        db_txn_count, db_txn_duration = (0, 0)

    self.site.access_logger.info(
        "%s - %s - {%s}"
        " Processed request: %dms (%dms, %dms) (%dms/%d)"
        " %sB %s \"%s %s %s\" \"%s\"",
        self.getClientIP(),
        self.site.site_tag,
        self.authenticated_entity,
        int(time.time() * 1000) - self.start_time,
        int(ru_utime * 1000),
        int(ru_stime * 1000),
        int(db_txn_duration * 1000),
        int(db_txn_count),
        self.sentLength,
        self.code,
        self.method,
        self.get_redacted_uri(),
        self.clientproto,
        self.get_user_agent(),
    )
def __enter__(self):
    """Record the start time and a snapshot of the current logcontext's
    resource usage, if any context is active."""
    self.start = self.clock.time_msec()
    ctx = LoggingContext.current_context()
    self.start_context = ctx
    if not ctx:
        return
    self.ru_utime, self.ru_stime = ctx.get_resource_usage()
    self.db_txn_count = ctx.db_txn_count
    self.db_txn_duration = ctx.db_txn_duration
def __exit__(self, exc_type, exc_val, exc_tb):
    """Record block metrics, unless the block raised or there was no
    starting logcontext."""
    if exc_type is not None or not self.start_context:
        return

    duration = self.clock.time_msec() - self.start
    block_timer.inc_by(duration, self.name)

    context = LoggingContext.current_context()

    if context != self.start_context:
        # context changed under us; resource deltas would be meaningless
        logger.warn(
            "Context have unexpectedly changed from '%s' to '%s'. (%r)",
            context, self.start_context, self.name)
        return

    if not context:
        logger.warn("Expected context. (%r)", self.name)
        return

    # record the usage deltas since __enter__
    ru_utime, ru_stime = context.get_resource_usage()

    block_ru_utime.inc_by(ru_utime - self.ru_utime, self.name)
    block_ru_stime.inc_by(ru_stime - self.ru_stime, self.name)
    block_db_txn_count.inc_by(context.db_txn_count - self.db_txn_count, self.name)
    block_db_txn_duration.inc_by(
        context.db_txn_duration - self.db_txn_duration, self.name)

    if self.created_context:
        # we opened this context in __enter__, so close it here
        self.start_context.__exit__(exc_type, exc_val, exc_tb)
def __exit__(self, exc_type, exc_val, exc_tb):
    """Record block metrics, unless the block raised or there was no
    starting logcontext."""
    if exc_type is not None or not self.start_context:
        return

    duration = self.clock.time_msec() - self.start
    block_timer.inc_by(duration, self.name)

    context = LoggingContext.current_context()

    if context != self.start_context:
        # context changed under us; resource deltas would be meaningless
        logger.warn(
            "Context have unexpectedly changed from '%s' to '%s'. (%r)",
            context, self.start_context, self.name
        )
        return

    if not context:
        logger.warn("Expected context. (%r)", self.name)
        return

    # record the usage deltas since __enter__
    ru_utime, ru_stime = context.get_resource_usage()

    block_ru_utime.inc_by(ru_utime - self.ru_utime, self.name)
    block_ru_stime.inc_by(ru_stime - self.ru_stime, self.name)
    block_db_txn_count.inc_by(
        context.db_txn_count - self.db_txn_count, self.name
    )
    block_db_txn_duration.inc_by(
        context.db_txn_duration - self.db_txn_duration, self.name
    )

    if self.created_context:
        # we opened this context in __enter__, so close it here
        self.start_context.__exit__(exc_type, exc_val, exc_tb)
def __exit__(self, exc_type, exc_val, exc_tb):
    """Record block metrics, unless the block raised or there was no
    starting logcontext."""
    # BUG FIX: this previously tested `isinstance(exc_type, Exception)`,
    # which is always False because exc_type is an exception *class*, not
    # an instance — so metrics were recorded even when the block raised.
    # Sibling implementations of this method use `exc_type is not None`.
    if exc_type is not None or not self.start_context:
        return

    duration = self.clock.time() - self.start

    block_counter.labels(self.name).inc()
    block_timer.labels(self.name).inc(duration)

    context = LoggingContext.current_context()

    if context != self.start_context:
        # context changed under us; resource deltas would be meaningless
        logger.warn(
            "Context has unexpectedly changed from '%s' to '%s'. (%r)",
            self.start_context, context, self.name)
        return

    if not context:
        logger.warn("Expected context. (%r)", self.name)
        return

    # record the usage delta since __enter__
    usage = context.get_resource_usage() - self.start_usage
    block_ru_utime.labels(self.name).inc(usage.ru_utime)
    block_ru_stime.labels(self.name).inc(usage.ru_stime)
    block_db_txn_count.labels(self.name).inc(usage.db_txn_count)
    block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec)
    block_db_sched_duration.labels(self.name).inc(
        usage.db_sched_duration_sec)

    if self.created_context:
        # we opened this context in __enter__, so close it here
        self.start_context.__exit__(exc_type, exc_val, exc_tb)
def runInteraction(self, desc, func, *args, **kwargs):
    """Wraps the .runInteraction() method on the underlying db_pool."""
    current_context = LoggingContext.current_context()

    def inner_func(txn, *args, **kwargs):
        # runs in a db_pool worker thread, so needs its own logcontext
        with LoggingContext("runInteraction") as context:
            current_context.copy_to(context)
            start = time.clock() * 1000
            txn_id = SQLBaseStore._TXN_ID

            # We don't really need these to be unique, so lets stop it from
            # growing really large.
            # NOTE(review): sys.maxint is Python 2 only.
            self._TXN_ID = (self._TXN_ID + 1) % (sys.maxint - 1)

            name = "%s-%x" % (desc, txn_id, )

            transaction_logger.debug("[TXN START] {%s}", name)
            try:
                return func(LoggingTransaction(txn, name), *args, **kwargs)
            # NOTE(review): bare except; it re-raises, so nothing is
            # swallowed, but `except Exception:` would be more idiomatic.
            except:
                logger.exception("[TXN FAIL] {%s}", name)
                raise
            finally:
                end = time.clock() * 1000
                transaction_logger.debug(
                    "[TXN END] {%s} %f", name, end - start
                )

    # reset the logcontext while we block on the thread pool
    with PreserveLoggingContext():
        result = yield self._db_pool.runInteraction(
            inner_func, *args, **kwargs
        )
    defer.returnValue(result)
def stop(self, clock, request):
    """Record response metrics for a finished request.

    NOTE(review): if `context` is falsy we still fall through to
    context.get_resource_usage() below, which would raise — presumably the
    caller guarantees a context; confirm.
    """
    context = LoggingContext.current_context()

    tag = ""
    if context:
        tag = context.tag
        if context != self.start_context:
            # the context changed under us; metrics would be misattributed
            logger.warn(
                "Context have unexpectedly changed %r, %r",
                context, self.start_context
            )
            return

    incoming_requests_counter.inc(request.method, self.name, tag)

    response_timer.inc_by(
        clock.time_msec() - self.start, request.method, self.name, tag
    )

    ru_utime, ru_stime = context.get_resource_usage()

    response_ru_utime.inc_by(
        ru_utime, request.method, self.name, tag
    )
    response_ru_stime.inc_by(
        ru_stime, request.method, self.name, tag
    )
    response_db_txn_count.inc_by(
        context.db_txn_count, request.method, self.name, tag
    )
    response_db_txn_duration.inc_by(
        context.db_txn_duration, request.method, self.name, tag
    )
def call_later(self, delay, callback):
    """Schedule `callback` on the reactor after `delay` seconds, restoring
    the caller's logcontext before it runs."""
    captured_context = LoggingContext.current_context()

    def _run_with_context():
        # reinstate the logcontext captured at scheduling time
        LoggingContext.thread_local.current_context = captured_context
        callback()

    return reactor.callLater(delay, _run_with_context)
def runInteraction(self, desc, func, *args, **kwargs):
    """Wraps the .runInteraction() method on the underlying db_pool."""
    current_context = LoggingContext.current_context()

    start_time = time.time() * 1000

    # callbacks to run (in the caller's context) after the transaction commits
    after_callbacks = []

    def inner_func(conn, *args, **kwargs):
        # runs in a db_pool worker thread, so needs its own logcontext
        with LoggingContext("runInteraction") as context:
            # record how long the job waited in the pool's queue
            sql_scheduling_timer.inc_by(time.time() * 1000 - start_time)

            if self.database_engine.is_connection_closed(conn):
                logger.debug("Reconnecting closed database connection")
                conn.reconnect()

            # carry the request's identity into the worker's logcontext
            current_context.copy_to(context)
            return self._new_transaction(
                conn, desc, after_callbacks, func, *args, **kwargs
            )

    result = yield preserve_context_over_fn(
        self._db_pool.runWithConnection,
        inner_func, *args, **kwargs
    )

    for after_callback, after_args in after_callbacks:
        after_callback(*after_args)
    defer.returnValue(result)
def runInteraction(self, desc, func, *args, **kwargs):
    """Wraps the .runInteraction() method on the underlying db_pool."""
    current_context = LoggingContext.current_context()

    def inner_func(txn, *args, **kwargs):
        # runs in a db_pool worker thread, so needs its own logcontext
        with LoggingContext("runInteraction") as context:
            current_context.copy_to(context)
            start = time.clock() * 1000
            txn_id = SQLBaseStore._TXN_ID

            # We don't really need these to be unique, so lets stop it from
            # growing really large.
            # NOTE(review): sys.maxint is Python 2 only.
            self._TXN_ID = (self._TXN_ID + 1) % (sys.maxint - 1)

            name = "%s-%x" % ( desc, txn_id, )

            transaction_logger.debug("[TXN START] {%s}", name)
            try:
                return func(LoggingTransaction(txn, name), *args, **kwargs)
            # NOTE(review): bare except; it re-raises, so nothing is
            # swallowed, but `except Exception:` would be more idiomatic.
            except:
                logger.exception("[TXN FAIL] {%s}", name)
                raise
            finally:
                end = time.clock() * 1000
                transaction_logger.debug("[TXN END] {%s} %f", name, end - start)

    # reset the logcontext while we block on the thread pool
    with PreserveLoggingContext():
        result = yield self._db_pool.runInteraction(
            inner_func, *args, **kwargs)
    defer.returnValue(result)
def with_logging_context(fn):
    """Wrap `fn` so that, whenever it is later invoked, it runs under the
    logcontext that was current at wrap time."""
    captured = LoggingContext.current_context()

    def _callback(x):
        with PreserveLoggingContext(captured):
            return fn(x)

    return _callback
def __enter__(self):
    """Record the start time and a snapshot of the current logcontext's
    resource usage, if any context is active."""
    self.start = self.clock.time_msec()
    ctx = LoggingContext.current_context()
    self.start_context = ctx
    if not ctx:
        return
    self.ru_utime, self.ru_stime = ctx.get_resource_usage()
    self.db_txn_count = ctx.db_txn_count
    self.db_txn_duration = ctx.db_txn_duration
def test_logcontexts_with_async_result(self):
    """Two parallel fetch_or_execute calls must each complete in their own
    logcontext, and the outer code must sit in the sentinel while waiting."""
    @defer.inlineCallbacks
    def cb():
        # asynchronous work whose result is shared between the two callers
        yield Clock(reactor).sleep(0)
        defer.returnValue("yay")

    @defer.inlineCallbacks
    def test():
        with LoggingContext("c") as c1:
            res = yield self.cache.fetch_or_execute(self.mock_key, cb)
            # our context must be restored after the yield
            self.assertIs(LoggingContext.current_context(), c1)
            self.assertEqual(res, "yay")

    # run the test twice in parallel
    d = defer.gatherResults([test(), test()])
    self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
    yield d
    self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
def start(self, time_sec, name, method):
    """Begin tracking this request's metrics."""
    self.name = name
    self.method = method
    self.start = time_sec
    self.start_context = LoggingContext.current_context()

    # snapshot of resource usage at the point the request started
    self._request_stats = _RequestStats.from_context(self.start_context)

    # register so in-flight request metrics include this request
    _in_flight_requests.add(self)
def with_logging_context(fn):
    """Wrap `fn` so that, whenever it is later invoked, it runs under the
    logcontext that was current at wrap time."""
    captured = LoggingContext.current_context()

    def _callback(x):
        with PreserveLoggingContext():
            # reinstate the captured context before invoking fn
            LoggingContext.thread_local.current_context = captured
            return fn(x)

    return _callback
def __enter__(self):
    """Start timing; if no logcontext is active, create one so resource
    usage can be tracked, and remember that we created it."""
    self.start = self.clock.time()
    ctx = LoggingContext.current_context()
    if not ctx:
        ctx = LoggingContext("Measure")
        ctx.__enter__()
        self.created_context = True
    self.start_context = ctx
    self.start_usage = ctx.get_resource_usage()
def _get_events(self, event_ids, check_redacted=True,
                get_prev_content=False, allow_rejected=False):
    """Fetch events by id, from the cache where possible, preserving the
    order of `event_ids` and silently dropping unknown ids.

    Returns (via defer.returnValue): list of events.
    """
    if not event_ids:
        defer.returnValue([])

    # keep the original ordering; dedupe for the actual lookups
    event_id_list = event_ids
    event_ids = set(event_ids)

    event_entry_map = self._get_events_from_cache(
        event_ids,
        allow_rejected=allow_rejected,
    )

    missing_events_ids = [e for e in event_ids if e not in event_entry_map]

    if missing_events_ids:
        # count the cache misses against the request's logcontext
        log_ctx = LoggingContext.current_context()
        log_ctx.record_event_fetch(len(missing_events_ids))

        missing_events = yield self._enqueue_events(
            missing_events_ids,
            check_redacted=check_redacted,
            allow_rejected=allow_rejected,
        )

        event_entry_map.update(missing_events)

    events = []
    for event_id in event_id_list:
        entry = event_entry_map.get(event_id, None)
        if not entry:
            # unknown / unfetchable event: skip rather than fail
            continue

        if allow_rejected or not entry.event.rejected_reason:
            # serve the redacted form when requested and available
            if check_redacted and entry.redacted_event:
                event = entry.redacted_event
            else:
                event = entry.event

            events.append(event)

            if get_prev_content:
                if "replaces_state" in event.unsigned:
                    prev = yield self.get_event(
                        event.unsigned["replaces_state"],
                        get_prev_content=False,
                        allow_none=True,
                    )
                    if prev:
                        # copy unsigned before mutating: the event may be
                        # shared via the cache
                        event.unsigned = dict(event.unsigned)
                        event.unsigned["prev_content"] = prev.content
                        event.unsigned["prev_sender"] = prev.sender

    defer.returnValue(events)
def _finished_processing(self):
    """Log a summary line for the completed request and record its
    metrics, including resource usage from the current logcontext.
    """
    try:
        context = LoggingContext.current_context()
        ru_utime, ru_stime = context.get_resource_usage()
        db_txn_count = context.db_txn_count
        db_txn_duration_sec = context.db_txn_duration_sec
        db_sched_duration_sec = context.db_sched_duration_sec
        evt_db_fetch_count = context.evt_db_fetch_count
    except Exception:
        # BUG FIX: db_sched_duration_sec previously had no fallback here,
        # so the log call below raised NameError whenever the logcontext
        # was unavailable.
        ru_utime, ru_stime = (0, 0)
        db_txn_count, db_txn_duration_sec = (0, 0)
        db_sched_duration_sec = 0
        evt_db_fetch_count = 0

    end_time = time.time()

    # need to decode as it could be raw utf-8 bytes
    # from a IDN servname in an auth header
    authenticated_entity = self.authenticated_entity
    if authenticated_entity is not None:
        authenticated_entity = authenticated_entity.decode("utf-8", "replace")

    # ...or could be raw utf-8 bytes in the User-Agent header.
    # N.B. if you don't do this, the logger explodes cryptically
    # with maximum recursion trying to log errors about
    # the charset problem.
    # c.f. https://github.com/matrix-org/synapse/issues/3471
    user_agent = self.get_user_agent()
    if user_agent is not None:
        user_agent = user_agent.decode("utf-8", "replace")

    self.site.access_logger.info(
        "%s - %s - {%s}"
        " Processed request: %.3fsec (%.3fsec, %.3fsec) (%.3fsec/%.3fsec/%d)"
        " %sB %s \"%s %s %s\" \"%s\" [%d dbevts]",
        self.getClientIP(),
        self.site.site_tag,
        authenticated_entity,
        end_time - self.start_time,
        ru_utime,
        ru_stime,
        db_sched_duration_sec,
        db_txn_duration_sec,
        int(db_txn_count),
        self.sentLength,
        self.code,
        self.method,
        self.get_redacted_uri(),
        self.clientproto,
        user_agent,
        evt_db_fetch_count,
    )

    try:
        self.request_metrics.stop(end_time, self)
    except Exception as e:
        # metrics failures must not break request handling
        logger.warn("Failed to stop metrics: %r", e)
def call_later(self, delay, callback, *args, **kwargs):
    """Schedule `callback(*args, **kwargs)` on the reactor after `delay`
    seconds, preserving the caller's logcontext."""
    captured_context = LoggingContext.current_context()

    def _fire(*cb_args, **cb_kwargs):
        with PreserveLoggingContext():
            # reinstate the captured context for the callback's duration
            LoggingContext.thread_local.current_context = captured_context
            callback(*cb_args, **cb_kwargs)

    # schedule from the sentinel context, per the logcontext rules
    with PreserveLoggingContext():
        return reactor.callLater(delay, _fire, *args, **kwargs)
def call_later(self, delay, callback):
    """Register `callback` to fire at now+delay on this (test) clock,
    restoring the caller's logcontext when it runs."""
    captured_context = LoggingContext.current_context()

    def _fire():
        LoggingContext.thread_local.current_context = captured_context
        callback()

    timer = (self.now + delay, _fire)
    self.timers.append(timer)
    return timer
def test_wait_for_previous_lookups(self):
    """wait_for_previous_lookups must complete immediately (keeping our
    logcontext) when there is nothing to wait for, and block (resetting to
    the sentinel) when an earlier lookup for the same server is pending."""
    sentinel_context = LoggingContext.current_context()

    kr = keyring.Keyring(self.hs)

    lookup_1_deferred = defer.Deferred()
    lookup_2_deferred = defer.Deferred()

    with LoggingContext("one") as context_one:
        context_one.request = "one"

        wait_1_deferred = kr.wait_for_previous_lookups(
            ["server1"],
            {"server1": lookup_1_deferred},
        )

        # there were no previous lookups, so the deferred should be ready
        self.assertTrue(wait_1_deferred.called)
        # ... so we should have preserved the LoggingContext.
        self.assertIs(LoggingContext.current_context(), context_one)
        wait_1_deferred.addBoth(self.check_context, "one")

    with LoggingContext("two") as context_two:
        context_two.request = "two"

        # set off another wait. It should block because the first lookup
        # hasn't yet completed.
        wait_2_deferred = kr.wait_for_previous_lookups(
            ["server1"],
            {"server1": lookup_2_deferred},
        )
        self.assertFalse(wait_2_deferred.called)
        # ... so we should have reset the LoggingContext.
        self.assertIs(LoggingContext.current_context(), sentinel_context)
        wait_2_deferred.addBoth(self.check_context, "two")

        # let the first lookup complete (in the sentinel context)
        lookup_1_deferred.callback(None)

        # now the second wait should complete and restore our
        # loggingcontext.
        yield wait_2_deferred
def __enter__(self):
    """Start timing; if no logcontext is active, create one so resource
    usage can be tracked; register this block for in-flight metrics."""
    self.start = self.clock.time()
    ctx = LoggingContext.current_context()
    if not ctx:
        ctx = LoggingContext("Measure")
        ctx.__enter__()
        self.created_context = True
    self.start_context = ctx
    self.start_usage = ctx.get_resource_usage()

    in_flight.register((self.name,), self._update_in_flight)
def call_later(self, delay, callback, *args, **kwargs):
    """Register `callback(*args, **kwargs)` to fire at now+delay on this
    (test) clock, restoring the caller's logcontext when it runs."""
    captured_context = LoggingContext.current_context()

    def _fire():
        LoggingContext.thread_local.current_context = captured_context
        callback(*args, **kwargs)

    # timer entry: [fire time, callback, cancelled flag]
    timer = [self.now + delay, _fire, False]
    self.timers.append(timer)
    return timer
def __enter__(self):
    """Start timing; if no logcontext is active, create one so resource
    usage can be tracked; register this block for in-flight metrics."""
    self.start = self.clock.time()
    ctx = LoggingContext.current_context()
    if not ctx:
        ctx = LoggingContext("Measure")
        ctx.__enter__()
        self.created_context = True
    self.start_context = ctx
    self.start_usage = ctx.get_resource_usage()

    in_flight.register((self.name,), self._update_in_flight)
def start(self, time_sec, name, method):
    """Begin tracking this request's metrics."""
    self.name = name
    self.method = method
    self.start = time_sec
    self.start_context = LoggingContext.current_context()

    # _request_stats records resource usage that we have already added
    # to the "in flight" metrics.
    self._request_stats = self.start_context.get_resource_usage()

    _in_flight_requests.add(self)
def check_logcontext():
    """Poll until the callback has completed, then assert the logcontext
    was reset to the sentinel and fire d2 accordingly.

    `callback_completed`, `sentinel_context` and `d2` are free variables
    from the enclosing test scope.
    """
    if not callback_completed[0]:
        # not done yet: check again shortly
        reactor.callLater(0.01, check_logcontext)
        return

    # make sure that the context was reset before it got thrown back
    # into the reactor
    try:
        self.assertIs(LoggingContext.current_context(), sentinel_context)
        d2.callback(None)
    except BaseException:
        # route the assertion failure into d2 so the test sees it
        d2.errback(twisted.python.failure.Failure())
def test_make_deferred_yieldable(self):
    """make_deferred_yieldable must reset the logcontext while the wrapped
    deferred is pending and restore it on completion."""
    # a function which returns an incomplete deferred, but doesn't follow
    # the synapse rules.
    def blocking_function():
        d = defer.Deferred()
        reactor.callLater(0, d.callback, None)
        return d

    sentinel_context = LoggingContext.current_context()

    with LoggingContext() as context_one:
        context_one.request = "one"

        d1 = logcontext.make_deferred_yieldable(blocking_function())
        # make sure that the context was reset by make_deferred_yieldable
        self.assertIs(LoggingContext.current_context(), sentinel_context)

        yield d1

        # now it should be restored
        self._check_test_key("one")
def __enter__(self):
    """Start the measurement; if no logcontext is active, warn and create
    one so resource usage can still be tracked."""
    self.start = self.clock.time_msec()
    ctx = LoggingContext.current_context()
    if not ctx:
        logger.warn("Entered Measure without log context: %s", self.name)
        ctx = LoggingContext("Measure")
        ctx.__enter__()
        self.created_context = True
    self.start_context = ctx

    # snapshot the starting usage so __exit__ can record deltas
    self.ru_utime, self.ru_stime = ctx.get_resource_usage()
    self.db_txn_count = ctx.db_txn_count
    self.db_txn_duration = ctx.db_txn_duration
def __enter__(self):
    """Start the measurement; if no logcontext is active, create one so
    resource usage can still be tracked."""
    self.start = self.clock.time_msec()
    ctx = LoggingContext.current_context()
    if not ctx:
        ctx = LoggingContext("Measure")
        ctx.__enter__()
        self.created_context = True
    self.start_context = ctx

    # snapshot the starting usage so __exit__ can record deltas
    self.ru_utime, self.ru_stime = ctx.get_resource_usage()
    self.db_txn_count = ctx.db_txn_count
    self.db_txn_duration_ms = ctx.db_txn_duration_ms
    self.db_sched_duration_ms = ctx.db_sched_duration_ms
def test_logcontext_is_preserved_on_cancellation(self):
    """When timeout_deferred cancels the underlying deferred, the errbacks
    must run in the caller's logcontext and it must be restored at the end."""
    blocking_was_cancelled = [False]

    @defer.inlineCallbacks
    def blocking():
        # waits forever unless cancelled; records that cancellation happened
        non_completing_d = Deferred()
        with logcontext.PreserveLoggingContext():
            try:
                yield non_completing_d
            except CancelledError:
                blocking_was_cancelled[0] = True
                raise

    with logcontext.LoggingContext("one") as context_one:
        # the errbacks should be run in the test logcontext
        def errback(res, deferred_name):
            self.assertIs(
                LoggingContext.current_context(),
                context_one,
                "errback %s run in unexpected logcontext %s"
                % (deferred_name, LoggingContext.current_context()),
            )
            return res

        original_deferred = blocking()
        original_deferred.addErrback(errback, "orig")
        timing_out_d = timeout_deferred(original_deferred, 1.0, self.clock)
        self.assertNoResult(timing_out_d)
        # while blocked we should be in the sentinel context
        self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
        timing_out_d.addErrback(errback, "timingout")

        # advance the (test) clock past the timeout
        self.clock.pump((1.0,))

        self.assertTrue(
            blocking_was_cancelled[0], "non-completing deferred was not cancelled"
        )
        self.failureResultOf(timing_out_d, defer.TimeoutError)
        self.assertIs(LoggingContext.current_context(), context_one)