def setUp(orig):
    # enable debugging of delayed calls - this means that we get a
    # traceback when a unit test exits leaving things on the reactor.
    twisted.internet.base.DelayedCall.debug = True

    # if we're not starting in the sentinel logcontext, then to be honest
    # all future bets are off.
    if LoggingContext.current_context() is not LoggingContext.sentinel:
        self.fail(
            "Test starting with non-sentinel logging context %s"
            % (LoggingContext.current_context(),)
        )

    old_level = logging.getLogger().level
    if level is not None and old_level != level:

        @around(self)
        def tearDown(orig):
            ret = orig()
            logging.getLogger().setLevel(old_level)
            return ret

        logging.getLogger().setLevel(level)

    return orig()
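# For reference: `around` (used via `@around(self)` above) is not defined in
# this snippet. A minimal sketch of such a CLOS-style helper, assuming it
# replaces the named method on `target` with a wrapper that receives the
# original method as its first argument:
def around(target):
    def _around(code):
        name = code.__name__
        orig = getattr(target, name)

        def new(*args, **kwargs):
            return code(orig, *args, **kwargs)

        setattr(target, name, new)

    return _around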
def errback(res, deferred_name):
    self.assertIs(
        LoggingContext.current_context(),
        context_one,
        "errback %s run in unexpected logcontext %s"
        % (deferred_name, LoggingContext.current_context()),
    )
    return res
@defer.inlineCallbacks
def func(i, sleep=False):
    with LoggingContext("func(%s)" % i) as lc:
        with (yield linearizer.queue("")):
            self.assertEqual(LoggingContext.current_context(), lc)
            if sleep:
                yield Clock(reactor).sleep(0)

        self.assertEqual(LoggingContext.current_context(), lc)
def check_ctx(r):
    if LoggingContext.current_context() != start_context:
        err = "%s completion of %s changed context from %s to %s" % (
            "Failure" if isinstance(r, Failure) else "Success",
            f,
            start_context,
            LoggingContext.current_context(),
        )
        print(err, file=sys.stderr)
        raise Exception(err)
    return r
@defer.inlineCallbacks
def test_make_deferred_yieldable_with_chained_deferreds(self):
    sentinel_context = LoggingContext.current_context()

    with LoggingContext() as context_one:
        context_one.request = "one"

        d1 = make_deferred_yieldable(_chained_deferred_function())
        # make sure that the context was reset by make_deferred_yieldable
        self.assertIs(LoggingContext.current_context(), sentinel_context)

        yield d1

        # now it should be restored
        self._check_test_key("one")
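# `_chained_deferred_function` is not shown above. A plausible sketch,
# assuming it returns a deferred whose callback chain includes a deferred
# that only completes on a later reactor tick (the case that
# make_deferred_yieldable must handle for chained deferreds):
def _chained_deferred_function():
    d = defer.succeed(None)

    def cb(res):
        d2 = defer.Deferred()
        reactor.callLater(0, d2.callback, res)
        return d2

    d.addCallback(cb)
    return d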
@defer.inlineCallbacks
def do_lookup():
    with LoggingContext() as c1:
        c1.name = "c1"
        try:
            d = obj.fn(1)
            self.assertEqual(
                LoggingContext.current_context(), LoggingContext.sentinel
            )
            yield d
            self.fail("No exception thrown")
        except SynapseError:
            pass

        self.assertEqual(LoggingContext.current_context(), c1)
@defer.inlineCallbacks
def do_lookup():
    with LoggingContext() as c1:
        c1.name = "c1"
        try:
            d = obj.fn(1)
            self.assertEqual(
                LoggingContext.current_context(), LoggingContext.sentinel
            )
            yield d
            self.fail("No exception thrown")
        except SynapseError:
            pass

        self.assertEqual(LoggingContext.current_context(), c1)

    # the cache should now be empty
    self.assertEqual(len(obj.fn.cache.cache), 0)
@defer.inlineCallbacks
def do_lookup():
    with LoggingContext("one") as ctx:
        resolve_d = resolver.resolve_service(service_name)

        self.assertNoResult(resolve_d)

        # should have reset to the sentinel context
        self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)

        result = yield resolve_d

        # should have restored our context
        self.assertIs(LoggingContext.current_context(), ctx)

        return result
async def testfunc():
    self._check_test_key("one")
    d = Clock(reactor).sleep(0)
    self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
    await d
    self._check_test_key("one")
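# A hypothetical driver for the coroutine above, assuming it is started with
# defer.ensureDeferred from inside the logcontext whose request tag is "one"
# (so that the _check_test_key assertions hold):
@defer.inlineCallbacks
def run_testfunc(self):
    with LoggingContext() as context_one:
        context_one.request = "one"
        yield defer.ensureDeferred(testfunc())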
def __call__(self, event: dict) -> None:
    """
    Consume a log event and emit it to the parent observer after filtering
    and adding log context information.

    Args:
        event (dict)
    """
    # Filter out some useless events that Twisted outputs
    if "log_text" in event:
        if event["log_text"].startswith("DNSDatagramProtocol starting on "):
            return

        if event["log_text"].startswith("(UDP Port "):
            return

        if event["log_text"].startswith("Timing out client") or event[
            "log_format"
        ].startswith("Timing out client"):
            return

    context = LoggingContext.current_context()

    # Copy the context information to the log event.
    if context is not None:
        context.copy_to_twisted_log_entry(event)
    else:
        # If there's no logging context, not even the root one, we might be
        # starting up or it might be from non-Synapse code. Log it as if it
        # came from the root logger.
        event["request"] = None
        event["scope"] = None

    self.observer(event)
@defer.inlineCallbacks
def _get_events_from_cache_or_db(self, event_ids, allow_rejected=False):
    """Fetch a bunch of events from the cache or the database.

    If events are pulled from the database, they will be cached for future
    lookups.

    Args:
        event_ids (Iterable[str]): The event_ids of the events to fetch
        allow_rejected (bool): Whether to include rejected events

    Returns:
        Deferred[Dict[str, _EventCacheEntry]]:
            map from event id to result
    """
    event_entry_map = self._get_events_from_cache(
        event_ids, allow_rejected=allow_rejected
    )

    missing_events_ids = [e for e in event_ids if e not in event_entry_map]

    if missing_events_ids:
        log_ctx = LoggingContext.current_context()
        log_ctx.record_event_fetch(len(missing_events_ids))

        # Note that _get_events_from_db is also responsible for turning db rows
        # into FrozenEvents (via _get_event_from_row), which involves seeing if
        # the events have been redacted, and if so pulling the redaction event
        # out of the database to check it.
        #
        missing_events = yield self._get_events_from_db(
            missing_events_ids, allow_rejected=allow_rejected
        )

        event_entry_map.update(missing_events)

    return event_entry_map
def activate(self, span, finish_on_close):
    """
    Makes a Span active.

    Args:
        span (Span): the span that should become active.
        finish_on_close (Boolean): whether the Span should be automatically
            finished when Scope.close() is called.

    Returns:
        Scope to control the end of the active period for *span*. It is a
        programming error to neglect to call Scope.close() on the returned
        instance.
    """
    enter_logcontext = False
    ctx = LoggingContext.current_context()

    if ctx is LoggingContext.sentinel:
        # there's no logcontext to attach the scope to; log an error and
        # return a detached scope
        logger.error("Tried to activate scope outside of loggingcontext")
        return Scope(None, span)
    elif ctx.scope is not None:
        # We want the logging scope to look exactly the same so we give it
        # a blank suffix
        ctx = nested_logging_context("")
        enter_logcontext = True

    scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
    ctx.scope = scope
    return scope
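# Hypothetical use of activate() above, assuming an opentracing-style tracer
# whose scope manager is the class this method belongs to, and a do_work()
# placeholder for the traced operation:
with LoggingContext("request"):
    span = tracer.start_span("do_work")
    scope = tracer.scope_manager.activate(span, finish_on_close=True)
    try:
        do_work()
    finally:
        scope.close()  # finishes the span because finish_on_close=True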
@defer.inlineCallbacks
def _start_key_lookups(self, verify_requests):
    """Sets off the key fetches for each verify request

    Once each fetch completes, verify_request.key_ready will be resolved.

    Args:
        verify_requests (List[VerifyJsonRequest]):
    """
    try:
        ctx = LoggingContext.current_context()

        # map from server name to a set of outstanding request ids
        server_to_request_ids = {}

        for verify_request in verify_requests:
            server_name = verify_request.server_name
            request_id = id(verify_request)
            server_to_request_ids.setdefault(server_name, set()).add(request_id)

        # Wait for any previous lookups to complete before proceeding.
        yield self.wait_for_previous_lookups(server_to_request_ids.keys())

        # take out a lock on each of the servers by sticking a Deferred in
        # key_downloads
        for server_name in server_to_request_ids.keys():
            self.key_downloads[server_name] = defer.Deferred()
            logger.debug("Got key lookup lock on %s", server_name)

        # When we've finished fetching all the keys for a given server_name,
        # drop the lock by resolving the deferred in key_downloads.
        def drop_server_lock(server_name):
            d = self.key_downloads.pop(server_name)
            d.callback(None)

        def lookup_done(res, verify_request):
            server_name = verify_request.server_name
            server_requests = server_to_request_ids[server_name]
            server_requests.remove(id(verify_request))

            # if there are no more requests for this server, we can drop the lock.
            if not server_requests:
                with PreserveLoggingContext(ctx):
                    logger.debug("Releasing key lookup lock on %s", server_name)

                    # ... but not immediately, as that can cause stack explosions if
                    # we get a long queue of lookups.
                    self.clock.call_later(0, drop_server_lock, server_name)

            return res

        for verify_request in verify_requests:
            verify_request.key_ready.addBoth(lookup_done, verify_request)

        # Actually start fetching keys.
        self._get_server_verify_keys(verify_requests)
    except Exception:
        logger.exception("Error starting key lookups")
@defer.inlineCallbacks
def test_cache(self):
    class Cls(object):
        def __init__(self):
            self.mock = mock.Mock()

        @descriptors.cached()
        def fn(self, arg1, arg2):
            pass

        @descriptors.cachedList("fn", "args1", inlineCallbacks=True)
        def list_fn(self, args1, arg2):
            assert LoggingContext.current_context().request == "c1"
            # we want this to behave like an asynchronous function
            yield run_on_reactor()
            assert LoggingContext.current_context().request == "c1"
            return self.mock(args1, arg2)

    with LoggingContext() as c1:
        c1.request = "c1"
        obj = Cls()
        obj.mock.return_value = {10: "fish", 20: "chips"}
        d1 = obj.list_fn([10, 20], 2)
        self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
        r = yield d1
        self.assertEqual(LoggingContext.current_context(), c1)
        obj.mock.assert_called_once_with([10, 20], 2)
        self.assertEqual(r, {10: "fish", 20: "chips"})
        obj.mock.reset_mock()

        # a call with different params should call the mock again
        obj.mock.return_value = {30: "peas"}
        r = yield obj.list_fn([20, 30], 2)
        obj.mock.assert_called_once_with([30], 2)
        self.assertEqual(r, {20: "chips", 30: "peas"})
        obj.mock.reset_mock()

        # all the values should now be cached
        r = yield obj.fn(10, 2)
        self.assertEqual(r, "fish")
        r = yield obj.fn(20, 2)
        self.assertEqual(r, "chips")
        r = yield obj.fn(30, 2)
        self.assertEqual(r, "peas")
        r = yield obj.list_fn([10, 20, 30], 2)
        obj.mock.assert_not_called()
        self.assertEqual(r, {10: "fish", 20: "chips", 30: "peas"})
def test_cache_logcontexts(self):
    """Check that logcontexts are set and restored correctly when
    using the cache."""

    complete_lookup = defer.Deferred()

    class Cls(object):
        @descriptors.cached()
        def fn(self, arg1):
            @defer.inlineCallbacks
            def inner_fn():
                with PreserveLoggingContext():
                    yield complete_lookup
                return 1

            return inner_fn()

    @defer.inlineCallbacks
    def do_lookup():
        with LoggingContext() as c1:
            c1.name = "c1"
            r = yield obj.fn(1)
            self.assertEqual(LoggingContext.current_context(), c1)
        return r

    def check_result(r):
        self.assertEqual(r, 1)

    obj = Cls()

    # set off a deferred which will do a cache lookup
    d1 = do_lookup()
    self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
    d1.addCallback(check_result)

    # and another
    d2 = do_lookup()
    self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
    d2.addCallback(check_result)

    # let the lookup complete
    complete_lookup.callback(None)

    return defer.gatherResults([d1, d2])
def __enter__(self):
    if self._logging_context:
        raise RuntimeError("Measure() objects cannot be re-used")

    self.start = self.clock.time()
    parent_context = LoggingContext.current_context()
    self._logging_context = LoggingContext(
        "Measure[%s]" % (self.name,), parent_context
    )
    self._logging_context.__enter__()
    in_flight.register((self.name,), self._update_in_flight)
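# Typical usage of Measure as a context manager, assuming the usual
# two-argument constructor taking a clock and a block name (the block name
# and do_expensive_work() are placeholders):
with Measure(clock, "fetch_events"):
    do_expensive_work()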
@defer.inlineCallbacks
def test_logcontexts_with_async_result(self):
    @defer.inlineCallbacks
    def cb():
        yield Clock(reactor).sleep(0)
        defer.returnValue("yay")

    @defer.inlineCallbacks
    def test():
        with LoggingContext("c") as c1:
            res = yield self.cache.fetch_or_execute(self.mock_key, cb)
            self.assertIs(LoggingContext.current_context(), c1)
            self.assertEqual(res, "yay")

    # run the test twice in parallel
    d = defer.gatherResults([test(), test()])
    self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
    yield d
    self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
def setUp(orig):
    # if we're not starting in the sentinel logcontext, then to be honest
    # all future bets are off.
    if LoggingContext.current_context() is not LoggingContext.sentinel:
        self.fail(
            "Test starting with non-sentinel logging context %s"
            % (LoggingContext.current_context(),)
        )

    old_level = logging.getLogger().level
    if level is not None and old_level != level:

        @around(self)
        def tearDown(orig):
            ret = orig()
            logging.getLogger().setLevel(old_level)
            return ret

        logging.getLogger().setLevel(level)

    return orig()
def call_later(self, delay, callback, *args, **kwargs):
    current_context = LoggingContext.current_context()

    def wrapped_callback():
        LoggingContext.thread_local.current_context = current_context
        callback(*args, **kwargs)

    t = [self.now + delay, wrapped_callback, False]
    self.timers.append(t)

    return t
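# A plausible counterpart to call_later() above, assuming the test Clock
# fires due timers when time is advanced; `t[2]` marks a timer as fired:
def advance_time(self, secs):
    self.now += secs
    for t in list(self.timers):
        if t[0] <= self.now and not t[2]:
            t[2] = True
            t[1]()  # wrapped_callback: restores the captured logcontext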
def __enter__(self):
    self.start = self.clock.time()
    self.start_context = LoggingContext.current_context()
    if not self.start_context:
        self.start_context = LoggingContext("Measure")
        self.start_context.__enter__()
        self.created_context = True

    self.start_usage = self.start_context.get_resource_usage()

    in_flight.register((self.name,), self._update_in_flight)
def check_logcontext():
    if not callback_completed[0]:
        reactor.callLater(0.01, check_logcontext)
        return

    # make sure that the context was reset before it got thrown back
    # into the reactor
    try:
        self.assertIs(LoggingContext.current_context(), sentinel_context)
        d2.callback(None)
    except BaseException:
        d2.errback(twisted.python.failure.Failure())
@defer.inlineCallbacks
def test_make_deferred_yieldable(self):
    # a function which returns an incomplete deferred, but doesn't follow
    # the synapse rules.
    def blocking_function():
        d = defer.Deferred()
        reactor.callLater(0, d.callback, None)
        return d

    sentinel_context = LoggingContext.current_context()

    with LoggingContext() as context_one:
        context_one.request = "one"

        d1 = make_deferred_yieldable(blocking_function())
        # make sure that the context was reset by make_deferred_yieldable
        self.assertIs(LoggingContext.current_context(), sentinel_context)

        yield d1

        # now it should be restored
        self._check_test_key("one")
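# For context, a minimal sketch of what make_deferred_yieldable is expected
# to do under the old LoggingContext API used in these snippets: if the
# deferred is incomplete, reset to the sentinel context now and arrange for
# the calling context to be restored when the deferred fires.
def make_deferred_yieldable(deferred):
    # completed (and unpaused) deferreds can be yielded as-is
    if not isinstance(deferred, defer.Deferred) or (
        deferred.called and not deferred.paused
    ):
        return deferred

    prev_context = LoggingContext.set_current_context(LoggingContext.sentinel)

    def _restore(result):
        LoggingContext.set_current_context(prev_context)
        return result

    deferred.addBoth(_restore)
    return deferred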
def start(self, time_sec, name, method):
    self.start = time_sec
    self.start_context = LoggingContext.current_context()
    self.name = name
    self.method = method

    # _request_stats records resource usage that we have already added
    # to the "in flight" metrics.
    self._request_stats = self.start_context.get_resource_usage()

    with _in_flight_requests_lock:
        _in_flight_requests.add(self)
def test_logcontext_is_preserved_on_cancellation(self):
    blocking_was_cancelled = [False]

    @defer.inlineCallbacks
    def blocking():
        non_completing_d = Deferred()
        with PreserveLoggingContext():
            try:
                yield non_completing_d
            except CancelledError:
                blocking_was_cancelled[0] = True
                raise

    with LoggingContext("one") as context_one:
        # the errbacks should be run in the test logcontext
        def errback(res, deferred_name):
            self.assertIs(
                LoggingContext.current_context(),
                context_one,
                "errback %s run in unexpected logcontext %s"
                % (deferred_name, LoggingContext.current_context()),
            )
            return res

        original_deferred = blocking()
        original_deferred.addErrback(errback, "orig")
        timing_out_d = timeout_deferred(original_deferred, 1.0, self.clock)
        self.assertNoResult(timing_out_d)
        self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)
        timing_out_d.addErrback(errback, "timingout")

        self.clock.pump((1.0,))

        self.assertTrue(
            blocking_was_cancelled[0], "non-completing deferred was not cancelled"
        )
        self.failureResultOf(timing_out_d, defer.TimeoutError)
        self.assertIs(LoggingContext.current_context(), context_one)
def wrapped(*args, **kwargs):
    start_context = LoggingContext.current_context()

    try:
        res = orig(*args, **kwargs)
    except Exception:
        if LoggingContext.current_context() != start_context:
            err = "%s changed context from %s to %s on exception" % (
                f,
                start_context,
                LoggingContext.current_context(),
            )
            print(err, file=sys.stderr)
            raise Exception(err)
        raise

    if not isinstance(res, Deferred) or res.called:
        if LoggingContext.current_context() != start_context:
            err = "%s changed context from %s to %s" % (
                f,
                start_context,
                LoggingContext.current_context(),
            )
            # print the error to stderr because otherwise all we
            # see in travis-ci is the 500 error
            print(err, file=sys.stderr)
            raise Exception(err)

        return res

    if LoggingContext.current_context() != LoggingContext.sentinel:
        err = (
            "%s returned incomplete deferred in non-sentinel context "
            "%s (start was %s)"
        ) % (f, LoggingContext.current_context(), start_context)
        print(err, file=sys.stderr)
        raise Exception(err)

    def check_ctx(r):
        if LoggingContext.current_context() != start_context:
            err = "%s completion of %s changed context from %s to %s" % (
                "Failure" if isinstance(r, Failure) else "Success",
                f,
                start_context,
                LoggingContext.current_context(),
            )
            print(err, file=sys.stderr)
            raise Exception(err)
        return r

    res.addBoth(check_ctx)
    return res
@defer.inlineCallbacks
def test_does_not_cache_failures(self):
    """Checks that, if the callback returns a failure, it is called again
    for the next request.
    """
    called = [False]

    def cb():
        if called[0]:
            # return a valid result the second time
            return defer.succeed(self.mock_http_response)

        called[0] = True
        return defer.fail(Exception("boo"))

    with LoggingContext("test") as test_context:
        try:
            yield self.cache.fetch_or_execute(self.mock_key, cb)
        except Exception as e:
            self.assertEqual(e.args[0], "boo")
        self.assertIs(LoggingContext.current_context(), test_context)

        res = yield self.cache.fetch_or_execute(self.mock_key, cb)
        self.assertEqual(res, self.mock_http_response)
        self.assertIs(LoggingContext.current_context(), test_context)
def stop(self, time_sec, response_code, sent_bytes):
    with _in_flight_requests_lock:
        _in_flight_requests.discard(self)

    context = LoggingContext.current_context()

    tag = ""
    if context:
        tag = context.tag

        if context != self.start_context:
            logger.warn(
                "Context has unexpectedly changed %r, %r",
                context,
                self.start_context,
            )
            return

    response_code = str(response_code)

    outgoing_responses_counter.labels(self.method, response_code).inc()

    response_count.labels(self.method, self.name, tag).inc()

    response_timer.labels(self.method, self.name, tag, response_code).observe(
        time_sec - self.start
    )

    resource_usage = context.get_resource_usage()

    response_ru_utime.labels(self.method, self.name, tag).inc(
        resource_usage.ru_utime
    )
    response_ru_stime.labels(self.method, self.name, tag).inc(
        resource_usage.ru_stime
    )
    response_db_txn_count.labels(self.method, self.name, tag).inc(
        resource_usage.db_txn_count
    )
    response_db_txn_duration.labels(self.method, self.name, tag).inc(
        resource_usage.db_txn_duration_sec
    )
    response_db_sched_duration.labels(self.method, self.name, tag).inc(
        resource_usage.db_sched_duration_sec
    )

    response_size.labels(self.method, self.name, tag).inc(sent_bytes)

    # We always call this at the end to ensure that we update the metrics
    # regardless of whether a call to /metrics was made while the request
    # was in flight.
    self.update_metrics()
def active(self):
    """
    Returns the currently active Scope which can be used to access the
    currently active Scope.span.

    If there is a non-null Scope, its wrapped Span becomes an implicit parent
    of any newly-created Span at Tracer.start_active_span() time.

    Returns:
        (Scope): the Scope that is active, or None if not available.
    """
    ctx = LoggingContext.current_context()
    if ctx is LoggingContext.sentinel:
        return None
    else:
        return ctx.scope
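# Hypothetical caller of `active` above: tag the currently active span, if
# there is one. (In opentracing proper, `active` is a property; here it is
# written as a plain method, so it is called as one.)
scope = scope_manager.active()
if scope is not None:
    scope.span.set_tag("handled_by", "main")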
def __exit__(self, exc_type, exc_val, exc_tb):
    # note: the original read `isinstance(exc_type, Exception)`, which is
    # always False for exception classes; issubclass implements the intent
    # of skipping metrics when the block raised.
    if (
        exc_type is not None and issubclass(exc_type, Exception)
    ) or not self.start_context:
        return

    in_flight.unregister((self.name,), self._update_in_flight)

    duration = self.clock.time() - self.start

    block_counter.labels(self.name).inc()
    block_timer.labels(self.name).inc(duration)

    context = LoggingContext.current_context()

    if context != self.start_context:
        logger.warn(
            "Context has unexpectedly changed from '%s' to '%s'. (%r)",
            self.start_context,
            context,
            self.name,
        )
        return

    if not context:
        logger.warn("Expected context. (%r)", self.name)
        return

    current = context.get_resource_usage()
    usage = current - self.start_usage
    try:
        block_ru_utime.labels(self.name).inc(usage.ru_utime)
        block_ru_stime.labels(self.name).inc(usage.ru_stime)
        block_db_txn_count.labels(self.name).inc(usage.db_txn_count)
        block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec)
        block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec)
    except ValueError:
        logger.warn(
            "Failed to save metrics! OLD: %r, NEW: %r", self.start_usage, current
        )

    if self.created_context:
        self.start_context.__exit__(exc_type, exc_val, exc_tb)
@defer.inlineCallbacks
def wait_for_previous_lookups(self, server_to_deferred):
    """Waits for any previous key lookups for the given servers to finish.

    Args:
        server_to_deferred (dict[str, Deferred]): server_name to deferred which
            gets resolved once we've finished looking up keys for that server.
            The Deferreds should be regular twisted ones which call their
            callbacks with no logcontext.

    Returns:
        a Deferred which resolves once all key lookups for the given servers
        have completed. Follows the synapse rules of logcontext preservation.
    """
    loop_count = 1
    while True:
        wait_on = [
            (server_name, self.key_downloads[server_name])
            for server_name in server_to_deferred.keys()
            if server_name in self.key_downloads
        ]
        if not wait_on:
            break
        logger.info(
            "Waiting for existing lookups for %s to complete [loop %i]",
            [w[0] for w in wait_on],
            loop_count,
        )
        with PreserveLoggingContext():
            yield defer.DeferredList((w[1] for w in wait_on))

        loop_count += 1

    ctx = LoggingContext.current_context()

    def rm(r, server_name_):
        with PreserveLoggingContext(ctx):
            logger.debug("Releasing key lookup lock on %s", server_name_)
            self.key_downloads.pop(server_name_, None)
        return r

    for server_name, deferred in server_to_deferred.items():
        logger.debug("Got key lookup lock on %s", server_name)
        self.key_downloads[server_name] = deferred
        deferred.addBoth(rm, server_name)