def errback(res, deferred_name):
    """Errback which asserts it runs in the expected logcontext, then
    passes the failure through unchanged."""
    observed = current_context()
    self.assertIs(
        observed,
        context_one,
        "errback %s run in unexpected logcontext %s" % (deferred_name, observed),
    )
    return res
def test_cache(self):
    """Test the basic behaviour of @cachedList: batching of lookups,
    caching of individual results, and logcontext handling across the
    asynchronous lookup function."""

    class Cls:
        def __init__(self):
            self.mock = mock.Mock()

        @descriptors.cached()
        def fn(self, arg1, arg2):
            pass

        @descriptors.cachedList("fn", "args1")
        async def list_fn(self, args1, arg2):
            # the lookup function should run in the caller's logcontext ...
            assert current_context().name == "c1"
            # we want this to behave like an asynchronous function
            await run_on_reactor()
            # ... even after returning to the reactor
            assert current_context().name == "c1"
            return self.mock(args1, arg2)

    with LoggingContext("c1") as c1:
        obj = Cls()
        obj.mock.return_value = {10: "fish", 20: "chips"}

        # start the lookup off
        d1 = obj.list_fn([10, 20], 2)
        # while the lookup is in flight, the logcontext should have been reset
        self.assertEqual(current_context(), SENTINEL_CONTEXT)
        r = yield d1
        # ... and restored once it completes
        self.assertEqual(current_context(), c1)
        obj.mock.assert_called_once_with((10, 20), 2)
        self.assertEqual(r, {10: "fish", 20: "chips"})
        obj.mock.reset_mock()

        # a call with different params should call the mock again
        obj.mock.return_value = {30: "peas"}
        r = yield obj.list_fn([20, 30], 2)
        # only the uncached key (30) should be passed to the lookup function
        obj.mock.assert_called_once_with((30, ), 2)
        self.assertEqual(r, {20: "chips", 30: "peas"})
        obj.mock.reset_mock()

        # all the values should now be cached
        r = yield obj.fn(10, 2)
        self.assertEqual(r, "fish")
        r = yield obj.fn(20, 2)
        self.assertEqual(r, "chips")
        r = yield obj.fn(30, 2)
        self.assertEqual(r, "peas")
        r = yield obj.list_fn([10, 20, 30], 2)
        obj.mock.assert_not_called()
        self.assertEqual(r, {10: "fish", 20: "chips", 30: "peas"})

        # we should also be able to use a (single-use) iterable, and should
        # deduplicate the keys
        obj.mock.reset_mock()
        obj.mock.return_value = {40: "gravy"}
        iterable = (x for x in [10, 40, 40])
        r = yield obj.list_fn(iterable, 2)
        obj.mock.assert_called_once_with((40, ), 2)
        self.assertEqual(r, {10: "fish", 40: "gravy"})
def test_make_deferred_yieldable_with_chained_deferreds(self):
    """make_deferred_yieldable should handle a chained deferred: reset to the
    sentinel context while in flight, and restore our context on completion."""
    sentinel_context = current_context()
    with LoggingContext("one"):
        d1 = make_deferred_yieldable(_chained_deferred_function())
        # make sure that the context was reset by make_deferred_yieldable
        self.assertIs(current_context(), sentinel_context)

        yield d1

        # now it should be restored
        self._check_test_key("one")
def check_ctx(r):
    """Verify that completing `f` left the logcontext unchanged; raise if
    it leaked a context change, passing the result/failure through otherwise."""
    ctx_now = current_context()
    if ctx_now == start_context:
        return r

    # dump any recorded context changes to stderr to aid debugging
    for change in changes:
        print(change, file=sys.stderr)

    outcome = "Failure" if isinstance(r, Failure) else "Success"
    err = "%s completion of %s changed context from %s to %s" % (
        outcome,
        f,
        start_context,
        ctx_now,
    )
    print(err, file=sys.stderr)
    raise Exception(err)
def activate(self, span, finish_on_close):
    """
    Makes a Span active.
    Args
        span (Span): the span that should become active.
        finish_on_close (Boolean): whether Span should be automatically
            finished when Scope.close() is called.
    Returns:
        Scope to control the end of the active period for
        *span*. It is a programming error to neglect to call
        Scope.close() on the returned instance.
    """

    enter_logcontext = False
    ctx = current_context()

    if not ctx:
        # There is no logcontext to attach the scope to, so return a scope
        # that is not tied to any context. (NOTE(review): original comment
        # was truncated — "We don't want this scope to affect." — confirm
        # intended wording.)
        logger.error("Tried to activate scope outside of loggingcontext")
        return Scope(None, span)
    elif ctx.scope is not None:
        # We want the logging scope to look exactly the same so we give it
        # a blank suffix
        ctx = nested_logging_context("")
        enter_logcontext = True

    scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
    ctx.scope = scope
    return scope
async def second_lookup_fetch(keys_to_fetch):
    """Stub key-fetcher for the second lookup: asserts it runs in the
    expected logcontext, then returns a fixed key result for server10."""
    # `assertEquals` is a deprecated alias (removed in Python 3.12);
    # use the canonical `assertEqual` instead.
    self.assertEqual(current_context().request, "context_12")
    return {
        "server10": {
            get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)
        }
    }
def _start_key_lookups(self, verify_requests):
    """Sets off the key fetches for each verify request

    Once each fetch completes, verify_request.key_ready will be resolved.

    Args:
        verify_requests (List[VerifyJsonRequest]):
    """

    try:
        # capture the current logcontext so that lookup_done (which runs as a
        # deferred callback, possibly in a different context) can log in it
        ctx = current_context()

        # map from server name to a set of outstanding request ids
        server_to_request_ids = {}

        for verify_request in verify_requests:
            server_name = verify_request.server_name
            request_id = id(verify_request)
            server_to_request_ids.setdefault(server_name, set()).add(request_id)

        # Wait for any previous lookups to complete before proceeding.
        yield self.wait_for_previous_lookups(server_to_request_ids.keys())

        # take out a lock on each of the servers by sticking a Deferred in
        # key_downloads
        for server_name in server_to_request_ids.keys():
            self.key_downloads[server_name] = defer.Deferred()
            logger.debug("Got key lookup lock on %s", server_name)

        # When we've finished fetching all the keys for a given server_name,
        # drop the lock by resolving the deferred in key_downloads.
        def drop_server_lock(server_name):
            d = self.key_downloads.pop(server_name)
            d.callback(None)

        def lookup_done(res, verify_request):
            # passthrough callback/errback: bookkeeping for the per-server lock
            server_name = verify_request.server_name
            server_requests = server_to_request_ids[server_name]
            server_requests.remove(id(verify_request))

            # if there are no more requests for this server, we can drop the lock.
            if not server_requests:
                with PreserveLoggingContext(ctx):
                    logger.debug("Releasing key lookup lock on %s", server_name)

                    # ... but not immediately, as that can cause stack explosions if
                    # we get a long queue of lookups.
                    self.clock.call_later(0, drop_server_lock, server_name)

            return res

        for verify_request in verify_requests:
            verify_request.key_ready.addBoth(lookup_done, verify_request)

        # Actually start fetching keys.
        self._get_server_verify_keys(verify_requests)
    except Exception:
        # best-effort: log rather than propagate, so one bad batch does not
        # take down the caller
        logger.exception("Error starting key lookups")
async def outer():
    # delay_cancellation should let the CancelledError through, but must
    # restore our logcontext before it reaches us
    with LoggingContext("c") as c:
        try:
            await delay_cancellation(inner())
            self.fail("`CancelledError` was not raised")
        except CancelledError:
            self.assertEqual(c, current_context())
def test_preserves_logcontext(self):
    """Test that logging contexts are preserved."""
    blocking_d: "Deferred[None]" = Deferred()

    async def inner():
        await make_deferred_yieldable(blocking_d)

    async def outer():
        with LoggingContext("c") as c:
            try:
                await delay_cancellation(inner())
                self.fail("`CancelledError` was not raised")
            except CancelledError:
                self.assertEqual(c, current_context())
                # Succeed with no error, unless the logging context is wrong.

    # Run and block inside `inner()`.
    d = defer.ensureDeferred(outer())
    self.assertEqual(SENTINEL_CONTEXT, current_context())

    d.cancel()

    # Now unblock. `outer()` will consume the `CancelledError` and check the
    # logging context.
    blocking_d.callback(None)
    self.successResultOf(d)
def do_lookup():
    """Run a SRV lookup inside a logcontext, checking that the context is
    reset to the sentinel while the lookup is in flight and restored after."""
    with LoggingContext("one") as ctx:
        resolve_d = resolver.resolve_service(service_name)

        self.assertNoResult(resolve_d)

        # should have reset to the sentinel context
        self.assertIs(current_context(), SENTINEL_CONTEXT)

        result = yield resolve_d

        # should have restored our context
        self.assertIs(current_context(), ctx)

        return result
def __call__(self, event: dict) -> None:
    """
    Consume a log event and emit it to the parent observer after filtering
    and adding log context information.

    Args:
        event (dict)
    """
    # Drop some noisy Twisted events before they reach the observer.
    if "log_text" in event:
        text = event["log_text"]
        if text.startswith("DNSDatagramProtocol starting on "):
            return
        if text.startswith("(UDP Port "):
            return
        if text.startswith("Timing out client") or event["log_format"].startswith(
            "Timing out client"
        ):
            return

    context = current_context()

    if context is None:
        # If there's no logging context, not even the root one, we might be
        # starting up or it might be from non-Synapse code. Log it as if it
        # came from the root logger.
        event["request"] = None
        event["scope"] = None
    else:
        # Copy the context information to the log event.
        context.copy_to_twisted_log_entry(event)

    self.observer(event)
def __call__(self, event: dict) -> None: """ Consume a log event and emit it to the parent observer after filtering and adding log context information. Args: event (dict) """ # Filter out some useless events that Twisted outputs if "log_text" in event: if event["log_text"].startswith( "DNSDatagramProtocol starting on "): return if event["log_text"].startswith("(UDP Port "): return if event["log_text"].startswith("Timing out client") or event[ "log_format"].startswith("Timing out client"): return context = current_context() # Copy the context information to the log event. context.copy_to_twisted_log_entry(event) self.observer(event)
def fetch(self, path, file_info):
    """See StorageProvider.fetch"""
    # capture the caller's logcontext so the download thread can log
    # against it when it resolves the deferred
    parent_context = current_context()
    deferred = defer.Deferred()
    self._download_pool.callInThread(
        swift_download_task,
        self.container,
        self.api_kwargs,
        path,
        deferred,
        parent_context,
    )
    return make_deferred_yieldable(deferred)
def do_lookup():
    """Do a cache lookup whose underlying function fails, and check the
    logcontext is handled correctly as the failure propagates."""
    with LoggingContext("c1") as c1:
        try:
            d = obj.fn(1)
            # while the lookup is in flight, we should be in the sentinel
            # context
            self.assertEqual(
                current_context(),
                SENTINEL_CONTEXT,
            )
            yield d
            self.fail("No exception thrown")
        except SynapseError:
            pass

        # the failure should have been raised with our context restored
        self.assertEqual(current_context(), c1)

    # the cache should now be empty
    self.assertEqual(len(obj.fn.cache.cache), 0)
def test_cache(self):
    """Test the basic behaviour of @cachedList: batching of lookups, caching
    of individual results, and logcontext handling.

    (This variant identifies the logcontext via its `request` attribute.)
    """

    class Cls:
        def __init__(self):
            self.mock = mock.Mock()

        @descriptors.cached()
        def fn(self, arg1, arg2):
            pass

        @descriptors.cachedList("fn", "args1")
        async def list_fn(self, args1, arg2):
            # the lookup function should run in the caller's logcontext ...
            assert current_context().request == "c1"
            # we want this to behave like an asynchronous function
            await run_on_reactor()
            # ... even after returning to the reactor
            assert current_context().request == "c1"
            return self.mock(args1, arg2)

    with LoggingContext() as c1:
        c1.request = "c1"
        obj = Cls()
        obj.mock.return_value = {10: "fish", 20: "chips"}
        d1 = obj.list_fn([10, 20], 2)
        # while the lookup is in flight, the logcontext should have been reset
        self.assertEqual(current_context(), SENTINEL_CONTEXT)
        r = yield d1
        # ... and restored once it completes
        self.assertEqual(current_context(), c1)
        obj.mock.assert_called_once_with([10, 20], 2)
        self.assertEqual(r, {10: "fish", 20: "chips"})
        obj.mock.reset_mock()

        # a call with different params should call the mock again
        obj.mock.return_value = {30: "peas"}
        r = yield obj.list_fn([20, 30], 2)
        # only the uncached key (30) should be passed to the lookup function
        obj.mock.assert_called_once_with([30], 2)
        self.assertEqual(r, {20: "chips", 30: "peas"})
        obj.mock.reset_mock()

        # all the values should now be cached
        r = yield obj.fn(10, 2)
        self.assertEqual(r, "fish")
        r = yield obj.fn(20, 2)
        self.assertEqual(r, "chips")
        r = yield obj.fn(30, 2)
        self.assertEqual(r, "peas")
        r = yield obj.list_fn([10, 20, 30], 2)
        obj.mock.assert_not_called()
        self.assertEqual(r, {10: "fish", 20: "chips", 30: "peas"})
def test_logcontexts_with_async_result(self):
    """Check that fetch_or_execute restores the caller's logcontext when the
    callback is asynchronous, including when two calls run in parallel."""

    @defer.inlineCallbacks
    def cb():
        # yield to the reactor so the callback is genuinely asynchronous
        yield Clock(reactor).sleep(0)
        return "yay"

    @defer.inlineCallbacks
    def test():
        with LoggingContext("c") as c1:
            res = yield self.cache.fetch_or_execute(self.mock_key, cb)
            # our logcontext should have been restored after the yield
            self.assertIs(current_context(), c1)
            self.assertEqual(res, "yay")

    # run the test twice in parallel
    d = defer.gatherResults([test(), test()])

    # while the tests are in flight we should be in the sentinel context
    self.assertIs(current_context(), SENTINEL_CONTEXT)
    yield d
    self.assertIs(current_context(), SENTINEL_CONTEXT)
def test_cache_logcontexts(self):
    """Check that logcontexts are set and restored correctly when
    using the cache."""

    complete_lookup = defer.Deferred()

    class Cls:
        @descriptors.cached()
        def fn(self, arg1):
            @defer.inlineCallbacks
            def inner_fn():
                # block (in the sentinel context) until complete_lookup fires
                with PreserveLoggingContext():
                    yield complete_lookup
                return 1

            return inner_fn()

    @defer.inlineCallbacks
    def do_lookup():
        with LoggingContext() as c1:
            c1.name = "c1"
            r = yield obj.fn(1)
            # the lookup should complete with our logcontext restored
            self.assertEqual(current_context(), c1)
            return r

    def check_result(r):
        self.assertEqual(r, 1)

    obj = Cls()

    # set off a deferred which will do a cache lookup
    d1 = do_lookup()
    self.assertEqual(current_context(), SENTINEL_CONTEXT)
    d1.addCallback(check_result)

    # and another
    d2 = do_lookup()
    self.assertEqual(current_context(), SENTINEL_CONTEXT)
    d2.addCallback(check_result)

    # let the lookup complete
    complete_lookup.callback(None)

    return defer.gatherResults([d1, d2])
def do_lookup():
    """Run a SRV lookup inside a logcontext and check that the context is
    restored once the lookup completes."""
    with LoggingContext("one") as ctx:
        resolve_d = resolver.resolve_service(service_name)
        result = yield defer.ensureDeferred(resolve_d)

        # should have restored our context
        self.assertIs(current_context(), ctx)

        return result
async def first_lookup_fetch(keys_to_fetch):
    """Stub key-fetcher for the first lookup: asserts it runs in the expected
    logcontext with the expected keys, blocks until first_lookup_deferred
    fires, then returns a fixed key result for server10."""
    # `assertEquals` is a deprecated alias (removed in Python 3.12);
    # use the canonical `assertEqual` instead.
    self.assertEqual(current_context().request, "context_11")
    self.assertEqual(keys_to_fetch, {"server10": {get_key_id(key1): 0}})

    # block until the test lets the first lookup complete
    await make_deferred_yieldable(first_lookup_deferred)

    return {
        "server10": {
            get_key_id(key1): FetchKeyResult(get_verify_key(key1), 100)
        }
    }
def test_make_deferred_yieldable(self):
    """make_deferred_yieldable should reset to the sentinel context while the
    deferred is in flight, and restore our context when it completes."""

    # a function which returns an incomplete deferred, but doesn't follow
    # the synapse rules.
    def blocking_function():
        d = defer.Deferred()
        reactor.callLater(0, d.callback, None)
        return d

    sentinel_context = current_context()
    with LoggingContext("one"):
        d1 = make_deferred_yieldable(blocking_function())
        # make sure that the context was reset by make_deferred_yieldable
        self.assertIs(current_context(), sentinel_context)

        yield d1

        # now it should be restored
        self._check_test_key("one")
def __enter__(self):
    # Measure objects are single-use: re-entering would clobber the stats
    if self._logging_context:
        raise RuntimeError("Measure() objects cannot be re-used")

    # record the start time before entering the measurement context
    self.start = self.clock.time()
    parent_context = current_context()
    self._logging_context = LoggingContext("Measure[%s]" % (self.name, ), parent_context)
    self._logging_context.__enter__()
    # register so the in-flight metrics can poll this block's usage
    in_flight.register((self.name, ), self._update_in_flight)
async def do_lookup():
    """Start a cachedList lookup which will be cancelled, and check that the
    CancelledError is raised with our logcontext restored."""
    with LoggingContext("c1") as c1:
        try:
            await obj.list_fn([123])
            self.fail("No CancelledError thrown")
        except CancelledError:
            self.assertEqual(
                current_context(),
                c1,
                "CancelledError was not raised with the correct logcontext",
            )
def setUp(orig):
    # if we're not starting in the sentinel logcontext, then to be honest
    # all future bets are off.
    if current_context():
        self.fail(
            "Test starting with non-sentinel logging context %s"
            % (current_context(), ))

    old_level = logging.getLogger().level
    if level is not None and old_level != level:
        # only register a tearDown (which restores the previous log level)
        # if we are actually going to change the level
        @around(self)
        def tearDown(orig):
            ret = orig()
            logging.getLogger().setLevel(old_level)
            return ret

        logging.getLogger().setLevel(level)

    # run the wrapped setUp
    return orig()
def call_later(self, delay, callback, *args, **kwargs):
    """Schedule `callback` to run after `delay` seconds of fake time,
    restoring the logcontext that was active when it was scheduled."""
    captured_context = current_context()

    def _run():
        # reinstate the scheduling logcontext before invoking the callback
        set_current_context(captured_context)
        callback(*args, **kwargs)

    timer = [self.now + delay, _run, False]
    self.timers.append(timer)
    return timer
def fetch(self, path, file_info):
    """See StorageProvider.fetch"""
    # capture the caller's logcontext so the S3 download thread can log
    # against it when it resolves the deferred
    parent_context = current_context()
    deferred = defer.Deferred()

    def _download():
        s3_download_task(
            self._get_s3_client(), self.bucket, path, deferred, parent_context
        )

    self._s3_pool.callInThread(_download)
    return make_deferred_yieldable(deferred)
def check_logcontext():
    """Poll until the callback has completed, then check that the logcontext
    was reset to the sentinel before control returned to the reactor."""
    if not callback_completed[0]:
        # not done yet: check again shortly
        reactor.callLater(0.01, check_logcontext)
        return

    # make sure that the context was reset before it got thrown back
    # into the reactor
    try:
        self.assertIs(current_context(), sentinel_context)
        d2.callback(None)
    except BaseException:
        # report the assertion failure via d2 rather than losing it in the
        # reactor
        d2.errback(twisted.python.failure.Failure())
def start(self, time_sec, name, method):
    """Record the start of a request.

    Args:
        time_sec: timestamp (in seconds) at which the request started
        name: name of the handler serving the request
        method: HTTP method of the request
    """
    self.start = time_sec
    self.start_context = current_context()
    self.name = name
    self.method = method

    # _request_stats records resource usage that we have already added
    # to the "in flight" metrics.
    self._request_stats = self.start_context.get_resource_usage()

    with _in_flight_requests_lock:
        _in_flight_requests.add(self)
def stop(self, time_sec, response_code, sent_bytes):
    """Record the completion of a request and update the response metrics.

    Args:
        time_sec: timestamp (in seconds) at which the request completed
        response_code: HTTP response code
        sent_bytes: number of bytes sent in the response
    """
    with _in_flight_requests_lock:
        _in_flight_requests.discard(self)

    context = current_context()

    tag = ""
    if context:
        tag = context.tag

        if context != self.start_context:
            # the logcontext has changed under us, so the resource usage
            # figures would be meaningless: log and bail out without
            # recording anything.
            # (fixed grammar in the message: "have" -> "has")
            logger.warning(
                "Context has unexpectedly changed %r, %r",
                context,
                self.start_context,
            )
            return

    response_code = str(response_code)

    outgoing_responses_counter.labels(self.method, response_code).inc()

    response_count.labels(self.method, self.name, tag).inc()

    response_timer.labels(self.method, self.name, tag, response_code).observe(
        time_sec - self.start
    )

    # NOTE(review): if `context` is falsy we still fall through to
    # get_resource_usage() below, which would fail — presumably a context is
    # always set by this point; confirm.
    resource_usage = context.get_resource_usage()

    response_ru_utime.labels(self.method, self.name, tag).inc(
        resource_usage.ru_utime
    )
    response_ru_stime.labels(self.method, self.name, tag).inc(
        resource_usage.ru_stime
    )
    response_db_txn_count.labels(self.method, self.name, tag).inc(
        resource_usage.db_txn_count
    )
    response_db_txn_duration.labels(self.method, self.name, tag).inc(
        resource_usage.db_txn_duration_sec
    )
    response_db_sched_duration.labels(self.method, self.name, tag).inc(
        resource_usage.db_sched_duration_sec
    )

    response_size.labels(self.method, self.name, tag).inc(sent_bytes)

    # We always call this at the end to ensure that we update the metrics
    # regardless of whether a call to /metrics while the request was in
    # flight.
    self.update_metrics()
def test_make_deferred_yieldable_with_await(self):
    """make_deferred_yieldable should also work on a coroutine which awaits a
    non-logcontext-following deferred."""

    # an async function which returns an incomplete coroutine, but doesn't
    # follow the synapse rules.
    async def blocking_function():
        d = defer.Deferred()
        reactor.callLater(0, d.callback, None)
        await d

    sentinel_context = current_context()
    with LoggingContext() as context_one:
        context_one.request = "one"

        d1 = make_deferred_yieldable(blocking_function())
        # make sure that the context was reset by make_deferred_yieldable
        self.assertIs(current_context(), sentinel_context)

        yield d1

        # now it should be restored
        self._check_test_key("one")
def test_logcontext_is_preserved_on_cancellation(self):
    """When timeout_deferred times out and cancels the underlying deferred,
    the errbacks should run in the caller's logcontext and the cancellation
    should reach the blocking deferred."""
    blocking_was_cancelled = [False]

    @defer.inlineCallbacks
    def blocking():
        non_completing_d = Deferred()
        # wait (in the sentinel context) on a deferred which never fires,
        # recording whether it gets cancelled
        with PreserveLoggingContext():
            try:
                yield non_completing_d
            except CancelledError:
                blocking_was_cancelled[0] = True
                raise

    with LoggingContext("one") as context_one:
        # the errbacks should be run in the test logcontext
        def errback(res, deferred_name):
            self.assertIs(
                current_context(),
                context_one,
                "errback %s run in unexpected logcontext %s"
                % (deferred_name, current_context()),
            )
            return res

        original_deferred = blocking()
        original_deferred.addErrback(errback, "orig")
        timing_out_d = timeout_deferred(original_deferred, 1.0, self.clock)
        self.assertNoResult(timing_out_d)
        self.assertIs(current_context(), SENTINEL_CONTEXT)
        timing_out_d.addErrback(errback, "timingout")

        # advance fake time past the timeout
        self.clock.pump((1.0,))

        self.assertTrue(
            blocking_was_cancelled[0], "non-completing deferred was not cancelled"
        )
        self.failureResultOf(timing_out_d, defer.TimeoutError)
        self.assertIs(current_context(), context_one)