def do_lookup():
    with LoggingContext("one") as ctx:
        # Kick off the SRV lookup; it must not complete synchronously.
        lookup_d = resolver.resolve_service(service_name)
        self.assertNoResult(lookup_d)

        # While we are blocked on the lookup, the logcontext should have
        # been reset to the sentinel.
        self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)

        result = yield lookup_d

        # Once the deferred fires, our own context must be back in place.
        self.assertIs(LoggingContext.current_context(), ctx)

        return result
def test_log_output(self):
    """
    When a structured logging config is given, Synapse will use it.
    """
    beginner = FakeBeginner()
    publisher = setup_logging(self.hs, self.hs.config, logBeginner=beginner)

    # Emit a single event through a logger wired to the configured publisher.
    logger = Logger(namespace="tests.logging.test_structured", observer=publisher)
    with LoggingContext("testcontext", request="somereq"):
        logger.info("Hello there, {name}!", name="steve")

    # Read back everything the JSON log file captured and flatten to text.
    with open(self.homeserver_log, "r") as log_file:
        rendered = [
            eventAsText(entry, includeTimestamp=False)
            for entry in eventsFromJSONLogFile(log_file)
        ]
    combined = "\n".join(rendered)

    # Both the startup banner and our message should have been written.
    self.assertTrue("***** STARTING SERVER *****" in combined)
    self.assertTrue("Hello there, steve!" in combined)
def _get_events_from_cache_or_db(self, event_ids, allow_rejected=False):
    """Fetch a bunch of events from the cache or the database.

    If events are pulled from the database, they will be cached for future
    lookups.

    Args:
        event_ids (Iterable[str]): The event_ids of the events to fetch
        allow_rejected (bool): Whether to include rejected events

    Returns:
        Deferred[Dict[str, _EventCacheEntry]]: map from event id to result
    """
    # Start with whatever is already in the in-memory cache.
    event_entry_map = self._get_events_from_cache(
        event_ids, allow_rejected=allow_rejected
    )

    missing = [eid for eid in event_ids if eid not in event_entry_map]

    if missing:
        # Record the fetch against the current logcontext's resource usage.
        LoggingContext.current_context().record_event_fetch(len(missing))

        # Note that _get_events_from_db is also responsible for turning db rows
        # into FrozenEvents (via _get_event_from_row), which involves seeing if
        # the events have been redacted, and if so pulling the redaction event
        # out of the database to check it.
        fetched = yield self._get_events_from_db(
            missing, allow_rejected=allow_rejected
        )
        event_entry_map.update(fetched)

    return event_entry_map
def activate(self, span, finish_on_close):
    """
    Makes a Span active.

    Args:
        span (Span): the span that should become active.
        finish_on_close (Boolean): whether Span should be automatically
            finished when Scope.close() is called.

    Returns:
        Scope to control the end of the active period for *span*. It is a
        programming error to neglect to call Scope.close() on the returned
        instance.
    """
    enter_logcontext = False
    ctx = LoggingContext.current_context()

    if ctx is LoggingContext.sentinel:
        # There is no logcontext to attach the scope to, so don't let the
        # scope affect anything: log the misuse and hand back a Scope with
        # no manager.
        logger.error("Tried to activate scope outside of loggingcontext")
        return Scope(None, span)
    elif ctx.scope is not None:
        # The current context already has an active scope, so create a child
        # context to hold the new one. We want the logging scope to look
        # exactly the same, so we give it a blank suffix.
        ctx = nested_logging_context("")
        enter_logcontext = True

    # Install the new scope on the (possibly nested) context.
    scope = _LogContextScope(self, span, ctx, enter_logcontext, finish_on_close)
    ctx.scope = scope
    return scope
def test_collects_logcontext(self):
    """
    Test that log outputs have the attached logging context.
    """
    log_config = {"drains": {}}

    # Begin the logger with our config
    beginner = FakeBeginner()
    publisher = setup_structured_logging(
        self.hs, self.hs.config, log_config, logBeginner=beginner
    )

    captured = []
    publisher.addObserver(captured.append)

    # Emit a single event from within a logging context.
    logger = Logger(
        namespace="tests.logging.test_structured", observer=beginner.observers[0]
    )
    with LoggingContext("testcontext", request="somereq"):
        logger.info("Hello there, {name}!", name="steve")

    # Exactly one event should have been observed, carrying the request id.
    self.assertEqual(len(captured), 1)
    self.assertEqual(captured[0]["request"], "somereq")
def s3_download_task(s3_client, bucket, key, deferred, parent_logcontext):
    """Attempts to download a file from S3.

    Runs on a worker thread; all interaction with `deferred` is marshalled
    back to the reactor thread via callFromThread.

    Args:
        s3_client: boto3 s3 client
        bucket (str): The S3 bucket which may have the file
        key (str): The key of the file
        deferred (Deferred[_S3Responder|None]): If file exists
            resolved with an _S3Responder instance, if it doesn't
            exist then resolves with None.
        parent_logcontext (LoggingContext): the logcontext to report logs
            and metrics against.
    """
    with LoggingContext(parent_context=parent_logcontext):
        logger.info("Fetching %s from S3", key)

        try:
            resp = s3_client.get_object(Bucket=bucket, Key=key)
        except botocore.exceptions.ClientError as e:
            if e.response["Error"]["Code"] in ("404", "NoSuchKey",):
                # A missing object is an expected outcome: resolve with None.
                logger.info("Media %s not found in S3", key)
                reactor.callFromThread(deferred.callback, None)
                return

            # Any other S3 error is propagated as a failure.
            reactor.callFromThread(deferred.errback, Failure())
            return

        # Hand a responder back to the reactor thread, then stream the body
        # into it from this worker thread.
        producer = _S3Responder()
        reactor.callFromThread(deferred.callback, producer)
        _stream_to_producer(reactor, producer, resp["Body"], timeout=90.0)
async def outer():
    with LoggingContext("c") as outer_ctx:
        try:
            await delay_cancellation(inner())
            self.fail("`CancelledError` was not raised")
        except CancelledError:
            # Cancellation must not have clobbered our logcontext.
            self.assertEqual(outer_ctx, current_context())
async def testfunc():
    self._check_test_key("one")

    sleep_d = Clock(reactor).sleep(0)
    # While we are waiting on the reactor we should have dropped down to the
    # sentinel context.
    self.assertIs(LoggingContext.current_context(), LoggingContext.sentinel)

    await sleep_d
    # ... and our context should be restored once the sleep completes.
    self._check_test_key("one")
def test_cant_hide_direct_ancestors(self):
    """
    If you send a message, you must be able to provide the direct prev_events
    that said event references.
    """

    def post_json(destination, path, data, headers=None, timeout=0):
        # If it asks us for new missing events, give them NOTHING
        if path.startswith("/_matrix/federation/v1/get_missing_events/"):
            return {"events": []}

    self.http_client.post_json = post_json

    # Figure out what the most recent event is
    most_recent = self.successResultOf(
        maybeDeferred(
            self.homeserver.get_datastore().get_latest_event_ids_in_room,
            self.room_id,
        )
    )[0]

    # Now lie about an event: claim a prev_event ("two:test.serv") that the
    # remote refuses to divulge.
    lying_event = FrozenEvent(
        {
            "room_id": self.room_id,
            "sender": "@baduser:test.serv",
            "event_id": "one:test.serv",
            "depth": 1000,
            "origin_server_ts": 1,
            "type": "m.room.message",
            "origin": "test.serv",
            "content": {"body": "hewwo?"},
            "auth_events": [],
            "prev_events": [("two:test.serv", {}), (most_recent, {})],
        }
    )

    with LoggingContext(request="lying_event"):
        d = self.handler.on_receive_pdu(
            "test.serv", lying_event, sent_to_us_directly=True
        )

        # Step the reactor, so the database fetches come back
        self.reactor.advance(1)

    # on_receive_pdu should throw an error
    failure = self.failureResultOf(d)
    self.assertEqual(
        failure.value.args[0],
        (
            "ERROR 403: Your server isn't divulging details about prev_events "
            "referenced in this event."
        ),
    )

    # Make sure the invalid event isn't there
    extrem = maybeDeferred(
        self.homeserver.get_datastore().get_latest_event_ids_in_room, self.room_id
    )
    self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv")
def _start_key_lookups(self, verify_requests):
    """Sets off the key fetches for each verify request

    Once each fetch completes, verify_request.key_ready will be resolved.

    Args:
        verify_requests (List[VerifyJsonRequest]):
    """
    try:
        # Remember the calling logcontext so we can restore it when releasing
        # the per-server locks later.
        ctx = LoggingContext.current_context()

        # map from server name to a set of outstanding request ids
        server_to_request_ids = {}

        for verify_request in verify_requests:
            server_name = verify_request.server_name
            request_id = id(verify_request)
            server_to_request_ids.setdefault(server_name, set()).add(request_id)

        # Wait for any previous lookups to complete before proceeding.
        yield self.wait_for_previous_lookups(server_to_request_ids.keys())

        # take out a lock on each of the servers by sticking a Deferred in
        # key_downloads
        for server_name in server_to_request_ids.keys():
            self.key_downloads[server_name] = defer.Deferred()
            logger.debug("Got key lookup lock on %s", server_name)

        # When we've finished fetching all the keys for a given server_name,
        # drop the lock by resolving the deferred in key_downloads.
        def drop_server_lock(server_name):
            d = self.key_downloads.pop(server_name)
            d.callback(None)

        def lookup_done(res, verify_request):
            # Fires (success or failure) when one verify request completes.
            server_name = verify_request.server_name
            server_requests = server_to_request_ids[server_name]
            server_requests.remove(id(verify_request))

            # if there are no more requests for this server, we can drop the lock.
            if not server_requests:
                with PreserveLoggingContext(ctx):
                    logger.debug("Releasing key lookup lock on %s", server_name)

                    # ... but not immediately, as that can cause stack explosions if
                    # we get a long queue of lookups.
                    self.clock.call_later(0, drop_server_lock, server_name)

            return res

        for verify_request in verify_requests:
            verify_request.key_ready.addBoth(lookup_done, verify_request)

        # Actually start fetching keys.
        self._get_server_verify_keys(verify_requests)
    except Exception:
        # Best-effort: failures here are logged rather than propagated.
        logger.exception("Error starting key lookups")
def render(self, resrc):
    # this is called once a Resource has been found to serve the request; in our
    # case the Resource in question will normally be a JsonResource.

    # create a LogContext for this request
    request_id = self.get_request_id()
    logcontext = self.logcontext = LoggingContext(request_id)
    logcontext.request = request_id

    # override the Server header which is set by twisted
    self.setHeader("Server", self.site.server_version_string)

    with PreserveLoggingContext(self.logcontext):
        # we start the request metrics timer here with an initial stab
        # at the servlet name. For most requests that name will be
        # JsonResource (or a subclass), and JsonResource._async_render
        # will update it once it picks a servlet.
        servlet_name = resrc.__class__.__name__
        self._started_processing(servlet_name)

        Request.render(self, resrc)

        # record the arrival of the request *after*
        # dispatching to the handler, so that the handler
        # can update the servlet name in the request
        # metrics
        requests_counter.labels(self.get_method(), self.request_metrics.name).inc()
def first_lookup():
    with LoggingContext("11") as context_11:
        context_11.request = "11"

        verify_deferreds = kr.verify_json_objects_for_server(
            [("server10", json1, 0, "test10"), ("server11", {}, 0, "test11")]
        )

        # the unsigned json should be rejected pretty quickly
        self.assertTrue(verify_deferreds[1].called)
        try:
            yield verify_deferreds[1]
            self.assertFalse("unsigned json didn't cause a failure")
        except SynapseError:
            pass

        # the signed json is still pending on the key fetch.
        self.assertFalse(verify_deferreds[0].called)
        verify_deferreds[0].addBoth(self.check_context, None)

        yield make_deferred_yieldable(verify_deferreds[0])

        # let verify_json_objects_for_server finish its work before we kill the
        # logcontext
        yield self.clock.sleep(0)
def __call__(self, event: dict) -> None:
    """
    Consume a log event and emit it to the parent observer after filtering
    and adding log context information.

    Args:
        event (dict): a Twisted log event.
    """
    # Filter out some useless events that Twisted outputs
    if "log_text" in event:
        log_text = event["log_text"]

        if log_text.startswith("DNSDatagramProtocol starting on "):
            return

        if log_text.startswith("(UDP Port "):
            return

        # Bug fix: the original indexed event["log_format"] directly, which
        # raises KeyError for events that carry log_text but no log_format
        # key. Use .get() so such events fall through to the observer instead
        # of blowing up the logging pipeline.
        if log_text.startswith("Timing out client") or event.get(
            "log_format", ""
        ).startswith("Timing out client"):
            return

    context = LoggingContext.current_context()

    # Copy the context information to the log event.
    if context is not None:
        context.copy_to_twisted_log_entry(event)
    else:
        # If there's no logging context, not even the root one, we might be
        # starting up or it might be from non-Synapse code. Log it as if it
        # came from the root logger.
        event["request"] = None
        event["scope"] = None

    self.observer(event)
def run_in_context(f, *args, **kwargs):
    with LoggingContext("testctx") as ctx:
        # setting the "request" prop makes it easier to follow what's going
        # on in the logs.
        ctx.request = "testctx"
        result = yield f(*args, **kwargs)
        return result
def test_cache(self):
    class Cls(object):
        def __init__(self):
            self.mock = mock.Mock()

        @descriptors.cached()
        def fn(self, arg1, arg2):
            pass

        @descriptors.cachedList("fn", "args1", inlineCallbacks=True)
        def list_fn(self, args1, arg2):
            # the lookup should run in the caller's logcontext ("c1")
            assert LoggingContext.current_context().request == "c1"
            # we want this to behave like an asynchronous function
            yield run_on_reactor()
            assert LoggingContext.current_context().request == "c1"
            return self.mock(args1, arg2)

    with LoggingContext() as c1:
        c1.request = "c1"
        obj = Cls()
        obj.mock.return_value = {10: "fish", 20: "chips"}
        d1 = obj.list_fn([10, 20], 2)
        # while the lookup is in flight we should be in the sentinel context
        self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
        r = yield d1
        self.assertEqual(LoggingContext.current_context(), c1)
        obj.mock.assert_called_once_with([10, 20], 2)
        self.assertEqual(r, {10: "fish", 20: "chips"})
        obj.mock.reset_mock()

        # a call with different params should call the mock again
        obj.mock.return_value = {30: "peas"}
        r = yield obj.list_fn([20, 30], 2)
        # only the uncached key (30) should be passed through to the mock
        obj.mock.assert_called_once_with([30], 2)
        self.assertEqual(r, {20: "chips", 30: "peas"})
        obj.mock.reset_mock()

        # all the values should now be cached
        r = yield obj.fn(10, 2)
        self.assertEqual(r, "fish")
        r = yield obj.fn(20, 2)
        self.assertEqual(r, "chips")
        r = yield obj.fn(30, 2)
        self.assertEqual(r, "peas")
        r = yield obj.list_fn([10, 20, 30], 2)
        obj.mock.assert_not_called()
        self.assertEqual(r, {10: "fish", 20: "chips", 30: "peas"})
def test_with_request_context(self):
    """
    Information from the logging context request should be added to the JSON
    response.
    """
    handler = logging.StreamHandler(self.output)
    handler.setFormatter(JsonFormatter())
    handler.addFilter(LoggingContextFilter())
    logger = self.get_logger(handler)

    # A full request isn't needed here.
    site = Mock(spec=["site_tag", "server_version_string", "getResourceFor"])
    site.site_tag = "test-site"
    site.server_version_string = "Server v1"
    site.reactor = Mock()
    request = SynapseRequest(FakeChannel(site, None), site)
    # Call requestReceived to finish instantiating the object.
    request.content = BytesIO()
    # Partially skip some of the internal processing of SynapseRequest.
    request._started_processing = Mock()
    request.request_metrics = Mock(spec=["name"])
    with patch.object(Request, "render"):
        request.requestReceived(b"POST", b"/_matrix/client/versions", b"1.1")

    # Also set the requester to ensure the processing works.
    request.requester = "@foo:test"

    # Log one line from within a context parented on the request's own
    # logcontext, so the filter picks up the request metadata.
    with LoggingContext(
        request.get_request_id(), parent_context=request.logcontext
    ):
        logger.info("Hello there, %s!", "wally")

    log = self.get_log_line()

    # The terse logger includes additional request information, if possible.
    expected_log_keys = [
        "log",
        "level",
        "namespace",
        "request",
        "ip_address",
        "site_tag",
        "requester",
        "authenticated_entity",
        "method",
        "url",
        "protocol",
        "user_agent",
    ]
    self.assertCountEqual(log.keys(), expected_log_keys)
    self.assertEqual(log["log"], "Hello there, wally!")
    self.assertTrue(log["request"].startswith("POST-"))
    self.assertEqual(log["ip_address"], "127.0.0.1")
    self.assertEqual(log["site_tag"], "test-site")
    self.assertEqual(log["requester"], "@foo:test")
    self.assertEqual(log["authenticated_entity"], "@foo:test")
    self.assertEqual(log["method"], "POST")
    self.assertEqual(log["url"], "/_matrix/client/versions")
    self.assertEqual(log["protocol"], "1.1")
    self.assertEqual(log["user_agent"], "")
def test_cant_hide_direct_ancestors(self):
    """
    If you send a message, you must be able to provide the direct prev_events
    that said event references.
    """

    async def post_json(destination, path, data, headers=None, timeout=0):
        # If it asks us for new missing events, give them NOTHING
        if path.startswith("/_matrix/federation/v1/get_missing_events/"):
            return {"events": []}

    self.http_client.post_json = post_json

    # Figure out what the most recent event is
    most_recent = self.get_success(
        self.store.get_latest_event_ids_in_room(self.room_id)
    )[0]

    # Now lie about an event: reference a prev_event ("two:test.serv") that
    # the remote will refuse to divulge.
    lying_event = make_event_from_dict(
        {
            "room_id": self.room_id,
            "sender": "@baduser:test.serv",
            "event_id": "one:test.serv",
            "depth": 1000,
            "origin_server_ts": 1,
            "type": "m.room.message",
            "origin": "test.serv",
            "content": {"body": "hewwo?"},
            "auth_events": [],
            "prev_events": [("two:test.serv", {}), (most_recent, {})],
        }
    )

    federation_event_handler = self.homeserver.get_federation_event_handler()
    with LoggingContext("test-context"):
        failure = self.get_failure(
            federation_event_handler.on_receive_pdu("test.serv", lying_event),
            FederationError,
        )

    # on_receive_pdu should throw an error
    self.assertEqual(
        failure.value.args[0],
        (
            "ERROR 403: Your server isn't divulging details about prev_events "
            "referenced in this event."
        ),
    )

    # Make sure the invalid event isn't there
    extrem = self.get_success(self.store.get_latest_event_ids_in_room(self.room_id))
    self.assertEqual(extrem[0], "$join:test.serv")
def test_simple(self):
    # First lookup: one id exists, the other does not.
    with LoggingContext(name="test") as ctx:
        seen = self.get_success(
            self.store.have_seen_events("room1", [self.event_ids[0], "event19"])
        )
        self.assertEqual(seen, {self.event_ids[0]})

        # that should result in a single db query
        self.assertEqual(ctx.get_resource_usage().db_txn_count, 1)

    # a second lookup of the same events should cause no queries
    with LoggingContext(name="test") as ctx:
        seen = self.get_success(
            self.store.have_seen_events("room1", [self.event_ids[0], "event19"])
        )
        self.assertEqual(seen, {self.event_ids[0]})
        self.assertEqual(ctx.get_resource_usage().db_txn_count, 0)
def test_simple(self):
    """Test that we cache events that we pull from the DB."""
    with LoggingContext("test") as ctx:
        self.get_success(self.store.get_event(self.event_id))

        # The event was not yet cached, so exactly one DB fetch is expected.
        self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1)
def _store_file():
    # Run the (blocking) S3 upload under the caller's logcontext so that
    # logs and metrics are attributed to the right request.
    with LoggingContext(parent_context=parent_logcontext):
        self._get_s3_client().upload_file(
            Filename=os.path.join(self.cache_directory, path),
            Bucket=self.bucket,
            Key=path,
            ExtraArgs={"StorageClass": self.storage_class},
        )
def __init__(self, clock, name):
    self.clock = clock
    self.name = name

    # Work out which logcontext we were started from, so the measurement
    # context can be parented to it.
    active = current_context()
    if active:
        assert isinstance(active, LoggingContext)
        parent = active
    else:
        logger.warning(
            "Starting metrics collection %r from sentinel context: metrics will be lost",
            name,
        )
        parent = None

    self._logging_context = LoggingContext("Measure[%s]" % (self.name,), parent)
    self.start = None
def test_cache_logcontexts(self):
    """Check that logcontexts are set and restored correctly when using the cache."""
    complete_lookup = defer.Deferred()

    class Cls(object):
        @descriptors.cached()
        def fn(self, arg1):
            @defer.inlineCallbacks
            def inner_fn():
                # block until the test fires complete_lookup
                with PreserveLoggingContext():
                    yield complete_lookup
                return 1

            return inner_fn()

    @defer.inlineCallbacks
    def do_lookup():
        with LoggingContext() as c1:
            c1.name = "c1"
            r = yield obj.fn(1)
            # our context should be restored after the cache lookup completes
            self.assertEqual(LoggingContext.current_context(), c1)
        return r

    def check_result(r):
        self.assertEqual(r, 1)

    obj = Cls()

    # set off a deferred which will do a cache lookup
    d1 = do_lookup()
    self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
    d1.addCallback(check_result)

    # and another
    d2 = do_lookup()
    self.assertEqual(LoggingContext.current_context(), LoggingContext.sentinel)
    d2.addCallback(check_result)

    # let the lookup complete
    complete_lookup.callback(None)

    return defer.gatherResults([d1, d2])
def func(i, sleep=False):
    with LoggingContext("func(%s)" % i) as func_ctx:
        with (yield linearizer.queue("")):
            # while we hold the linearizer, our own context must be active
            self.assertEqual(current_context(), func_ctx)
            if sleep:
                yield Clock(reactor).sleep(0)

        # ... and it must be restored after releasing it.
        self.assertEqual(current_context(), func_ctx)
def test_backfill_with_many_backward_extremities(self): """ Check that we can backfill with many backward extremities. The goal is to make sure that when we only use a portion of backwards extremities(the magic number is more than 5), no errors are thrown. Regression test, see #11027 """ # create the room user_id = self.register_user("kermit", "test") tok = self.login("kermit", "test") requester = create_requester(user_id) room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) ev1 = self.helper.send(room_id, "first message", tok=tok) # Create "many" backward extremities. The magic number we're trying to # create more than is 5 which corresponds to the number of backward # extremities we slice off in `_maybe_backfill_inner` for _ in range(0, 8): event_handler = self.hs.get_event_creation_handler() event, context = self.get_success( event_handler.create_event( requester, { "type": "m.room.message", "content": { "msgtype": "m.text", "body": "message connected to fake event", }, "room_id": room_id, "sender": user_id, }, prev_event_ids=[ ev1["event_id"], # We're creating an backward extremity each time thanks # to this fake event generate_fake_event_id(), ], ) ) self.get_success( event_handler.handle_new_client_event(requester, event, context) ) current_depth = 1 limit = 100 with LoggingContext("receive_pdu"): # Make sure backfill still works d = run_in_background( self.hs.get_federation_handler().maybe_backfill, room_id, current_depth, limit, ) self.get_success(d)
def stopProducing(self):
    """See IPushProducer.stopProducing"""
    # The consumer never wants any more data: tell the download thread to
    # stop, and wake it in case it is blocked waiting.
    self.stop_event.set()
    self.wakeup_event.set()

    if self.deferred.called:
        return

    with LoggingContext():
        self.deferred.errback(Exception("Consumer ask to stop producing"))
def test_cache(self):
    class Cls:
        def __init__(self):
            self.mock = mock.Mock()

        @descriptors.cached()
        def fn(self, arg1, arg2):
            pass

        @descriptors.cachedList("fn", "args1")
        async def list_fn(self, args1, arg2):
            # the lookup should run in the caller's logcontext ("c1")
            assert current_context().name == "c1"
            # we want this to behave like an asynchronous function
            await run_on_reactor()
            assert current_context().name == "c1"
            return self.mock(args1, arg2)

    with LoggingContext("c1") as c1:
        obj = Cls()
        obj.mock.return_value = {10: "fish", 20: "chips"}

        # start the lookup off
        d1 = obj.list_fn([10, 20], 2)
        # while the lookup is in flight we drop to the sentinel context
        self.assertEqual(current_context(), SENTINEL_CONTEXT)
        r = yield d1
        self.assertEqual(current_context(), c1)
        obj.mock.assert_called_once_with((10, 20), 2)
        self.assertEqual(r, {10: "fish", 20: "chips"})
        obj.mock.reset_mock()

        # a call with different params should call the mock again
        obj.mock.return_value = {30: "peas"}
        r = yield obj.list_fn([20, 30], 2)
        # only the uncached key (30) is passed through to the mock
        obj.mock.assert_called_once_with((30, ), 2)
        self.assertEqual(r, {20: "chips", 30: "peas"})
        obj.mock.reset_mock()

        # all the values should now be cached
        r = yield obj.fn(10, 2)
        self.assertEqual(r, "fish")
        r = yield obj.fn(20, 2)
        self.assertEqual(r, "chips")
        r = yield obj.fn(30, 2)
        self.assertEqual(r, "peas")
        r = yield obj.list_fn([10, 20, 30], 2)
        obj.mock.assert_not_called()
        self.assertEqual(r, {10: "fish", 20: "chips", 30: "peas"})

        # we should also be able to use a (single-use) iterable, and should
        # deduplicate the keys
        obj.mock.reset_mock()
        obj.mock.return_value = {40: "gravy"}
        iterable = (x for x in [10, 40, 40])
        r = yield obj.list_fn(iterable, 2)
        obj.mock.assert_called_once_with((40, ), 2)
        self.assertEqual(r, {10: "fish", 40: "gravy"})
def do_lookup():
    with LoggingContext("one") as lookup_ctx:
        # kick the lookup off and wait for it via ensureDeferred
        result = yield defer.ensureDeferred(
            resolver.resolve_service(service_name)
        )

        # our own context must be restored once the lookup completes
        self.assertIs(current_context(), lookup_ctx)

        return result
def test_query_via_event_cache(self):
    # fetch an event into the event cache
    self.get_success(self.store.get_event("event10"))

    # looking it up should now cause no db hits
    with LoggingContext(name="test") as ctx:
        res = self.get_success(self.store.have_seen_events("room1", ["event10"]))
        # Fix: use assertEqual — assertEquals is a deprecated alias removed
        # in Python 3.12's unittest.
        self.assertEqual(res, {"event10"})
        self.assertEqual(ctx.get_resource_usage().db_txn_count, 0)
async def second_lookup():
    with LoggingContext("context_12", request=FakeRequest("context_12")):
        verify_deferreds = kr.verify_json_objects_for_server(
            [("server10", json1, 0, "test")]
        )
        verify_deferreds[0].addBoth(self.check_context, None)

        # record our progress so the test can observe where we got to
        second_lookup_state[0] = 1
        await make_deferred_yieldable(verify_deferreds[0])
        second_lookup_state[0] = 2
def test_rejected_state_event_state(self):
    """
    Check that we store the state group correctly for rejected state events.

    Regression test for #6289.
    """
    # NOTE(review): the "******" fragments below look like automated
    # redaction/scrubbing artifacts, not real code — presumably the original
    # read OTHER_USER = "@user:" + OTHER_SERVER and
    # "sender": "@yetanotheruser:" + OTHER_SERVER, followed by "depth": ....
    # As written the second occurrence is not valid Python; confirm against
    # the upstream source.
    OTHER_SERVER = "otherserver"
    OTHER_USER = "******" + OTHER_SERVER

    # create the room
    user_id = self.register_user("kermit", "test")
    tok = self.login("kermit", "test")
    room_id = self.helper.create_room_as(room_creator=user_id, tok=tok)
    room_version = self.get_success(self.store.get_room_version(room_id))

    # pretend that another server has joined
    join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id)

    # check the state group
    sg = self.successResultOf(
        self.store._get_state_group_for_event(join_event.event_id)
    )

    # build and send an event which will be rejected
    ev = event_from_pdu_json(
        {
            "type": "org.matrix.test",
            "state_key": "test_key",
            "content": {},
            "room_id": room_id,
            "sender": "@yetanotheruser:"******"depth": join_event["depth"] + 1,
            "prev_events": [join_event.event_id],
            "auth_events": [],
            "origin_server_ts": self.clock.time_msec(),
        },
        room_version,
    )

    with LoggingContext("send_rejected"):
        d = run_in_background(
            self.hs.get_federation_event_handler().on_receive_pdu, OTHER_SERVER, ev
        )
    self.get_success(d)

    # that should have been rejected
    e = self.get_success(self.store.get_event(ev.event_id, allow_rejected=True))
    self.assertIsNotNone(e.rejected_reason)

    # ... and the state group should be the same as before
    sg2 = self.successResultOf(self.store._get_state_group_for_event(ev.event_id))
    self.assertEqual(sg, sg2)