def test_nested_logging_context(self):
    """A nested logging context should derive its request name from its parent."""
    with LoggingContext(request="foo"):
        # The child context appends the suffix to the parent's request id.
        nested = logcontext.nested_logging_context(suffix="bar")
        self.assertEqual(nested.request, "foo-bar")
def competing_callback():
    """Run inside a separate logging context, yield to the reactor once, then
    verify our own context's test key survived the interleaving."""
    with LoggingContext() as ctx:
        ctx.request = "competing"
        # Yield across the reactor so another callback can run in between.
        yield clock.sleep(0)
        self._check_test_key("competing")
async def run():
    """Start the homeserver stub, then dispatch to the selected subcommand."""
    with LoggingContext("command"):
        # NOTE(review): _base.start() is not awaited here — presumably it is
        # synchronous in this version; confirm against its definition.
        _base.start(ss, [])
        await args.func(ss, args)
def test_with_context(self):
    """Attributes set on an active LoggingContext should be visible via the current context."""
    with LoggingContext() as ctx:
        ctx.request = "test"
        self._check_test_key("test")
def in_thread():
    """Body of the reactor thread: raise the fd soft limit, then run the
    reactor under a 'run' logging context."""
    # Uncomment to enable tracing of log context changes.
    # sys.settrace(logcontext_tracer)
    with LoggingContext("run"):
        change_resource_limit(hs.config.soft_file_limit)
        reactor.run()
def in_thread():
    """Raise the file-descriptor soft limit and spin the reactor inside a
    'run' logging context."""
    with LoggingContext("run"):
        change_resource_limit(hs.config.soft_file_limit)
        reactor.run()
def test_cant_hide_direct_ancestors(self):
    """
    If you send a message, you must be able to provide the direct
    prev_events that said event references.
    """

    def post_json(destination, path, data, headers=None, timeout=0):
        # If it asks us for new missing events, give them NOTHING
        if path.startswith("/_matrix/federation/v1/get_missing_events/"):
            return {"events": []}

    # Stub out the federation client so get_missing_events returns nothing.
    self.http_client.post_json = post_json

    # Figure out what the most recent event is
    most_recent = self.successResultOf(
        maybeDeferred(
            self.homeserver.datastore.get_latest_event_ids_in_room,
            self.room_id))[0]

    # Now lie about an event: it claims a prev_event ("two:test.serv") that
    # our server has never seen and that the remote refuses to divulge.
    lying_event = FrozenEvent({
        "room_id": self.room_id,
        "sender": "@baduser:test.serv",
        "event_id": "one:test.serv",
        "depth": 1000,
        "origin_server_ts": 1,
        "type": "m.room.message",
        "origin": "test.serv",
        "content": "hewwo?",
        "auth_events": [],
        "prev_events": [("two:test.serv", {}), (most_recent, {})],
    })

    with LoggingContext(request="lying_event"):
        d = self.handler.on_receive_pdu("test.serv", lying_event, sent_to_us_directly=True)

        # Step the reactor, so the database fetches come back
        self.reactor.advance(1)

    # on_receive_pdu should throw an error
    failure = self.failureResultOf(d)
    self.assertEqual(
        failure.value.args[0],
        ("ERROR 403: Your server isn't divulging details about prev_events "
         "referenced in this event."),
    )

    # Make sure the invalid event isn't there
    extrem = maybeDeferred(
        self.homeserver.datastore.get_latest_event_ids_in_room, self.room_id)
    self.assertEqual(self.successResultOf(extrem)[0], "$join:test.serv")
def _async_render_with_logging_context(self, request):
    """Render the request inside a fresh LoggingContext tagged with a unique id."""
    # Build an id like "GET-123" from the class-level counter, then bump it.
    request_id = "%s-%s" % (request.method, JsonResource._request_id)
    JsonResource._request_id += 1
    with LoggingContext(request_id) as ctx:
        ctx.request = request_id
        yield self._async_render(request)
def wrapped_request_handler(self, request):
    """Invoke a request handler under a per-request LoggingContext, recording
    metrics and converting exceptions into JSON error responses.

    NOTE(review): the `yield`s imply this is wrapped by an inlineCallbacks-style
    decorator applied outside this block — confirm at the definition site.
    """
    global _next_request_id
    # Tag the request with a unique id like "GET-42" for log correlation.
    request_id = "%s-%s" % (request.method, _next_request_id)
    _next_request_id += 1

    with LoggingContext(request_id) as request_context:
        with Measure(self.clock, "wrapped_request_handler"):
            request_metrics = RequestMetrics()
            request_metrics.start(self.clock, name=self.__class__.__name__)

            request_context.request = request_id
            with request.processing():
                try:
                    with PreserveLoggingContext(request_context):
                        # Some handlers take the metrics object as an extra argument.
                        if include_metrics:
                            yield request_handler(self, request, request_metrics)
                        else:
                            yield request_handler(self, request)
                except CodeMessageException as e:
                    code = e.code
                    if isinstance(e, SynapseError):
                        # Expected client-facing error: log at info, no traceback.
                        logger.info("%s SynapseError: %s - %s", request, code, e.msg)
                    else:
                        logger.exception(e)
                    outgoing_responses_counter.inc(request.method, str(code))
                    respond_with_json(
                        request, code, cs_exception(e), send_cors=True,
                        pretty_print=_request_user_agent_is_curl(request),
                        version_string=self.version_string,
                    )
                except Exception:
                    # failure.Failure() fishes the original Failure out
                    # of our stack, and thus gives us a sensible stack
                    # trace.
                    f = failure.Failure()
                    logger.error(
                        "Failed handle request %s.%s on %r: %r: %s",
                        request_handler.__module__,
                        request_handler.__name__,
                        self,
                        request,
                        f.getTraceback().rstrip(),
                    )
                    respond_with_json(
                        request,
                        500,
                        {
                            "error": "Internal server error",
                            "errcode": Codes.UNKNOWN,
                        },
                        send_cors=True,
                        pretty_print=_request_user_agent_is_curl(request),
                        version_string=self.version_string,
                    )
                finally:
                    try:
                        request_metrics.stop(self.clock, request)
                    except Exception as e:
                        # A metrics failure must never break the response path.
                        logger.warn("Failed to stop metrics: %r", e)
def run():
    """Apply resource limits and GC thresholds, then spin the reactor."""
    with LoggingContext("run"):
        change_resource_limit(ps.config.soft_file_limit)
        # GC thresholds are optional; only override when configured.
        thresholds = ps.config.gc_thresholds
        if thresholds:
            gc.set_threshold(*thresholds)
        reactor.run()
def main():
    """Entry point: perform setup() under a 'main' logging context."""
    with LoggingContext("main"):
        setup()
def run():
    """Spin the twisted reactor inside a 'run' logging context."""
    with LoggingContext("run"):
        reactor.run()
def run(_reactor):
    """Start the server stub and hand off to the chosen subcommand.

    The _reactor argument is accepted but unused — presumably supplied by
    twisted's task.react(); confirm at the call site.
    """
    with LoggingContext("command"):
        yield _base.start(ss, [])
        yield args.func(ss, args)
def main():
    """Top-level entry point: verify dependencies, build the homeserver, run it."""
    with LoggingContext("main"):
        # check base requirements
        check_requirements()
        homeserver = setup(sys.argv[1:])
        run(homeserver)
def test():
    """fetch_or_execute should return the result while restoring our logcontext."""
    with LoggingContext("c") as outer:
        result = yield self.cache.fetch_or_execute(self.mock_key, cb)
        # We should be back in the original context, with the cached value.
        self.assertIs(LoggingContext.current_context(), outer)
        self.assertEqual(result, "yay")
def main() -> None:
    """Console-script entry point: run start() under a 'main' logging context."""
    with LoggingContext("main"):
        start(sys.argv[1:])
def test_verify_json_objects_for_server_awaits_previous_requests(self):
    """A second verification request for a server with an in-flight key fetch
    must wait for that fetch rather than issuing a duplicate one, and every
    code path must restore the caller's logcontext.

    NOTE(review): `async` below appears to be a legacy helper module
    (pre-Python-3.7 naming, hence the odd `async .sleep` spelling) — confirm
    against this file's imports.
    """
    key1 = signedjson.key.generate_signing_key(1)

    kr = keyring.Keyring(self.hs)
    json1 = {}
    signedjson.sign.sign_json(json1, "server10", key1)

    # Canned perspectives-server response containing server10's key.
    persp_resp = {
        "server_keys": [
            self.mock_perspective_server.get_signed_key(
                "server10", signedjson.key.get_verify_key(key1)),
        ]
    }
    persp_deferred = defer.Deferred()

    @defer.inlineCallbacks
    def get_perspectives(**kwargs):
        # The fetch should run in the context that started it ("11").
        self.assertEquals(
            LoggingContext.current_context().test_key, "11",
        )
        with logcontext.PreserveLoggingContext():
            yield persp_deferred
        defer.returnValue(persp_resp)
    self.http_client.post_json.side_effect = get_perspectives

    with LoggingContext("11") as context_11:
        context_11.test_key = "11"

        # start off a first set of lookups
        res_deferreds = kr.verify_json_objects_for_server([
            ("server10", json1),
            ("server11", {})
        ])

        # the unsigned json should be rejected pretty quickly
        self.assertTrue(res_deferreds[1].called)
        try:
            yield res_deferreds[1]
            self.assertFalse("unsigned json didn't cause a failure")
        except SynapseError:
            pass

        # The signed lookup is still outstanding, blocked on persp_deferred.
        self.assertFalse(res_deferreds[0].called)
        res_deferreds[0].addBoth(self.check_context, None)

        # wait a tick for it to send the request to the perspectives server
        # (it first tries the datastore)
        yield async .sleep(0.005)
        self.http_client.post_json.assert_called_once()

        self.assertIs(LoggingContext.current_context(), context_11)

        context_12 = LoggingContext("12")
        context_12.test_key = "12"
        with logcontext.PreserveLoggingContext(context_12):
            # a second request for a server with outstanding requests
            # should block rather than start a second call
            self.http_client.post_json.reset_mock()
            self.http_client.post_json.return_value = defer.Deferred()

            res_deferreds_2 = kr.verify_json_objects_for_server(
                [("server10", json1)],
            )
            yield async .sleep(0.005)
            self.http_client.post_json.assert_not_called()

            res_deferreds_2[0].addBoth(self.check_context, None)

        # complete the first request
        with logcontext.PreserveLoggingContext():
            persp_deferred.callback(persp_resp)
        self.assertIs(LoggingContext.current_context(), context_11)

        # Both the original and the queued lookup should now resolve.
        with logcontext.PreserveLoggingContext():
            yield res_deferreds[0]
            yield res_deferreds_2[0]
logger.info("Running") change_resource_limit(config.soft_file_limit) if config.gc_thresholds: gc.set_threshold(*config.gc_thresholds) reactor.run() def start(): ss.get_state_handler().start_caching() ss.get_datastore().start_profiling() ss.replicate() reactor.callWhenRunning(start) if config.worker_daemonize: daemon = Daemonize( app="synapse-federation-reader", pid=config.worker_pid_file, action=run, auto_close_fds=False, verbose=True, logger=logger, ) daemon.start() else: run() if __name__ == '__main__': with LoggingContext("main"): start(sys.argv[1:])
async def run() -> None:
    """Bring the server stub up, then hand control to the requested subcommand."""
    with LoggingContext("command"):
        await _base.start(ss)
        await args.func(ss, args)