def test_log_backpressure_info(self):
    """
    When backpressure is hit, DEBUG and INFO logs will be shed.
    """
    log_config = {
        "loggers": {"synapse": {"level": "DEBUG"}},
        "drains": {
            "tersejson": {
                "type": "network_json_terse",
                "host": "127.0.0.1",
                "port": 8000,
                "maximum_buffer": 10,
            }
        },
    }

    # Start the logging system under test with our config.
    beginner = FakeBeginner()
    setup_structured_logging(
        self.hs,
        self.hs.config,
        log_config,
        logBeginner=beginner,
        redirect_stdlib_logging=False,
    )
    logger = Logger(
        namespace="synapse.logging.test_terse_json",
        observer=beginner.observers[0],
    )

    # A few low-priority debug events first...
    for i in range(3):
        logger.debug("debug %s" % (i,))

    # ...then enough warnings to fill the buffer...
    for i in range(10):
        logger.warn("test warn %s" % (i,))

    # ...followed by some info events.
    for i in range(3):
        logger.info("test message %s" % (i,))

    # One final debug event pushes the buffer past its maximum.
    logger.debug("too much debug")

    # Allow the drain to (re)connect and flush.
    client, server = connect_client(self.reactor, 0)
    self.pump()

    # Only the 10 warnings survive; the debugs and infos were elided.
    logs = [json.loads(line) for line in server.data.decode("utf8").splitlines()]
    self.assertEqual(len(logs), 10)
    self.assertEqual(Counter(x["level"] for x in logs), {"WARN": 10})
def test_log_output(self):
    """
    The Terse JSON outputter delivers simplified structured logs over TCP.
    """
    log_config = {
        "drains": {
            "tersejson": {
                "type": "network_json_terse",
                "host": "127.0.0.1",
                "port": 8000,
            }
        }
    }

    # Start the logging system under test with our config.
    beginner = FakeBeginner()
    setup_structured_logging(
        self.hs, self.hs.config, log_config, logBeginner=beginner
    )
    logger = Logger(
        namespace="tests.logging.test_terse_json",
        observer=beginner.observers[0],
    )
    logger.info("Hello there, {name}!", name="wally")

    # Trigger the connection
    self.pump()
    _, server = connect_client(self.reactor, 0)

    # Trigger data being sent
    self.pump()

    # Exactly one log line should arrive, newline-terminated.
    logs = server.data.decode("utf8").splitlines()
    self.assertEqual(len(logs), 1)
    self.assertEqual(server.data.count(b"\n"), 1)

    log = json.loads(logs[0])

    # The terse logger should give us exactly this set of keys.
    expected_log_keys = [
        "log",
        "time",
        "level",
        "log_namespace",
        "request",
        "scope",
        "server_name",
        "name",
    ]
    self.assertEqual(set(log.keys()), set(expected_log_keys))

    # It contains the data we expect.
    self.assertEqual(log["name"], "wally")
def test_log_backpressure_cut_middle(self):
    """
    When backpressure is hit, and no more DEBUG and INFOs cannot be culled,
    it will cut the middle messages out.
    """
    log_config = {
        "loggers": {"synapse": {"level": "DEBUG"}},
        "drains": {
            "tersejson": {
                "type": "network_json_terse",
                "host": "127.0.0.1",
                "port": 8000,
                "maximum_buffer": 10,
            }
        },
    }

    # Start the logging system under test with our config.
    beginner = FakeBeginner()
    setup_structured_logging(
        self.hs,
        self.hs.config,
        log_config,
        logBeginner=beginner,
        redirect_stdlib_logging=False,
    )
    logger = Logger(
        namespace="synapse.logging.test_terse_json",
        observer=beginner.observers[0],
    )

    # Emit twice as many warnings as the buffer can hold; none of them
    # are sheddable by level, so the middle must be cut out instead.
    for i in range(20):
        logger.warn("test warn", num=i)

    # Allow the drain to (re)connect and flush.
    client, server = connect_client(self.reactor, 0)
    self.pump()

    # The first five and last five warnings made it through; the middle
    # ten were dropped.
    logs = [json.loads(line) for line in server.data.decode("utf8").splitlines()]
    self.assertEqual(len(logs), 10)
    self.assertEqual(Counter(x["level"] for x in logs), {"WARN": 10})
    self.assertEqual([0, 1, 2, 3, 4, 15, 16, 17, 18, 19], [x["num"] for x in logs])
def setup_logging(
    hs,
    config,
    use_worker_options=False,
    logBeginner: LogBeginner = globalLogBeginner,
) -> ILogObserver:
    """
    Set up the logging subsystem.

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use the 'worker_log_config' option
            instead of 'log_config'.

        logBeginner: The Twisted logBeginner to use.

    Returns:
        The "root" Twisted Logger observer, suitable for sending logs to from a
        Logger instance.
    """
    if use_worker_options:
        log_config = config.worker_log_config
    else:
        log_config = config.log_config

    def read_config(*args, callback=None):
        # Re-reads the log config from disk; also used as the SIGHUP handler,
        # with `callback` applying the freshly-loaded config.
        if log_config is None:
            return None

        with open(log_config, "rb") as f:
            log_config_body = yaml.safe_load(f.read())

        if callback:
            callback(log_config=log_config_body)
            logging.info("Reloaded log config from %s due to SIGHUP", log_config)

        return log_config_body

    log_config_body = read_config()

    # Pick the structured or stdlib logging pipeline based on the config,
    # and register the matching SIGHUP reload hook.
    if log_config_body and log_config_body.get("structured") is True:
        logger = setup_structured_logging(
            hs, config, log_config_body, logBeginner=logBeginner
        )
        appbase.register_sighup(read_config, callback=reload_structured_logging)
    else:
        logger = _setup_stdlib_logging(
            config, log_config_body, logBeginner=logBeginner
        )
        appbase.register_sighup(read_config, callback=_reload_stdlib_logging)

    # make sure that the first thing we log is a thing we can grep backwards
    # for
    logging.warning("***** STARTING SERVER *****")
    logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
    logging.info("Server hostname: %s", config.server_name)

    return logger
def test_collects_logcontext(self):
    """
    Test that log outputs have the attached logging context.
    """
    log_config = {"drains": {}}

    # Start the logging system under test with our config, and capture
    # everything the publisher emits.
    beginner = FakeBeginner()
    publisher = setup_structured_logging(
        self.hs, self.hs.config, log_config, logBeginner=beginner
    )

    logs = []
    publisher.addObserver(logs.append)

    # Emit a single event from inside a logging context.
    logger = Logger(
        namespace="tests.logging.test_structured",
        observer=beginner.observers[0],
    )
    with LoggingContext("testcontext", request="somereq"):
        logger.info("Hello there, {name}!", name="steve")

    # The context's request id should have been attached to the event.
    self.assertEqual(len(logs), 1)
    self.assertEqual(logs[0]["request"], "somereq")
def test_output_to_json_round_trip(self):
    """
    Synapse logs can be outputted to JSON and then read back again.
    """
    temp_dir = self.mktemp()
    os.mkdir(temp_dir)
    self.addCleanup(shutil.rmtree, temp_dir)

    json_log_file = os.path.abspath(os.path.join(temp_dir, "out.json"))

    log_config = {
        "drains": {"jsonfile": {"type": "file_json", "location": json_log_file}}
    }

    # Start the logging system under test with our config.
    beginner = FakeBeginner()
    setup_structured_logging(
        self.hs, self.hs.config, log_config, logBeginner=beginner
    )

    # Make a logger and send an event
    logger = Logger(
        namespace="tests.logging.test_structured",
        observer=beginner.observers[0],
    )
    logger.info("Hello there, {name}!", name="wally")

    # Read the log file and check it has the event we sent
    with open(json_log_file, "r") as f:
        logged_events = list(eventsFromJSONLogFile(f))
    self.assertEqual(len(logged_events), 1)

    # The event pulled from the file should render fine
    self.assertEqual(
        eventAsText(logged_events[0], includeTimestamp=False),
        "[tests.logging.test_structured#info] Hello there, wally!",
    )
def test_output_to_text(self):
    """
    Synapse logs can be outputted to text.
    """
    temp_dir = self.mktemp()
    os.mkdir(temp_dir)
    self.addCleanup(shutil.rmtree, temp_dir)

    log_file = os.path.abspath(os.path.join(temp_dir, "out.log"))

    log_config = {"drains": {"file": {"type": "file", "location": log_file}}}

    # Start the logging system under test with our config.
    beginner = FakeBeginner()
    setup_structured_logging(
        self.hs, self.hs.config, log_config, logBeginner=beginner
    )

    # Make a logger and send an event
    logger = Logger(
        namespace="tests.logging.test_structured",
        observer=beginner.observers[0],
    )
    logger.info("Hello there, {name}!", name="wally")

    # Read the log file and check it has the event we sent
    with open(log_file, "r") as f:
        logged_events = f.read().strip().split("\n")
    self.assertEqual(len(logged_events), 1)

    # The event pulled from the file should render fine (timestamp prefix
    # varies, so only the tail is checked).
    self.assertTrue(
        logged_events[0].endswith(
            " - tests.logging.test_structured - INFO - None - Hello there, wally!"
        )
    )
def _load_logging_config(log_config_path: str) -> None:
    """
    Configure the stdlib logging system from a log config file.

    Args:
        log_config_path: Path to a YAML logging configuration file.

    Raises:
        OSError: if the config file cannot be opened.
        yaml.YAMLError: if the file is not valid YAML.
    """
    with open(log_config_path, "rb") as f:
        # safe_load accepts a stream directly; no need to read() first.
        log_config = yaml.safe_load(f)

    if not log_config:
        logging.warning("Loaded a blank logging config?")

    # If the old structured logging configuration is being used, convert it to
    # the new style configuration. (.get() already returns a falsy None when
    # the key is absent, so a separate membership test is redundant.)
    if log_config.get("structured"):
        log_config = setup_structured_logging(log_config)

    logging.config.dictConfig(log_config)
async def main(reactor, loops):
    """
    Benchmark how long it takes to send `loops` messages.
    """
    # A throwaway TCP server that counts received lines and fires a
    # Deferred once it has seen `loops` of them.
    servers = []

    def protocol():
        p = LineCounter()
        servers.append(p)
        return p

    logger_factory = ServerFactory.forProtocol(protocol)
    logger_factory.wait_for = loops
    logger_factory.on_done = Deferred()
    port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1")

    hs, wait, cleanup = await make_homeserver(reactor)

    errors = StringIO()
    publisher = LogPublisher()
    mock_sys = Mock()
    beginner = LogBeginner(
        publisher, errors, mock_sys, warnings, initialBufferSize=loops
    )

    log_config = {
        "loggers": {"synapse": {"level": "DEBUG"}},
        "drains": {
            "tersejson": {
                "type": "network_json_terse",
                "host": "127.0.0.1",
                "port": port.getHost().port,
                "maximum_buffer": 100,
            }
        },
    }

    logger = Logger(
        namespace="synapse.logging.test_terse_json", observer=publisher
    )
    logging_system = setup_structured_logging(
        hs,
        hs.config,
        log_config,
        logBeginner=beginner,
        redirect_stdlib_logging=False,
    )

    # Wait for the drain to connect before starting the clock.
    observer = logging_system._observers[0]
    await observer._service.whenConnected()

    start = perf_counter()

    # Fire off the messages, backing off whenever the drain's buffer fills
    # up so we measure throughput rather than just queueing.
    for i in range(loops):
        logger.info("test message %s" % (i,))

        if len(observer._buffer) == observer.maximum_buffer:
            while len(observer._buffer) > observer.maximum_buffer / 2:
                await wait(0.01)

    await logger_factory.on_done

    end = perf_counter() - start

    logging_system.stop()
    port.stopListening()
    cleanup()

    return end