def setUp(self) -> None:
    """
    Replace the process-control and logging globals used by C{_runner}
    with test doubles, so each test can observe how they are used without
    actually exiting, signalling a process, or touching trial's already
    running global log system.
    """
    # A known PID (as if returned by getpid) and the matching PID-file
    # payload that tests can compare against.
    self.pid = 1337
    self.pidFileContent = ("%d\n" % (self.pid,)).encode("utf-8")

    # Doubles for exit() and kill() so no test can terminate or signal a
    # real process; they record their calls for later inspection.
    self.exit = DummyExit()
    self.kill = DummyKill()

    # A private log system built on dummy standard I/O and a dummy
    # warnings module, so we never install observers on the real global
    # log beginner (trial has already started that one).
    self.stdout = StringIO()
    self.stderr = StringIO()
    self.stdio = DummyStandardIO(self.stdout, self.stderr)
    self.warnings = DummyWarningsModule()
    self.globalLogPublisher = LogPublisher()
    self.globalLogBeginner = LogBeginner(
        self.globalLogPublisher, self.stdio.stderr, self.stdio, self.warnings
    )

    # Install all of the doubles into the module under test.
    self.patch(_runner, "exit", self.exit)
    self.patch(_runner, "kill", self.kill)
    self.patch(_runner, "stderr", self.stderr)
    self.patch(_runner, "globalLogBeginner", self.globalLogBeginner)
def test_startLoggingOverridesWarning(self):
    """
    startLogging() overrides global C{warnings.showwarning} such that
    warnings go to Twisted log observers.
    """
    self._startLoggingCleanup()
    # Use a private new-style publisher so observers added here don't leak
    # into the real global log system.
    newPublisher = NewLogPublisher()

    # A stand-in for the sys module: LogBeginner only needs stdout/stderr
    # attributes to exist.
    class SysModule:
        stdout = object()
        stderr = object()

    tempLogPublisher = LogPublisher(
        newPublisher,
        newPublisher,
        logBeginner=LogBeginner(newPublisher, StringIO(), SysModule, warnings),
    )

    # Trial reports warnings in two ways.  First, it intercepts the global
    # 'showwarning' function *itself*, after starting logging (by way of
    # the '_collectWarnings' function, which collects all warnings raised
    # around the test's 'run' method).  Second, it has a log observer
    # which immediately reports warnings when they're propagated into the
    # log system (which, in normal operation, happens only at the end of
    # the test case).  In order to avoid printing a spurious warning in
    # this test, we first replace the global log publisher in the module
    # with our own.
    self.patch(log, "theLogPublisher", tempLogPublisher)

    # And, one last thing, pretend we're starting from a fresh import, or
    # warnings.warn won't be patched at all.
    log._oldshowwarning = None

    # Global mutable state is bad, kids.  Stay in school.
    fakeFile = StringIO()

    # We didn't previously save log messages, so let's make sure we don't
    # save them any more: an event emitted *before* startLogging must not
    # be replayed to observers added afterwards.
    evt = {"pre-start": "event"}
    received = []

    def preStartObserver(x):
        # Record only the sentinel pre-start event, ignoring everything
        # else flowing through the publisher.
        if "pre-start" in x.keys():
            received.append(x)

    newPublisher(evt)
    newPublisher.addObserver(preStartObserver)
    log.startLogging(fakeFile, setStdout=False)
    self.addCleanup(tempLogPublisher._stopLogging)
    # The pre-start event must not have been buffered and replayed.
    self.assertEqual(received, [])
    # A warning raised now should be routed into the log file by the
    # patched showwarning.
    warnings.warn("hello!")
    output = fakeFile.getvalue()
    self.assertIn("UserWarning: hello!", output)
def setUp(self):
    """
    Redirect the module-level logging state of C{_logging} to private
    test doubles (string buffers, a fresh publisher/beginner, and an
    empty logger registry) so tests observe output in isolation.
    """
    # PyPy subprocesses warm up more slowly, so allow them extra time.
    is_pypy = platform.python_implementation() == 'PyPy'
    self._subprocess_timeout = 30 if is_pypy else 15

    # Capture buffers for anything written to standard out/err.
    self.stdout = NativeStringIO()
    self.stderr = NativeStringIO()

    self.publisher = LogPublisher()
    # NOTE(review): the beginner is constructed around a *fresh*
    # LogPublisher rather than self.publisher — presumably intentional so
    # begun observers don't feed self.publisher; confirm against callers.
    self.beginner = LogBeginner(LogPublisher(), self.stderr, sys, warnings)

    # Swap every piece of module-level state for its test double.
    for attribute, replacement in (
        ("_stderr", self.stderr),
        ("_stdout", self.stdout),
        ("log_publisher", self.publisher),
        ("globalLogBeginner", self.beginner),
        ("_loggers", WeakKeyDictionary()),
        ("_loglevel", "info"),
    ):
        self.patch(_logging, attribute, replacement)
async def main(reactor, loops):
    """
    Benchmark how long it takes to send `loops` messages.
    """
    # Line-counting server protocols created by the factory below; the
    # factory fires on_done once wait_for lines have been received.
    servers = []

    def protocol():
        p = LineCounter()
        servers.append(p)
        return p

    logger_factory = ServerFactory.forProtocol(protocol)
    logger_factory.wait_for = loops
    logger_factory.on_done = Deferred()
    # Listen on an OS-assigned port on loopback; the log handler will
    # connect back to it.
    port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1")

    # A fake homeserver config.
    class Config:
        server_name = "synmark-" + str(loops)
        no_redirect_stdio = True

    hs_config = Config()

    # To be able to sleep.
    clock = Clock(reactor)

    errors = StringIO()
    publisher = LogPublisher()
    mock_sys = Mock()
    # Size the log beginner's buffer to the benchmark so no early events
    # are dropped before logging is begun.
    beginner = LogBeginner(
        publisher, errors, mock_sys, warnings, initialBufferSize=loops
    )

    # stdlib logging dictConfig routing "synapse" DEBUG records through a
    # terse-JSON formatter into the RemoteHandler aimed at our port.
    log_config = {
        "version": 1,
        "loggers": {"synapse": {"level": "DEBUG", "handlers": ["tersejson"]}},
        "formatters": {
            "tersejson": {"class": "synapse.logging.TerseJsonFormatter"}
        },
        "handlers": {
            "tersejson": {
                "class": "synapse.logging.RemoteHandler",
                "host": "127.0.0.1",
                "port": port.getHost().port,
                "maximum_buffer": 100,
                "_reactor": reactor,
            }
        },
    }

    logger = logging.getLogger("synapse.logging.test_terse_json")
    _setup_stdlib_logging(
        hs_config,
        log_config,
        logBeginner=beginner,
    )

    # Wait for it to connect...
    for handler in logging.getLogger("synapse").handlers:
        if isinstance(handler, RemoteHandler):
            break
    else:
        raise RuntimeError("Improperly configured: no RemoteHandler found.")

    await handler._service.whenConnected()

    start = perf_counter()

    # Send a bunch of useful messages
    for i in range(0, loops):
        logger.info("test message %s", i)

        # Backpressure: if the handler's buffer fills, pause until it has
        # drained to half capacity before sending more.
        if len(handler._buffer) == handler.maximum_buffer:
            while len(handler._buffer) > handler.maximum_buffer / 2:
                await clock.sleep(0.01)

    # Wait until the server has counted every line we sent.
    await logger_factory.on_done

    end = perf_counter() - start

    handler.close()
    port.stopListening()

    # Elapsed wall-clock seconds for the whole send/receive cycle.
    return end
def test_startLogging(self) -> None:
    """
    L{Runner.startLogging} sets up a filtering observer with a log level
    predicate set to the given log level that contains a file observer of
    the given type which writes to the given file.
    """
    logFile = StringIO()

    # Patch the log beginner so that we don't try to start the already
    # running (started by trial) logging system.

    class LogBeginner:
        # Class-level capture of the observers handed to beginLoggingTo,
        # readable after startLogging() returns.
        observers: List[ILogObserver] = []

        def beginLoggingTo(self, observers: Iterable[ILogObserver]) -> None:
            LogBeginner.observers = list(observers)

    self.patch(_runner, "globalLogBeginner", LogBeginner())

    # Patch FilteringLogObserver so we can capture its arguments

    class MockFilteringLogObserver(FilteringLogObserver):
        # Class-level capture of the wrapped observer and predicates.
        observer: Optional[ILogObserver] = None
        predicates: List[LogLevelFilterPredicate] = []

        def __init__(
            self,
            observer: ILogObserver,
            predicates: Iterable[LogLevelFilterPredicate],
            negativeObserver: ILogObserver = cast(
                ILogObserver, lambda event: None
            ),
        ):
            MockFilteringLogObserver.observer = observer
            MockFilteringLogObserver.predicates = list(predicates)
            FilteringLogObserver.__init__(
                self, observer, predicates, negativeObserver
            )

    self.patch(_runner, "FilteringLogObserver", MockFilteringLogObserver)

    # Patch FileLogObserver so we can capture its arguments

    class MockFileLogObserver(FileLogObserver):
        # Class-level capture of the file the observer writes to.
        outFile: Optional[TextIO] = None

        def __init__(self, outFile: TextIO) -> None:
            MockFileLogObserver.outFile = outFile
            FileLogObserver.__init__(self, outFile, str)

    # Start logging
    runner = Runner(
        reactor=MemoryReactor(),
        defaultLogLevel=LogLevel.critical,
        logFile=logFile,
        fileLogObserverFactory=MockFileLogObserver,
    )
    runner.startLogging()

    # Check for a filtering observer
    self.assertEqual(len(LogBeginner.observers), 1)
    self.assertIsInstance(LogBeginner.observers[0], FilteringLogObserver)

    # Check log level predicate with the correct default log level
    self.assertEqual(len(MockFilteringLogObserver.predicates), 1)
    self.assertIsInstance(
        MockFilteringLogObserver.predicates[0], LogLevelFilterPredicate
    )
    self.assertIdentical(
        MockFilteringLogObserver.predicates[0].defaultLogLevel, LogLevel.critical
    )

    # Check for a file observer attached to the filtering observer
    observer = cast(MockFileLogObserver, MockFilteringLogObserver.observer)
    self.assertIsInstance(observer, MockFileLogObserver)

    # Check for the file we gave it
    self.assertIdentical(observer.outFile, logFile)
def test_startLogging(self):
    """
    L{Runner.startLogging} sets up a filtering observer with a log level
    predicate set to the given log level that contains a file observer of
    the given type which writes to the given file.
    """
    logFile = BytesIO()

    # Patch the log beginner so that we don't try to start the already
    # running (started by trial) logging system.

    class LogBeginner(object):
        def beginLoggingTo(self, observers):
            # Class-level capture of the observers handed to
            # beginLoggingTo, readable after startLogging() returns.
            LogBeginner.observers = observers

    self.patch(_runner, "globalLogBeginner", LogBeginner())

    # Patch FilteringLogObserver so we can capture its arguments

    class MockFilteringLogObserver(FilteringLogObserver):
        def __init__(
            self, observer, predicates,
            negativeObserver=lambda event: None
        ):
            # Class-level capture of the wrapped observer and predicates.
            MockFilteringLogObserver.observer = observer
            MockFilteringLogObserver.predicates = predicates
            FilteringLogObserver.__init__(
                self, observer, predicates, negativeObserver
            )

    self.patch(_runner, "FilteringLogObserver", MockFilteringLogObserver)

    # Patch FileLogObserver so we can capture its arguments

    class MockFileLogObserver(FileLogObserver):
        def __init__(self, outFile):
            # Class-level capture of the file the observer writes to.
            MockFileLogObserver.outFile = outFile
            FileLogObserver.__init__(self, outFile, str)

    # Start logging
    runner = Runner(
        defaultLogLevel=LogLevel.critical,
        logFile=logFile,
        fileLogObserverFactory=MockFileLogObserver,
    )
    runner.startLogging()

    # Check for a filtering observer
    self.assertEqual(len(LogBeginner.observers), 1)
    self.assertIsInstance(LogBeginner.observers[0], FilteringLogObserver)

    # Check log level predicate with the correct default log level
    self.assertEqual(len(MockFilteringLogObserver.predicates), 1)
    self.assertIsInstance(
        MockFilteringLogObserver.predicates[0], LogLevelFilterPredicate
    )
    self.assertIdentical(
        MockFilteringLogObserver.predicates[0].defaultLogLevel,
        LogLevel.critical
    )

    # Check for a file observer attached to the filtering observer
    self.assertIsInstance(
        MockFilteringLogObserver.observer, MockFileLogObserver
    )

    # Check for the file we gave it
    self.assertIdentical(
        MockFilteringLogObserver.observer.outFile, logFile
    )
async def main(reactor, loops):
    """
    Benchmark how long it takes to send `loops` messages.
    """
    # Line-counting server protocols created by the factory below; the
    # factory fires on_done once wait_for lines have been received.
    servers = []

    def protocol():
        p = LineCounter()
        servers.append(p)
        return p

    logger_factory = ServerFactory.forProtocol(protocol)
    logger_factory.wait_for = loops
    logger_factory.on_done = Deferred()
    # Listen on an OS-assigned port on loopback; the log drain will
    # connect back to it.
    port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1")

    hs, wait, cleanup = await make_homeserver(reactor)

    errors = StringIO()
    publisher = LogPublisher()
    mock_sys = Mock()
    # Size the log beginner's buffer to the benchmark so no early events
    # are dropped before logging is begun.
    beginner = LogBeginner(
        publisher, errors, mock_sys, warnings, initialBufferSize=loops
    )

    # Structured-logging config: a single terse-JSON network drain aimed
    # at the port opened above.
    log_config = {
        "loggers": {"synapse": {"level": "DEBUG"}},
        "drains": {
            "tersejson": {
                "type": "network_json_terse",
                "host": "127.0.0.1",
                "port": port.getHost().port,
                "maximum_buffer": 100,
            }
        },
    }

    logger = Logger(
        namespace="synapse.logging.test_terse_json", observer=publisher
    )
    logging_system = setup_structured_logging(
        hs, hs.config, log_config, logBeginner=beginner,
        redirect_stdlib_logging=False
    )

    # Wait for it to connect...
    await logging_system._observers[0]._service.whenConnected()

    start = perf_counter()

    # Send a bunch of useful messages
    for i in range(0, loops):
        logger.info("test message %s" % (i, ))

        # Backpressure: if the drain's buffer fills, pause until it has
        # drained to half capacity before sending more.
        if (len(logging_system._observers[0]._buffer)
                == logging_system._observers[0].maximum_buffer):
            while (len(logging_system._observers[0]._buffer)
                   > logging_system._observers[0].maximum_buffer / 2):
                await wait(0.01)

    # Wait until the server has counted every line we sent.
    await logger_factory.on_done

    end = perf_counter() - start

    logging_system.stop()
    port.stopListening()
    cleanup()

    # Elapsed wall-clock seconds for the whole send/receive cycle.
    return end