Example 1
    def setUp(self) -> None:
        # Patch exit and kill so we can capture usage and prevent actual exits
        # and kills.

        self.exit = DummyExit()
        self.kill = DummyKill()

        self.patch(_runner, "exit", self.exit)
        self.patch(_runner, "kill", self.kill)

        # Patch getpid so we get a known result

        self.pid = 1337
        self.pidFileContent = f"{self.pid}\n".encode("utf-8")

        # Patch globalLogBeginner so that we aren't trying to install multiple
        # global log observers.

        self.stdout = StringIO()
        self.stderr = StringIO()
        self.stdio = DummyStandardIO(self.stdout, self.stderr)
        self.warnings = DummyWarningsModule()

        self.globalLogPublisher = LogPublisher()
        self.globalLogBeginner = LogBeginner(
            self.globalLogPublisher,
            self.stdio.stderr,
            self.stdio,
            self.warnings,
        )

        self.patch(_runner, "stderr", self.stderr)
        self.patch(_runner, "globalLogBeginner", self.globalLogBeginner)
Example 2
def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner):
    """
    Set up Python stdlib logging.
    """
    if log_config is None:
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s")

        logger = logging.getLogger("")
        logger.setLevel(logging.INFO)
        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)

        formatter = logging.Formatter(log_format)

        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:
        logging.config.dictConfig(log_config)

    # Route Twisted's native logging through to the standard library logging
    # system.
    observer = STDLibLogObserver()

    def _log(event):
        if "log_text" in event:
            if event["log_text"].startswith(
                    "DNSDatagramProtocol starting on "):
                return

            if event["log_text"].startswith("(UDP Port "):
                return

            if event["log_text"].startswith("Timing out client"):
                return

        return observer(event)

    logBeginner.beginLoggingTo([_log],
                               redirectStandardIO=not config.no_redirect_stdio)
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")

    return observer
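The _log wrapper above works because a Twisted log observer is just a callable taking an event dict, so filtering noisy events is an early return. A self-contained sketch of the same pattern (the prefixes are taken from the example; everything else is illustrative):

from twisted.logger import LogPublisher

NOISY_PREFIXES = ("DNSDatagramProtocol starting on ", "(UDP Port ")

def quiet(wrapped):
    def observer(event):
        if event.get("log_text", "").startswith(NOISY_PREFIXES):
            return  # drop the event entirely
        return wrapped(event)
    return observer

received = []
publisher = LogPublisher()
publisher.addObserver(quiet(received.append))
publisher({"log_text": "(UDP Port 53 Closed)"})  # dropped
publisher({"log_text": "something useful"})      # kept
assert received == [{"log_text": "something useful"}]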
Example 3
    def test_startLoggingOverridesWarning(self):
        """
        startLogging() overrides global C{warnings.showwarning} such that
        warnings go to Twisted log observers.
        """
        self._startLoggingCleanup()
        newPublisher = NewLogPublisher()

        class SysModule:
            stdout = object()
            stderr = object()

        tempLogPublisher = LogPublisher(
            newPublisher,
            newPublisher,
            logBeginner=LogBeginner(newPublisher, StringIO(), SysModule,
                                    warnings),
        )
        # Trial reports warnings in two ways.  First, it intercepts the global
        # 'showwarning' function *itself*, after starting logging (by way of
        # the '_collectWarnings' function which collects all warnings as a
        # wrapper around the test's 'run' method).  Second, it has a log observer which
        # immediately reports warnings when they're propagated into the log
        # system (which, in normal operation, happens only at the end of the
        # test case).  In order to avoid printing a spurious warning in this
        # test, we first replace the global log publisher's 'showwarning' in
        # the module with our own.
        self.patch(log, "theLogPublisher", tempLogPublisher)
        # And, one last thing, pretend we're starting from a fresh import, or
        # warnings.warn won't be patched at all.
        log._oldshowwarning = None
        # Global mutable state is bad, kids.  Stay in school.
        fakeFile = StringIO()
        # We didn't previously save log messages, so let's make sure we don't
        # save them any more.
        evt = {"pre-start": "event"}
        received = []

        def preStartObserver(x):
            if "pre-start" in x.keys():
                received.append(x)

        newPublisher(evt)
        newPublisher.addObserver(preStartObserver)
        log.startLogging(fakeFile, setStdout=False)
        self.addCleanup(tempLogPublisher._stopLogging)
        self.assertEqual(received, [])
        warnings.warn("hello!")
        output = fakeFile.getvalue()
        self.assertIn("UserWarning: hello!", output)
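The behaviour under test, reduced to a sketch (a hypothetical standalone run, outside trial and without the patching above): after startLogging, warnings.warn output lands in the log file.

import warnings
from io import StringIO
from twisted.python import log

out = StringIO()
log.startLogging(out, setStdout=False)  # don't hijack stdout/stderr
warnings.warn("hello!")
assert "UserWarning: hello!" in out.getvalue()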
Example 4
    def setUp(self):

        self._subprocess_timeout = 15

        if platform.python_implementation() == 'PyPy':
            self._subprocess_timeout = 30

        self.stderr = NativeStringIO()
        self.stdout = NativeStringIO()

        self.publisher = LogPublisher()
        self.beginner = LogBeginner(LogPublisher(), self.stderr, sys, warnings)

        self.patch(_logging, "_stderr", self.stderr)
        self.patch(_logging, "_stdout", self.stdout)
        self.patch(_logging, "log_publisher", self.publisher)
        self.patch(_logging, "globalLogBeginner", self.beginner)
        self.patch(_logging, "_loggers", WeakKeyDictionary())
        self.patch(_logging, "_loglevel", "info")
Example 5
def _setup_stdlib_logging(config, log_config_path,
                          logBeginner: LogBeginner) -> None:
    """
    Set up Python standard library logging.
    """
    if log_config_path is None:
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s")

        logger = logging.getLogger("")
        logger.setLevel(logging.INFO)
        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)

        formatter = logging.Formatter(log_format)

        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    else:
        # Load the logging configuration.
        _load_logging_config(log_config_path)

    # We add a log record factory that runs all messages through the
    # LoggingContextFilter so that we get the context *at the time we log*
    # rather than when we write to a handler. This can be done in config using
    # filter options, but care must be taken when using e.g. MemoryHandler
    # to buffer writes.

    log_context_filter = LoggingContextFilter(request="")
    log_metadata_filter = MetadataFilter({"server_name": config.server_name})
    old_factory = logging.getLogRecordFactory()

    def factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        log_context_filter.filter(record)
        log_metadata_filter.filter(record)
        return record

    logging.setLogRecordFactory(factory)

    # Route Twisted's native logging through to the standard library logging
    # system.
    observer = STDLibLogObserver()

    threadlocal = threading.local()

    def _log(event):
        if "log_text" in event:
            if event["log_text"].startswith(
                    "DNSDatagramProtocol starting on "):
                return

            if event["log_text"].startswith("(UDP Port "):
                return

            if event["log_text"].startswith("Timing out client"):
                return

        # This is a workaround to avoid stack overflows from recursive
        # logging: if the logging system raises an error, that error is
        # written to stderr, which is redirected back into the logging
        # system, and so on.
        if getattr(threadlocal, "active", False):
            # write the text of the event, if any, to the *real* stderr (which may
            # be redirected to /dev/null, but there's not much we can do)
            try:
                event_text = eventAsText(event)
                print("logging during logging: %s" % event_text,
                      file=sys.__stderr__)
            except Exception:
                # gah.
                pass
            return

        try:
            threadlocal.active = True
            return observer(event)
        finally:
            threadlocal.active = False

    logBeginner.beginLoggingTo([_log],
                               redirectStandardIO=not config.no_redirect_stdio)
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")
Example 6
async def main(reactor, loops):
    """
    Benchmark how long it takes to send `loops` messages.
    """
    servers = []

    def protocol():
        p = LineCounter()
        servers.append(p)
        return p

    logger_factory = ServerFactory.forProtocol(protocol)
    logger_factory.wait_for = loops
    logger_factory.on_done = Deferred()
    port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1")

    # A fake homeserver config.
    class Config:
        server_name = "synmark-" + str(loops)
        no_redirect_stdio = True

    hs_config = Config()

    # To be able to sleep.
    clock = Clock(reactor)

    errors = StringIO()
    publisher = LogPublisher()
    mock_sys = Mock()
    beginner = LogBeginner(publisher,
                           errors,
                           mock_sys,
                           warnings,
                           initialBufferSize=loops)

    log_config = {
        "version": 1,
        "loggers": {
            "synapse": {
                "level": "DEBUG",
                "handlers": ["tersejson"]
            }
        },
        "formatters": {
            "tersejson": {
                "class": "synapse.logging.TerseJsonFormatter"
            }
        },
        "handlers": {
            "tersejson": {
                "class": "synapse.logging.RemoteHandler",
                "host": "127.0.0.1",
                "port": port.getHost().port,
                "maximum_buffer": 100,
                "_reactor": reactor,
            }
        },
    }

    logger = logging.getLogger("synapse.logging.test_terse_json")
    _setup_stdlib_logging(
        hs_config,
        log_config,
        logBeginner=beginner,
    )

    # Wait for it to connect...
    for handler in logging.getLogger("synapse").handlers:
        if isinstance(handler, RemoteHandler):
            break
    else:
        raise RuntimeError("Improperly configured: no RemoteHandler found.")

    await handler._service.whenConnected()

    start = perf_counter()

    # Send a bunch of useful messages
    for i in range(0, loops):
        logger.info("test message %s", i)

        if len(handler._buffer) == handler.maximum_buffer:
            while len(handler._buffer) > handler.maximum_buffer / 2:
                await clock.sleep(0.01)

    await logger_factory.on_done

    end = perf_counter() - start

    handler.close()
    port.stopListening()

    return end
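LineCounter is not shown in this example. A plausible minimal version (an assumption, not the benchmark's actual code): count received lines and fire the factory's on_done Deferred once wait_for lines have arrived.

from twisted.protocols.basic import LineReceiver

class LineCounter(LineReceiver):
    delimiter = b"\n"

    def connectionMade(self):
        self.count = 0

    def lineReceived(self, line):
        self.count += 1
        # wait_for and on_done are attached to the factory in main() above
        if self.count >= self.factory.wait_for and not self.factory.on_done.called:
            self.factory.on_done.callback(None)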
Example 7
def _setup_stdlib_logging(config, log_config, logBeginner: LogBeginner):
    """
    Set up Python stdlib logging.
    """
    if log_config is None:
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s")

        logger = logging.getLogger("")
        logger.setLevel(logging.INFO)
        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)

        formatter = logging.Formatter(log_format)

        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    else:
        logging.config.dictConfig(log_config)

    # We add a log record factory that runs all messages through the
    # LoggingContextFilter so that we get the context *at the time we log*
    # rather than when we write to a handler. This can be done in config using
    # filter options, but care must be taken when using e.g. MemoryHandler
    # to buffer writes.

    log_filter = LoggingContextFilter(request="")
    old_factory = logging.getLogRecordFactory()

    def factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        log_filter.filter(record)
        return record

    logging.setLogRecordFactory(factory)

    # Route Twisted's native logging through to the standard library logging
    # system.
    observer = STDLibLogObserver()

    def _log(event):
        if "log_text" in event:
            if event["log_text"].startswith(
                    "DNSDatagramProtocol starting on "):
                return

            if event["log_text"].startswith("(UDP Port "):
                return

            if event["log_text"].startswith("Timing out client"):
                return

        return observer(event)

    logBeginner.beginLoggingTo([_log],
                               redirectStandardIO=not config.no_redirect_stdio)
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")

    return observer
Example 8
    def test_startLogging(self) -> None:
        """
        L{Runner.startLogging} sets up a filtering observer with a log level
        predicate set to the given log level that contains a file observer of
        the given type which writes to the given file.
        """
        logFile = StringIO()

        # Patch the log beginner so that we don't try to start the already
        # running (started by trial) logging system.

        class LogBeginner:
            observers: List[ILogObserver] = []

            def beginLoggingTo(self,
                               observers: Iterable[ILogObserver]) -> None:
                LogBeginner.observers = list(observers)

        self.patch(_runner, "globalLogBeginner", LogBeginner())

        # Patch FilteringLogObserver so we can capture its arguments

        class MockFilteringLogObserver(FilteringLogObserver):
            observer: Optional[ILogObserver] = None
            predicates: List[LogLevelFilterPredicate] = []

            def __init__(
                self,
                observer: ILogObserver,
                predicates: Iterable[LogLevelFilterPredicate],
                negativeObserver: ILogObserver = cast(ILogObserver,
                                                      lambda event: None),
            ):
                MockFilteringLogObserver.observer = observer
                MockFilteringLogObserver.predicates = list(predicates)
                FilteringLogObserver.__init__(self, observer, predicates,
                                              negativeObserver)

        self.patch(_runner, "FilteringLogObserver", MockFilteringLogObserver)

        # Patch FileLogObserver so we can capture its arguments

        class MockFileLogObserver(FileLogObserver):
            outFile: Optional[TextIO] = None

            def __init__(self, outFile: TextIO) -> None:
                MockFileLogObserver.outFile = outFile
                FileLogObserver.__init__(self, outFile, str)

        # Start logging
        runner = Runner(
            reactor=MemoryReactor(),
            defaultLogLevel=LogLevel.critical,
            logFile=logFile,
            fileLogObserverFactory=MockFileLogObserver,
        )
        runner.startLogging()

        # Check for a filtering observer
        self.assertEqual(len(LogBeginner.observers), 1)
        self.assertIsInstance(LogBeginner.observers[0], FilteringLogObserver)

        # Check log level predicate with the correct default log level
        self.assertEqual(len(MockFilteringLogObserver.predicates), 1)
        self.assertIsInstance(MockFilteringLogObserver.predicates[0],
                              LogLevelFilterPredicate)
        self.assertIdentical(
            MockFilteringLogObserver.predicates[0].defaultLogLevel,
            LogLevel.critical)

        # Check for a file observer attached to the filtering observer
        observer = cast(MockFileLogObserver, MockFilteringLogObserver.observer)
        self.assertIsInstance(observer, MockFileLogObserver)

        # Check for the file we gave it
        self.assertIdentical(observer.outFile, logFile)
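For reference, the observer chain this test asserts Runner.startLogging builds can be wired directly with the public twisted.logger API; a sketch (textFileLogObserver stands in for the file observer factory):

import sys

from twisted.logger import (
    FilteringLogObserver,
    Logger,
    LogLevel,
    LogLevelFilterPredicate,
    textFileLogObserver,
)

predicate = LogLevelFilterPredicate(defaultLogLevel=LogLevel.critical)
observer = FilteringLogObserver(textFileLogObserver(sys.stdout), [predicate])

log = Logger(observer=observer)
log.info("dropped: below the critical threshold")
log.critical("written to stdout")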
Example 9
def setup_structured_logging(
    hs,
    config,
    log_config: dict,
    logBeginner: LogBeginner,
    redirect_stdlib_logging: bool = True,
) -> LogPublisher:
    """
    Set up Twisted's structured logging system.

    Args:
        hs: The homeserver to use.
        config (HomeserverConfig): The configuration of the Synapse homeserver.
        log_config (dict): The log configuration to use.
        logBeginner (LogBeginner): The Twisted log beginner to register the
            log observers with.
        redirect_stdlib_logging (bool): Whether to redirect stdlib logging
            into the Twisted logging system.
    """
    if config.no_redirect_stdio:
        raise ConfigError(
            "no_redirect_stdio cannot be defined using structured logging.")

    logger = Logger()

    if "drains" not in log_config:
        raise ConfigError(
            "The logging configuration requires a list of drains.")

    observers: List[ILogObserver] = []

    for observer in parse_drain_configs(log_config["drains"]):
        # Pipe drains
        if observer.type == DrainType.CONSOLE:
            logger.debug("Starting up the {name} console logger drain",
                         name=observer.name)
            observers.append(SynapseFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON:
            logger.debug("Starting up the {name} JSON console logger drain",
                         name=observer.name)
            observers.append(jsonFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON_TERSE:
            logger.debug(
                "Starting up the {name} terse JSON console logger drain",
                name=observer.name,
            )
            observers.append(
                TerseJSONToConsoleLogObserver(observer.location, metadata={}))

        # File drains
        elif observer.type == DrainType.FILE:
            logger.debug("Starting up the {name} file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(SynapseFileLogObserver(log_file))
        elif observer.type == DrainType.FILE_JSON:
            logger.debug("Starting up the {name} JSON file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(jsonFileLogObserver(log_file))

        elif observer.type == DrainType.NETWORK_JSON_TERSE:
            metadata = {"server_name": hs.config.server_name}
            log_observer = TerseJSONToTCPLogObserver(
                hs=hs,
                host=observer.location[0],
                port=observer.location[1],
                metadata=metadata,
                maximum_buffer=observer.options.maximum_buffer,
            )
            log_observer.start()
            observers.append(log_observer)
        else:
            # We should never get here, but, just in case, raise an error.
            raise ConfigError("%s drain type cannot be configured" %
                              (observer.type, ))

    publisher = StoppableLogPublisher(*observers)
    log_filter = LogLevelFilterPredicate()

    for namespace, namespace_config in log_config.get("loggers",
                                                      DEFAULT_LOGGERS).items():
        # Set the log level for twisted.logger.Logger namespaces
        log_filter.setLogLevelForNamespace(
            namespace,
            stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")),
        )

        # Also set the log levels for the stdlib logger namespaces, to prevent
        # them from reaching PythonStdlibToTwistedLogger and having to be
        # formatted there.
        if "level" in namespace_config:
            logging.getLogger(namespace).setLevel(
                namespace_config.get("level"))

    f = FilteringLogObserver(publisher, [log_filter])
    lco = LogContextObserver(f)

    if redirect_stdlib_logging:
        stuff_into_twisted = PythonStdlibToTwistedLogger(lco)
        stdliblogger = logging.getLogger()
        stdliblogger.addHandler(stuff_into_twisted)

    # Always redirect standard I/O, otherwise other logging outputs might miss
    # it.
    logBeginner.beginLoggingTo([lco], redirectStandardIO=True)

    return publisher
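The drain schema itself lives in parse_drain_configs, which is not shown here; based on the DrainType branches above and the network example later, a minimal log_config might look like this (treat the exact keys as an assumption):

log_config = {
    "drains": {
        "console": {"type": "console", "location": "stdout"},
        "file": {"type": "file_json", "location": "/var/log/synapse.json"},
    },
    "loggers": {
        "synapse": {"level": "INFO"},
    },
}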
Example 10
    def test_startLogging(self):
        """
        L{Runner.startLogging} sets up a filtering observer with a log level
        predicate set to the given log level that contains a file observer of
        the given type which writes to the given file.
        """
        logFile = BytesIO()

        # Patch the log beginner so that we don't try to start the already
        # running (started by trial) logging system.

        class LogBeginner(object):
            def beginLoggingTo(self, observers):
                LogBeginner.observers = observers

        self.patch(_runner, "globalLogBeginner", LogBeginner())

        # Patch FilteringLogObserver so we can capture its arguments

        class MockFilteringLogObserver(FilteringLogObserver):
            def __init__(
                self, observer, predicates,
                negativeObserver=lambda event: None
            ):
                MockFilteringLogObserver.observer = observer
                MockFilteringLogObserver.predicates = predicates
                FilteringLogObserver.__init__(
                    self, observer, predicates, negativeObserver
                )

        self.patch(_runner, "FilteringLogObserver", MockFilteringLogObserver)

        # Patch FileLogObserver so we can capture its arguments

        class MockFileLogObserver(FileLogObserver):
            def __init__(self, outFile):
                MockFileLogObserver.outFile = outFile
                FileLogObserver.__init__(self, outFile, str)

        # Start logging
        runner = Runner(
            defaultLogLevel=LogLevel.critical,
            logFile=logFile,
            fileLogObserverFactory=MockFileLogObserver,
        )
        runner.startLogging()

        # Check for a filtering observer
        self.assertEqual(len(LogBeginner.observers), 1)
        self.assertIsInstance(LogBeginner.observers[0], FilteringLogObserver)

        # Check log level predicate with the correct default log level
        self.assertEqual(len(MockFilteringLogObserver.predicates), 1)
        self.assertIsInstance(
            MockFilteringLogObserver.predicates[0],
            LogLevelFilterPredicate
        )
        self.assertIdentical(
            MockFilteringLogObserver.predicates[0].defaultLogLevel,
            LogLevel.critical
        )

        # Check for a file observer attached to the filtering observer
        self.assertIsInstance(
            MockFilteringLogObserver.observer, MockFileLogObserver
        )

        # Check for the file we gave it
        self.assertIdentical(
            MockFilteringLogObserver.observer.outFile, logFile
        )
Example 11
async def main(reactor, loops):
    """
    Benchmark how long it takes to send `loops` messages.
    """
    servers = []

    def protocol():
        p = LineCounter()
        servers.append(p)
        return p

    logger_factory = ServerFactory.forProtocol(protocol)
    logger_factory.wait_for = loops
    logger_factory.on_done = Deferred()
    port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1")

    hs, wait, cleanup = await make_homeserver(reactor)

    errors = StringIO()
    publisher = LogPublisher()
    mock_sys = Mock()
    beginner = LogBeginner(publisher,
                           errors,
                           mock_sys,
                           warnings,
                           initialBufferSize=loops)

    log_config = {
        "loggers": {
            "synapse": {
                "level": "DEBUG"
            }
        },
        "drains": {
            "tersejson": {
                "type": "network_json_terse",
                "host": "127.0.0.1",
                "port": port.getHost().port,
                "maximum_buffer": 100,
            }
        },
    }

    logger = Logger(namespace="synapse.logging.test_terse_json",
                    observer=publisher)
    logging_system = setup_structured_logging(hs,
                                              hs.config,
                                              log_config,
                                              logBeginner=beginner,
                                              redirect_stdlib_logging=False)

    # Wait for it to connect...
    await logging_system._observers[0]._service.whenConnected()

    start = perf_counter()

    # Send a bunch of useful messages
    for i in range(0, loops):
        logger.info("test message %s" % (i, ))

        if (len(logging_system._observers[0]._buffer) ==
                logging_system._observers[0].maximum_buffer):
            while (len(logging_system._observers[0]._buffer) >
                   logging_system._observers[0].maximum_buffer / 2):
                await wait(0.01)

    await logger_factory.on_done

    end = perf_counter() - start

    logging_system.stop()
    port.stopListening()
    cleanup()

    return end
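A coroutine like main() that takes the reactor as its first argument is typically driven with twisted.internet.task.react; a sketch (the real benchmark harness may differ):

from twisted.internet.defer import ensureDeferred
from twisted.internet.task import react

def run(reactor):
    return ensureDeferred(main(reactor, loops=10000))

react(run)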