Beispiel #1
0
 def __init__(
     self,
     observerPublisher=None,
     publishPublisher=None,
     logBeginner=None,
     warningsModule=warnings,
 ):
     """
     Initialize the publisher pair, the legacy-observer list, and the
     log beginner, creating testing-friendly defaults for any that are
     not supplied.
     """
     # When no publish publisher is given, create one; if the observer
     # publisher is also missing, both roles share that single object.
     sharedDefault = None
     if publishPublisher is None:
         sharedDefault = NewPublisher()
         publishPublisher = sharedDefault
     if observerPublisher is None:
         observerPublisher = (
             sharedDefault if sharedDefault is not None else NewPublisher()
         )
     self._observerPublisher = observerPublisher
     self._publishPublisher = publishPublisher
     self._legacyObservers = []
     if logBeginner is None:
         # This default is really only useful for testing: wire a fresh
         # publisher to the observer publisher and begin with a null file.
         defaultPublisher = NewPublisher()
         defaultPublisher.addObserver(observerPublisher)
         logBeginner = LogBeginner(
             defaultPublisher, cast(BinaryIO, NullFile()), sys, warnings
         )
     self._logBeginner = logBeginner
     self._warningsModule = warningsModule
     # Capture the module's current showwarning and expose the beginner's
     # replacement as our own.
     self._oldshowwarning = warningsModule.showwarning
     self.showwarning = self._logBeginner.showwarning
Beispiel #2
0
    def test_startLoggingOverridesWarning(self):
        """
        startLogging() overrides global C{warnings.showwarning} such that
        warnings go to Twisted log observers.
        """
        self._startLoggingCleanup()
        freshPublisher = NewLogPublisher()

        # Stand-in sys module: startLogging only needs stdout/stderr
        # attributes to exist.
        class FakeSysModule:
            stdout = object()
            stderr = object()

        replacementPublisher = LogPublisher(
            freshPublisher,
            freshPublisher,
            logBeginner=LogBeginner(
                freshPublisher, StringIO(), FakeSysModule, warnings
            ),
        )
        # Trial reports warnings twice: it wraps the global 'showwarning'
        # itself (via '_collectWarnings' around the test's 'run' method), and
        # it installs a log observer that reports warnings the moment they
        # reach the log system (normally only at the end of the test case).
        # To avoid printing a spurious warning here, swap our own publisher
        # in for the module-global one before logging starts.
        self.patch(log, "theLogPublisher", replacementPublisher)
        # Pretend we are starting from a fresh import, or warnings.warn won't
        # be patched at all.
        log._oldshowwarning = None
        # Global mutable state is bad, kids.  Stay in school.
        logFile = StringIO()
        # Events published before startLogging must not be replayed to
        # observers added afterward.
        preStartEvent = {"pre-start": "event"}
        collected = []

        def recordPreStart(eventDict):
            if "pre-start" in eventDict.keys():
                collected.append(eventDict)

        freshPublisher(preStartEvent)
        freshPublisher.addObserver(recordPreStart)
        log.startLogging(logFile, setStdout=False)
        self.addCleanup(replacementPublisher._stopLogging)
        self.assertEqual(collected, [])
        warnings.warn("hello!")
        output = logFile.getvalue()
        self.assertIn("UserWarning: hello!", output)
Beispiel #3
0
    def test_startLoggingOverridesWarning(self):
        """
        startLogging() overrides global C{warnings.showwarning} such that
        warnings go to Twisted log observers.
        """
        self._startLoggingCleanup()
        newPublisher = NewLogPublisher()

        # Stand-in sys module: startLogging only needs stdout/stderr
        # attributes to exist.
        class SysModule(object):
            stdout = object()
            stderr = object()

        tempLogPublisher = LogPublisher(
            newPublisher, newPublisher,
            logBeginner=LogBeginner(newPublisher, StringIO(), SysModule,
                                    warnings)
        )
        # Trial reports warnings in two ways.  First, it intercepts the global
        # 'showwarning' function *itself*, after starting logging (by way of
        # the '_collectWarnings' function which collects all warnings issued
        # around the test's 'run' method).  Second, it has a log observer which
        # immediately reports warnings when they're propagated into the log
        # system (which, in normal operation, happens only at the end of the
        # test case).  In order to avoid printing a spurious warning in this
        # test, we first replace the global log publisher's 'showwarning' in
        # the module with our own.
        self.patch(log, "theLogPublisher", tempLogPublisher)
        # And, one last thing, pretend we're starting from a fresh import, or
        # warnings.warn won't be patched at all.
        log._oldshowwarning = None
        # Global mutable state is bad, kids.  Stay in school.
        fakeFile = StringIO()
        # We didn't previously save log messages, so let's make sure we don't
        # save them any more.
        evt = {"pre-start": "event"}
        received = []

        def preStartObserver(x):
            # Record any event carrying the pre-start marker key.
            if 'pre-start' in x.keys():
                received.append(x)

        # Publish one event before logging begins; it must not be replayed to
        # observers added afterward, hence the empty-list assertion below.
        newPublisher(evt)
        newPublisher.addObserver(preStartObserver)
        log.startLogging(fakeFile, setStdout=False)
        self.addCleanup(tempLogPublisher._stopLogging)
        self.assertEqual(received, [])
        warnings.warn("hello!")
        output = fakeFile.getvalue()
        self.assertIn("UserWarning: hello!", output)
Beispiel #4
0
    def setUp(self) -> None:
        """
        Patch process-level primitives with dummies so tests can observe
        their use without real exits, kills, or global log observers.
        """
        # Capture exit/kill calls instead of letting them act on the process.
        self.exit = DummyExit()
        self.kill = DummyKill()
        self.patch(_runner, "exit", self.exit)
        self.patch(_runner, "kill", self.kill)

        # Use a known PID so assertions can rely on a fixed value.
        self.pid = 1337
        self.pidFileContent = f"{self.pid}\n".encode("utf-8")

        # Build an isolated logging stack so we aren't trying to install
        # multiple observers on the real global log beginner.
        self.stdout = StringIO()
        self.stderr = StringIO()
        self.stdio = DummyStandardIO(self.stdout, self.stderr)
        self.warnings = DummyWarningsModule()
        self.globalLogPublisher = LogPublisher()
        self.globalLogBeginner = LogBeginner(
            self.globalLogPublisher,
            self.stdio.stderr,
            self.stdio,
            self.warnings,
        )
        self.patch(_runner, "stderr", self.stderr)
        self.patch(_runner, "globalLogBeginner", self.globalLogBeginner)
Beispiel #5
0
    def setUp(self):
        """
        Replace the module-level logging state with per-test instances so
        each test starts from a clean slate.
        """
        # PyPy subprocesses need a longer grace period.
        if platform.python_implementation() == 'PyPy':
            self._subprocess_timeout = 30
        else:
            self._subprocess_timeout = 15

        self.stderr = NativeStringIO()
        self.stdout = NativeStringIO()
        self.publisher = LogPublisher()
        self.beginner = LogBeginner(LogPublisher(), self.stderr, sys, warnings)

        # Swap each piece of module-global logging state for our own.
        for attribute, replacement in [
            ("_stderr", self.stderr),
            ("_stdout", self.stdout),
            ("log_publisher", self.publisher),
            ("globalLogBeginner", self.beginner),
            ("_loggers", WeakKeyDictionary()),
            ("_loglevel", "info"),
        ]:
            self.patch(_logging, attribute, replacement)
Beispiel #6
0
 def _get_stdlib_logger(self, name):
     """
     Build a stdlib logger whose records are forwarded to a Twisted
     publisher writing systemd-formatted text into an in-memory stream.

     Returns a (logger, stream) pair so tests can inspect the output.
     """
     stream = StringIO()
     handler = TwistedLoggerLogHandler()
     handler.publisher = LogPublisher(
         FileLogObserver(stream, formatForSystemd)
     )
     stdlib_logger = logging.getLogger(name)
     stdlib_logger.setLevel(logging.DEBUG)
     # Keep records out of any parent/root handlers.
     stdlib_logger.propagate = False
     stdlib_logger.addHandler(handler)
     return stdlib_logger, stream
 def __init__(self, observerPublisher=None, publishPublisher=None,
              logBeginner=None, warningsModule=warnings):
     """
     Initialize the publisher pair, the legacy-observer list, and the log
     beginner, creating defaults for any that are not supplied.

     @param observerPublisher: publisher that legacy observers attach to;
         when both publishers are omitted, it shares one object with
         C{publishPublisher}.
     @param publishPublisher: publisher that events are sent to; defaults
         to a fresh publisher.
     @param logBeginner: the log beginner to delegate C{showwarning} to; a
         testing-only default is built when omitted.
     @param warningsModule: module whose C{showwarning} is captured here
         (presumably for later restoration -- the restore site is not in
         view); defaults to the real C{warnings} module.
     """
     if publishPublisher is None:
         publishPublisher = NewPublisher()
         # With neither publisher supplied, both roles share one object.
         if observerPublisher is None:
             observerPublisher = publishPublisher
     if observerPublisher is None:
         observerPublisher = NewPublisher()
     self._observerPublisher = observerPublisher
     self._publishPublisher = publishPublisher
     self._legacyObservers = []
     if logBeginner is None:
         # This default behavior is really only used for testing.
         beginnerPublisher = NewPublisher()
         beginnerPublisher.addObserver(observerPublisher)
         logBeginner = LogBeginner(beginnerPublisher, NullFile(), sys,
                                   warnings)
     self._logBeginner = logBeginner
     self._warningsModule = warningsModule
     # Capture the module's current showwarning and expose the beginner's
     # replacement as our own.
     self._oldshowwarning = warningsModule.showwarning
     self.showwarning = self._logBeginner.showwarning
    def test_utf8(self):
        """
        L{Logger} handles utf8 log strings and format args.
        """
        observer = LogPublisher()

        # Capture raw events in a list...
        observed = []
        observer.addObserver(observed.append)

        # ...and also render them to text so the formatted line can be
        # checked.
        sio = StringIO()
        observer.addObserver(textFileLogObserver(sio))

        logger = Logger(observer=observer)

        # NOTE(review): "t\xc3\xa9st" spells out the UTF-8 byte values of
        # u"t\xe9st".  The assertions expect the captured event to hold the
        # decoded text while the rendered line keeps the original characters
        # -- presumably exercising a bytes-accepting (py2-era) path; confirm
        # against the Logger implementation.
        index = 0
        logger.info("t\xc3\xa9st")
        self.assertEqual(observed[index]["log_level"], LogLevel.info)
        self.assertEqual(observed[index]["log_format"], u"{msg}")
        self.assertEqual(observed[index]["msg"], u"t\xe9st")
        self.assertEqual(sio.getvalue().splitlines()[index].split("#info] ")[1], "t\xc3\xa9st")

        # Same expectation when the utf8 text arrives as a format argument.
        index += 1
        logger.info("{str}", str="t\xc3\xa9st")
        self.assertEqual(observed[index]["log_level"], LogLevel.info)
        self.assertEqual(observed[index]["log_format"], u"{str}")
        self.assertEqual(observed[index]["str"], u"t\xe9st")
        self.assertEqual(sio.getvalue().splitlines()[index].split("#info] ")[1], "t\xc3\xa9st")

        # And when both the format string and the argument contain utf8.
        index += 1
        logger.info("T\xc3\xa9st {str}", str="t\xc3\xa9st")
        self.assertEqual(observed[index]["log_level"], LogLevel.info)
        self.assertEqual(observed[index]["log_format"], u"T\xe9st {str}")
        self.assertEqual(observed[index]["str"], u"t\xe9st")
        self.assertEqual(sio.getvalue().splitlines()[index].split("#info] ")[1], "T\xc3\xa9st t\xc3\xa9st")
    def test_old_style(self):
        """
        L{Logger} handles old style log strings.
        """
        publisher = LogPublisher()

        # Capture raw events in a list...
        events = []
        publisher.addObserver(events.append)

        # ...and also render them to text for checking the formatted line.
        sio = StringIO()
        publisher.addObserver(textFileLogObserver(sio))

        logger = Logger(observer=publisher)

        # Old-style strings are carried verbatim as the "msg" key under the
        # generic "{msg}" format -- even when they contain brace sequences
        # that look like new-style placeholders.
        for index, message in enumerate(["test", "test {}", "test {foo}"]):
            logger.info(message)
            event = events[index]
            self.assertEqual(event["log_level"], LogLevel.info)
            self.assertEqual(event["log_format"], u"{msg}")
            self.assertEqual(event["msg"], message)
            rendered = sio.getvalue().splitlines()[index].split("#info] ")[1]
            self.assertEqual(rendered, message)
Beispiel #10
0
    def makeFilteredFileLogObserver(cls, stream, withTime=True):
        """
        For a child process that has its stdout captured by the master process to be logged by the master,
        we strip out the time from the log entry since the master process will always add one. Setting
        C{withTime} to L{False} will ensure no time is generated.
        """
        assert (cls.filterPublisher is None
                and cls.filterObserver is None), "Only call this once"

        # Without timestamps, substitute a formatter that renders nothing.
        if withTime:
            timeFormat = formatTime
        else:
            timeFormat = lambda _: u""

        def formatEvent(event):
            # Classic log text, with the chosen time formatter applied.
            return formatEventAsClassicLogText(event, formatTime=timeFormat)

        cls.filterObserver = FileLogObserver(stream, formatEvent)
        cls.filterPublisher = LogPublisher(cls.filterObserver)
        return cls.filterPublisher
Beispiel #11
0
    def test_log_legacy(self):
        """
        Legacy C{msg} calls are rendered in systemd format, with each
        continuation line of a multi-line message indented beneath its
        priority prefix.
        """
        output = StringIO()
        publisher = LegacyLogPublisher(
            publishPublisher=LogPublisher(
                FileLogObserver(output, formatForSystemd)
            )
        )

        publisher.msg("msg")
        publisher.msg("msg", system="system")
        publisher.msg("m\ns\ng", logLevel=logging.DEBUG)

        expected = (
            "<6>[-] msg\n"
            "<6>[system] msg\n"
            "<7>[-] m\n"
            "<7>  s\n"
            "<7>  g\n"
        )
        self.assertEqual(expected, output.getvalue())
Beispiel #12
0
async def main(reactor, loops):
    """
    Benchmark how long it takes to send `loops` messages.

    Starts a local TCP line-counting server, points a stdlib "synapse"
    logger at it through a RemoteHandler, sends `loops` messages with
    backpressure, and returns the elapsed wall-clock time in seconds.
    """
    servers = []

    def protocol():
        # Record each protocol instance so the factory can track them.
        p = LineCounter()
        servers.append(p)
        return p

    logger_factory = ServerFactory.forProtocol(protocol)
    logger_factory.wait_for = loops
    logger_factory.on_done = Deferred()
    port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1")

    # A fake homeserver config.
    class Config:
        server_name = "synmark-" + str(loops)
        no_redirect_stdio = True

    hs_config = Config()

    # To be able to sleep.
    clock = Clock(reactor)

    # Isolated logging machinery so the benchmark never touches the real
    # global log state; the buffer is pre-sized to hold every message.
    errors = StringIO()
    publisher = LogPublisher()
    mock_sys = Mock()
    beginner = LogBeginner(publisher,
                           errors,
                           mock_sys,
                           warnings,
                           initialBufferSize=loops)

    # Route the "synapse" logger through a terse-JSON RemoteHandler aimed at
    # the local listener started above.
    log_config = {
        "version": 1,
        "loggers": {
            "synapse": {
                "level": "DEBUG",
                "handlers": ["tersejson"]
            }
        },
        "formatters": {
            "tersejson": {
                "class": "synapse.logging.TerseJsonFormatter"
            }
        },
        "handlers": {
            "tersejson": {
                "class": "synapse.logging.RemoteHandler",
                "host": "127.0.0.1",
                "port": port.getHost().port,
                "maximum_buffer": 100,
                "_reactor": reactor,
            }
        },
    }

    logger = logging.getLogger("synapse.logging.test_terse_json")
    _setup_stdlib_logging(
        hs_config,
        log_config,
        logBeginner=beginner,
    )

    # Wait for it to connect...
    for handler in logging.getLogger("synapse").handlers:
        if isinstance(handler, RemoteHandler):
            break
    else:
        raise RuntimeError("Improperly configured: no RemoteHandler found.")

    await handler._service.whenConnected()

    start = perf_counter()

    # Send a bunch of useful messages
    for i in range(0, loops):
        logger.info("test message %s", i)

        # Backpressure: when the handler's buffer fills, pause until it has
        # drained to half capacity before sending more.
        if len(handler._buffer) == handler.maximum_buffer:
            while len(handler._buffer) > handler.maximum_buffer / 2:
                await clock.sleep(0.01)

    # The factory fires on_done once the server has counted every line.
    await logger_factory.on_done

    end = perf_counter() - start

    handler.close()
    port.stopListening()

    return end
Beispiel #13
0
from json import JSONEncoder

from functools import partial

from zope.interface import provider

from twisted.logger import ILogObserver, formatEvent, Logger, LogPublisher
from twisted.logger import LogLevel, globalLogBeginner, formatTime

from twisted.python.constants import NamedConstant
from twisted.python.reflect import qual

from weakref import WeakKeyDictionary

log_publisher = LogPublisher()

record_separator = u"\x1e"
cb_logging_aware = u"CROSSBAR_RICH_LOGGING_ENABLE=True"

_loggers = WeakKeyDictionary()
_loglevel = "info"  # Default is "info"


def set_global_log_level(level):
    """
    Set the global log level on all the loggers that have the level not
    explicitly set.
    """
    for item in _loggers.keys():
        if not item._log_level_explicitly_set:
Beispiel #14
0
def setup_structured_logging(
    hs,
    config,
    log_config: dict,
    logBeginner: LogBeginner,
    redirect_stdlib_logging: bool = True,
) -> LogPublisher:
    """
    Set up Twisted's structured logging system.

    Args:
        hs: The homeserver to use.
        config (HomeserverConfig): The configuration of the Synapse homeserver.
        log_config (dict): The log configuration to use.
        logBeginner (LogBeginner): The beginner used to begin logging to the
            configured observers (standard I/O is always redirected).
        redirect_stdlib_logging (bool): Whether to install a root-logger
            handler that forwards stdlib ``logging`` records into this system.

    Returns:
        LogPublisher: The publisher that fans events out to every drain.

    Raises:
        ConfigError: If ``no_redirect_stdio`` is set, no drains are
            configured, or a drain has an unrecognized type.
    """
    if config.no_redirect_stdio:
        raise ConfigError(
            "no_redirect_stdio cannot be defined using structured logging.")

    logger = Logger()

    if "drains" not in log_config:
        raise ConfigError(
            "The logging configuration requires a list of drains.")

    observers = []  # type: List[ILogObserver]

    # Build one log observer per configured drain.
    for observer in parse_drain_configs(log_config["drains"]):
        # Pipe drains
        if observer.type == DrainType.CONSOLE:
            logger.debug("Starting up the {name} console logger drain",
                         name=observer.name)
            observers.append(SynapseFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON:
            logger.debug("Starting up the {name} JSON console logger drain",
                         name=observer.name)
            observers.append(jsonFileLogObserver(observer.location))
        elif observer.type == DrainType.CONSOLE_JSON_TERSE:
            logger.debug(
                "Starting up the {name} terse JSON console logger drain",
                name=observer.name,
            )
            observers.append(
                TerseJSONToConsoleLogObserver(observer.location, metadata={}))

        # File drains
        elif observer.type == DrainType.FILE:
            # Line-buffered append; the handle stays open for the lifetime of
            # the observer (no explicit close in view).
            logger.debug("Starting up the {name} file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(SynapseFileLogObserver(log_file))
        elif observer.type == DrainType.FILE_JSON:
            logger.debug("Starting up the {name} JSON file logger drain",
                         name=observer.name)
            log_file = open(observer.location,
                            "at",
                            buffering=1,
                            encoding="utf8")
            observers.append(jsonFileLogObserver(log_file))

        elif observer.type == DrainType.NETWORK_JSON_TERSE:
            metadata = {"server_name": hs.config.server_name}
            log_observer = TerseJSONToTCPLogObserver(
                hs=hs,
                host=observer.location[0],
                port=observer.location[1],
                metadata=metadata,
                maximum_buffer=observer.options.maximum_buffer,
            )
            log_observer.start()
            observers.append(log_observer)
        else:
            # We should never get here, but, just in case, throw an error.
            raise ConfigError("%s drain type cannot be configured" %
                              (observer.type, ))

    publisher = LogPublisher(*observers)
    log_filter = LogLevelFilterPredicate()

    for namespace, namespace_config in log_config.get("loggers",
                                                      DEFAULT_LOGGERS).items():
        # Set the log level for twisted.logger.Logger namespaces
        log_filter.setLogLevelForNamespace(
            namespace,
            stdlib_log_level_to_twisted(namespace_config.get("level", "INFO")),
        )

        # Also set the log levels for the stdlib logger namespaces, to prevent
        # them getting to PythonStdlibToTwistedLogger and having to be formatted
        if "level" in namespace_config:
            logging.getLogger(namespace).setLevel(
                namespace_config.get("level"))

    # Events flow: context observer -> level filter -> publisher -> drains.
    f = FilteringLogObserver(publisher, [log_filter])
    lco = LogContextObserver(f)

    if redirect_stdlib_logging:
        stuff_into_twisted = PythonStdlibToTwistedLogger(lco)
        stdliblogger = logging.getLogger()
        stdliblogger.addHandler(stuff_into_twisted)

    # Always redirect standard I/O, otherwise other logging outputs might miss
    # it.
    logBeginner.beginLoggingTo([lco], redirectStandardIO=True)

    return publisher
Beispiel #15
0
async def main(reactor, loops):
    """
    Benchmark how long it takes to send `loops` messages.

    Starts a local TCP line-counting server, configures structured logging
    with a terse-JSON network drain pointed at it, sends `loops` messages
    with backpressure, and returns the elapsed wall-clock time in seconds.
    """
    servers = []

    def protocol():
        # Record each protocol instance so the factory can track them.
        p = LineCounter()
        servers.append(p)
        return p

    logger_factory = ServerFactory.forProtocol(protocol)
    logger_factory.wait_for = loops
    logger_factory.on_done = Deferred()
    port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1")

    hs, wait, cleanup = await make_homeserver(reactor)

    # Isolated logging machinery so the benchmark never touches the real
    # global log state; the buffer is pre-sized to hold every message.
    errors = StringIO()
    publisher = LogPublisher()
    mock_sys = Mock()
    beginner = LogBeginner(publisher,
                           errors,
                           mock_sys,
                           warnings,
                           initialBufferSize=loops)

    # Single network drain aimed at the local listener started above.
    log_config = {
        "loggers": {
            "synapse": {
                "level": "DEBUG"
            }
        },
        "drains": {
            "tersejson": {
                "type": "network_json_terse",
                "host": "127.0.0.1",
                "port": port.getHost().port,
                "maximum_buffer": 100,
            }
        },
    }

    logger = Logger(namespace="synapse.logging.test_terse_json",
                    observer=publisher)
    logging_system = setup_structured_logging(hs,
                                              hs.config,
                                              log_config,
                                              logBeginner=beginner,
                                              redirect_stdlib_logging=False)

    # Wait for it to connect...
    await logging_system._observers[0]._service.whenConnected()

    start = perf_counter()

    # Send a bunch of useful messages
    for i in range(0, loops):
        logger.info("test message %s" % (i, ))

        # Backpressure: when the observer's buffer fills, pause until it has
        # drained to half capacity before sending more.
        if (len(logging_system._observers[0]._buffer) ==
                logging_system._observers[0].maximum_buffer):
            while (len(logging_system._observers[0]._buffer) >
                   logging_system._observers[0].maximum_buffer / 2):
                await wait(0.01)

    # The factory fires on_done once the server has counted every line.
    await logger_factory.on_done

    end = perf_counter() - start

    logging_system.stop()
    port.stopListening()
    cleanup()

    return end