def setup_logging(self):
    """Set up python logging for this process.

    If no log config file is configured, build a single handler from the
    'verbosity' and 'log_file' settings; otherwise hand everything over to
    logging.config.fileConfig. Either way, twisted's own log output is
    routed into the python logging framework afterwards.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")

    if self.log_config is not None:
        # A full logging config file takes precedence over the simple
        # verbosity/log_file options.
        logging.config.fileConfig(self.log_config)
    else:
        # -v bumps everything to DEBUG; -vv additionally bumps the storage
        # layer to DEBUG.
        root_level = logging.DEBUG if self.verbosity else logging.INFO
        storage_level = (
            logging.DEBUG if self.verbosity and self.verbosity > 1
            else logging.INFO
        )
        # FIXME: we need a logging.WARN for a -q quiet option

        root = logging.getLogger('')
        root.setLevel(root_level)
        logging.getLogger('synapse.storage').setLevel(storage_level)

        if self.log_file:
            sink = logging.FileHandler(self.log_file)
        else:
            sink = logging.StreamHandler()
        sink.setFormatter(logging.Formatter(log_format))
        sink.addFilter(LoggingContextFilter(request=""))
        root.addHandler(sink)
        root.info("Test")

    # Forward twisted's internal logging into python's logging framework.
    observer = PythonLoggingObserver()
    observer.start()
def setup_logging(log_config=None, log_file=None, verbosity=None):
    """Set up python logging.

    Args:
        log_config (str|None): path to a YAML python-logging config file.
            When given, it takes precedence over the other arguments.
        log_file (str|None): path to a log file; a rotating file handler is
            installed for it. If falsy, log to stderr instead. Ignored when
            log_config is given.
        verbosity (int|None): truthy enables DEBUG logging; > 1 additionally
            enables DEBUG for the storage layer. Ignored when log_config is
            given.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")
    if log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if verbosity:
            level = logging.DEBUG
            if verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3)

            def sighup(signum, stack):
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")

            # TODO(paul): obviously this is a terrible mechanism for
            #   stealing SIGHUP, because it means no other part of synapse
            #   can use it instead. If we want to catch SIGHUP anywhere
            #   else as well, I'd suggest we find a nicer way to broadcast
            #   it around.
            #
            # Pass a default so we don't raise AttributeError on platforms
            # (e.g. Windows) where SIGHUP does not exist.
            if getattr(signal, "SIGHUP", None):
                signal.signal(signal.SIGHUP, sighup)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:
        with open(log_config, 'r') as f:
            # safe_load: a logging config file must never be able to execute
            # arbitrary python, which yaml.load permits.
            logging.config.dictConfig(yaml.safe_load(f))

    # It's critical to point twisted's internal logging somewhere, otherwise
    # it stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem
    # if the handlers blocked for a long time as python.logging is a blocking
    # API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to
    # a file.
    observer = STDLibLogObserver()
    globalLogBeginner.beginLoggingTo([observer])
def setup_logging(log_config=None, log_file=None, verbosity=None):
    """Set up python logging.

    Args:
        log_config (str|None): path to a YAML python-logging config file.
            When given, it takes precedence over the other arguments.
        log_file (str|None): path to a log file; a rotating file handler is
            installed for it. If falsy, log to stderr instead. Ignored when
            log_config is given.
        verbosity (int|None): truthy enables DEBUG logging; > 1 additionally
            enables DEBUG for the storage layer. Ignored when log_config is
            given.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")
    if log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if verbosity:
            level = logging.DEBUG
            if verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3)

            def sighup(signum, stack):
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")

            # TODO(paul): obviously this is a terrible mechanism for
            #   stealing SIGHUP, because it means no other part of synapse
            #   can use it instead. If we want to catch SIGHUP anywhere
            #   else as well, I'd suggest we find a nicer way to broadcast
            #   it around.
            #
            # Pass a default so we don't raise AttributeError on platforms
            # (e.g. Windows) where SIGHUP does not exist.
            if getattr(signal, "SIGHUP", None):
                signal.signal(signal.SIGHUP, sighup)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:
        with open(log_config, 'r') as f:
            # safe_load: a logging config file must never be able to execute
            # arbitrary python, which yaml.load permits.
            logging.config.dictConfig(yaml.safe_load(f))

    # Forward twisted's internal logging into python's logging framework.
    observer = PythonLoggingObserver()
    observer.start()
def setup_logging(): """Configure the python logging appropriately for the tests. (Logs will end up in _trial_temp.) """ root_logger = logging.getLogger() log_format = ("%(asctime)s - %(name)s - %(lineno)d - " "%(levelname)s - %(request)s - %(message)s") handler = ToTwistedHandler() formatter = logging.Formatter(log_format) handler.setFormatter(formatter) handler.addFilter(LoggingContextFilter(request="")) root_logger.addHandler(handler) log_level = os.environ.get("SYNAPSE_TEST_LOG_LEVEL", "ERROR") root_logger.setLevel(log_level)
def setup_logging(self):
    """Set up python logging for this process.

    If self.log_config is None, build a handler from self.verbosity and
    self.log_file; otherwise load a full YAML python-logging config from
    self.log_config. Twisted's own logging is routed into the python
    logging framework afterwards.
    """
    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")
    if self.log_config is None:
        level = logging.INFO
        level_for_storage = logging.INFO
        if self.verbosity:
            level = logging.DEBUG
            if self.verbosity > 1:
                level_for_storage = logging.DEBUG

        # FIXME: we need a logging.WARN for a -q quiet option
        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if self.log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                self.log_file, maxBytes=(1000 * 1000 * 100), backupCount=3)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
        logger.info("Test")
    else:
        with open(self.log_config, 'r') as f:
            # safe_load: a logging config file must never be able to execute
            # arbitrary python, which yaml.load permits.
            logging.config.dictConfig(yaml.safe_load(f))
    observer = PythonLoggingObserver()
    observer.start()
def setup_logging(config, use_worker_options=False):
    """ Set up python logging

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use 'worker_log_config' and
            'worker_log_file' options instead of 'log_config' and 'log_file'.
    """
    log_config = (config.worker_log_config if use_worker_options
                  else config.log_config)
    log_file = (config.worker_log_file if use_worker_options
                else config.log_file)

    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")

    if log_config is None:
        # We don't have a logfile, so fall back to the 'verbosity' param from
        # the config or cmdline. (Note that we generate a log config for new
        # installs, so this will be an unusual case)
        level = logging.INFO
        level_for_storage = logging.INFO
        if config.verbosity:
            level = logging.DEBUG
            if config.verbosity > 1:
                level_for_storage = logging.DEBUG

        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage.SQL').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3,
                encoding='utf8')

            def sighup(signum, stack):
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")
        else:
            handler = logging.StreamHandler()

            def sighup(*args):
                # nothing to rotate when logging to stderr
                pass

        handler.setFormatter(formatter)
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:
        def load_log_config():
            with open(log_config, 'r') as f:
                logging.config.dictConfig(yaml.safe_load(f))

        def sighup(*args):
            # it might be better to use a file watcher or something for this.
            load_log_config()
            logging.info("Reloaded log config from %s due to SIGHUP", log_config)

        load_log_config()

    appbase.register_sighup(sighup)

    # make sure that the first thing we log is a thing we can grep backwards
    # for. (logging.warning, not the deprecated logging.warn alias.)
    logging.warning("***** STARTING SERVER *****")
    logging.warning(
        "Server %s version %s",
        sys.argv[0],
        get_version_string(synapse),
    )
    logging.info("Server hostname: %s", config.server_name)

    # It's critical to point twisted's internal logging somewhere, otherwise
    # it stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem
    # if the handlers blocked for a long time as python.logging is a blocking
    # API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to
    # a file.
    observer = STDLibLogObserver()

    def _log(event):
        # Drop a handful of known-spammy twisted log lines.
        if "log_text" in event:
            if event["log_text"].startswith(
                    "DNSDatagramProtocol starting on "):
                return

            if event["log_text"].startswith("(UDP Port "):
                return

            if event["log_text"].startswith("Timing out client"):
                return

        return observer(event)

    globalLogBeginner.beginLoggingTo(
        [_log],
        redirectStandardIO=not config.no_redirect_stdio,
    )
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")
class ToTwistedHandler(logging.Handler):
    # A python-logging Handler which re-emits every record through twisted's
    # logging framework (presumably so that trial captures test output —
    # TODO confirm against the test harness).

    # Single shared twisted logger used for all re-emitted records.
    tx_log = twisted.logger.Logger()

    def emit(self, record):
        # Format the record and forward it to twisted at the matching level.
        log_entry = self.format(record)
        # Twisted's level name for python's 'warning' is 'warn'.
        log_level = record.levelname.lower().replace('warning', 'warn')
        self.tx_log.emit(
            twisted.logger.LogLevel.levelWithName(log_level),
            # NOTE(review): braces are swapped for parens — presumably
            # because twisted.logger treats '{...}' in the message as
            # format placeholders; confirm before changing.
            log_entry.replace("{", r"(").replace("}", r")"),
        )


# NOTE(review): `log_format` and `rootLogger` are defined elsewhere in this
# file; this fragment installs the twisted-forwarding handler on the root
# logger.
handler = ToTwistedHandler()
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
handler.addFilter(LoggingContextFilter(request=""))
rootLogger.addHandler(handler)


def around(target):
    """A CLOS-style 'around' modifier, which wraps the original method of the
    given instance with another piece of code.

    @around(self)
    def method_name(orig, *args, **kwargs):
        return orig(*args, **kwargs)
    """
    def _around(code):
        name = code.__name__
        orig = getattr(target, name)
        # NOTE(review): the definition continues beyond this excerpt.
def setup_logging(config, use_worker_options=False):
    """ Set up python logging

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use 'worker_log_config' and
            'worker_log_file' options instead of 'log_config' and 'log_file'.
    """
    log_config = (config.worker_log_config if use_worker_options
                  else config.log_config)
    log_file = (config.worker_log_file if use_worker_options
                else config.log_file)

    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s")

    if log_config is None:
        # We don't have a logfile, so fall back to the 'verbosity' param from
        # the config or cmdline. (Note that we generate a log config for new
        # installs, so this will be an unusual case)
        level = logging.INFO
        level_for_storage = logging.INFO
        if config.verbosity:
            level = logging.DEBUG
            if config.verbosity > 1:
                level_for_storage = logging.DEBUG

        logger = logging.getLogger('')
        logger.setLevel(level)
        logging.getLogger('synapse.storage.SQL').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3)

            def sighup(signum, stack):
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")
        else:
            handler = logging.StreamHandler()

            def sighup(signum, stack):
                # nothing to rotate when logging to stderr
                pass

        handler.setFormatter(formatter)
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:
        def load_log_config():
            with open(log_config, 'r') as f:
                # safe_load: a logging config file must never be able to
                # execute arbitrary python, which yaml.load permits.
                logging.config.dictConfig(yaml.safe_load(f))

        def sighup(signum, stack):
            # it might be better to use a file watcher or something for this.
            logging.info("Reloading log config from %s due to SIGHUP",
                         log_config)
            load_log_config()

        load_log_config()

    # TODO(paul): obviously this is a terrible mechanism for
    #   stealing SIGHUP, because it means no other part of synapse
    #   can use it instead. If we want to catch SIGHUP anywhere
    #   else as well, I'd suggest we find a nicer way to broadcast
    #   it around.
    #
    # Pass a default so we don't raise AttributeError on platforms
    # (e.g. Windows) where SIGHUP does not exist.
    if getattr(signal, "SIGHUP", None):
        signal.signal(signal.SIGHUP, sighup)

    # make sure that the first thing we log is a thing we can grep backwards
    # for. (logging.warning, not the deprecated logging.warn alias.)
    logging.warning("***** STARTING SERVER *****")
    logging.warning(
        "Server %s version %s",
        sys.argv[0],
        get_version_string(synapse),
    )
    logging.info("Server hostname: %s", config.server_name)

    # It's critical to point twisted's internal logging somewhere, otherwise
    # it stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem
    # if the handlers blocked for a long time as python.logging is a blocking
    # API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to
    # a file.
    observer = STDLibLogObserver()
    globalLogBeginner.beginLoggingTo(
        [observer],
        redirectStandardIO=not config.no_redirect_stdio,
    )