def setup_logging(
    hs, config, use_worker_options=False, logBeginner: LogBeginner = globalLogBeginner
) -> None:
    """
    Set up the logging subsystem.

    Args:
        config (LoggingConfig | synapse.config.worker.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use the 'worker_log_config' option
            instead of 'log_config'.

        logBeginner: The Twisted logBeginner to use.
    """
    if use_worker_options:
        log_config_path = config.worker_log_config
    else:
        log_config_path = config.log_config

    # One-time configuration of the stdlib logging machinery.
    _setup_stdlib_logging(config, log_config_path, logBeginner=logBeginner)

    # Reload the logging configuration on SIGHUP, if a config file was given.
    appbase.register_sighup(_reload_logging_config, log_config_path)

    # Emit a marker line first so a startup can be found by grepping backwards.
    logging.warning("***** STARTING SERVER *****")
    logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
    logging.info("Server hostname: %s", config.server_name)
    logging.info("Instance name: %s", hs.get_instance_name())
def setup_logging(
    hs, config, use_worker_options=False, logBeginner: LogBeginner = globalLogBeginner
) -> ILogObserver:
    """
    Set up the logging subsystem.

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use the 'worker_log_config' option
            instead of 'log_config'.

        logBeginner: The Twisted logBeginner to use.

    Returns:
        The "root" Twisted Logger observer, suitable for sending logs to from a
        Logger instance.
    """
    if use_worker_options:
        log_config = config.worker_log_config
    else:
        log_config = config.log_config

    def read_config(*args, callback=None):
        # (Re-)read the YAML log config from disk; invoked both at startup and
        # from the SIGHUP handler (which passes signal args, hence *args).
        if log_config is None:
            return None

        with open(log_config, "rb") as f:
            log_config_body = yaml.safe_load(f.read())

        if callback:
            callback(log_config=log_config_body)
            logging.info("Reloaded log config from %s due to SIGHUP", log_config)

        return log_config_body

    log_config_body = read_config()

    # A config with "structured: true" selects the structured-logging pipeline;
    # anything else (including no config file) uses stdlib logging.
    if log_config_body and log_config_body.get("structured") is True:
        logger = setup_structured_logging(
            hs, config, log_config_body, logBeginner=logBeginner
        )
        appbase.register_sighup(read_config, callback=reload_structured_logging)
    else:
        logger = _setup_stdlib_logging(config, log_config_body, logBeginner=logBeginner)
        appbase.register_sighup(read_config, callback=_reload_stdlib_logging)

    # make sure that the first thing we log is a thing we can grep backwards
    # for
    logging.warning("***** STARTING SERVER *****")
    logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
    logging.info("Server hostname: %s", config.server_name)

    return logger
def setup_logging(config, use_worker_options=False):
    """
    Set up python logging

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use 'worker_log_config' and
            'worker_log_file' options instead of 'log_config' and 'log_file'.
    """
    log_config = (config.worker_log_config if use_worker_options
                  else config.log_config)
    log_file = (config.worker_log_file if use_worker_options
                else config.log_file)

    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s"
    )

    if log_config is None:
        # We don't have a log config, so fall back to the 'verbosity' param from
        # the config or cmdline. (Note that we generate a log config for new
        # installs, so this will be an unusual case)
        level = logging.INFO
        level_for_storage = logging.INFO
        if config.verbosity:
            level = logging.DEBUG
            if config.verbosity > 1:
                level_for_storage = logging.DEBUG

        logger = logging.getLogger('')
        logger.setLevel(level)

        # SQL logging is very chatty, so it only gets DEBUG at verbosity > 1.
        logging.getLogger('synapse.storage.SQL').setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3,
                encoding='utf8'
            )

            def sighup(signum, stack):
                # Rotate the log file on SIGHUP (e.g. for logrotate).
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")
        else:
            handler = logging.StreamHandler()

            def sighup(*args):
                pass

        handler.setFormatter(formatter)
        # presumably LoggingContextFilter supplies the %(request)s field used
        # in log_format — TODO confirm against its definition
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:
        def load_log_config():
            with open(log_config, 'r') as f:
                logging.config.dictConfig(yaml.safe_load(f))

        def sighup(*args):
            # it might be better to use a file watcher or something for this.
            load_log_config()
            logging.info("Reloaded log config from %s due to SIGHUP", log_config)

        load_log_config()

    appbase.register_sighup(sighup)

    # make sure that the first thing we log is a thing we can grep backwards
    # for
    # NOTE: logging.warn is a deprecated alias of logging.warning; use the
    # real name.
    logging.warning("***** STARTING SERVER *****")
    logging.warning(
        "Server %s version %s",
        sys.argv[0],
        get_version_string(synapse),
    )
    logging.info("Server hostname: %s", config.server_name)

    # It's critical to point twisted's internal logging somewhere, otherwise it
    # stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem if
    # the handlers blocked for a long time as python.logging is a blocking API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to a
    # file.
    observer = STDLibLogObserver()

    def _log(event):
        # Drop a few known-noisy Twisted log lines before forwarding the event
        # to the stdlib logging observer.
        if "log_text" in event:
            if event["log_text"].startswith("DNSDatagramProtocol starting on "):
                return

            if event["log_text"].startswith("(UDP Port "):
                return

            if event["log_text"].startswith("Timing out client"):
                return

        return observer(event)

    globalLogBeginner.beginLoggingTo(
        [_log],
        redirectStandardIO=not config.no_redirect_stdio,
    )
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")
def setup_logging(config, use_worker_options=False):
    """
    Set up python logging

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use the 'worker_log_config' option
            instead of 'log_config'.
    """
    log_config = config.worker_log_config if use_worker_options else config.log_config

    if log_config is None:
        # No log config file: log to stderr at INFO with a fixed format.
        log_format = (
            "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
            " - %(message)s"
        )

        logger = logging.getLogger("")
        logger.setLevel(logging.INFO)
        # SQL logging is very chatty; keep it at INFO.
        logging.getLogger("synapse.storage.SQL").setLevel(logging.INFO)

        formatter = logging.Formatter(log_format)

        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        # presumably LoggingContextFilter supplies the %(request)s field used
        # in log_format — TODO confirm against its definition
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:

        def load_log_config():
            with open(log_config, "r") as f:
                logging.config.dictConfig(yaml.safe_load(f))

        def sighup(*args):
            # it might be better to use a file watcher or something for this.
            load_log_config()
            logging.info("Reloaded log config from %s due to SIGHUP", log_config)

        load_log_config()
        # sighup only exists on this branch, so register it here (registering
        # unconditionally would NameError when no log config is given).
        appbase.register_sighup(sighup)

    # make sure that the first thing we log is a thing we can grep backwards
    # for
    # NOTE: logging.warn is a deprecated alias of logging.warning; use the
    # real name.
    logging.warning("***** STARTING SERVER *****")
    logging.warning("Server %s version %s", sys.argv[0], get_version_string(synapse))
    logging.info("Server hostname: %s", config.server_name)

    # It's critical to point twisted's internal logging somewhere, otherwise it
    # stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem if
    # the handlers blocked for a long time as python.logging is a blocking API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to a
    # file.
    observer = STDLibLogObserver()

    def _log(event):
        # Drop a few known-noisy Twisted log lines before forwarding the event
        # to the stdlib logging observer.
        if "log_text" in event:
            if event["log_text"].startswith("DNSDatagramProtocol starting on "):
                return

            if event["log_text"].startswith("(UDP Port "):
                return

            if event["log_text"].startswith("Timing out client"):
                return

        return observer(event)

    globalLogBeginner.beginLoggingTo(
        [_log], redirectStandardIO=not config.no_redirect_stdio
    )
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")
def setup_logging(config, use_worker_options=False):
    """
    Set up python logging

    Args:
        config (LoggingConfig | synapse.config.workers.WorkerConfig):
            configuration data

        use_worker_options (bool): True to use 'worker_log_config' and
            'worker_log_file' options instead of 'log_config' and 'log_file'.
    """
    log_config = config.worker_log_config if use_worker_options else config.log_config
    log_file = config.worker_log_file if use_worker_options else config.log_file

    log_format = (
        "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s"
        " - %(message)s"
    )

    if log_config is None:
        # We don't have a log config, so fall back to the 'verbosity' param from
        # the config or cmdline. (Note that we generate a log config for new
        # installs, so this will be an unusual case)
        level = logging.INFO
        level_for_storage = logging.INFO
        if config.verbosity:
            level = logging.DEBUG
            if config.verbosity > 1:
                level_for_storage = logging.DEBUG

        logger = logging.getLogger("")
        logger.setLevel(level)

        # SQL logging is very chatty, so it only gets DEBUG at verbosity > 1.
        logging.getLogger("synapse.storage.SQL").setLevel(level_for_storage)

        formatter = logging.Formatter(log_format)
        if log_file:
            # TODO: Customisable file size / backup count
            handler = logging.handlers.RotatingFileHandler(
                log_file, maxBytes=(1000 * 1000 * 100), backupCount=3,
                encoding="utf8"
            )

            def sighup(signum, stack):
                # Rotate the log file on SIGHUP (e.g. for logrotate).
                logger.info("Closing log file due to SIGHUP")
                handler.doRollover()
                logger.info("Opened new log file due to SIGHUP")
        else:
            handler = logging.StreamHandler()

            def sighup(*args):
                pass

        handler.setFormatter(formatter)
        # presumably LoggingContextFilter supplies the %(request)s field used
        # in log_format — TODO confirm against its definition
        handler.addFilter(LoggingContextFilter(request=""))
        logger.addHandler(handler)
    else:

        def load_log_config():
            with open(log_config, "r") as f:
                logging.config.dictConfig(yaml.safe_load(f))

        def sighup(*args):
            # it might be better to use a file watcher or something for this.
            load_log_config()
            logging.info("Reloaded log config from %s due to SIGHUP", log_config)

        load_log_config()

    appbase.register_sighup(sighup)

    # make sure that the first thing we log is a thing we can grep backwards
    # for
    # NOTE: logging.warn is a deprecated alias of logging.warning; use the
    # real name.
    logging.warning("***** STARTING SERVER *****")
    logging.warning(
        "Server %s version %s",
        sys.argv[0],
        get_version_string(synapse),
    )
    logging.info("Server hostname: %s", config.server_name)

    # It's critical to point twisted's internal logging somewhere, otherwise it
    # stacks up and leaks up to 64K objects;
    # see: https://twistedmatrix.com/trac/ticket/8164
    #
    # Routing to the python logging framework could be a performance problem if
    # the handlers blocked for a long time as python.logging is a blocking API
    # see https://twistedmatrix.com/documents/current/core/howto/logger.html
    # filed as https://github.com/matrix-org/synapse/issues/1727
    #
    # However this may not be too much of a problem if we are just writing to a
    # file.
    observer = STDLibLogObserver()

    def _log(event):
        # Drop a few known-noisy Twisted log lines before forwarding the event
        # to the stdlib logging observer.
        if "log_text" in event:
            if event["log_text"].startswith("DNSDatagramProtocol starting on "):
                return

            if event["log_text"].startswith("(UDP Port "):
                return

            if event["log_text"].startswith("Timing out client"):
                return

        return observer(event)

    globalLogBeginner.beginLoggingTo(
        [_log],
        redirectStandardIO=not config.no_redirect_stdio,
    )
    if not config.no_redirect_stdio:
        print("Redirected stdout/stderr to logs")