def make_middleware(app,
                    global_conf,
                    verbose_log=None,
                    trace_log=None,
                    max_bodylen='3KB',
                    max_logsize='100MB',
                    backup_count='10',
                    keep='100',
                    ):
    """ Paste filter-app converter """
    backup_count = int(backup_count)
    max_bytes = byte_size(max_logsize)
    max_bodylen = byte_size(max_bodylen)
    keep = int(keep)
    from logging import Logger
    from logging.handlers import RotatingFileHandler
    if verbose_log:
        handler = RotatingFileHandler(verbose_log, maxBytes=max_bytes,
                                      backupCount=backup_count)
        verbose_log = Logger('repoze.debug.verboselogger')
        verbose_log.handlers = [handler]
    if trace_log:
        handler = RotatingFileHandler(trace_log, maxBytes=max_bytes,
                                      backupCount=backup_count)
        trace_log = Logger('repoze.debug.tracelogger')
        trace_log.handlers = [handler]
    return ResponseLoggingMiddleware(app, max_bodylen, keep,
                                     verbose_log, trace_log)

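# Usage sketch for the factory above: Paste passes INI values as strings,
# which is why backup_count and keep arrive as strings and are converted
# with int(). A hypothetical direct call (the app object and log paths are
# assumptions for illustration):
#
#   middleware = make_middleware(my_wsgi_app, {},
#                                verbose_log='/tmp/verbose.log',
#                                trace_log='/tmp/trace.log')
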
def process_alerts_and_remediations(log: Logger, clusters,
                                    alerts: List[Tuple[str, Callable[..., BaseAlert]]]):
    for context in clusters:
        client: OpenShift = OpenShift(
            context=context,
            use_internal=environ.get('INTERNAL_CLUSTER', 'false') == 'true')
        cluster: str = client.client.configuration.host
        log.info(f'Processing alerts for Cluster: {cluster} ...')
        log.handlers = [logging_config.get_tabbed_formatter()]
        # Iterate over all alerts
        for _, alert_class in alerts:
            alert = alert_class(client)
            alert.process_alerts()
            if environ.get('REMEDIATION', 'false') == 'true':
                alert.process_remediations()
            if alert.failed_alerts:
                # Only email when some failure was left unremediated, unless
                # emails for successful remediations are wanted as well.
                if environ.get('SKIP_EMAIL_FOR_SUCCESSFUL_REMEDIATION',
                               'false') == 'true':
                    if not all(failure.get('remediated', False)
                               for failure in alert.failed_alerts):
                        alert.email_results()
                else:
                    alert.email_results()
        log.handlers = [logging_config.get_normal_formatter()]
        log.info(f'Finished processing alerts for Cluster: {cluster}.')

@contextmanager  # restored: the yield/finally pattern below is a context manager
def redirect_logging_to_tqdm(logger: logging.Logger = None):
    if logger is None:
        logger = logging.root
    tqdm_handler = TqdmLoggingHandler()
    original_handlers = logger.handlers
    tqdm_handler.setFormatter(_get_console_formatter(original_handlers))
    try:
        # Swap console handlers for the tqdm-aware handler for the
        # duration of the block, then restore the originals.
        logger.handlers = [
            handler for handler in logger.handlers
            if not _is_console_logging_handler(handler)
        ] + [tqdm_handler]
        yield
    finally:
        logger.handlers = original_handlers

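# Usage sketch, assuming the restored @contextmanager decorator and tqdm
# installed: console handlers are replaced for the duration of the block so
# log lines do not corrupt the progress bar.
#
#   with redirect_logging_to_tqdm():
#       for item in tqdm(items):
#           logging.info('processing %s', item)
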
def configure_logger_for_colour(logger: logging.Logger,
                                level: int = logging.INFO,
                                remove_existing: bool = False,
                                extranames: List[str] = None,
                                with_process_id: bool = False,
                                with_thread_id: bool = False) -> None:
    """
    Applies a preconfigured datetime/colour scheme to a logger.

    Should ONLY be called from the ``if __name__ == 'main'`` script; see
    https://docs.python.org/3.4/howto/logging.html#library-config.

    Args:
        logger: logger to modify
        level: log level to set
        remove_existing: remove existing handlers from logger first?
        extranames: additional names to append to the logger's name
        with_process_id: include the process ID in the logger's name?
        with_thread_id: include the thread ID in the logger's name?
    """
    if remove_existing:
        logger.handlers = []  # http://stackoverflow.com/questions/7484454
    handler = get_colour_handler(extranames,
                                 with_process_id=with_process_id,
                                 with_thread_id=with_thread_id)
    handler.setLevel(level)
    logger.addHandler(handler)
    logger.setLevel(level)

def logging_config(folder: Optional[str] = None,
                   name: Optional[str] = None,
                   logger: logging.Logger = logging.root,
                   level: int = logging.INFO,
                   console_level: int = logging.INFO,
                   console: bool = True,
                   overwrite_handler: bool = False) -> str:
    """Config the logging module: set the logger to save to the specified
    file path.

    Parameters
    ----------
    folder
        The folder to save the log
    name
        Name of the saved log file
    logger
        The logger
    level
        Logging level of the log file
    console_level
        Logging level of the console log
    console
        Whether to also log to console
    overwrite_handler
        Whether to overwrite the existing handlers in the logger

    Returns
    -------
    folder
        The folder to save the log file.
    """
    if name is None:
        name = inspect.stack()[-1][1].split('.')[0]
    if folder is None:
        folder = os.path.join(os.getcwd(), name)
    if not os.path.exists(folder):
        os.makedirs(folder, exist_ok=True)
    need_file_handler = True
    need_console_handler = True
    # Check the existing handlers.
    if overwrite_handler:
        logger.handlers = []
    else:
        for handler in logger.handlers:
            if isinstance(handler, logging.StreamHandler):
                need_console_handler = False
    logpath = os.path.join(folder, name + ".log")
    print("All Logs will be saved to {}".format(logpath))
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    if need_file_handler:
        logfile = logging.FileHandler(logpath)
        logfile.setLevel(level)
        logfile.setFormatter(formatter)
        logger.addHandler(logfile)
    if console and need_console_handler:
        # Initialize the console logging
        logconsole = logging.StreamHandler()
        logconsole.setLevel(console_level)
        logconsole.setFormatter(formatter)
        logger.addHandler(logconsole)
    return folder

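# Usage sketch for logging_config above (paths are hypothetical): with all
# defaults, records go both to <cwd>/<script-name>/<script-name>.log and to
# the console; overwrite_handler=True drops any handlers already attached.
#
#   folder = logging_config(folder='./logs', name='train',
#                           level=logging.DEBUG,
#                           console_level=logging.INFO)
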
def configure_databricks_logger(log_level=logging.INFO,
                                logger: logging.Logger = None,
                                file_path: str = None) -> logging.Logger:
    """Configure logging for databricks.

    Args:
        log_level: the logging level
        logger: the logger to use, default is root logger
        file_path: the path to a file for storing logs to persistent disk if provided
    """
    if logger is None:
        logger = logging.getLogger()
    logger.setLevel(log_level)
    log_handlers = [logging.StreamHandler(sys.stdout)]
    if file_path is not None:
        file_handler = TimedRotatingFileHandler(file_path, when="midnight",
                                                backupCount=7)
        file_handler.setFormatter(
            logging.Formatter(
                "%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
        log_handlers.append(file_handler)
    logger.handlers = log_handlers
    logging.getLogger("py4j").setLevel(
        logging.ERROR)  # To remove the unnecessary databricks logging output
    return logger

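# Usage sketch: configure the root logger to write to stdout (visible in
# notebook output) and, optionally, to a file rotated nightly on persistent
# storage; the DBFS path below is an assumption for illustration.
#
#   logger = configure_databricks_logger(
#       log_level=logging.DEBUG,
#       file_path='/dbfs/FileStore/logs/job.log')
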
def default_logging_config(logger_: logging.Logger,
                           ) -> logging.Logger:  # pylint: disable=redefined-outer-name
    """Set up the default handler and formatter on the given logger."""
    default_handler = logging.StreamHandler(stream=sys.stdout)
    default_handler.formatter = ColorFormatter()
    logger_.handlers = [default_handler]
    logger_.propagate = True
    return logger_

def basic_config(logger: logging.Logger) -> logging.Logger:
    """Configure the logger for reporting to stderr using click."""
    handler = ClickErrHandler()
    handler.formatter = ColorFormatter()
    logger.handlers = [handler]
    return logger

def configure_logger_for_colour(log: logging.Logger,
                                remove_existing: bool = True) -> None:
    """
    Applies a preconfigured datetime/colour scheme to a logger.

    Should ONLY be called from the "if __name__ == 'main'" script:
    https://docs.python.org/3.4/howto/logging.html#library-config
    """
    if remove_existing:
        log.handlers = []  # http://stackoverflow.com/questions/7484454
    log.addHandler(COLOUR_HANDLER)

def get_logger(name: str) -> Logger:
    logger = _loggers.get(name)
    if logger:
        return logger
    handler = TimedRotatingFileHandler(f'{DATA_PATH}/logs/{name}.log',
                                       when='D', backupCount=10)
    handler.setFormatter(JSONLogFormatter())
    logger = Logger(name, LOG_LEVEL)
    logger.handlers = []
    logger.addHandler(handler)
    _loggers[name] = logger  # cache the logger so the lookup above can hit
    return logger

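# Usage sketch: with the cache line restored above, repeated calls return
# the same configured logger. DATA_PATH, LOG_LEVEL, _loggers and
# JSONLogFormatter are module globals assumed from the surrounding code.
#
#   log = get_logger('worker')
#   assert get_logger('worker') is log
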
def configure_logger_for_colour(log: logging.Logger,
                                level: int = logging.INFO,
                                remove_existing: bool = False,
                                extranames: List[str] = None) -> None:
    """
    Applies a preconfigured datetime/colour scheme to a logger.

    Should ONLY be called from the "if __name__ == 'main'" script:
    https://docs.python.org/3.4/howto/logging.html#library-config
    """
    if remove_existing:
        log.handlers = []  # http://stackoverflow.com/questions/7484454
    log.addHandler(get_colour_handler(extranames))
    log.setLevel(level)

def set_log_handler(logger: logging.Logger, file_name: str) -> None:
    if not BucketLoader.DEFAULT_LOG_FOLDER.exists():
        BucketLoader.DEFAULT_LOG_FOLDER.mkdir(parents=True)
    logging.root.setLevel(level=logging.NOTSET)
    file_handler = logging.FileHandler(BucketLoader.DEFAULT_LOG_FOLDER / file_name)
    file_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        '%m/%d/%Y %I:%M:%S %p')
    file_handler.setFormatter(formatter)
    logger.handlers = [file_handler]
    logger.write = get_write_method(logger)
    logger.flush = lambda: None

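# Usage sketch (an assumption about intent): the write/flush attributes
# patched onto the logger above make it quack like a file object, so it
# could stand in for sys.stdout; get_write_method and BucketLoader come
# from the surrounding module.
#
#   set_log_handler(logger, 'bucket_loader.log')
#   sys.stdout = logger  # print() output now lands in the log file
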
def setup_logger(logger: logging.Logger) -> None:
    """
    Prepare logging for the provided logger

    :param logger: the logger to configure
    :return: None
    """
    log_level = config.get_log_level()
    logger.setLevel(log_level)
    logger.handlers = []
    log_format = config.get_log_format()
    log_formatter = logging.Formatter(fmt=log_format)
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(log_formatter)
    logger.addHandler(stream_handler)
    if config.get_log_to_file():
        add_log_file_to_logger(logger, log_formatter)

@contextmanager  # restored: the yield/finally pattern below is a context manager
def tee_logging_lines_to(
        *line_writers: Callable[[str], None],
        logger: logging.Logger = None,
        formatter: logging.Formatter = None,
        **kwargs):
    if logger is None:
        logger = logging.root
    if formatter is None:
        formatter = get_default_logging_formatter()
    prev_handlers = logger.handlers
    try:
        handler = LineWriterLoggingHandler(*line_writers, **kwargs)
        if formatter is not None:
            handler.setFormatter(formatter)
        logger.addHandler(handler)
        yield logger
    finally:
        flush_logging_handlers(logger.handlers)
        flush_logging_handlers(logging.root.handlers)
        logger.handlers = prev_handlers

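# Usage sketch, assuming the restored @contextmanager decorator and that
# LineWriterLoggingHandler forwards each formatted record line to the given
# callables; here log lines are teed into an in-memory list:
#
#   captured = []
#   with tee_logging_lines_to(captured.append):
#       logging.info('hello')
#   # captured now also holds the formatted 'hello' line
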
def _setup_logger(
        logger: logging.Logger,
        fmt: str = '%(levelname)8s %(asctime)s <daemon> %(message)s',
        datefmt: str = '%Y-%m-%d %H:%M:%S',
):
    # Remove default handlers
    if logger.handlers:
        logger.handlers = []
    formatter = logging.Formatter(fmt, datefmt=datefmt)

    # Route records up to and including WARNING to stdout ...
    h_stdout = logging.StreamHandler(sys.stdout)
    h_stdout.setLevel(logging.NOTSET)
    h_stdout.addFilter(lambda record: record.levelno <= logging.WARNING)
    h_stdout.setFormatter(formatter)
    logger.addHandler(h_stdout)

    # ... and ERROR and above to stderr.
    h_stderr = logging.StreamHandler(sys.stderr)
    h_stderr.setLevel(logging.ERROR)
    h_stderr.setFormatter(formatter)
    logger.addHandler(h_stderr)

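# Usage sketch illustrating the stream split above: WARNING and below pass
# the stdout filter (levelno <= WARNING), while only ERROR and above clear
# the stderr handler's level, so no record is emitted twice.
#
#   daemon_logger = logging.getLogger('daemon')
#   _setup_logger(daemon_logger)
#   daemon_logger.warning('goes to stdout')
#   daemon_logger.error('goes to stderr')
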
def setLoggerDefaults(
        logger: logging.Logger,
        level: int = logging.INFO,
        logFileName: Optional[str] = None
) -> None:
    logger.setLevel(level)
    logger.handlers = []
    formatter = logging.Formatter(
        '[%(asctime)s] %(levelname)s [%(module)s:%(lineno)s] - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S'
    )

    def setHandlerDefaults(handler: Union[logging.StreamHandler, logging.FileHandler]) -> None:
        handler.setLevel(level)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    streamHandler = logging.StreamHandler(sys.stdout)
    setHandlerDefaults(streamHandler)
    if logFileName is not None:
        fileHandler = logging.FileHandler(logFileName, 'w')
        setHandlerDefaults(fileHandler)

def close_logger(log: logging.Logger):
    for handler in log.handlers:
        handler.close()
    log.handlers = []

def remove_all_handlers(self, logger: logging.Logger) -> None:
    logger.handlers = list()