示例#1
0
 def __init__(self):
     """Create one logger per standard log level, each writing to its
     own file under ``logs/``.

     Builds a private level -> logger map in ``self.__loggers``; every
     logger is named after its numeric level and gets a dedicated
     ``FileHandler``.
     """
     handlers = {
         logging.NOTSET: "logs/notset.log",
         logging.DEBUG: "logs/debug.log",
         logging.INFO: "logs/info.log",
         logging.WARNING: "logs/warning.log",
         logging.ERROR: "logs/error.log",
         logging.CRITICAL: "logs/critical.log",
     }
     self.__loggers = {}
     logLevels = handlers.keys()
     # BUGFIX: original pattern contained '% (lineno)d' (stray space after
     # '%'), which %-style formatting rejects when a record is emitted.
     fmt = logging.Formatter(
         '[%(asctime)s] - %(pathname)s[line: %(lineno)d] - [%(levelname)s]: [%(message)s]'
     )
     for level in logLevels:
         # Create a logger named after the numeric level.
         logger = logging.getLogger(str(level))
         logger.setLevel(level)
         # Create the file handler; ensure the target directory exists so
         # FileHandler does not raise on a missing 'logs/' directory.
         log_path = os.path.abspath(handlers[level])
         os.makedirs(os.path.dirname(log_path), exist_ok=True)
         fh = logging.FileHandler(log_path)
         # Attach the shared output format.
         fh.setFormatter(fmt)
         fh.setLevel(level)
         # Register the handler on this level's logger.
         logger.addHandler(fh)
         self.__loggers.update({level: logger})
示例#2
0
def createHandlers():
    """Replace every path in the module-level ``handlers`` dict with an
    hourly ``TimedRotatingFileHandler`` (120 backups, UTF-8)."""
    for lvl in list(handlers):
        target = os.path.abspath(handlers[lvl])
        handlers[lvl] = logging.handlers.TimedRotatingFileHandler(
            target, when='H', interval=1, backupCount=120, encoding="utf8")
示例#3
0
def createHandlers():
    """
    Create the global handlers: turn every path stored in the
    module-level ``handlers`` dict into a ``logging.FileHandler``
    writing to that path (made absolute).
    :return: None
    """
    for lvl in list(handlers):
        abs_path = os.path.abspath(handlers[lvl])
        handlers[lvl] = logging.FileHandler(abs_path)
示例#4
0
    def __init__(self, level=logging.NOTSET):
        """Build a private level -> logger map from the module-level
        ``handlers`` dict, one logger per level."""
        self.__loggers = {}

        for lvl in handlers.keys():
            lvl_logger = logging.getLogger(str(lvl))
            # Loggers are cached by name; naming each after its numeric
            # level keeps one distinct logger (and handler) per level.
            lvl_logger.addHandler(handlers[lvl])
            lvl_logger.setLevel(lvl)
            self.__loggers[lvl] = lvl_logger
示例#5
0
 def __init__(self, level=logging.NOTSET):
     """Register one logger per level listed in the module-level
     ``handlers`` dict and keep them in a private map."""
     self.__loggers = {}
     for log_lvl in list(handlers.keys()):
         lg = logging.getLogger(str(log_lvl))
         # getLogger caches by name, so a per-level name yields a
         # distinct logger instance for each level.
         lg.addHandler(handlers[log_lvl])
         lg.setLevel(log_lvl)
         self.__loggers[log_lvl] = lg
示例#6
0
 def __init__(self, level=logging.NOTSET):
     """Build per-level loggers from the module-level ``handlers`` dict;
     each logger also echoes ERROR-and-above to the console."""
     self.__loggers = {}
     for lvl in handlers.keys():
         lvl_logger = logging.getLogger(str(lvl))
         # Also print to the screen; remove these three lines to disable.
         screen = logging.StreamHandler()
         screen.setLevel(logging.ERROR)
         lvl_logger.addHandler(screen)
         lvl_logger.addHandler(handlers[lvl])
         lvl_logger.setLevel(lvl)
         self.__loggers[lvl] = lvl_logger
示例#7
0
文件: log.py 项目: Izeni/ParselTONE
def get_formatter(level):
    """Return a ``logging.Formatter`` appropriate for *level*.

    Unknown levels fall back to DEBUG. DEBUG gets a verbose format
    (logger name, function, line number, indented message); every other
    level gets a compact single-line format.

    :param level: numeric logging level (a key of the global ``handlers``).
    :return: a configured ``logging.Formatter``.
    """
    # default to DEBUG if the level is invalid
    # (membership test on the dict itself; .keys() was redundant)
    if level not in handlers:
        level = logging.DEBUG
    # return the appropriate formatter
    if level == logging.DEBUG:
        return logging.Formatter(
            ' '.join(['[%(asctime)s]', '%(levelname)8s', '- (%(name)s)',
            '%(funcName)s :%(lineno)d', LINESEP, INDENT, '%(message)s']),
            DATEFORMAT)
    return logging.Formatter(
        ' '.join(['[%(asctime)s]', '%(levelname)8s', '- %(message)s']),
        DATEFORMAT)
示例#8
0
def create_logger(options):
    """Configure root logging from *options*.

    Installs: a console StreamHandler when stdout is a TTY, a rotating
    file handler when ``options.logfile`` is set, and a Raven (Sentry)
    handler when ``/etc/raven.dsn`` exists. Level is DEBUG when
    ``options.debug`` is true, otherwise INFO.
    """
    log_format = general_config.get("log_syntax", LOG_FORMAT)
    log_datefmt = general_config.get("log_timefmt", LOG_DATEFMT)
    log_level = logging.DEBUG if options.debug else logging.INFO

    handlers = {}
    # Console output only when attached to an interactive terminal.
    if sys.stdout.isatty():
        handlers['console'] = {
            'level': log_level,
            'formatter': 'default',
            'class': 'logging.StreamHandler',
        }

    if options.logfile:
        handlers['file'] = {
            'level': log_level,
            'formatter': 'default',
            'class': 'fluxmonitor.diagnosis.log_helpers.RotatingFileHandler',
            'filename': options.logfile,
            'maxBytes': 5 * (2 ** 20),  # 5 MiB per file (old comment said 10M)
            'backupCount': 9
        }

    # Optional Sentry/Raven reporting, keyed off a DSN dropped on disk.
    if os.path.exists("/etc/raven.dsn"):
        from fluxmonitor.diagnosis.log_helpers import create_raven_logger
        with open("/etc/raven.dsn", "r") as f:
            dsn = f.read()
            handlers['raven'] = create_raven_logger(dsn)

    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'default': {
                'format': log_format,
                'datefmt': log_datefmt
            }
        },
        'handlers': handlers,
        'loggers': {},
        'root': {
            # All configured handlers hang off the root logger.
            'handlers': list(handlers.keys()),
            'level': log_level,
            'propagate': True
        }
    })
示例#9
0
def consoleprintHandlers():
    """
    Independently control console output: depending on the
    ``console_print_level`` config entry, replace the matching entries of
    the global ``handlers`` dict with ``StreamHandler`` instances.
    """
    wanted = Config_value['log']['console_print_level']
    if not wanted:
        return
    if wanted == 'all':
        # Every configured level goes to the console.
        for lvl in handlers.keys():
            handlers[lvl] = logging.StreamHandler()
    elif wanted in levelasnum():
        threshold = levelasnum()[wanted]  # numeric value of that level
        # Any configured level at or above the threshold also prints.
        for num in levelasnum().values():
            if threshold <= num:
                handlers[num] = logging.StreamHandler()
示例#10
0
文件: log.py 项目: Izeni/ParselTONE
def configure_handlers(level):
    """
    Configure (and install) handlers appropriate for the level.

    Handlers registered below *level* are swapped for the null handler;
    the rest get their level and formatter set and are attached to the
    module-level ``logger``. On POSIX the syslog handler (key 1000) is
    always installed at DEBUG with its censoring format applied.
    """
    # Lazy %-args: the message is only formatted if this record is emitted.
    logging.info('Logging %s messages and higher.',
                 logging.getLevelName(level))

    for hlevel in handlers:
        # avoid duplicate handlers by removing the handler first
        logger.removeHandler(handlers[hlevel])
        if hlevel < level:
            logger.addHandler(null)
            continue
        handlers[hlevel].setLevel(hlevel)
        handlers[hlevel].setFormatter(get_formatter(hlevel))
        logger.addHandler(handlers[hlevel])

    # default to censoring the syslog handler
    if os.name == 'posix':
        syslog = handlers[1000]
        syslog.setLevel(logging.DEBUG)
        syslog.applyFormat()
        logger.addHandler(syslog)
示例#11
0
    def __init__(self):
        """Create one logger per standard level, all appending to a single
        date-stamped file under ``logs/`` via a daily rotating handler
        (10 backups kept).
        """
        self.log_root = "logs"
        self.console_print = False

        # Ensure the log directory exists. exist_ok=True replaces the old
        # broad try/except-pass, so real errors (e.g. permissions) surface
        # here instead of later inside the handler constructor.
        os.makedirs(os.path.join(os.getcwd(), self.log_root), exist_ok=True)

        # NOTE(review): every level maps to the same file, so six rotating
        # handlers share one target file -- confirm this is intended.
        handlers = {
            logging.NOTSET: self.log_root + os.sep + rq + ".log",
            logging.DEBUG: self.log_root + os.sep + rq + ".log",
            logging.INFO: self.log_root + os.sep + rq + ".log",
            logging.WARNING: self.log_root + os.sep + rq + ".log",
            logging.ERROR: self.log_root + os.sep + rq + ".log",
            logging.CRITICAL: self.log_root + os.sep + rq + ".log",
        }
        self.__loggers = {}
        fmt = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s')

        for level in handlers:
            # Create a logger named after the numeric level.
            logger = logging.getLogger(str(level))
            logger.setLevel(level)
            # Create the file handler: rotate daily ('D'), keep 10 backups.
            log_path = os.path.abspath(handlers[level])
            fh = logging.handlers.TimedRotatingFileHandler(
                log_path, 'D', 1, 10)
            # Attach the shared output format.
            fh.setFormatter(fmt)
            fh.setLevel(level)
            # Register the handler on this level's logger.
            logger.addHandler(fh)
            self.__loggers.update({level: logger})
示例#12
0
def configure_logging(
    logger_level_config: Dict[str, str] = None,
    colorize: bool = True,
    log_json: bool = False,
    log_file: str = None,
    disable_debug_logfile: bool = False,
    debug_log_file_path: str = None,
    cache_logger_on_first_use: bool = True,
    _first_party_packages: FrozenSet[str] = _FIRST_PARTY_PACKAGES,
    _debug_log_file_additional_level_filters: Dict[str, str] = None,
) -> None:
    """Wire up structlog + stdlib logging for the application.

    Installs either a console handler or a WatchedFileHandler (depending
    on ``log_file``) plus, unless disabled, a rotating JSON debug logfile.
    All renderers are wrapped with the ``redact`` processor.

    :param logger_level_config: per-logger level overrides; '' is the root.
    :param colorize: colorized console output (only when not logging to a
        file and not emitting JSON).
    :param log_json: render records as JSON on the main handler.
    :param log_file: path for a WatchedFileHandler instead of the stream.
    :param disable_debug_logfile: skip the rotating debug logfile.
    :param debug_log_file_path: explicit path for the debug logfile.
    :param cache_logger_on_first_use: passed through to structlog.
    """
    structlog.reset_defaults()

    # Default levels: quiet the noisy 'filelock' logger, give root a default.
    logger_level_config = logger_level_config or dict()
    logger_level_config.setdefault("filelock", "ERROR")
    logger_level_config.setdefault("", DEFAULT_LOG_LEVEL)

    # Shared processor chain applied to every record before rendering.
    processors = [
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        add_greenlet_name,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S.%f"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
    ]

    # Pick the formatter for the main handler; colors only make sense on a
    # terminal, i.e. when not writing to a file.
    if log_json:
        formatter = "json"
    elif colorize and not log_file:
        formatter = "colorized"
    else:
        formatter = "plain"

    redact = redactor(LOG_BLACKLIST)

    handlers: Dict[str, Any] = dict()
    if log_file:
        handlers["file"] = {
            "class": "logging.handlers.WatchedFileHandler",
            "filename": log_file,
            "level": "DEBUG",
            "formatter": formatter,
            "filters": ["user_filter"],
        }
    else:
        handlers["default"] = {
            "class": "logging.StreamHandler",
            "level": "DEBUG",
            "formatter": formatter,
            "filters": ["user_filter"],
        }

    # Separate always-DEBUG JSON logfile for post-mortem debugging.
    if not disable_debug_logfile:
        debug_logfile_path = configure_debug_logfile_path(debug_log_file_path)
        handlers["debug-info"] = {
            "class": "logging.handlers.RotatingFileHandler",
            "filename": debug_logfile_path,
            "level": "DEBUG",
            "formatter": "debug",
            "maxBytes": MAX_LOG_FILE_SIZE,
            "backupCount": LOG_BACKUP_COUNT,
            "filters": ["raiden_debug_file_filter"],
        }

    logging.config.dictConfig({
        "version": 1,
        "disable_existing_loggers": False,
        "filters": {
            "user_filter": {
                "()": RaidenFilter,
                "log_level_config": logger_level_config
            },
            "raiden_debug_file_filter": {
                "()": RaidenFilter,
                "log_level_config": {
                    "": DEFAULT_LOG_LEVEL,
                    "raiden": "DEBUG",
                    **(_debug_log_file_additional_level_filters or {}),
                },
            },
        },
        "formatters": {
            "plain": {
                "()":
                structlog.stdlib.ProcessorFormatter,
                "processor":
                _chain(structlog.dev.ConsoleRenderer(colors=False), redact),
                "foreign_pre_chain":
                processors,
            },
            "json": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": _chain(structlog.processors.JSONRenderer(),
                                    redact),
                "foreign_pre_chain": processors,
            },
            "colorized": {
                "()":
                structlog.stdlib.ProcessorFormatter,
                "processor":
                _chain(structlog.dev.ConsoleRenderer(colors=True), redact),
                "foreign_pre_chain":
                processors,
            },
            "debug": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": _chain(structlog.processors.JSONRenderer(),
                                    redact),
                "foreign_pre_chain": processors,
            },
        },
        "handlers": handlers,
        "loggers": {
            "": {
                # NOTE(review): dictConfig iterates this value, so a keys
                # view works; list(handlers.keys()) would match the style
                # used in the other dictConfig calls in this codebase.
                "handlers": handlers.keys(),
                "propagate": True
            }
        },
    })
    structlog.configure(
        processors=processors +
        [structlog.stdlib.ProcessorFormatter.wrap_for_formatter],
        wrapper_class=structlog.stdlib.BoundLogger,
        logger_factory=structlog.stdlib.LoggerFactory(),
        cache_logger_on_first_use=cache_logger_on_first_use,
    )

    # set logging level of the root logger to DEBUG, to be able to intercept
    # all messages, which are then be filtered by the `RaidenFilter`
    structlog.get_logger("").setLevel(
        logger_level_config.get("", DEFAULT_LOG_LEVEL))
    for package in _first_party_packages:
        structlog.get_logger(package).setLevel("DEBUG")

    # rollover RotatingFileHandler on startup, to split logs also per-session
    root = logging.getLogger()
    for handler in root.handlers:
        if isinstance(handler, logging.handlers.RotatingFileHandler):
            handler.flush()
            if os.stat(handler.baseFilename).st_size > 0:
                handler.doRollover()

    # fix logging of py-evm (it uses a custom Trace logger from logging library)
    # if py-evm is not used this will throw, hence the try-catch block
    # for some reason it didn't work to put this into conftest.py
    try:
        from eth.tools.logging import setup_trace_logging

        setup_trace_logging()
    except ImportError:
        pass
示例#13
0
    def configure(self):
        """Do the configuration.

        Applies ``self.config`` (a dictConfig-style dict): in incremental
        mode only levels of existing handlers/loggers are adjusted; in full
        mode formatters, filters, handlers, loggers and the root logger are
        (re)built in dependency order.
        """

        config = self.config
        version = self.convert(config.get("version", None))
        if version != 1:
            raise ValueError("Unsupported version: {}".format(config['version']))
        incremental = self.convert(config.pop('incremental', False))
        EMPTY_DICT = {}
        # Serialize against any other logging (re)configuration.
        logging._acquireLock()
        try:
            if incremental:
                # Incremental: only tweak levels of already-installed objects.
                handlers = self.convert(config.get('handlers', EMPTY_DICT))
                for name in handlers.keys():
                    if name not in logging._handlers:
                        raise ValueError('No handler found with name {}'.format(name))
                    else:
                        try:
                            handler = logging._handlers[name]
                            hconfig = self.convert(handlers[name])
                            level = self.convert(hconfig.get('level', None))
                            if level:
                                handler.setLevel(logging._checkLevel(level))
                        except Exception as e:
                            raise ValueError('Unable to configure handler '
                                                             '{}'.format(name)) from e
                loggers = self.convert(config.get('loggers', EMPTY_DICT))
                for name in loggers.keys():
                    try:
                        self.configure_logger(name, self.convert(loggers[name]), True)
                    except Exception as e:
                        raise ValueError('Unable to configure logger {}'.format(name)) from e
                root = self.convert(config.get('root', None))
                if root:
                    try:
                        self.configure_root(root, True)
                    except Exception as e:
                        raise ValueError('Unable to configure root logger') from e
            else:
                disable_existing = config.pop('disable_existing_loggers', True)

                # Full reconfiguration: drop all registered handlers first.
                logging._handlers.clear()
                logging._handlerList[:] = []

                # Do formatters first - they don't refer to anything else
                formatters = self.convert(config.get('formatters', EMPTY_DICT))
                for name in formatters.keys():
                    try:
                        fmtConfig = self.convert(formatters.get(name))
                        formatters[name] = self.configure_formatter(fmtConfig)
                    except Exception as e:
                        raise ValueError('Unable to configure formatter {}'.format(name)) from e
                # Next, do filters - they don't refer to anything else, either
                filters = self.convert(config.get('filters', EMPTY_DICT))
                for name in filters.keys():
                    try:
                        filtConfig = self.convert(filters.get(name))
                        filters[name] = self.configure_filter(filtConfig)
                    except Exception as e:
                        raise ValueError('Unable to configure filter {}'.format(name)) from e

                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = self.convert(config.get('handlers', EMPTY_DICT))
                deferred = []
                for name in sorted(handlers.keys()):
                    try:
                        handlerConfig = self.convert(handlers.get(name))
                        handler = self.configure_handler(handlerConfig)
                        handler.name = name
                        handlers[name] = handler
                    except UnresolvableError as exc:
                        raise exc
                    except Exception as e:
                        # Handlers whose 'target' handler is configured later
                        # are retried in a second pass below.
                        if 'target not configured yet' in str(e.__cause__):
                            deferred.append(name)
                        else:
                            raise ValueError(
                            'Unable to config handler {}'.format(name)
                            ) from e

                # Now do any that were deferred
                for name in deferred:
                    try:
                        handlerConfig = self.convert(handlers.get(name))
                        handler = self.configure_handler(handlerConfig)
                        handler.name = name
                        handlers[name] = handler
                    except UnresolvableError as exc:
                        raise exc
                    except Exception as e:
                        raise ValueError(
                            'Unable to configure handler {}'.format(name)
                        ) from e

                # Next, do loggers - they refer to handlers and filters

                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = list(root.manager.loggerDict.keys())
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = self.convert(config.get('loggers', EMPTY_DICT))
                for name in loggers.keys():
                    if name in existing:
                        i = existing.index(name) + 1 # look after name
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        while i < num_existing:
                            if existing[i][:pflen] == prefixed:
                                child_loggers.append(existing[i])
                            i += 1
                        existing.remove(name)
                    try:
                        loggerConfig = loggers.get(name)
                        self.configure_logger(name, loggerConfig)
                    except Exception as e:
                        raise ValueError(
                            'Unable to configure logger {}'.format(name)
                        )from e

                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                #for log in existing:
                #        logger = root.manager.loggerDict[log]
                #        if log in child_loggers:
                #                logger.level = logging.NOTSET
                #                logger.handlers = []
                #                logger.propagate = True
                #        elif disable_existing:
                #                logger.disabled = True
                _handle_existing_loggers(existing, child_loggers,
                                                                 disable_existing)

                # And finally, do the root logger
                root = self.convert(config.get('root', None))
                if root:
                    try:
                        self.configure_root(root)
                    except Exception as e:
                        raise ValueError('Unable to configure root '
                                                         'logger') from e
        finally:
            logging._releaseLock()
示例#14
0
def logging_setup(path: str = None,
                  log_level: str = 'INFO',
                  append: bool = True,
                  stdout: bool = True) -> None:
    """Configure root-logger output to a rotating file and/or stdout.

    Adds a stdout handler and/or a rotating file handler to the root
    logger; both format records as
    ``[datetime] [logger name] [log level] MESSAGE``. Uncaught exceptions
    are additionally logged in the root logger. Since loggers propagate
    records up the hierarchy by default, records from all loggers reach
    these handlers.

    :param path: Log-file path for the file handler; ``None`` omits the
        file handler from the root logger.
    :param log_level: Level of the root logger. Defaults to ``INFO``.
    :param append: Append to the log file (ignored when ``path`` is unset).
    :param stdout: Whether to add the stdout handler to the root logger.
    :returns: Nothing, only side effects.
    :raises ValueError: If both destinations are disabled.
    """

    if not (path or stdout):
        raise ValueError(
            'No place to send logs to: path is empty and stdout is false.')

    sinks = {}

    if path:
        sinks['file'] = {
            'class': 'logging.handlers.RotatingFileHandler',
            'maxBytes': 250000,
            'backupCount': 5,
            'formatter': 'simple',
            'filename': expandvars(expanduser(path)),
            'encoding': 'utf8',
            'mode': 'a' if append else 'w',
        }

    if stdout:
        sinks['stdout'] = {
            'class': 'logging.StreamHandler',
            'formatter': 'simple',
            # stderr would be used if 'stream' were left unspecified
            'stream': sys.stdout,
        }

    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'simple': {
                'format':
                "[%(asctime)s] [%(name)-15.15s] [%(levelname)-7.7s] %(message)s",
                'datefmt': "%Y-%m-%d %H:%M:%S"
            }
        },
        'handlers': sinks,
        'root': {
            'level': log_level,
            'handlers': list(sinks),
        }
    })
    _log_exceptions_in_root_logger()
示例#15
0
def configure_logging(
    logger_level_config: Dict[str, str] = None,
    colorize: bool = True,
    log_json: bool = False,
    log_file: str = None,
    disable_debug_logfile: bool = False,
    debug_log_file_name: str = None,
    cache_logger_on_first_use: bool = True,
    _first_party_packages: FrozenSet[str] = _FIRST_PARTY_PACKAGES,
    _debug_log_file_additional_level_filters: Dict[str, str] = None,
):
    """Wire up structlog + stdlib logging for the application.

    Installs either a console handler or a WatchedFileHandler (depending
    on ``log_file``) plus, unless disabled, a rotating JSON debug logfile
    named ``raiden-debug_<utc timestamp>.log`` by default. All renderers
    are wrapped with the access-token ``redact`` processor, which is also
    applied to traceback formatting.

    :param logger_level_config: per-logger level overrides; '' is the root.
    :param colorize: colorized console output (only when not logging to a
        file and not emitting JSON).
    :param log_json: render records as JSON on the main handler.
    :param log_file: path for a WatchedFileHandler instead of the stream.
    :param disable_debug_logfile: skip the rotating debug logfile.
    :param debug_log_file_name: explicit name for the debug logfile.
    :param cache_logger_on_first_use: passed through to structlog.
    """
    structlog.reset_defaults()

    # Default levels: quiet the noisy 'filelock' logger, give root a default.
    logger_level_config = logger_level_config or dict()
    logger_level_config.setdefault('filelock', 'ERROR')
    logger_level_config.setdefault('', DEFAULT_LOG_LEVEL)

    # Shared processor chain applied to every record before rendering.
    processors = [
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        add_greenlet_name,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S.%f"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
    ]

    # Pick the formatter for the main handler; colors only make sense on a
    # terminal, i.e. when not writing to a file.
    if log_json:
        formatter = 'json'
    elif colorize and not log_file:
        formatter = 'colorized'
    else:
        formatter = 'plain'

    # Scrub access tokens out of rendered output.
    redact = redactor({
        re.compile(r'\b(access_?token=)([a-z0-9_-]+)', re.I):
        r'\1<redacted>',
    })
    _wrap_tracebackexception_format(redact)

    handlers: Dict[str, Any] = dict()
    if log_file:
        handlers['file'] = {
            'class': 'logging.handlers.WatchedFileHandler',
            'filename': log_file,
            'level': 'DEBUG',
            'formatter': formatter,
            'filters': ['user_filter'],
        }
    else:
        handlers['default'] = {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': formatter,
            'filters': ['user_filter'],
        }

    # Separate always-DEBUG JSON logfile for post-mortem debugging.
    if not disable_debug_logfile:
        if debug_log_file_name is None:
            time = datetime.datetime.utcnow().isoformat()
            debug_log_file_name = f'raiden-debug_{time}.log'
        handlers['debug-info'] = {
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': debug_log_file_name,
            'level': 'DEBUG',
            'formatter': 'debug',
            'maxBytes': MAX_LOG_FILE_SIZE,
            'backupCount': LOG_BACKUP_COUNT,
            'filters': ['raiden_debug_file_filter'],
        }

    logging.config.dictConfig(
        {
            'version': 1,
            'disable_existing_loggers': False,
            'filters': {
                'user_filter': {
                    '()': RaidenFilter,
                    'log_level_config': logger_level_config,
                },
                'raiden_debug_file_filter': {
                    '()': RaidenFilter,
                    'log_level_config': {
                        '': DEFAULT_LOG_LEVEL,
                        'raiden': 'DEBUG',
                        **(_debug_log_file_additional_level_filters or {}),
                    },
                },
            },
            'formatters': {
                'plain': {
                    '()':
                    structlog.stdlib.ProcessorFormatter,
                    'processor':
                    _chain(structlog.dev.ConsoleRenderer(colors=False),
                           redact),
                    'foreign_pre_chain':
                    processors,
                },
                'json': {
                    '()':
                    structlog.stdlib.ProcessorFormatter,
                    'processor':
                    _chain(structlog.processors.JSONRenderer(), redact),
                    'foreign_pre_chain':
                    processors,
                },
                'colorized': {
                    '()':
                    structlog.stdlib.ProcessorFormatter,
                    'processor':
                    _chain(structlog.dev.ConsoleRenderer(colors=True), redact),
                    'foreign_pre_chain':
                    processors,
                },
                'debug': {
                    '()':
                    structlog.stdlib.ProcessorFormatter,
                    'processor':
                    _chain(structlog.processors.JSONRenderer(), redact),
                    'foreign_pre_chain':
                    processors,
                },
            },
            'handlers': handlers,
            'loggers': {
                '': {
                    # NOTE(review): dictConfig iterates this value, so a
                    # keys view works; list(handlers.keys()) would match
                    # the style used in other dictConfig calls.
                    'handlers': handlers.keys(),
                    'propagate': True,
                },
            },
        }, )
    structlog.configure(
        processors=processors + [
            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
        ],
        wrapper_class=structlog.stdlib.BoundLogger,
        logger_factory=structlog.stdlib.LoggerFactory(),
        cache_logger_on_first_use=cache_logger_on_first_use,
    )

    # set logging level of the root logger to DEBUG, to be able to intercept
    # all messages, which are then be filtered by the `RaidenFilter`
    structlog.get_logger('').setLevel(
        logger_level_config.get('', DEFAULT_LOG_LEVEL))
    for package in _first_party_packages:
        structlog.get_logger(package).setLevel('DEBUG')

    # rollover RotatingFileHandler on startup, to split logs also per-session
    root = logging.getLogger()
    for handler in root.handlers:
        if isinstance(handler, logging.handlers.RotatingFileHandler):
            handler.flush()
            if os.stat(handler.baseFilename).st_size > 0:
                handler.doRollover()

    # fix logging of py-evm (it uses a custom Trace logger from logging library)
    # if py-evm is not used this will throw, hence the try-catch block
    # for some reason it didn't work to put this into conftest.py
    try:
        from eth.tools.logging import setup_trace_logging
        setup_trace_logging()
    except ImportError:
        pass
示例#16
0
    def configure(self):
        """Do the configuration."""

        config = self.config
        version = self.convert(config.get("version", None))
        if version != 1:
            raise ValueError("Unsupported version: {}".format(config['version']))
        incremental = self.convert(config.pop('incremental', False))
        EMPTY_DICT = {}
        logging._acquireLock()
        try:
            if incremental:
                handlers = self.convert(config.get('handlers', EMPTY_DICT))
                for name in handlers.keys():
                    if name not in logging._handlers:
                        raise ValueError('No handler found with name {}'.format(name))
                    else:
                        try:
                            handler = logging._handlers[name]
                            hconfig = self.convert(handlers[name])
                            level = self.convert(hconfig.get('level', None))
                            if level:
                                handler.setLevel(logging._checkLevel(level))
                        except Exception as e:
                            raise ValueError('Unable to configure handler '
                                                             '{}'.format(name)) from e
                loggers = self.convert(config.get('loggers', EMPTY_DICT))
                for name in loggers.keys():
                    try:
                        self.configure_logger(name, self.convert(loggers[name]), True)
                    except Exception as e:
                        raise ValueError('Unable to configure logger {}'.format(name)) from e
                root = self.convert(config.get('root', None))
                if root:
                    try:
                        self.configure_root(root, True)
                    except Exception as e:
                        raise ValueError('Unable to configure root logger') from e
            else:
                disable_existing = config.pop('disable_existing_loggers', True)

                logging._handlers.clear()
                logging._handlerList[:] = []

                # Do formatters first - they don't refer to anything else
                formatters = self.convert(config.get('formatters', EMPTY_DICT))
                for name in formatters.keys():
                    try:
                        fmtConfig = self.convert(formatters.get(name))
                        formatters[name] = self.configure_formatter(fmtConfig)
                    except Exception as e:
                        raise ValueError('Unable to configure formatter {}'.format(name)) from e
                # Next, do filters - they don't refer to anything else, either
                filters = self.convert(config.get('filters', EMPTY_DICT))
                for name in filters.keys():
                    try:
                        filtConfig = self.convert(filters.get(name))
                        filters[name] = self.configure_filter(filtConfig)
                    except Exception as e:
                        raise ValueError('Unable to configure filter {}'.format(name)) from e

                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = self.convert(config.get('handlers', EMPTY_DICT))
                deferred = []
                for name in sorted(handlers.keys()):
                    try:
                        handlerConfig = self.convert(handlers.get(name))
                        handler = self.configure_handler(handlerConfig)
                        handler.name = name
                        handlers[name] = handler
                    except UnresolvableError as exc:
                        raise exc
                    except Exception as e:
                        if 'target not configured yet' in str(e.__cause__):
                            deferred.append(name)
                        else:
                            raise ValueError(
                            'Unable to config handler {}'.format(name)
                            ) from e

                # Now do any that were deferred
                for name in deferred:
                    try:
                        handlerConfig = self.convert(handlers.get(name))
                        handler = self.configure_handler(handlerConfig)
                        handler.name = name
                        handlers[name] = handler
                    except UnresolvableError as exc:
                        raise exc
                    except Exception as e:
                        raise ValueError(
                            'Unable to configure handler {}'.format(name)
                        ) from e

                # Next, do loggers - they refer to handlers and filters

                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = list(root.manager.loggerDict.keys())
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = self.convert(config.get('loggers', EMPTY_DICT))
                for name in loggers.keys():
                    if name in existing:
                        i = existing.index(name) + 1 # look after name
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        while i < num_existing:
                            if existing[i][:pflen] == prefixed:
                                child_loggers.append(existing[i])
                            i += 1
                        existing.remove(name)
                    try:
                        loggerConfig = loggers.get(name)
                        self.configure_logger(name, loggerConfig)
                    except Exception as e:
                        raise ValueError(
                            'Unable to configure logger {}'.format(name)
                        )from e

                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                #for log in existing:
                #        logger = root.manager.loggerDict[log]
                #        if log in child_loggers:
                #                logger.level = logging.NOTSET
                #                logger.handlers = []
                #                logger.propagate = True
                #        elif disable_existing:
                #                logger.disabled = True
                _handle_existing_loggers(existing, child_loggers,
                                                                 disable_existing)

                # And finally, do the root logger
                root = self.convert(config.get('root', None))
                if root:
                    try:
                        self.configure_root(root)
                    except Exception as e:
                        raise ValueError('Unable to configure root '
                                                         'logger') from e
        finally:
            logging._releaseLock()
示例#17
0
def setup_logging(default_level: str, log_to_file: Optional[Path]):
    """Configure application-wide logging via ``logging.config.dictConfig``.

    Installs a stdout stream handler at *default_level* (plus a timed
    rotating file handler when *log_to_file* is given) and pins fixed
    per-module log levels for the randovania/backend loggers.
    """
    import logging.config
    import logging.handlers

    handler_config = {
        'default': {
            'level': default_level,
            'formatter': 'default',
            'class': 'logging.StreamHandler',
            'stream': 'ext://sys.stdout',  # Default is stderr
        },
    }
    if log_to_file is not None:
        handler_config['local_app_data'] = {
            'level': 'DEBUG',
            'formatter': 'default',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'filename': log_to_file,
            'encoding': 'utf-8',
            'backupCount': 10,
        }

    # Modules whose loggers always run at DEBUG, regardless of default_level.
    debug_logger_names = (
        'randovania.network_client.network_client',
        'randovania.game_connection.connection_backend',
        'randovania.gui.multiworld_client',
        'NintendontBackend',
        'DolphinBackend',
    )
    # Modules that are noisier and stay at INFO.
    info_logger_names = (
        'randovania.gui.qt',
        'qasync',
    )
    logger_config = {name: {'level': 'DEBUG'} for name in debug_logger_names}
    for name in info_logger_names:
        logger_config[name] = {'level': 'INFO'}

    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'default': {
                'format':
                '[%(asctime)s] [%(levelname)s] [%(name)s] %(funcName)s: %(message)s',
            }
        },
        'handlers': handler_config,
        'loggers': logger_config,
        'root': {
            'level': default_level,
            # Every configured handler is attached to the root logger.
            'handlers': list(handler_config),
        },
    })
    logging.info("Logging initialized.")
示例#18
0
# Module-level rotating file handlers: one weekly-rotating log file per
# enabled level, stored under ./log/ and keyed by the numeric level so the
# Log class below can attach the right handler to each per-level logger.
dirname = 'log'
log_name = 'process_classify_houdu'

# exist_ok avoids the TOCTOU race of the old `if not exists: mkdir` pair.
os.makedirs(dirname, exist_ok=True)

handlers = {
    logging.INFO: os.path.join(dirname, log_name + '_info.log'),
    logging.WARNING: os.path.join(dirname, log_name + '_warning.log'),
    logging.ERROR: os.path.join(dirname, log_name + '_error.log'),
}

# Replace each path with a handler rotating weekly (Monday), keeping 12 backups.
for level in handlers:
    path = os.path.abspath(handlers[level])
    handler = logging.handlers.TimedRotatingFileHandler(
        path, when='W0', interval=1, backupCount=12, encoding='utf-8', delay=False)
    # Rotated backups are named "<base>.<suffix>"; extMatch must match the
    # rendered suffix so old backups beyond backupCount are found and purged.
    handler.suffix = "%Y-%m-%d-%H-%M-%S.log"
    # Bug fix: the '.' before "log" was previously unescaped, so extMatch
    # matched any character there; also compile the pattern in one step.
    handler.extMatch = re.compile(r"^\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}\.log$")
    handlers[level] = handler


class Log(object):
    def __init__(self, level=logging.NOTSET):
        self.__loggers = {}
        for log_level in handlers.keys():
            logger = logging.getLogger(str(log_level))
            # 是否需要打印到屏幕?注释下面3行可以取消