Example #1
def configure_logging(default_level, log_path=None, verbose_count=0, quiet_count=0, silent=False):
    logging.setLogRecordFactory(ContextLogRecord)

    formatter = logging.Formatter(
        fmt="{asctime} [{levelname}] [{log_context}] {name}:{lineno} {message}",
        style="{",
    )

    formatter_callback = logging.Formatter(
        fmt="[{levelname}] {name}:{lineno} {message}",
        style="{",
    )

    root_logger = logging.getLogger()

    if log_path is not None:
        file_log = logging.FileHandler(log_path)
        file_log.setFormatter(formatter)
        root_logger.addHandler(file_log)

    if not silent:
        console_log = logging.StreamHandler()
        console_log.setFormatter(formatter)
        root_logger.addHandler(console_log)

    ws_log = websocket_log.WebsocketLoggerHandler()
    ws_log.setFormatter(formatter_callback)
    root_logger.addHandler(ws_log)

    callback_id_log = file_callback_log.FileCallbackHandler()
    callback_id_log.setFormatter(formatter_callback)
    root_logger.addHandler(callback_id_log)

    log_level = default_level + (10 * quiet_count) - (10 * verbose_count)
    root_logger.setLevel(log_level)
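
The snippet above assumes a ContextLogRecord class whose log_context attribute feeds the {log_context} field of the main formatter. A minimal sketch of such a record, assuming the context string comes from a contextvars variable (the original project may source it differently):

import contextvars
import logging

# Hypothetical holder for the current context label.
_log_context = contextvars.ContextVar("log_context", default="-")

class ContextLogRecord(logging.LogRecord):
    """LogRecord carrying a log_context attribute for the formatter."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.log_context = _log_context.get()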
Example #2
    def _define_logger(self):

        # Use double-checked locking to avoid taking lock unnecessarily.
        if self._logger is not None:
            return self._logger

        with self._logger_lock:
            try:
                self._logger = _logging.getLogger("nemo_logger")
                # By default, silence all loggers except the logger for rank 0
                self.remove_stream_handlers()
                if get_envbool(NEMO_ENV_VARNAME_TESTING, False):
                    old_factory = _logging.getLogRecordFactory()

                    def record_factory(*args, **kwargs):
                        record = old_factory(*args, **kwargs)
                        record.rank = get_envint("RANK", 0)
                        return record

                    _logging.setLogRecordFactory(record_factory)
                    self.add_stream_handlers(formatter=DebugNeMoFormatter)
                elif get_envint("RANK", 0) == 0:
                    self.add_stream_handlers()

            finally:
                level = Logger.INFO
                if get_envbool(NEMO_ENV_VARNAME_TESTING, False):
                    level = Logger.DEBUG
                self.set_verbosity(verbosity_level=level)

        self._logger.propagate = False
Example #3
File: log.py  Project: esozh/eso_zh_ui
def init_log():
    """初始化log"""
    global G_LOG
    if G_LOG is not None:
        return

    logging.setLogRecordFactory(ColoredLogRecord)
    logger = logging.getLogger(sys.argv[0])
    logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    if hasattr(ctypes, 'windll') and hasattr(ctypes.windll, 'kernel32'):
        ctypes.windll.kernel32.SetConsoleMode(ctypes.windll.kernel32.GetStdHandle(-11), 7)
    color_formatter = logging.Formatter('%(asctime)s - %(name)s - %(colorlevelname)s - %(message)s')

    # to file
    log_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../logs')
    log_path = str(os.path.join(log_dir, 'esozh.log'))
    fh = logging.handlers.RotatingFileHandler(log_path, maxBytes=10 * 1024 * 1024, backupCount=10)
    fh.setLevel(logging.DEBUG)
    fh.setFormatter(formatter)
    logger.addHandler(fh)

    # to screen
    ch = logging.StreamHandler()
    ch.setLevel(logging.INFO)
    ch.setFormatter(color_formatter)
    logger.addHandler(ch)

    G_LOG = logger
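
ColoredLogRecord has to supply the colorlevelname attribute used by color_formatter (the SetConsoleMode call merely enables ANSI escape processing in the Windows console). A minimal sketch, with the color palette as an assumption:

import logging

# Hypothetical ANSI color codes per level; the original palette is unknown.
_LEVEL_COLORS = {"DEBUG": 36, "INFO": 32, "WARNING": 33, "ERROR": 31, "CRITICAL": 35}

class ColoredLogRecord(logging.LogRecord):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        color = _LEVEL_COLORS.get(self.levelname, 37)
        self.colorlevelname = "\033[%dm%s\033[0m" % (color, self.levelname)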
Example #4
def print_logger(lgs=0):
    l = logging.getLogger()
    prefix = [""]
    l.setPrefix = lambda x: prefix.remove(prefix[0]) or prefix.append(x)

    class MyRecord(logging.LogRecord):
        def __init__(self,
                     name,
                     level,
                     pathname,
                     lineno,
                     msg,
                     args,
                     exc_info,
                     func=None,
                     sinfo=None,
                     **kwargs):
            nmsg = f'{prefix[0]} {" ".join(map(str, args))}'
            logging.LogRecord.__init__(self, name, 90, pathname, lineno, nmsg,
                                       None, exc_info, func, sinfo, **kwargs)

    def setPartials(obj, name=0):
        for k in "debug,info,warning,error,fatal,critical".split(","):
            setattr(obj, k, partial(getattr(l, k), 0))
        name = name or getattr(obj, "logname")
        obj.logger = l
        obj.logname = name
        return l

    l.ok = lambda s: 1
    l.thisClassLogger = l.classFilter = setPartials
    logging.setLogRecordFactory(MyRecord)
    l.setLevel("DEBUG")
    return l
Example #5
def setup_module_logging(name, levels=(logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)):
    """
    Do all the necessary setup for a module.
    Dereferences the record factory, and returns four logging functions.

    Parameters
    ----------
    name : str

    Returns
    -------
    error : func
    warning : func
    info : func
    debug : func

    """
    original_record_factory = logging.getLogRecordFactory()

    def dereferenced_log_record_factory(*args, **kwargs):
        record = original_record_factory(*args, **kwargs)
        record.lineno = get_lineno_from_deeper_in_stack(1, record.lineno, 'log_rstrip')
        return record

    logging.setLogRecordFactory(dereferenced_log_record_factory)

    module_logger = log_rstrip(logging.getLogger(name))
    return tuple(module_logger(level) for level in levels)
Example #6
def init_logger(sd_loglevel=logging.WARN, stream_loglevel=logging.CRITICAL):
    logging.setLogRecordFactory(LogRecordWithHexThereadID)
    logger = logging.getLogger('deepfx')
    logger.setLevel(sd_loglevel)
    formatter = logging.Formatter('[%(hex_threadid)s] %(message)s')

    if sd_loglevel:
        import google
        from google.cloud.logging import Client
        from google.cloud.logging.handlers import CloudLoggingHandler
        client = google.cloud.logging.Client.from_service_account_json(
            os.environ.get('GOOGLE_SERVICE_ACCOUNT_JSON_PATH'))
        handler = CloudLoggingHandler(client, name='deepfx')
        handler.setLevel(sd_loglevel)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        handler = None

    if stream_loglevel:
        handler = StreamHandler()
        handler.setLevel(stream_loglevel)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
        handler = None

    return logger
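
LogRecordWithHexThereadID must define the hex_threadid attribute consumed by the '[%(hex_threadid)s] %(message)s' format. A plausible sketch (the exact derivation is an assumption):

import logging
import threading

class LogRecordWithHexThereadID(logging.LogRecord):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Render the current thread id as hex for compact correlation.
        self.hex_threadid = format(threading.get_ident(), "x")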
Example #7
def setup_logging(config):
    level = logging.DEBUG if config['DEBUG'] else logging.INFO
    # Our logging routines signal the start and end of the routes,
    # so the Werkzeug defaults aren't required. Keep warnings and above.
    logging.getLogger('werkzeug').setLevel(logging.WARN)
    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
        logging.WARN)

    global app_name
    app_name = config['APPLICATION_NAME']

    root_logger = logging.getLogger()
    logging.setLogRecordFactory(record_factory)
    formatter = logging.Formatter(
        '%(asctime)s.%(msecs)03d %(levelname)s [%(appname)s]'
        ' %(message)s', "%Y-%m-%d %H:%M:%S")

    out_handler = logging.StreamHandler(sys.stdout)
    out_handler.addFilter(OutputFilter(False))
    out_handler.setFormatter(formatter)
    root_logger.addHandler(out_handler)

    err_handler = logging.StreamHandler(sys.stderr)
    err_handler.addFilter(OutputFilter(True))
    err_handler.setFormatter(formatter)
    root_logger.addHandler(err_handler)
    root_logger.setLevel(level)
Example #8
def setupLogging(fpath):
    global logCurrentFile
    logCurrentFile = ""

    formatter = logging.Formatter(
        '%(asctime)s\t[%(levelname)s]:\t%(logCurrentFile)s%(message)s')

    ch = logging.StreamHandler(sys.stdout)
    ch.setFormatter(formatter)
    ch.setLevel(consoleLogLevel)

    logFile = os.path.join(
        os.path.dirname(os.path.abspath(fpath)),
        "csv_avg_" + datetime.now().strftime("%Y-%m-%d_%H%M%S") + ".log")
    fh = logging.FileHandler(logFile, mode='w')
    fh.setFormatter(formatter)
    fh.setLevel(logging.DEBUG)

    # basicConfig's format= expects a format string, not a Formatter object;
    # both handlers above already carry their formatter, so omit it here.
    logging.basicConfig(level=logging.DEBUG,
                        handlers=[fh, ch])
    old_factory = logging.getLogRecordFactory()

    def record_factory(*args, **kwargs):
        global logCurrentFile
        record = old_factory(*args, **kwargs)
        record.logCurrentFile = logCurrentFile
        return record

    logging.setLogRecordFactory(record_factory)
Example #9
def prepareLogger(level: str, mergeArgs: bool = False) -> None:
    """Call this before first usage of logging or getLogger().

    :param level Log level as str as of all, info, debug, warning, error or critical
    :param mergeArgs: If True we're merging args into resulting message resulting in
    possible duplicated output or get the 'raw' message output if False.
    """
    if loggers:
        return

    if level == "all":
        level = logging.NOTSET
    elif level == "info":
        level = logging.INFO
    elif level == "debug":
        level = logging.DEBUG
    elif level == "warning":
        level = logging.WARNING
    elif level == "error":
        level = logging.ERROR
    elif level == "critical":
        level = logging.CRITICAL
    else:
        level = logging.DEBUG

    logging.setLogRecordFactory(partial(FlareLogRecord, mergeArgs=mergeArgs))
    logger = logging.getLogger()
    logger.setLevel(level)
    ch = JSConsoleHandler()
    ch.setLevel(level)
    formatter = logging.Formatter(
        "%(asctime)s - %(levelname)s - %(pathname)s:%(lineno)d - %(message)s")
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    loggers.append(logger)
Example #10
def main(*args, task=None):
    # Load up preferences
    opts = parse_args(args)

    # Config file is always required. The logging file may fall back
    # to the default yaml file below if unspecified.
    main_config = yaml.safe_load(open(opts.config).read())
    if opts.logging is None:
        logging_yaml = logging_yaml_default
    else:
        logging_yaml = open(opts.logging).read()
    logging_config = yaml.safe_load(logging_yaml)

    # Apply any command-line overrides
    if opts.port is not None:
        main_config["start"]["port"] = opts.port
    if opts.addr is not None:
        main_config["start"]["host"] = opts.addr
    if opts.debug:
        logging_config["handlers"]["console"]["level"] = "DEBUG"

    # Go
    logging.config.dictConfig(logging_config)
    logging.setLogRecordFactory(logger.FormattedLogRecord)
    start(main_config, task)
    log.info("Closing down gracefully")
Example #11
def setup_logging(config):
    level = logging.DEBUG if config['DEBUG'] else logging.INFO
    # Our logging routines signal the start and end of the routes,
    # so the Werkzeug defaults aren't required. Keep warnings and above.
    logging.getLogger('werkzeug').setLevel(logging.WARN)

    global app_name
    app_name = config['APPLICATION_NAME']

    root_logger = logging.getLogger()
    logging.setLogRecordFactory(record_factory)
    formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s [%(appname)s]'
                                  ' (PID %(process)d) Message: %(message)s',
                                  "%Y-%m-%d %H:%M:%S")

    out_handler = logging.StreamHandler(sys.stdout)
    out_handler.addFilter(OutputFilter(False))
    out_handler.setFormatter(formatter)
    root_logger.addHandler(out_handler)

    err_handler = logging.StreamHandler(sys.stderr)
    err_handler.addFilter(OutputFilter(True))
    err_handler.setFormatter(formatter)
    root_logger.addHandler(err_handler)
    root_logger.setLevel(level)
Example #12
def _create_logger(name: str) -> logging.Logger:
    """
    Creates a logger with a `StreamHandler` that has level and formatting
    set from `prefect.config`.

    Args:
        - name (str): Name to use for logger.

    Returns:
        - logging.Logger: a configured logging object
    """
    logging.setLogRecordFactory(_log_record_context_injector)

    logger = logging.getLogger(name)
    handler = logging.StreamHandler(sys.stdout)
    formatter = logging.Formatter(context.config.logging.format)
    formatter.converter = time.gmtime  # type: ignore
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(context.config.logging.level)

    cloud_handler = CloudHandler()
    cloud_handler.setLevel("DEBUG")
    logger.addHandler(cloud_handler)
    return logger
Example #13
def set_log_record_factory():
    """
    Custom attribute additions to logging are handled in this function by
    overriding the default log record factory.
    Return:
        None
    """
    logging.setLogRecordFactory(record_factory)
Example #14
    def config(self) -> NoReturn:
        logging.basicConfig(
            level=logging.DEBUG,
            format=text_log_format,
        )
        logging.setLogRecordFactory(self.__record_factory)

        logging.debug(msg="LogFormatter loaded")
Example #15
    def setup_logger(self):
        self.log = logging.getLogger(self.app)
        self.log.setLevel(logging.DEBUG)

        # create file handler which logs even debug messages
        log_file = os.path.join('/tmp', self.app + '.log')
        in_dev_debug_file_handler = logging.FileHandler(
            os.path.join('/tmp', '{}.development.log'.format(self.app))
        )
        in_dev_debug_file_handler.setLevel(logging.DEBUG)

        readable_debug_file_handler = logging.FileHandler(
            os.path.join('/tmp', '{}.debug.log'.format(self.app))
        )
        readable_debug_file_handler.setLevel(logging.DEBUG)

        # create console handler with a higher log level
        command_line_logging = logging.StreamHandler()

        if self.arguments.verbose:
            command_line_logging.setLevel(logging.DEBUG)

            # add relpathname log format attribute so as to only show the file
            #  in which a log was initiated, relative to the project path
            #  e.g. pathname = /full/path/to/project/package/module.py
            #       relpathname = package/module.py
            default_record_factory = logging.getLogRecordFactory()
            project_path = os.path.dirname(os.path.abspath(sys.argv[0])) + \
                           os.sep
            def relpathname_record_factory(*args, **kwargs):
                record = default_record_factory(*args, **kwargs)
                record.relpathname = record.pathname.replace(project_path, '')
                return record
            logging.setLogRecordFactory(relpathname_record_factory)

            # add colors to the logs!
            colored_files_funcs_linenos_formatter = colorlog.ColoredFormatter(
                fmt=(
                    "%(asctime)s - %(log_color)s%(levelname)-8s%(reset)s"
                    " [ %(relpathname)s::%(funcName)s():%(lineno)s ] "
                    "%(message)s"
                ),
                datefmt='%Y-%m-%d %H:%M:%S',
                reset=True,
            )
            in_dev_debug_file_handler.setFormatter(
                colored_files_funcs_linenos_formatter)
            command_line_logging.setFormatter(
                colored_files_funcs_linenos_formatter)

        else:
            command_line_logging.setLevel(logging.INFO)

        # add the handlers to the logger
        self.log.addHandler(in_dev_debug_file_handler)
        self.log.addHandler(command_line_logging)
        self.log.addHandler(readable_debug_file_handler)
Example #16
def add_tag_factory(tag, callback):
    old_factory = logging.getLogRecordFactory()

    def new_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.__dict__[tag] = callback()
        return record

    logging.setLogRecordFactory(new_factory)
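
A usage sketch for add_tag_factory: register a callback for a tag, then reference the tag in a format string (the hostname tag here is illustrative):

import logging
import socket

add_tag_factory("hostname", socket.gethostname)
logging.basicConfig(format="%(asctime)s %(hostname)s %(levelname)s %(message)s")
logging.getLogger(__name__).info("tagged record")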
Example #17
File: log.py  Project: Kozea/Radicale
def setup():
    """Set global logging up."""
    global register_stream
    handler = ThreadStreamsHandler(sys.stderr, get_default_handler())
    logging.basicConfig(format=LOGGER_FORMAT, handlers=[handler])
    register_stream = handler.register_stream
    log_record_factory = IdentLogRecordFactory(logging.getLogRecordFactory())
    logging.setLogRecordFactory(log_record_factory)
    set_level(logging.DEBUG)
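
IdentLogRecordFactory wraps the previous factory rather than subclassing LogRecord, so stacking stays composable. A minimal sketch of the wrapper shape (the ident value shown is an assumption; Radicale derives it from thread/connection state):

import threading

class IdentLogRecordFactory:
    """Wrap an existing record factory and attach an ident attribute."""

    def __init__(self, upstream_factory):
        self._upstream = upstream_factory

    def __call__(self, *args, **kwargs):
        record = self._upstream(*args, **kwargs)
        # Assumption: the ident comes from thread/connection state.
        record.ident = threading.current_thread().name
        return record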
Example #18
def patch_logging():
    old_factory = logging.getLogRecordFactory()

    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.threadName = get_task_name_with_thread()
        return record

    logging.setLogRecordFactory(record_factory)
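
get_task_name_with_thread presumably combines the thread name with the current asyncio task, so records can distinguish tasks sharing one event-loop thread. A sketch under that assumption:

import asyncio
import threading

def get_task_name_with_thread():
    thread_name = threading.current_thread().name
    try:
        task = asyncio.current_task()
    except RuntimeError:  # no running event loop in this thread
        task = None
    if task is not None:
        return "%s/%s" % (thread_name, task.get_name())
    return thread_name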
Example #19
    def test_date_format(self):
        """Dates are outout in the correct default format"""
        logging.setLogRecordFactory(self.fixed_date_record_factory)

        logger, stream = self.create_logger(['asctime'])
        logger.info("message")
        self.assertOutputIs(stream, [[
            ('asctime', '"25-02-2019 14:15:00.430000"'),
        ]])
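
The fixed_date_record_factory fixture pins record.created so that asctime is deterministic; a sketch consistent with the asserted output (the real fixture is not shown):

import datetime
import logging

def fixed_date_record_factory(*args, **kwargs):
    record = logging.LogRecord(*args, **kwargs)
    fixed = datetime.datetime(2019, 2, 25, 14, 15, 0, 430000)
    record.created = fixed.timestamp()
    record.msecs = fixed.microsecond / 1000.0
    return record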
Example #20
def setup():
    """Set global logging up."""
    global register_stream, unregister_stream
    handler = ThreadStreamsHandler(sys.stderr, get_default_handler())
    logging.basicConfig(format=LOGGER_FORMAT, handlers=[handler])
    register_stream = handler.register_stream
    log_record_factory = IdentLogRecordFactory(logging.getLogRecordFactory())
    logging.setLogRecordFactory(log_record_factory)
    set_level(logging.DEBUG)
Example #21
def patch_log_record_factory():
    default_record_factory = logging.getLogRecordFactory()

    def patch_time_factory(*args, **kwargs):
        record = default_record_factory(*args, **kwargs)
        record.created = TimeWrapper.time
        return record

    logging.setLogRecordFactory(patch_time_factory)  # pass the function itself, not its result
Example #22
    def setup_logger(self):
        self.log = logging.getLogger(self.app)
        self.log.setLevel(logging.DEBUG)

        # create file handler which logs even debug messages
        log_file = os.path.join('/tmp', self.app + '.log')
        in_dev_debug_file_handler = logging.FileHandler(
            os.path.join('/tmp', '{}.development.log'.format(self.app))
        )
        in_dev_debug_file_handler.setLevel(logging.DEBUG)

        readable_debug_file_handler = logging.FileHandler(
            os.path.join('/tmp', '{}.debug.log'.format(self.app))
        )
        readable_debug_file_handler.setLevel(logging.DEBUG)

        # create console handler with a higher log level
        command_line_logging = logging.StreamHandler()

        if self.cli_arguments.verbose:
            command_line_logging.setLevel(logging.DEBUG)

            # add relpathname log format attribute so as to only show the file
            #  in which a log was initiated, relative to the project path
            #  e.g. pathname = /full/path/to/project/package/module.py
            #       relpathname = package/module.py
            default_record_factory = logging.getLogRecordFactory()
            project_path = os.path.dirname(os.path.abspath(sys.argv[0])) + \
                           os.sep
            def relpathname_record_factory(*args, **kwargs):
                record = default_record_factory(*args, **kwargs)
                record.relpathname = record.pathname.replace(project_path, '')
                return record
            logging.setLogRecordFactory(relpathname_record_factory)

            # add colors to the logs!
            colored_files_funcs_linenos_formatter = colorlog.ColoredFormatter(
                fmt=(
                    "%(asctime)s - %(log_color)s%(levelname)-8s%(reset)s"
                    " [ %(relpathname)s::%(funcName)s():%(lineno)s ] "
                    "%(message)s"
                ),
                datefmt='%Y-%m-%d %H:%M:%S',
                reset=True,
            )
            in_dev_debug_file_handler.setFormatter(
                colored_files_funcs_linenos_formatter)
            command_line_logging.setFormatter(
                colored_files_funcs_linenos_formatter)

        else:
            command_line_logging.setLevel(logging.INFO)

        # add the handlers to the logger
        self.log.addHandler(in_dev_debug_file_handler)
        self.log.addHandler(command_line_logging)
        self.log.addHandler(readable_debug_file_handler)
Example #23
def add_hostname_to_log_records():
    old_factory = logging.getLogRecordFactory()

    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.hostname = gethostname()
        return record

    logging.setLogRecordFactory(record_factory)
Example #24
    def test_custom_date_format(self):
        """Date formats can customized"""
        logging.setLogRecordFactory(self.fixed_date_record_factory)

        logger, stream = self.create_logger(['asctime'], '%d')
        logger.info("message")
        self.assertOutputIs(stream, [[
            ('asctime', '"25"'),
        ]])
Example #25
def register():
    from mixer import bl_panels
    from mixer import bl_operators
    from mixer import bl_properties, bl_preferences
    from mixer import blender_data
    from mixer.blender_data import debug_addon
    from mixer.log_utils import Formatter, get_log_file
    from mixer import icons
    from mixer import ui
    from mixer.utils import utils_ui_operators
    from mixer import vrtist

    print("\n ------ UAS: Loading Mixer Add-on ------- ")

    if len(logger.handlers) == 0:
        # Add the pid to the log. Just enough for the tests, which merge the logs
        # and need to distinguish two Blender instances on the same machine. PIDs
        # might collide during regular networked operation.
        old_factory = logging.getLogRecordFactory()
        pid = str(os.getpid())

        def record_factory(*args, **kwargs):
            record = old_factory(*args, **kwargs)
            record.custom_attribute = pid
            return record

        logging.setLogRecordFactory(record_factory)

        logger.setLevel(logging.WARNING)
        formatter = Formatter(
            "{asctime} {custom_attribute:<6} {levelname[0]} {name:<36}  - {message:<80}",
            style="{")
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)

        handler = logging.FileHandler(get_log_file())
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    if not faulthandler.is_enabled():
        faulthandler.enable()
        global _disable_fault_handler
        _disable_fault_handler = True

    debug_addon.register()

    icons.register()
    bl_preferences.register()
    bl_properties.register()
    bl_panels.register()
    bl_operators.register()
    utils_ui_operators.register()
    ui.register()
    blender_data.register()
    vrtist.register()

    atexit.register(cleanup)
Example #26
 def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
     """Close the listener and restore original logging config"""
     for handler in self.logger.handlers:
         handler.removeFilter(self.context_filter)
     self.logger.removeFilter(self.context_filter)
     for handler in self.handlers:
         self.logger.removeHandler(handler)
     self.logger.setLevel(self.old_root_log_level)
     logging.setLogRecordFactory(self.old_factory)
Example #27
    def init_log_uniq_id(self, uniq_id):
        old_factory = logging.getLogRecordFactory()

        def record_factory(*args, **kwargs):
            record = old_factory(*args, **kwargs)
            record.uniq_id = uniq_id
            return record

        logging.setLogRecordFactory(record_factory)
Example #28
    def init(self):
        if self.initialized:
            return self
        self.initialized = True

        old_factory = getLogRecordFactory()

        def record_factory(*args, **kwargs):
            record = old_factory(*args, **kwargs)
            if record.pathname.startswith(topdir):
                record.pathname = record.pathname[len(topdir) + 1:]
            if len(record.pathname) > 32:
                record.pathname = record.pathname[-32:]
            record.codelocation = "%s:%d" % (record.pathname, record.lineno)
            return record

        setLogRecordFactory(record_factory)

        if app.debug:
            formatter = Formatter(
                "[%(asctime)s] %(codelocation)-32s %(levelname)s - %(message)s"
            )
            for handler in app.logger.handlers:
                handler.setFormatter(formatter)
            app.logger.setLevel(DEBUG)
            app.logger.info('DEBUG mode')
        else:
            app.logger.setLevel(INFO)
            app.logger.info('PRODUCTION mode')

        if not app.debug:
            import logging
            from config import logdir
            from logging.handlers import RotatingFileHandler
            file_handler = RotatingFileHandler(
                os.path.join(logdir, 'server.log'), 'a', 1 * 1024 * 1024, 10)
            file_handler.setLevel(logging.INFO)
            file_handler.setFormatter(
                logging.Formatter(
                    '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'
                ))
            app.logger.addHandler(file_handler)
            app.logger.setLevel(logging.INFO)
            app.logger.info('startup')

        from app import sessions

        sessions.register(self)

        from app import views
        from app import account, runner, workspace, code_template

        from .api.v1_0 import api as api_blueprint
        app.register_blueprint(api_blueprint, url_prefix='/api/v1.0')

        return self
Example #29
    def addQueueLogHandler(tracer: logging.Logger, ctx) -> None:
        # Provide access to custom (payload-specific) fields
        oldFactory = logging.getLogRecordFactory()

        def recordFactory(name,
                          level,
                          pathname,
                          lineno,
                          msg,
                          args,
                          exc_info,
                          func=None,
                          sinfo=None,
                          **kwargs):
            record = oldFactory(name,
                                level,
                                pathname,
                                lineno,
                                msg,
                                args,
                                exc_info,
                                func=func,
                                sinfo=sinfo,
                                **kwargs)  # forward extras; "kwargs=kwargs" was a bug
            record.sapmonid = ctx.sapmonId
            record.payloadversion = PAYLOAD_VERSION
            return record

        tracer.info("adding storage queue log handler")
        try:
            queueName = STORAGE_QUEUE_NAMING_CONVENTION % ctx.sapmonId
            storageAccount = AzureStorageAccount(
                tracer, ctx.sapmonId, ctx.msiClientId,
                ctx.vmInstance["subscriptionId"],
                ctx.vmInstance["resourceGroupName"])
            storageKey = tracing.getAccessKeys(tracer, ctx)
            queueStorageLogHandler = QueueStorageHandler(
                account_name=storageAccount.accountName,
                account_key=storageKey,
                protocol="https",
                queue=queueName)
            queueStorageLogHandler.level = DEFAULT_QUEUE_TRACE_LEVEL
            jsonFormatter = JsonFormatter(
                tracing.config["formatters"]["json"]["fieldMapping"])
            queueStorageLogHandler.setFormatter(jsonFormatter)
            logging.setLogRecordFactory(recordFactory)

        except Exception as e:
            tracer.error(
                "could not add handler for the storage queue logging (%s) " %
                e)
            return

        queueStorageLogHandler.level = DEFAULT_QUEUE_TRACE_LEVEL
        tracer.addHandler(queueStorageLogHandler)
        return
Example #30
def setMyLogRecord(myFuncName='nameNotPassed', lineNO=0):
    if fDEBUG:
        print('setting funcName to:', myFuncName)

    old_factory = logging.getLogRecordFactory()
    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.LnFuncName = myFuncName
        # record.LnLineNO   = lineNO   # can't do this, otherwise it always stays the same
        return record
    logging.setLogRecordFactory(record_factory)
Example #31
File: __init__.py  Project: mdevaev/powny
def init(name, description, args=None, raw_config=None):
    global _config
    assert _config is None, "init() has already been called"

    args_parser = argparse.ArgumentParser(prog=name, description=description)
    args_parser.add_argument("-v", "--version", action="version", version=tools.get_version())
    args_parser.add_argument("-c", "--config", dest="config_file_path", default=None, metavar="<file>")
    args_parser.add_argument("-l", "--level", dest="log_level", default=None)
    args_parser.add_argument("-m", "--dump-config", dest="dump_config", action="store_true")
    args_parser.add_argument("-o", "--set-options", dest="set_options", default=[], nargs="+")
    options = args_parser.parse_args(args)

    # Load configs
    raw_config = (raw_config or {})
    if options.config_file_path is not None:
        raw_config = load_yaml_file(options.config_file_path)
    _merge_dicts(raw_config, build_raw_from_options(options.set_options))
    scheme = _get_config_scheme()
    config = make_config(raw_config, scheme)

    # Configure logging
    patch_logging()
    patch_threading()
    logging.setLogRecordFactory(_ClusterLogRecord)
    logging.captureWarnings(True)
    logging_config = raw_config.get("logging")
    if logging_config is None:
        logging_config = yaml.load(pkgutil.get_data(__name__, "configs/logging.yaml"), Loader=yaml.SafeLoader)
    if options.log_level is not None:
        logging_config.setdefault("root", {})
        logging_config["root"]["level"] = _valid_log_level(options.log_level)
    logging.config.dictConfig(logging_config)

    # Update scheme for backend opts
    backend_scheme = backends.get_backend_class(config.core.backend).get_options()
    _merge_dicts(scheme, {"backend": backend_scheme})
    config = make_config(raw_config, scheme)

    # Provide global configuration
    _config = make_config(raw_config, scheme)

    # Print config dump and exit
    if options.dump_config:
        dump = make_config_dump(config)
        if sys.stdout.isatty():
            dump = pygments.highlight(
                dump,
                pygments.lexers.data.YamlLexer(),
                pygments.formatters.TerminalFormatter(bg="dark"),
            )
        print(dump)
        sys.exit(0)

    return _config
Example #32
 def start(self):
     '''Called when the engine starts'''
     # create a mqtt client
     self.client = mqtt.Client()
     self.client.on_connect = self.on_connect
     self.client.connect("127.0.0.1", 1883, 60)
     logging.setLogRecordFactory(self.record_factory)
     applyai.log('$Info - Vision System starting ...' + self.logname,
                 self.logname)
     #self.client.loop_forever(timeout=1.0, max_packets=1, retry_first_connection=False)
     self.client.loop_start()
Example #33
def init_logging(logger_name, log_filename):
    # logger setup + rotating file handler
    log = logging.getLogger(logger_name)
    log.setLevel(logging.DEBUG)
    log_handler = logging.handlers.RotatingFileHandler(log_filename,
                                                       maxBytes=MAX_LOG_SIZE,
                                                       backupCount=5)
    log.addHandler(log_handler)

    # adapt logging-record-factory for a more condensed log
    module_mapping = {
        "command_runner": "cmd_run",
        "status_board": "board",
        "raw_backup_restore": "rbackup",
        "proxy_tunnel": "ptun",
        "certificates": "certs",
        "partitions": "parts",
        "system_files": "sysfiles",
    }

    level_mapping = {
        "CRITICAL": "[!]",
        "ERROR": "[E]",
        "WARNING": "[W]",
        "INFO": "[i]",
        "DEBUG": "[D]"
    }

    record_factory = logging.getLogRecordFactory()

    def my_record_factory(*va, **kw):
        rec = record_factory(*va, **kw)
        rec.origin = module_mapping.get(rec.module, rec.module)
        rec.symlvl = level_mapping.get(rec.levelname, rec.levelname)

        return rec

    # apply wrapped record factory
    logging.setLogRecordFactory(my_record_factory)

    # apply repeating messages filter
    log.addFilter(RepeatingFilter())

    # logging format
    log_format = logging.Formatter("{asctime} {symlvl} {origin:<9} {message}",
                                   style='{')
    log_handler.setFormatter(log_format)

    # welcome banner (into log)
    log.info("=" * 60)
    log.info("====> starting nextbox-daemon")

    return log
Example #34
def logging_record_add_host():
    old_factory = logging.getLogRecordFactory()

    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        if not hasattr(record, 'host'):
            record.host = get_hostname()
        if not hasattr(record, 'ip'):
            record.ip = get_ip_address()
        return record

    logging.setLogRecordFactory(record_factory)
Example #35
File: __init__.py  Project: nikicat/powny
def init(name, description, args=None, raw_config=None):
    global _config
    assert _config is None, "init() has already been called"

    args_parser = argparse.ArgumentParser(prog=name, description=description)
    args_parser.add_argument("-v", "--version", action="version", version=tools.get_version())
    args_parser.add_argument("-c", "--config", dest="config_file_path", default=None, metavar="<file>")
    args_parser.add_argument("-l", "--level", dest="log_level", default=None)
    args_parser.add_argument("-m", "--dump-config", dest="dump_config", action="store_true")
    options = args_parser.parse_args(args)

    # Load configs
    raw_config = (raw_config or {})
    if options.config_file_path is not None:
        raw_config = load_yaml_file(options.config_file_path)
    scheme = _get_config_scheme()
    config = optconf.make_config(raw_config, scheme)

    # Configure logging
    contextlog.patch_logging()
    contextlog.patch_threading()
    logging.setLogRecordFactory(_ClusterLogRecord)
    logging.captureWarnings(True)
    logging_config = raw_config.get("logging")
    if logging_config is None:
        logging_config = yaml.load(pkgutil.get_data(__name__, "configs/logging.yaml"), Loader=yaml.SafeLoader)
    if options.log_level is not None:
        logging_config.setdefault("root", {})
        logging_config["root"]["level"] = _valid_log_level(options.log_level)
    logging.config.dictConfig(logging_config)

    # Update scheme for backend opts
    backend_scheme = backends.get_backend_class(config.core.backend).get_options()
    typetools.merge_dicts(scheme, {"backend": backend_scheme})
    config = optconf.make_config(raw_config, scheme)

    # Update scheme for selected helpers/modules
    for helper_name in config.helpers.configure:
        helper = importlib.import_module(helper_name)
        get_options = getattr(helper, "get_options", None)
        if get_options is None:
            raise RuntimeError("Helper '{}' requires no configuration".format(helper_name))
        typetools.merge_dicts(scheme, {"helpers": get_options()})

    # Provide global configuration for helpers
    _config = optconf.make_config(raw_config, scheme)

    # Print config dump and exit
    if options.dump_config:
        print(make_config_dump(_config, split_by=((), ("helpers",))))
        sys.exit(0)

    return _config
Example #36
def setup_log_record_customization():
    """
    Some black magic to add fields to the log records.
    See https://docs.python.org/3/howto/logging-cookbook.html#customizing-logrecord.
    """
    def record_factory(*args, **kwargs):
        log_record = previous_log_record_factory(*args, **kwargs)
        add_correlation_fields(log_record)
        return log_record

    previous_log_record_factory = logging.getLogRecordFactory()
    logging.setLogRecordFactory(record_factory)
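
add_correlation_fields is assumed to copy request/trace identifiers onto each record; a sketch under that assumption:

import contextvars

# Hypothetical storage for the current request's correlation id.
_correlation_id = contextvars.ContextVar("correlation_id", default=None)

def add_correlation_fields(log_record):
    log_record.correlation_id = _correlation_id.get()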
Example #37
def set_func_metadata(func):
    old_factory = logging.getLogRecordFactory()

    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        if record.funcName == 'wrapper':
            record.funcName = func.__name__
        if record.module == 'decorator':
            record.module = str(func.__module__).split('.')[-1]
        return record

    logging.setLogRecordFactory(record_factory)
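
set_func_metadata only pays off when logging happens inside a wrapper function literally named 'wrapper' defined in a module named decorator; a hypothetical decorator that matches those conditions:

import functools
import logging

def logged(func):
    set_func_metadata(func)  # rewrite funcName/module on matching records

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # funcName on this record would read 'wrapper' without the factory patch.
        logging.getLogger(func.__module__).info("calling %s", func.__name__)
        return func(*args, **kwargs)

    return wrapper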
Example #38
def init(app,
         use_queue=True,
         level=logging.root.level,
         request_logger_level=middleware_logger.level):
    access_token = app.config.get('SYNCHROLOG_ACCESS_TOKEN', None)
    assert bool(
        access_token), 'SYNCHROLOG_ACCESS_TOKEN app config can not be empty'

    handler = _RequestHandler(access_token)
    handler.setLevel(level)

    logger = logging.root
    logger.setLevel(level)
    if use_queue:
        queue_handler = QueueHandler(queue)
        queue_handler.setLevel(level)
        logger.addHandler(queue_handler)  # reuse the handler configured above
        listener = QueueListener(queue, handler)
        listener.start()
    else:
        logger.addHandler(handler)

    logging.setLogRecordFactory(_build_make_record_function())
    middleware_logger.setLevel(request_logger_level)

    @app.route('/synchrolog-time')
    def synchrolog_time():
        return jsonify({'time': datetime.now().isoformat()}), 200

    @app.errorhandler(HTTPException)
    def http_exception_handler(exception):
        logger.error(
            msg='HTTP exception during web request',
            exc_info=exception,
        )
        return exception

    @app.before_request
    def before_request():
        environ = request.environ.copy()
        anonymous_id = request.cookies.get(ANONYMOUS_KEY, _generate_uuid())
        environ[ANONYMOUS_KEY] = anonymous_id
        request.environ = environ

    @app.after_request
    def after_response(response):
        anonymous_key = request.environ.get(ANONYMOUS_KEY)
        if anonymous_key is not None:
            response.set_cookie(key=ANONYMOUS_KEY, value=anonymous_key)
        message = f'"{request.method} {request.path}" {response.status_code}'
        middleware_logger.info(message)
        return response
Example #39
def setup(level: Union[str, int], structured: bool, config_path: str = None):
    """
    Make stdout and stderr unicode friendly in case of misconfigured \
    environments, initialize the logging system and structured logging, and \
    enable colored logs where appropriate.

    Args:
        level: The global logging level.
        structured: Output JSON logs to stdout.
        config_path: Path to a yaml file that configures the level of output of the loggers. \
                        Root logger level is set through the level argument and will override any \
                        root configuration found in the conf file.

    Returns:
        None

    """
    global logs_are_structured
    logs_are_structured = structured

    if not isinstance(level, int):
        level = logging._nameToLevel[level]

    def ensure_utf8_stream(stream):
        if not isinstance(stream, io.StringIO) and hasattr(stream, "buffer"):
            stream = codecs.getwriter("utf-8")(stream.buffer)
            stream.encoding = "utf-8"
        return stream

    sys.stdout, sys.stderr = (ensure_utf8_stream(s) for s in (sys.stdout, sys.stderr))

    # basicConfig is only called to make sure there is at least one handler for the root logger.
    # All the output level setting is done right afterwards.
    logging.basicConfig()
    logging.setLogRecordFactory(NumpyLogRecord)
    if config_path is not None and os.path.isfile(config_path):
        with open(config_path) as fh:
            config = yaml.safe_load(fh)
        for key, val in config.items():
            logging.getLogger(key).setLevel(logging._nameToLevel.get(val, level))
    root = logging.getLogger()
    root.setLevel(level)

    if not structured:
        handler = root.handlers[0]
        handler.emit = check_trailing_dot(handler.emit)
        if not hasattr(sys.stdin, "closed"):
            handler.setFormatter(AwesomeFormatter())
        elif not sys.stdin.closed and sys.stdout.isatty():
            handler.setFormatter(AwesomeFormatter())
    else:
        root.handlers[0] = StructuredHandler(level)
Example #41
def demo_6():

    # Note that the factory is somehow bypassed by a LoggerAdapter
    # ::

    from collections.abc import Callable
    class UserLogRecord( Callable ):
        def __init__( self ):
            self.previous = logging.getLogRecordFactory()
        def __call__( self, *args, **kwargs ):
            print( "Building log with ", args, kwargs, getattr(self,'extra',{}) )
            user= kwargs.pop('user',None)
            record = self.previous(*args, **kwargs)
            record.user= user
            return record

    # Adapter. This kind of extension may not be needed, since handling
    # "extra" is already the default behavior. The processing is obscure,
    # though: it behaves as if it bypassed the factory, even though the
    # code suggests it should not.
    # ::

    class UserLogAdapter( logging.LoggerAdapter ):
        def process( self, msg, kwargs ):
            kwargs['user']= self.extra.get('user',None)
            return msg, kwargs

    # Installation
    # ::

    logging.config.dictConfig( yaml.safe_load(config5) )
    logging.setLogRecordFactory(UserLogRecord())

    # Use
    # ::

    log= logging.getLogger( "test.demo6" )
    for h in logging.getLogger().handlers:
        h.setFormatter( logging.Formatter( fmt="{user}:{name}:{levelname}:{message}", style="{") )

    import threading
    data= threading.local()
    data.user= "******"
    data.ip_address= "127.0.0.1"

    log.info( "message without User" )
Example #42
def init_logging(conf, console_level):
    console_fmt = "| {levelname:^8} | {message} (from {name}; {threadName})"
    file_fmt = "| {asctime} " + console_fmt
    asyncirc_logger = logging.getLogger("asyncirc")

    class NewStyleLogRecord(logging.LogRecord):
        def getMessage(self):  # noqa
            msg = self.msg
            if not isinstance(self.msg, str):
                msg = str(self.msg)
            if not isinstance(self.args, tuple):
                self.args = (self.args,)
            return msg.rstrip().format(*self.args)
    logging.setLogRecordFactory(NewStyleLogRecord)

    handler = ColorStreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(console_fmt, style='{'))
    handler.addFilter(lambda r: r.levelno >= console_level)

    l.addHandler(handler)
    asyncirc_logger.addHandler(handler)

    conf_level = console_level
    if conf.logging.active:
        conf_level = getattr(logging, (conf.logging.level or "WARN").upper())

        handler = logging.handlers.TimedRotatingFileHandler(conf.logging.path or "log",
                                                            **conf.logging.rotate)
        handler.setFormatter(logging.Formatter(file_fmt, style='{'))
        handler.addFilter(lambda r: r.levelno >= conf_level)

        l.addHandler(handler)
        asyncirc_logger.addHandler(handler)

    min_level = min(console_level, conf_level)
    max_level = max(console_level, conf_level)
    l.setLevel(min_level)
    asyncirc_logger.setLevel(min_level)

    l.log(max_level,
          "application started; console logging level: {}; file logging level: {}",
          console_level,
          conf_level if conf.logging.active else "disabled")

    # return minimum level required to pass all filters
    return max_level
Example #43
def init_logger(level, logfile, quiet=False):
    if sys.platform.startswith("win"):
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.formatter = ColouredFormatter("{asctime} {levelname} {name}:{lineno} {message}", "%Y-%m-%d %H:%M:%S", "{")
    else:
        stream_handler = ColouredStreamHandler(sys.stdout)
        stream_handler.formatter = ColouredFormatter("{asctime} {levelname} {name}:{lineno}#RESET# {message}", "%Y-%m-%d %H:%M:%S", "{")
    logging.basicConfig(level=level, handlers=[stream_handler])
    log = logging.getLogger()
    log.addHandler(stream_handler)

    log_factory = logging.getLogRecordFactory()

    def factory(name, level, fn, lno, msg, args, exc_info, func=None, sinfo=None, **kwargs):
        """
        Reformat the log message to get something more clean
        """
        # When a Qt message box is displayed, the correct line number is
        # part of the name
        if ":" in name:
            name, lno = name.split(":")
            lno = int(lno)
        name = name.replace("gns3.", "")
        try:
            return log_factory(name, level, fn, lno, msg, args, exc_info, func=func, sinfo=sinfo, **kwargs)
        except Exception as e:  # To avoid recursion we just print the message if something is wrong when logging
            print(msg)
            return
    logging.setLogRecordFactory(factory)

    try:
        try:
            os.makedirs(os.path.dirname(logfile))
        except FileExistsError:
            pass
        handler = logging.FileHandler(logfile, "w")
        handler.formatter = logging.Formatter("{asctime} {levelname} {filename}:{lineno} {message}", "%Y-%m-%d %H:%M:%S", "{")
        log.addHandler(handler)
    except OSError as e:
        log.warning("could not log to {}: {}".format(logfile, e))

    log.info('Log level: {}'.format(logging.getLevelName(level)))

    return logging.getLogger()
Example #44
def default_config(file=None, level=_logging.INFO):
    """Set up the logging system with a default configuration."""
    format='{asctime} {levelname} {name}: {message}'
    datefmt='%Y-%m-%dT%H:%M:%S'
    style='{'
    # Handle filenames, streams, and default.  This has to be done with
    # separate calls because basicConfig won't allow multiple
    # conflicting arguments to be specified, even if they are None.
    if file is None:
        _logging.basicConfig(
            format=format, datefmt=datefmt, style=style, level=level)
    elif isinstance(file, str):
        _logging.basicConfig(
            format=format, datefmt=datefmt, style=style, level=level,
            filename=file)
    elif isinstance(file, io.IOBase):
        _logging.basicConfig(
            format=format, datefmt=datefmt, style=style, level=level,
            stream=file)
    else:
        raise ValueError('Not a file or filename: {}'.format(file))
    # Set factory to handle {}-style formatting in messages
    _logging.setLogRecordFactory(log_record_factory)
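
The log_record_factory referenced on the last line must return records whose getMessage applies {}-style formatting; a minimal sketch (compare StrFormatLogRecord in Example #47):

import logging as _logging

class _BraceFormatLogRecord(_logging.LogRecord):
    def getMessage(self):
        msg = str(self.msg)
        return msg.format(*self.args) if self.args else msg

def log_record_factory(*args, **kwargs):
    return _BraceFormatLogRecord(*args, **kwargs)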
Example #45
def setup_logging(config):
    level = logging.DEBUG if config['DEBUG'] else logging.INFO
    # Our logging routines signal the start and end of the routes,
    # so the Werkzeug defaults aren't required. Keep warnings and above.
    logging.getLogger('werkzeug').setLevel(logging.WARN)
    logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(logging.WARN)
    logging.addLevelName(25, 'AUDIT')

    logging.audit = audit

    global app_name
    app_name = config['APPLICATION_NAME']

    root_logger = logging.getLogger()
    logging.setLogRecordFactory(record_factory)
    formatter = logging.Formatter('%(levelname)s %(asctime)s.%(msecs)03d [%(appname)s] %(file)s #%(line)s %(method)s'
                                  ' %(message)s',
                                  "%Y-%m-%d %H:%M:%S")

    out_handler = logging.StreamHandler(sys.stdout)
    out_handler.addFilter(OutputFilter(False, False))
    out_handler.setFormatter(formatter)
    root_logger.addHandler(out_handler)

    err_handler = logging.StreamHandler(sys.stderr)
    err_handler.addFilter(OutputFilter(True, False))
    err_handler.setFormatter(formatter)
    root_logger.addHandler(err_handler)

    audit_handler = logging.FileHandler(config['AUDIT_LOG_FILENAME'])
    # audit_handler = logging.StreamHandler(sys.stdout)
    audit_handler.addFilter(OutputFilter(False, True))
    audit_handler.setFormatter(formatter)
    root_logger.addHandler(audit_handler)

    root_logger.setLevel(level)
Example #46
                          action="store",
                          required=True,
                          type=int,
                          dest="port",
                          help="Network port to use to communicate with server")
  cli_parser.add_argument("-v", 
                          "--verbosity",
                          action="store",
                          choices=("quiet","warning","info","debug"),
                          default="info",
                          dest="verbosity",
                          help="Level of output to diplay")
  options = cli_parser.parse_args()

  # setup logger
  logging.basicConfig(format="%(message)s")
  logger = logging.getLogger()
  if options.verbosity == "quiet":
    logger.setLevel(logging.CRITICAL+1)
  elif options.verbosity == "warning":
    logger.setLevel(logging.WARNING)
  elif options.verbosity == "info":
    logger.setLevel(logging.INFO)
  elif options.verbosity == "debug":
    logger.setLevel(logging.DEBUG)
    logrecord_factory = DebugLogRecordFactory()
    logging.setLogRecordFactory(logrecord_factory.log)

  # start client
  client = DistributedCrawlerClient(options.server,options.port)
  client.start()
Example #47
class StrFormatLogRecord(logging.LogRecord):
    """Allow {} formatting for your message.  This is stupid, so this class will fix it.
    """

    def getMessage(self):
        msg = str(self.msg)
        if self.args:
            # potential bug here, if they provide a 0th argument, but don't use it in the message.
            # the old formatting would have thrown an exception in that case, and it still will.
            if '{}' in msg or '{0}' in msg:
                msg = msg.format(*self.args)
            else:
                msg = msg % self.args
        return msg

# Allow {} style formatting of log messages, which is far superior!
logging.setLogRecordFactory(StrFormatLogRecord)

def initialize_logging(configuration):
    assert(isinstance(configuration, configparser.ConfigParser))
    logfile = configuration['Logging'].get('logfile')
    level = configuration['Logging'].get('level', 'DEBUG')
    stdout = configuration['Logging'].getboolean('stdout', True)
    format = configuration['Logging'].get('format', '[{asctime}|{levelname:<8}|{name}]: {message}')
    dateformat = configuration['Logging'].get('dateformat', '%x %I:%M:%S %p')
    handlers = []
    formatter = logging.Formatter(fmt=format, datefmt=dateformat, style='{')
    root_logger = logging.getLogger()
    exc_info = None
    try:
        if logfile is not None and logfile != '':
            handlers.append(logging.handlers.WatchedFileHandler(logfile))
Example #48
File: logger.py  Project: panoptes/POCS
def get_root_logger(profile='panoptes', log_config=None):
    """Creates a root logger for PANOPTES used by the PanBase object.

    Args:
        profile (str, optional): The name of the logger to use, defaults
            to 'panoptes'.
        log_config (dict|None, optional): Configuration options for the logger.
            See https://docs.python.org/3/library/logging.config.html for
            available options. Default is `None`, which then looks up the
            values in the `log.yaml` config file.

    Returns:
        logger (logging.Logger): A configured instance of the logger
    """

    # Get log info from config
    log_config = log_config if log_config else load_config('log').get('logger', {})

    # If we already created a logger for this profile and log_config, return that.
    logger_key = (profile, json.dumps(log_config, sort_keys=True))
    try:
        return all_loggers[logger_key]
    except KeyError:
        pass

    # Alter the log_config to use UTC times
    if log_config.get('use_utc', True):
        for name, formatter in log_config['formatters'].items():
            log_config['formatters'][name].setdefault('()', _UTCFormatter)
        log_fname_datetime = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%SZ')
    else:
        log_fname_datetime = datetime.datetime.now().strftime('%Y%m%dT%H%M%SZ')

    # Setup log file names
    invoked_script = os.path.basename(sys.argv[0])
    log_dir = '{}/logs'.format(os.getenv('PANDIR', gettempdir()))
    log_fname = '{}-{}-{}'.format(invoked_script, os.getpid(), log_fname_datetime)
    log_symlink = '{}/{}.log'.format(log_dir, invoked_script)

    # Set log filename and rotation
    for handler in log_config.get('handlers', []):
        # Set the filename
        full_log_fname = '{}/{}-{}.log'.format(log_dir, log_fname, handler)
        log_config['handlers'][handler].setdefault('filename', full_log_fname)

        # Setup the TimedRotatingFileHandler for middle of day
        log_config['handlers'][handler].setdefault('atTime', datetime.time(hour=11, minute=30))

        if handler == 'all':
            # Create a symlink to the log file with just the name of the script,
            # not the date and pid, as this makes it easier to find the latest file.
            try:
                os.unlink(log_symlink)
            except FileNotFoundError:  # pragma: no cover
                pass
            finally:
                os.symlink(full_log_fname, log_symlink)

    # Configure the logger
    logging.config.dictConfig(log_config)

    # Get the logger and set as attribute to class
    logger = logging.getLogger(profile)

    # Don't want log messages from state machine library, it is very noisy and
    # we have our own way of logging state transitions
    logging.getLogger('transitions.core').setLevel(logging.WARNING)

    # Set custom LogRecord
    logging.setLogRecordFactory(StrFormatLogRecord)

    # Add a filter for better filename/lineno
    logger.addFilter(FilenameLineFilter())

    logger.info('{:*^80}'.format(' Starting PanLogger '))
    all_loggers[logger_key] = logger
    return logger
Example #49
def init_logging(
    filename: str=None,
    main_logger: str='',
    on_error: Callable[
        [Type[BaseException], BaseException, TracebackType],
        None,
    ]=None,
) -> logging.Logger:
    """Setup the logger and logging handlers.

    If filename is set, all logs will be written to this file as well.
    This also sets sys.excepthook, so uncaught exceptions are captured.
    on_error should be a function to call when uncaught exceptions are thrown.
    (taking type, value, traceback).
    If the exception is a BaseException, the app will quit silently.
    """

    class NewLogRecord(logging.getLogRecordFactory()):
        """Allow passing an alias for log modules."""
        # This breaks %-formatting, so only set when init_logging() is called.

        alias = None  # type: str

        def getMessage(self):
            """We have to hook here to change the value of .module.

            It's called just before the formatting call is made.
            """
            if self.alias is not None:
                self.module = self.alias
            return str(self.msg)
    logging.setLogRecordFactory(NewLogRecord)

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    # Put more info in the log file, since it's not onscreen.
    long_log_format = logging.Formatter(
        '[{levelname}] {module}.{funcName}(): {message}',
        style='{',
    )
    # Console messages, etc.
    short_log_format = logging.Formatter(
        # One letter for level name
        '[{levelname[0]}] {module}.{funcName}(): {message}',
        style='{',
    )

    if filename is not None:
        # Make the directories the logs are in, if needed.
        os.makedirs(os.path.dirname(filename), exist_ok=True)

        # The log contains DEBUG and above logs.
        log_handler = get_handler(filename)
        log_handler.setLevel(logging.DEBUG)
        log_handler.setFormatter(long_log_format)
        logger.addHandler(log_handler)

        name, ext = os.path.splitext(filename)

        # The .error log has copies of WARNING and above.
        err_log_handler = get_handler(name + '.error' + ext)
        err_log_handler.setLevel(logging.WARNING)
        err_log_handler.setFormatter(long_log_format)

        logger.addHandler(err_log_handler)

    if sys.stdout:
        stdout_loghandler = logging.StreamHandler(sys.stdout)
        stdout_loghandler.setLevel(logging.INFO)
        stdout_loghandler.setFormatter(short_log_format)
        logger.addHandler(stdout_loghandler)

        if sys.stderr:
            def ignore_warnings(record: logging.LogRecord):
                """Filter out messages higher than WARNING.

                Those are handled by the stderr handler, and we don't want duplicates.
                """
                return record.levelno < logging.WARNING
            stdout_loghandler.addFilter(ignore_warnings)
    else:
        sys.stdout = NullStream()

    if sys.stderr:
        stderr_loghandler = logging.StreamHandler(sys.stderr)
        stderr_loghandler.setLevel(logging.WARNING)
        stderr_loghandler.setFormatter(short_log_format)
        logger.addHandler(stderr_loghandler)
    else:
        sys.stderr = NullStream()

    # Use the exception hook to report uncaught exceptions, and finalise the
    # logging system.
    old_except_handler = sys.excepthook

    def except_handler(
        exc_type: Type[BaseException],
        exc_value: BaseException,
        exc_tb: TracebackType,
    ) -> None:
        """Log uncaught exceptions."""
        if not issubclass(exc_type, Exception):
            # It's subclassing BaseException (KeyboardInterrupt, SystemExit),
            # so we should quit without messages.
            logging.shutdown()
            return

        logger._log(
            level=logging.ERROR,
            msg='Uncaught Exception:',
            args=(),
            exc_info=(exc_type, exc_value, exc_tb),
        )
        logging.shutdown()
        if on_error is not None:
            on_error(exc_type, exc_value, exc_tb)
        # Call the original handler - that prints to the normal console.
        old_except_handler(exc_type, exc_value, exc_tb)

    sys.excepthook = except_handler

    if main_logger:
        return get_logger(main_logger)
    else:
        return LoggerAdapter(logger)
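
get_logger and LoggerAdapter are also not shown. Judging by the extra={'alias': self.alias} call visible in Example #51 below, the adapter presumably injects an alias attribute onto each record for NewLogRecord to pick up; a sketch under that assumption:

import logging

class LoggerAdapter(logging.LoggerAdapter):
    """Hypothetical sketch: forward an 'alias' so NewLogRecord can rewrite .module."""
    def __init__(self, logger, alias=None):
        super().__init__(logger, extra={})
        self.alias = alias

    def process(self, msg, kwargs):
        # Guarantee every record carries .alias, even when the caller
        # supplied their own extra= dict.
        kwargs.setdefault('extra', {}).setdefault('alias', self.alias)
        return msg, kwargs
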
Example #50

import logging
import time


class Extension:
    def __init__(self):
        self.start_time = time.time()

    def elapsed_time(self):
        return time.time() - self.start_time


class LogRecordExtension(logging.LogRecord):
    extension = Extension()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.elapsed_time = self.extension.elapsed_time()


if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG, format="%(elapsed_time)10.10fs %(levelname)7s %(message)s")
    logging.setLogRecordFactory(LogRecordExtension)
    logger = logging.getLogger(__name__)

    logger.warning("hello")
    logger.info("sleep 0.2s")
    time.sleep(0.2)
    logger.info("----------------------------------------")
    logger.info("sleep 0.4s")
    time.sleep(0.4)
    logger.info("----------------------------------------")
    logger.error("hai")
Example #51
File: utils.py Project: Coolasp1e/BEE2.4
                stack_info=stack_info,
                extra={'alias': self.alias},
            )

class NewLogRecord(logging.getLogRecordFactory()):
    """Allow passing an alias for log modules."""

    def getMessage(self):
        """We have to hook here to change the value of .module.

        It's called just before the formatting call is made.
        """
        if self.alias is not None:
            self.module = self.alias
        return str(self.msg)
logging.setLogRecordFactory(NewLogRecord)


def init_logging(filename: str=None) -> logging.Logger:
    """Setup the logger and logging handlers.

    If filename is set, all logs will be written to this file.
    """
    global short_log_format, long_log_format
    global stderr_loghandler, stdout_loghandler, file_loghandler
    import logging
    from logging import handlers
    import sys, io, os

    logger = logging.getLogger('BEE2')
    logger.setLevel(logging.DEBUG)
Example #52
CMS_MARKUP_OPTIONS = (
    'cmsplugin_markup.plugins.creole',
    'cmsplugin_markup.plugins.html',
    'cmsplugin_markup.plugins.markdown',
    'cmsplugin_markup.plugins.textile',
    'cmsplugin_markup.plugins.restructuredtext',
)
CMS_MARKUP_RENDER_ALWAYS = True
CMS_MARKDOWN_EXTENSIONS = ()


#_____________________________________________________________________________

# Adds a 'cut_path' attribute to every log record, so '%(cut_path)s' can be used in log formatters.
# django_tools.unittest_utils.logging_utils.CutPathnameLogRecordFactory
logging.setLogRecordFactory(CutPathnameLogRecordFactory(max_length=50))
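
The factory class itself lives in django_tools, so its definition is not part of this settings file. A minimal sketch of how such a factory can work (truncation details assumed):

import logging

class CutPathnameLogRecordFactory:
    """Hypothetical sketch: record factory adding a truncated 'cut_path' attribute."""
    def __init__(self, max_length=40):
        self.max_length = max_length
        self.origin_factory = logging.getLogRecordFactory()

    def __call__(self, *args, **kwargs):
        record = self.origin_factory(*args, **kwargs)
        path = record.pathname
        if len(path) > self.max_length:
            # Keep the tail of the path; that is usually the informative part.
            path = '...' + path[-(self.max_length - 3):]
        record.cut_path = path
        return record
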

# Filter warnings and pipe them to logging system:
# django_tools.unittest_utils.logging_utils.FilterAndLogWarnings
warnings.showwarning = FilterAndLogWarnings()

warnings.simplefilter("always") # Turns on all warnings

#-----------------------------------------------------------------------------


# https://docs.python.org/3/library/logging.html#logging-levels
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
Example #53
File: netsvc.py Project: Tecnativa/odoo
def init_logger():
    global _logger_init
    if _logger_init:
        return
    _logger_init = True

    old_factory = logging.getLogRecordFactory()
    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.perf_info = ""
        return record
    logging.setLogRecordFactory(record_factory)

    logging.addLevelName(25, "INFO")
    logging.captureWarnings(True)

    from .tools.translate import resetlocale
    resetlocale()

    # create a format for log messages and dates
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s %(perf_info)s'
    # Normal Handler on stderr
    handler = logging.StreamHandler()

    if tools.config['syslog']:
        # SysLog Handler
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
        elif platform.system() == 'Darwin':
            handler = logging.handlers.SysLogHandler('/var/run/log')
        else:
            handler = logging.handlers.SysLogHandler('/dev/log')
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'

    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # We check we have the right location for the log files
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if os.name == 'posix':
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.FileHandler(logf)
        except Exception:
            sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")

    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    def is_a_tty(stream):
        return hasattr(stream, 'fileno') and os.isatty(stream.fileno())

    if os.name == 'posix' and isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
        formatter = ColoredFormatter(format)
        perf_filter = ColoredPerfFilter()
    else:
        formatter = DBFormatter(format)
        perf_filter = PerfFilter()
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    logging.getLogger('werkzeug').addFilter(perf_filter)

    if tools.config['log_db']:
        db_levels = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL,
        }
        postgresqlHandler = PostgreSQLHandler()
        postgresqlHandler.setLevel(int(db_levels.get(tools.config['log_db_level'], tools.config['log_db_level'])))
        logging.getLogger().addHandler(postgresqlHandler)

    # Configure loggers levels
    pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])

    logconfig = tools.config['log_handler']

    logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
    for logconfig_item in logging_configurations:
        loggername, level = logconfig_item.split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)

    for logconfig_item in logging_configurations:
        _logger.debug('logger level set: "%s"', logconfig_item)
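
The record_factory pattern above (wrap the previous factory and inject a default attribute) composes with any factory installed before it, unlike subclassing LogRecord directly. A standalone sketch of just that pattern:

import logging

old_factory = logging.getLogRecordFactory()

def record_factory(*args, **kwargs):
    # Delegate record creation, then guarantee the attribute exists so a
    # format string like '%(perf_info)s' can never raise a KeyError.
    record = old_factory(*args, **kwargs)
    record.perf_info = ""
    return record

logging.setLogRecordFactory(record_factory)
logging.basicConfig(format="%(levelname)s %(message)s %(perf_info)s")
logging.warning("request handled")   # perf_info renders as ''
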
Example #54
    """
    _name = name.split(".")[-1]
    if func and func != "__lshift":
        func = _name + "." + func
        return old_factory(name, level, fn, lno, msg, args, exc_info, func, sinfo, **kwargs)

    ## get info for actual calling function
    ## (value of 5 determined by trial and error)
    f = sys._getframe(5)
    # pathname = f.f_code.co_filename
    # lineno = f.f_lineno
    funcName = _name + "." + f.f_code.co_name

    return old_factory(name, level, f.f_code.co_filename, f.f_lineno, msg, args, exc_info, funcName, sinfo, **kwargs)

logging.setLogRecordFactory(recordFactory)

def __lshift(caller, value):
    """
    Overload the << op to allow a shortcut to debug() calls:

    logger << "Here's your problem: " + str(thisiswrong)

    is equivalent to

    logger.debug("Here's your problem: {}".format(thisiswrong))

    :param value: the message to send
    """
    caller.debug(value)
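
The excerpt does not show how __lshift is attached to loggers; presumably it is bound as the << operator on Logger, something like this (hypothetical binding):

import logging

logging.Logger.__lshift__ = __lshift   # the helper defined above

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("demo")
log << "Here's your problem: {}".format(42)   # shorthand for log.debug(...)
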
Example #55
def init_logging(filename: str=None, main_logger='', on_error=None) -> logging.Logger:
    """Setup the logger and logging handlers.

    If filename is set, all logs will be written to this file as well.
    This also sets sys.excepthook, so uncaught exceptions are captured.
    on_error should be a function to call when an uncaught exception occurs
    (taking type, value, traceback).
    """
    global short_log_format, long_log_format
    global stderr_loghandler, stdout_loghandler
    import logging
    from logging import handlers
    import sys, io, os

    class NewLogRecord(logging.getLogRecordFactory()):
        """Allow passing an alias for log modules."""
        # This breaks %-formatting, so only set when init_logging() is called.

        alias = None  # type: str

        def getMessage(self):
            """We have to hook here to change the value of .module.

            It's called just before the formatting call is made.
            """
            if self.alias is not None:
                self.module = self.alias
            return str(self.msg)
    logging.setLogRecordFactory(NewLogRecord)

    logger = logging.getLogger('BEE2')
    logger.setLevel(logging.DEBUG)

    # Put more info in the log file, since it's not onscreen.
    long_log_format = logging.Formatter(
        '[{levelname}] {module}.{funcName}(): {message}',
        style='{',
    )
    # Console messages, etc.
    short_log_format = logging.Formatter(
        # One letter for level name
        '[{levelname[0]}] {module}: {message}',
        style='{',
    )

    if filename is not None:
        # Make the directories the logs are in, if needed.
        os.makedirs(os.path.dirname(filename), exist_ok=True)

        # The log contains DEBUG and above logs.
        # We rotate through logs of 500kb each, so it doesn't increase too much.
        log_handler = handlers.RotatingFileHandler(
            filename,
            maxBytes=500 * 1024,
            backupCount=10,
        )
        log_handler.setLevel(logging.DEBUG)
        log_handler.setFormatter(long_log_format)

        logger.addHandler(log_handler)

    # This is needed for multiprocessing, since it tries to flush stdout.
    # That'll fail if it is None.
    class NullStream(io.IOBase):
        """A stream object that discards all data."""
        def __init__(self):
            super(NullStream, self).__init__()

        @staticmethod
        def write(*args, **kwargs):
            pass

        @staticmethod
        def read(*args, **kwargs):
            return ''

    if sys.stdout:
        stdout_loghandler = logging.StreamHandler(sys.stdout)
        stdout_loghandler.setLevel(logging.INFO)
        stdout_loghandler.setFormatter(long_log_format)
        logger.addHandler(stdout_loghandler)

        if sys.stderr:
            def ignore_warnings(record: logging.LogRecord):
                """Filter out messages higher than WARNING.

                Those are handled by the stderr handler, and we don't want duplicates.
                """
                return record.levelno < logging.WARNING
            stdout_loghandler.addFilter(ignore_warnings)
    else:
        sys.stdout = NullStream()

    if sys.stderr:
        stderr_loghandler = logging.StreamHandler(sys.stderr)
        stderr_loghandler.setLevel(logging.WARNING)
        stderr_loghandler.setFormatter(long_log_format)
        logger.addHandler(stderr_loghandler)
    else:
        sys.stderr = NullStream()

    # Use the exception hook to report uncaught exceptions, and finalise the
    # logging system.
    old_except_handler = sys.excepthook

    def except_handler(*exc_info):
        """Log uncaught exceptions."""
        if on_error is not None:
            on_error(*exc_info)
        logger._log(
            level=logging.ERROR,
            msg='Uncaught Exception:',
            args=(),
            exc_info=exc_info,
        )
        logging.shutdown()
        # Call the original handler - that prints to the normal console.
        old_except_handler(*exc_info)

    sys.excepthook = except_handler

    if main_logger:
        return getLogger(main_logger)
    else:
        return LoggerAdapter(logger)
Example #56
    logging.INFO: Colors.blue,
    logging.DEBUG: Colors.cyan,
    logging.NOTSET: Colors.white,
}
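
The Colors helper and the upper half of the _levelToColor mapping are cut off in this excerpt; presumably Colors is a bag of ANSI escape sequences along these lines (hypothetical):

class Colors:
    """Hypothetical sketch: plain ANSI escape codes."""
    reset = '\033[0m'
    red = '\033[31m'
    yellow = '\033[33m'
    blue = '\033[34m'
    cyan = '\033[36m'
    white = '\033[37m'
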


class ClogRecord(logging.LogRecord):
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None, sinfo=None, **kwargs):
        super().__init__(name, level, pathname, lineno,
                         msg, args, exc_info, func=func, sinfo=sinfo, **kwargs)
        self.reset = Colors.reset
        self.levelcolor = _levelToColor[level]

# Override the LogRecord factory with ours, which provides %(levelcolor)s and %(reset)s
logging.setLogRecordFactory(ClogRecord)


if __name__ == '__main__':
    logger = logging.getLogger()
    # This is needed to make the logger care about messages DEBUG and over
    logger.setLevel(logging.DEBUG)

    # An uncolored formatter
    logFormatter = logging.Formatter('[%(asctime)s] [ %(levelname)-8s ]: %(message)-1s')
    # A formatter that makes use of the new fields 'levelcolor' and 'reset'
    clogFormatter = logging.Formatter('[%(asctime)s] [ %(levelcolor)s%(levelname)-8s%(reset)s ]: %(message)-1s')

    fileHandler = logging.FileHandler('tst.log')
    # make it care about messages DEBUG and over
    fileHandler.setLevel(logging.DEBUG)
Example #57
            "asynchronous code and coroutines work.  Blocking calls (notably HTTP requests) can take "
            "a long time, during which the bot is unable to do anything but wait for it.  "
            "If you're sure you know what you're doing, simply add `allow_requests = True` above your "
            "import statement, that being `import requests` or whatever requests dependent module.",

            footnote="Import traceback (most recent call last):\n" + import_tb
        )

sys.meta_path.insert(0, Yikes())

from .bot import MusicBot
from .constructs import BetterLogRecord

__all__ = ['MusicBot']

logging.setLogRecordFactory(BetterLogRecord)

_func_prototype = "def {logger_func_name}(self, message, *args, **kwargs):\n" \
                  "    if self.isEnabledFor({levelname}):\n" \
                  "        self._log({levelname}, message, args, **kwargs)"

def _add_logger_level(levelname, level, *, func_name = None):
    """

    :type levelname: str
        The reference name of the level, e.g. DEBUG, WARNING, etc
    :type level: int
        Numeric logging level
    :type func_name: str
        The name of the logger function to log to a level, e.g. "info" for log.info(...)
    """
Example #58
def init(options, appname):
    logging.config.dictConfig(json.load(open(options.logging_config)))
    glob.appname = appname
    logging.setLogRecordFactory(factory)
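
Neither glob nor factory is defined in this tiny example; factory is presumably a record factory that tags each record with the application name stored by init(), e.g. (hypothetical):

import logging

_old_factory = logging.getLogRecordFactory()

def factory(*args, **kwargs):
    record = _old_factory(*args, **kwargs)
    # 'glob' is the project's own globals module, not the stdlib glob.
    record.appname = glob.appname
    return record
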
Example #59
File: netsvc.py Project: akretion/odoo
def init_logger():
    global _logger_init
    if _logger_init:
        return
    _logger_init = True

    old_factory = logging.getLogRecordFactory()
    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.perf_info = ""
        return record
    logging.setLogRecordFactory(record_factory)

    logging.addLevelName(25, "INFO")
    logging.captureWarnings(True)

    from .tools.translate import resetlocale
    resetlocale()

    # create a format for log messages and dates
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s %(perf_info)s'
    # Normal Handler on stderr
    handler = logging.StreamHandler()

    if tools.config['syslog']:
        # SysLog Handler
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
        elif platform.system() == 'Darwin':
            handler = logging.handlers.SysLogHandler('/var/run/log')
        else:
            handler = logging.handlers.SysLogHandler('/dev/log')
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'

    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # We check we have the right location for the log files
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if tools.config['logrotate'] is not False:
                if tools.config['workers'] and tools.config['workers'] > 1:
                    # TODO: fallback to regular file logging in master for safe(r) defaults?
                    #
                    # Doing so here would be a good idea but also might break
                    # situations where people do log-shipping of rotated data?
                    _logger.warn("WARNING: built-in log rotation is not reliable in multi-worker scenarios and may incur significant data loss. "
                                 "It is strongly recommended to use an external log rotation utility or use system loggers (--syslog) instead.")
                handler = logging.handlers.TimedRotatingFileHandler(filename=logf, when='D', interval=1, backupCount=30)
            elif os.name == 'posix':
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.FileHandler(logf)
        except Exception:
            sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")

    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    def is_a_tty(stream):
        return hasattr(stream, 'fileno') and os.isatty(stream.fileno())

    if os.name == 'posix' and isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
        formatter = ColoredFormatter(format)
        perf_filter = ColoredPerfFilter()
    else:
        formatter = DBFormatter(format)
        perf_filter = PerfFilter()
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    logging.getLogger('werkzeug').addFilter(perf_filter)

    if tools.config['log_db']:
        db_levels = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL,
        }
        postgresqlHandler = PostgreSQLHandler()
        postgresqlHandler.setLevel(int(db_levels.get(tools.config['log_db_level'], tools.config['log_db_level'])))
        logging.getLogger().addHandler(postgresqlHandler)

    # Configure loggers levels
    pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])

    logconfig = tools.config['log_handler']

    logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
    for logconfig_item in logging_configurations:
        loggername, level = logconfig_item.split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)

    for logconfig_item in logging_configurations:
        _logger.debug('logger level set: "%s"', logconfig_item)
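
PerfFilter and ColoredPerfFilter are not shown in either Odoo excerpt; conceptually they fill in the perf_info attribute that the record factory initialised to an empty string. A stand-in sketch (the real filters format SQL query counts and timings from the current request):

import logging
import time

class PerfFilter(logging.Filter):
    """Hypothetical stand-in: annotate records rather than dropping them."""
    def __init__(self):
        super().__init__()
        self._start = time.monotonic()

    def filter(self, record):
        # Stamp seconds since the filter was installed; always return True
        # so no record is ever filtered out.
        record.perf_info = "%.3fs" % (time.monotonic() - self._start)
        return True
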