Example #1
def setup_module_logging(name, levels=(logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)):
    """
    Do all the necessary setup for a module.
    Dereferences the record factory, and returns four logging functions.

    Parameters
    ----------
    name : str
        Name for the module's logger.
    levels : tuple of int, optional
        Logging levels; one logging function is returned per level, in order.

    Returns
    -------
    error : func
    warning : func
    info : func
    debug : func

    """
    original_record_factory = logging.getLogRecordFactory()

    def dereferenced_log_record_factory(*args, **kwargs):
        record = original_record_factory(*args, **kwargs)
        record.lineno = get_lineno_from_deeper_in_stack(1, record.lineno, 'log_rstrip')
        return record

    logging.setLogRecordFactory(dereferenced_log_record_factory)

    module_logger = log_rstrip(logging.getLogger(name))
    return tuple(module_logger(level) for level in levels)
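A hedged usage sketch; log_rstrip and get_lineno_from_deeper_in_stack are project-specific helpers not shown here, and the unpacking below assumes the default four-level tuple:

error, warning, info, debug = setup_module_logging(__name__)
info('module initialised')  # hypothetical call through the wrapped logger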
Example #2
    def setup_logger(self):
        self.log = logging.getLogger(self.app)
        self.log.setLevel(logging.DEBUG)

        # create file handler which logs even debug messages
        log_file = os.path.join('/tmp', self.app + '.log')
        in_dev_debug_file_handler = logging.FileHandler(
            os.path.join('/tmp', '{}.development.log'.format(self.app))
        )
        in_dev_debug_file_handler.setLevel(logging.DEBUG)

        readable_debug_file_handler = logging.FileHandler(
            os.path.join('/tmp', '{}.debug.log'.format(self.app))
        )
        readable_debug_file_handler.setLevel(logging.DEBUG)

        # create console handler with a higher log level
        command_line_logging = logging.StreamHandler()

        if self.cli_arguments.verbose:
            command_line_logging.setLevel(logging.DEBUG)

            # add relpathname log format attribute so as to only show the file
            #  in which a log was initiated, relative to the project path
            #  e.g. pathname = /full/path/to/project/package/module.py
            #       relpathname = package/module.py
            default_record_factory = logging.getLogRecordFactory()
            project_path = os.path.dirname(os.path.abspath(sys.argv[0])) + \
                           os.sep
            def relpathname_record_factory(*args, **kwargs):
                record = default_record_factory(*args, **kwargs)
                record.relpathname = record.pathname.replace(project_path, '')
                return record
            logging.setLogRecordFactory(relpathname_record_factory)

            # add colors to the logs!
            colored_files_funcs_linenos_formatter = colorlog.ColoredFormatter(
                fmt=(
                    "%(asctime)s - %(log_color)s%(levelname)-8s%(reset)s"
                    " [ %(relpathname)s::%(funcName)s():%(lineno)s ] "
                    "%(message)s"
                ),
                datefmt='%Y-%m-%d %H:%M:%S',
                reset=True,
            )
            in_dev_debug_file_handler.setFormatter(
                colored_files_funcs_linenos_formatter)
            command_line_logging.setFormatter(
                colored_files_funcs_linenos_formatter)

        else:
            command_line_logging.setLevel(logging.INFO)

        # add the handlers to the logger
        self.log.addHandler(in_dev_debug_file_handler)
        self.log.addHandler(command_line_logging)
        self.log.addHandler(readable_debug_file_handler)
Example #3
def patch_logging():
    old_factory = logging.getLogRecordFactory()

    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.threadName = get_task_name_with_thread()
        return record

    logging.setLogRecordFactory(record_factory)
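A minimal wiring sketch, assuming get_task_name_with_thread is defined elsewhere in the project; the enriched name then surfaces through the standard %(threadName)s field:

patch_logging()
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s [%(threadName)s] %(message)s")
logging.getLogger(__name__).info("task started")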
Example #4
File: log.py Project: Kozea/Radicale
def setup():
    """Set global logging up."""
    global register_stream
    handler = ThreadStreamsHandler(sys.stderr, get_default_handler())
    logging.basicConfig(format=LOGGER_FORMAT, handlers=[handler])
    register_stream = handler.register_stream
    log_record_factory = IdentLogRecordFactory(logging.getLogRecordFactory())
    logging.setLogRecordFactory(log_record_factory)
    set_level(logging.DEBUG)
Example #5
def setMyLogRecord(myFuncName='nameNotPassed', lineNO=0):
    if fDEBUG:
        print('setting funcName to:', myFuncName)

    old_factory = logging.getLogRecordFactory()
    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.LnFuncName = myFuncName
        # record.LnLineNO   = lineNO   # can't set this here, otherwise it always stays the same
        return record
    logging.setLogRecordFactory(record_factory)
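A hedged usage sketch (fDEBUG is a module-level flag defined elsewhere in the source project); the injected attribute becomes an ordinary format field:

setMyLogRecord(myFuncName='main')
logging.basicConfig(format='%(LnFuncName)s: %(message)s')
logging.getLogger(__name__).warning('hello')  # -> "main: hello"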
Example #6
    def __init__(self, name, *, capacity=1000):
        if not isinstance(capacity, int):
            raise TypeError('capacity must be an integer')

        if capacity <= 0:
            raise ValueError('capacity must be a positive integer')

        self.name = name
        self._capacity = capacity
        self._records = []
        self._make_record = logging.getLogRecordFactory()
Example #7
def log_record_factory(name, level, fn, lno, msg, args, exc_info, func=None,
                       sinfo=None, old_log_record_factory=logging.getLogRecordFactory(), **kwargs):
    """Allow str.format style for log messages"""
    msg = str(msg)
    if args:
        try:
            msg = msg % args
        except TypeError:
            msg = msg.format(*args)

    return old_log_record_factory(name, level, fn, lno, msg, (), exc_info,
                                  func, sinfo, **kwargs)
Example #8
    def init(self):
        if self.initialized:
            return self
        self.initialized = True

        old_factory = getLogRecordFactory()
        def record_factory(*args, **kwargs):
            record = old_factory(*args, **kwargs)
            if record.pathname.startswith(topdir):
                record.pathname = record.pathname[len(topdir) + 1:]
            if len(record.pathname) > 32:
                record.pathname = record.pathname[-32:]
            record.codelocation = "%s:%d" % (record.pathname, record.lineno)
            return record
        setLogRecordFactory(record_factory)

        if app.debug:
            formatter = Formatter("[%(asctime)s] %(codelocation)-32s %(levelname)s - %(message)s")
            for handler in app.logger.handlers:
                handler.setFormatter(formatter)
            app.logger.setLevel(DEBUG)
            app.logger.info('DEBUG mode')
        else:
            app.logger.setLevel(INFO)
            app.logger.info('PRODUCTION mode')

        if not app.debug:
            import logging
            from config import logdir
            from logging.handlers import RotatingFileHandler
            file_handler = RotatingFileHandler(os.path.join(logdir, 'server.log'), 'a',
                                               1 * 1024 * 1024, 10)
            file_handler.setLevel(logging.INFO)
            file_handler.setFormatter(logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
            app.logger.addHandler(file_handler)
            app.logger.setLevel(logging.INFO)
            app.logger.info('startup')

        from app import sessions

        sessions.register(self)

        from app import views
        from app import account, runner, workspace, code_template

        from .api.v1_0 import api as api_blueprint
        app.register_blueprint(api_blueprint, url_prefix='/api/v1.0')

        return self
Example #9
def test_apply_filters_record_factory_as_default():
    original_factory = logging.getLogRecordFactory()

    # TODO: setup a faux test handler, or I guess, use mock to check the calls
    try:
        filters = [AddBlippynessFilter(blippyness="moderate blip likely"),
                   add_mood_filter_method]
        factory = apply_filters.ApplyFiltersRecordFactory(filters=filters,
                                                          base_factory=original_factory)
        logging.setLogRecordFactory(factory)

        log.debug('This is a test msg: %s', "foo", extra={'tsx_id': 42})
    except Exception as exc:
        log.exception(exc)
    finally:
        logging.setLogRecordFactory(original_factory)
Example #10
class LOGSetup(object):
    """log setup with configuration"""
    old_factory = logging.getLogRecordFactory()

    def setup_log(self):
        dictConfig(settings['LOG_CONFIG'])
        logging.setLogRecordFactory(self.record_factory)

    def record_factory(self, *args, **kwargs):
        """
        adds some custom field in log formatter
        """
        record = self.old_factory(*args, **kwargs)
        record.environment = settings['ENV']
        record.project = settings['PROJECT_NAME']
        return record
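A hedged sketch of consuming the injected fields; settings is the project's configuration mapping used above, and the handler wiring is an assumption:

LOGSetup().setup_log()
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    "%(asctime)s %(project)s[%(environment)s] %(levelname)s %(message)s"))
logging.getLogger().addHandler(handler)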
Example #11
def log_record_factory(*args, factory=logging.getLogRecordFactory(), **kwargs):
    """Allow str.format style for log messages"""
    msg, format_args = args[4:6]
    msg = str(msg)
    if format_args:
        try:
            msg = msg % format_args
        except TypeError:
            try:
                msg = msg.format(*format_args)
            except:
                pass
        except:
            pass

    return factory(*(args[:4] + (msg, ()) + args[6:]), **kwargs)
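A hedged usage sketch; the snippet defines the factory but does not install it, so the setLogRecordFactory call below is an assumption:

logging.setLogRecordFactory(log_record_factory)
log = logging.getLogger(__name__)
log.warning("%s + %s = %s", 1, 1, 2)  # printf style still works
log.warning("{} + {} = {}", 1, 1, 2)  # str.format style now works too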
Example #12
def register():
    from mixer import bl_panels
    from mixer import bl_operators
    from mixer import bl_properties, bl_preferences
    from mixer.blender_data import debug_addon
    from mixer.log_utils import Formatter, get_log_file

    if len(logger.handlers) == 0:
        # Add the pid to the log. Just enough for the tests, which merge the logs
        # and need to distinguish two Blender instances on the same machine. Pids
        # might collide during regular networked operation
        old_factory = logging.getLogRecordFactory()
        pid = str(os.getpid())

        def record_factory(*args, **kwargs):
            record = old_factory(*args, **kwargs)
            record.custom_attribute = pid
            return record

        logging.setLogRecordFactory(record_factory)

        logger.setLevel(logging.WARNING)
        formatter = Formatter(
            "{asctime} {custom_attribute:<6} {levelname[0]} {name:<36}  - {message:<80}",
            style="{")
        handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        logger.addHandler(handler)

        handler = logging.FileHandler(get_log_file())
        handler.setFormatter(formatter)
        logger.addHandler(handler)

    if not faulthandler.is_enabled():
        faulthandler.enable()
        global _disable_fault_handler
        _disable_fault_handler = True

    debug_addon.register()

    bl_preferences.register()
    bl_properties.register()
    bl_panels.register()
    bl_operators.register()

    atexit.register(cleanup)
Example #13
def set_logging_config(verbose: bool, chain="main"):
    old_factory = logging.getLogRecordFactory()
    if chain != "main":
        chain = f"chain-{chain}"

    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.chain = chain
        record.host = socket.gethostname()
        return record

    logging.setLogRecordFactory(record_factory)

    log_format = "%(asctime)s %(host)s [%(chain)s] %(levelname)s %(message)s"
    logfile = f"log/task-{chain}.log"
    root_logger = {"level": "INFO", "handlers": ["file"]}
    handlers = {
        "file": {
            "level": "INFO",
            "class": "logging.FileHandler",
            "filename": logfile,
            "formatter": "app",
            "encoding": "utf-8",
        }
    }
    if verbose:
        root_logger["handlers"].append("stream")
        handlers["stream"] = {
            "level": "INFO",
            "class": "logging.StreamHandler",
            "formatter": "app",
        }

    logging.config.dictConfig({
        "version": 1,
        "disable_existing_loggers": False,
        "root": root_logger,
        "handlers": handlers,
        "formatters": {
            "app": {
                "format": log_format,
                "datefmt": "%Y-%m-%d %H:%M:%S",
            },
        },
    })
Example #14
    def _instrument(self, **kwargs):
        service_name = ""
        provider = kwargs.get("tracer_provider", None) or get_tracer_provider()
        resource = provider.resource if provider else None
        if resource:
            service_name = resource.attributes.get("service.name")

        old_factory = logging.getLogRecordFactory()
        LoggingInstrumentor._old_factory = old_factory

        def record_factory(*args, **kwargs):
            record = old_factory(*args, **kwargs)

            record.otelSpanID = "0"
            record.otelTraceID = "0"
            record.otelServiceName = service_name

            span = get_current_span()
            if span != INVALID_SPAN:
                ctx = span.get_span_context()
                if ctx != INVALID_SPAN_CONTEXT:
                    record.otelSpanID = format(ctx.span_id, "016x")
                    record.otelTraceID = format(ctx.trace_id, "032x")
            return record

        logging.setLogRecordFactory(record_factory)

        set_logging_format = kwargs.get(
            "set_logging_format",
            environ.get(OTEL_PYTHON_LOG_CORRELATION, "false").lower()
            == "true",
        )

        if set_logging_format:
            log_format = kwargs.get(
                "logging_format", environ.get(OTEL_PYTHON_LOG_FORMAT, None)
            )
            log_format = log_format or DEFAULT_LOGGING_FORMAT

            log_level = kwargs.get(
                "log_level", LEVELS.get(environ.get(OTEL_PYTHON_LOG_LEVEL))
            )
            log_level = log_level or logging.INFO

            logging.basicConfig(format=log_format, level=log_level)
Example #15
def log_wrapper(func_overrider):
    old_factory = logging.getLogRecordFactory()

    def new_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.funcName = func_overrider.__name__
        return record

    def decorator(func):
        def wrapper(*args, **kwargs):
            logging.setLogRecordFactory(new_factory)
            result = func(*args, **kwargs)
            logging.setLogRecordFactory(old_factory)
            return result

        return wrapper

    return decorator
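A hedged usage sketch; public_entry_point and helper are hypothetical names illustrating how %(funcName)s is overridden while the wrapped function runs:

logging.basicConfig(format="%(funcName)s: %(message)s", level=logging.INFO)

def public_entry_point():
    helper()

@log_wrapper(public_entry_point)
def helper():
    logging.getLogger(__name__).info("doing work")

public_entry_point()  # prints "public_entry_point: doing work"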
Example #16
def set_logger_timesource(dtm_now):
    """Set a custom record factory, with a bespoke source of timestamps.

    Used to have records with the same datetime as the most recent packet log record.
    """

    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)

        ct = dtm_now().timestamp()
        record.created = ct
        record.msecs = (ct - int(ct)) * 1000

        return record

    old_factory = logging.getLogRecordFactory()

    logging.setLogRecordFactory(record_factory)
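A hedged usage sketch; dtm_now is any zero-argument callable returning a datetime, e.g. one that replays the timestamp of the last packet seen:

from datetime import datetime

logging.basicConfig(format="%(asctime)s %(message)s")
last_packet_dtm = datetime(2021, 6, 1, 12, 0, 0)
set_logger_timesource(lambda: last_packet_dtm)
logging.getLogger(__name__).warning("stamped with the packet time, not wall time")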
Example #17
def init_logger(level, logfile, quiet=False):
    if sys.platform.startswith("win"):
        stream_handler = logging.StreamHandler(sys.stdout)
        stream_handler.formatter = ColouredFormatter("{asctime} {levelname} {name}:{lineno} {message}", "%Y-%m-%d %H:%M:%S", "{")
    else:
        stream_handler = ColouredStreamHandler(sys.stdout)
        stream_handler.formatter = ColouredFormatter("{asctime} {levelname} {name}:{lineno}#RESET# {message}", "%Y-%m-%d %H:%M:%S", "{")
    logging.basicConfig(level=level, handlers=[stream_handler])
    log = logging.getLogger()
    log.addHandler(stream_handler)

    log_factory = logging.getLogRecordFactory()

    def factory(name, level, fn, lno, msg, args, exc_info, func=None, sinfo=None, **kwargs):
        """
        Reformat the log message to get something more clean
        """
        # When qt message box is display the correct line number is a part of
        # the name
        if ":" in name:
            name, lno = name.split(":")
            lno = int(lno)
        name = name.replace("gns3.", "")
        try:
            return log_factory(name, level, fn, lno, msg, args, exc_info, func=func, sinfo=sinfo, **kwargs)
        except Exception as e:  # To avoid recursion we just print the message if something is wrong when logging
            print(msg)
            return
    logging.setLogRecordFactory(factory)

    try:
        try:
            os.makedirs(os.path.dirname(logfile))
        except FileExistsError:
            pass
        handler = logging.FileHandler(logfile, "w")
        handler.formatter = logging.Formatter("{asctime} {levelname} {filename}:{lineno} {message}", "%Y-%m-%d %H:%M:%S", "{")
        log.addHandler(handler)
    except OSError as e:
        log.warning("could not log to {}: {}".format(logfile, e))

    log.info('Log level: {}'.format(logging.getLevelName(level)))

    return logging.getLogger()
Example #18
def setup():
    """Setup logging"""
    # Set the region on log records.
    default_factory = logging.getLogRecordFactory()
    logging.setLogRecordFactory(partial(region_record_factory,
                                        default_factory))

    logger = logging.getLogger()

    # Send logs directly via the logging client if possible. This ensures trace
    # ids are propagated and allows us to send structured messages.
    if environment.in_gcp():
        client = Client()
        handler = StructuredAppEngineHandler(client)
        handlers.setup_logging(handler, log_level=logging.INFO)

        # Streams unstructured logs to stdout - these logs will still show up
        # under the appengine.googleapis.com/stdout Stackdriver logs bucket,
        # even if other logs are stalled on the global interpreter lock or some
        # other issue.
        stdout_handler = logging.StreamHandler(sys.stdout)
        handlers.setup_logging(stdout_handler, log_level=logging.INFO)
        for handler in logger.handlers:
            if not isinstance(
                    handler,
                (StructuredAppEngineHandler, logging.StreamHandler)):
                logger.removeHandler(handler)
    else:
        logging.basicConfig()

    for handler in logger.handlers:
        # If writing directly to Stackdriver, send a structured message.
        if isinstance(handler, StructuredAppEngineHandler):
            handler.setFormatter(StructuredLogFormatter())
        # Otherwise, the default stream handler requires a string.
        else:
            handler.setFormatter(
                logging.Formatter(
                    "(%(region)s) %(module)s/%(funcName)s : %(message)s"))

    # Export gunicorn errors using the same handlers as other logs, so that they
    # go to Stackdriver in production.
    gunicorn_logger = logging.getLogger("gunicorn.error")
    gunicorn_logger.handlers = logger.handlers
Example #19
    def _record_attrs_to_ignore(self) -> Iterable[str]:
        # Doing log.info(..., extra={'foo': 2}) sets extra properties on
        # record, i.e. record.foo. And we need to filter those too. Fun
        #
        # Create a record, and look at what attributes are on it, and ignore
        # all the default ones!

        record = logging.getLogRecordFactory()(
            # name, level, pathname, lineno, msg, args, exc_info, func=None, sinfo=None,
            "x",
            logging.INFO,
            __file__,
            1,
            "",
            tuple(),
            exc_info=None,
            func="funcname",
        )
        return frozenset(record.__dict__).difference({'msg', 'args'})
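A hedged follow-up sketch: with the ignore-set computed above, whatever is left in a record's __dict__ must have arrived via extra={...} (extract_extras is a hypothetical helper):

def extract_extras(record, attrs_to_ignore):
    # attrs_to_ignore is the frozenset returned by _record_attrs_to_ignore()
    return {k: v for k, v in record.__dict__.items() if k not in attrs_to_ignore}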
Example #20
def get_log_record_constructor():
    old_factory = logging.getLogRecordFactory()

    def log_record(name,
                   level,
                   path,
                   lno,
                   msg,
                   args,
                   exc_info,
                   func=None,
                   sinfo=None,
                   **kwargs):
        start = path.rfind('/tide/')
        if start > -1:
            path = path[start + 1:]
        return old_factory(name, level, path, lno, msg, args, exc_info, func,
                           sinfo, **kwargs)

    return log_record
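Note that the constructor is built and returned but never installed; a hedged one-liner to wire it in:

logging.setLogRecordFactory(get_log_record_constructor())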
Example #21
    def makeRecord(
        self,
        name,
        level,
        fn,
        lno,
        msg,
        args,
        exc_info,
        func=None,
        extra=None,
        sinfo=None,
    ):
        # Unlike the standard Logger class, we allow overwriting
        # all attributes of the log record with stuff from *extra*.
        factory = logging.getLogRecordFactory()
        rv = factory(name, level, fn, lno, msg, args, exc_info, func, sinfo)
        if extra is not None:
            rv.__dict__.update(extra)
        return rv
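A hedged sketch of where such an override could live; PermissiveLogger is a hypothetical Logger subclass carrying the makeRecord above, installed via setLoggerClass:

import logging

class PermissiveLogger(logging.Logger):
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
                   func=None, extra=None, sinfo=None):
        rv = logging.getLogRecordFactory()(name, level, fn, lno, msg, args,
                                           exc_info, func, sinfo)
        if extra is not None:
            rv.__dict__.update(extra)
        return rv

logging.setLoggerClass(PermissiveLogger)
log = logging.getLogger("permissive.app")
# the stock Logger.makeRecord would raise KeyError for a reserved key like this:
log.warning("request done", extra={"funcName": "public_api"})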
Example #22
    def setup_loggerhook(
        self,
        level: int,
        tags: Optional[List[str]] = None,
        publish: bool = True,
    ) -> None:
        if not self.is_loggerhook_set:
            old_factory = logging.getLogRecordFactory()

            def record_factory(*args, **kwargs):
                record = old_factory(*args, **kwargs)
                if record.levelno >= level:
                    self.logging_report(record=record,
                                        tags=tags,
                                        publish=publish)
                return record

            logging.setLogRecordFactory(record_factory)

            self.is_loggerhook_set = True
Example #23
    def makeRecord(
        self,
        name,
        level,
        fn,
        lno,
        msg,
        args,
        exc_info,
        func=None,
        extra=None,
        sinfo=None,
    ):
        rv = logging.getLogRecordFactory()(
            name, level, fn, lno, msg, args, exc_info, func, sinfo
        )
        if extra is not None:
            for key in extra:
                rv.__dict__[key] = extra[key]
        return rv
Example #24
File: log.py Project: ekaats/Radicale
def setup():
    """Set global logging up."""
    global register_stream
    handler = ThreadedStreamHandler()
    logging.basicConfig(format=LOGGER_FORMAT, datefmt=DATE_FORMAT,
                        handlers=[handler])
    register_stream = handler.register_stream
    log_record_factory = IdentLogRecordFactory(logging.getLogRecordFactory())
    logging.setLogRecordFactory(log_record_factory)

    """If a logfile is provided, also add a filehandler as logger"""
    configuration = config.load()
    logfile = configuration.get("logging", "logfile")

    if logfile:
        logfile_handler = logging.FileHandler(logfile, mode="w")
        logger.addHandler(logfile_handler)
        logger.info(f"Registered filelogger to {logfile}")
        
    set_level(logging.WARNING)
Example #25
File: log.py Project: zhatin/conjure-up
def setup_logging(app, logfile, debug=True):
    old_factory = logging.getLogRecordFactory()

    def spell_record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        if record.name != 'conjure-up':
            record.filename = '{}: {}'.format(record.name, record.filename)
        spell_name = app.config.get('spell', consts.UNSPECIFIED_SPELL)
        record.name = 'conjure-up/{}'.format(spell_name)
        return record

    logging.setLogRecordFactory(spell_record_factory)

    cmdslog = TimedRotatingFileHandler(logfile,
                                       when='D',
                                       interval=1,
                                       backupCount=7)
    cmdslog.setFormatter(
        logging.Formatter("%(asctime)s [%(levelname)s] %(name)s - "
                          "%(filename)s:%(lineno)d - %(message)s"))

    root_logger = logging.getLogger()
    app_logger = logging.getLogger('conjure-up')

    if debug:
        app_logger.setLevel(logging.DEBUG)
        root_logger.setLevel(logging.DEBUG)
    else:
        # always use DEBUG level for app, for now
        app_logger.setLevel(logging.DEBUG)
        root_logger.setLevel(logging.INFO)

    root_logger.addHandler(cmdslog)
    if os.path.exists('/dev/log'):
        st_mode = os.stat('/dev/log').st_mode
        if stat.S_ISSOCK(st_mode):
            syslog_h = SysLogHandler(address='/dev/log')
            syslog_h.set_name('conjure-up')
            app_logger.addHandler(syslog_h)

    return app_logger
Example #26
    def addQueueLogHandler(tracer: logging.Logger, ctx) -> None:
        # Provide access to custom (payload-specific) fields
        oldFactory = logging.getLogRecordFactory()

        def recordFactory(*args, **kwargs):
            record = oldFactory(*args, **kwargs)
            record.sapmonid = ctx.sapmonId
            record.payloadversion = PAYLOAD_VERSION
            return record

        tracer.info("adding storage queue log handler")
        try:
            storageQueue = AzureStorageQueue(
                tracer,
                ctx.sapmonId,
                ctx.msiClientId,
                ctx.vmInstance["subscriptionId"],
                ctx.vmInstance["resourceGroupName"],
                queueName=STORAGE_QUEUE_NAMING_CONVENTION % ctx.sapmonId)
            storageKey = tracing.getAccessKeys(tracer, ctx)
            queueStorageLogHandler = QueueStorageHandler(
                account_name=storageQueue.accountName,
                account_key=storageKey,
                protocol="https",
                queue=storageQueue.name)
            queueStorageLogHandler.level = DEFAULT_QUEUE_TRACE_LEVEL
            jsonFormatter = JsonFormatter(
                tracing.config["formatters"]["json"]["fieldMapping"])
            queueStorageLogHandler.setFormatter(jsonFormatter)
            logging.setLogRecordFactory(recordFactory)

        except Exception as e:
            tracer.error(
                "could not add handler for the storage queue logging (%s) " %
                e)
            return

        queueStorageLogHandler.level = DEFAULT_QUEUE_TRACE_LEVEL
        tracer.addHandler(queueStorageLogHandler)
        return
Example #27
    def flush(self) -> None:
        buf = self.line_buffer.getvalue()
        if not buf:
            return
        fact = logginglib.getLogRecordFactory()
        logger = logging.getLogger(self.logger_name)
        created = None
        for line in buf.splitlines(keepends=False):
            record = fact(
                name="demandprinter",
                level=logging.INFO,
                pathname=__file__,
                lineno=1,
                msg=line,
                args=(),
                exc_info=None,
                created=created,
            )
            created = created or record.created
            logger.handle(record)

        self.line_buffer = io.StringIO()
Example #28
    def wrapper(*args, **kwargs):
        old_factory = logging.getLogRecordFactory()

        def _record_factory(*args, **kwargs):
            """Make function print wrapped function's name instead of a wrapper"""
            record = old_factory(*args, **kwargs)
            record.funcName = f.__name__
            return record

        dispatcher: telegram.ext.Dispatcher = args[1].dispatcher

        logging.setLogRecordFactory(_record_factory)
        dispatcher.logger.debug(args)
        dispatcher.logger.debug(kwargs)
        dispatcher.logger.debug(dispatcher.bot_data)
        dispatcher.logger.debug(dispatcher.chat_data)
        dispatcher.logger.debug(dispatcher.user_data)
        result = f(*args, **kwargs)
        dispatcher.logger.debug(f"{f.__name__} : {result}")
        logging.setLogRecordFactory(old_factory)

        return result
Example #29
def configure_pylog_MDC(level: str, MDC_class: Optional[type] = MDCDict):
    """Configure log4cxx to send messages to Python logging, with MDC support.

    Parameters
    ----------
    level : `str`
        Name of the logging level for root log4cxx logger.
    MDC_class : `type`, optional
        Type of dictionary which is added to `logging.LogRecord` as an ``MDC``
        attribute. Any dictionary or ``defaultdict``-like class can be used as
        a type. If `None` the `logging.LogRecord` will not be augmented.

    Notes
    -----
    This method does two things:

    - Configures log4cxx with a given logging level and a ``PyLogAppender``
      appender class which forwards all messages to Python `logging`.
    - Installs a record factory for Python `logging` that adds ``MDC``
      attribute to every `logging.LogRecord` object (instance of
      ``MDC_class``). This will happen by default but can be disabled
      by setting the ``MDC_class`` parameter to `None`.
    """
    if MDC_class is not None:
        old_factory = logging.getLogRecordFactory()

        def record_factory(*args, **kwargs):
            record = old_factory(*args, **kwargs)
            record.MDC = MDC_class()
            return record

        logging.setLogRecordFactory(record_factory)

    properties = """\
log4j.rootLogger = {}, PyLog
log4j.appender.PyLog = PyLogAppender
""".format(level)
    configure_prop(properties)
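A hedged usage sketch, assuming the surrounding module provides configure_prop and MDCDict as above; the MDC attribute then renders through an ordinary format field:

configure_pylog_MDC("INFO")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(levelname)s %(MDC)s %(message)s"))
logging.getLogger().addHandler(handler)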
Example #30
def init_logger() -> None:
    """ init_logger is a garbage function that sets a logger with owo format """

    # Set up root logger to INFO level and only print the owo'd log message
    logging.basicConfig(level=logging.INFO, format='%(owo)s')

    # Raise the module logger to DEBUG
    logging.getLogger(__name__).setLevel(logging.DEBUG)

    # Get the original log factory
    factory = logging.getLogRecordFactory()

    def owo_factory(*args, **kwargs) -> logging.LogRecord:
        """ owo_factory is a log factory that owos log messages """
        # Run original factory
        record = factory(*args, **kwargs)

        # OwO the provided message and return it
        record.owo = whats_this(record.msg)  # type: ignore
        return record

    # Set the logger to use the owo factory
    logging.setLogRecordFactory(owo_factory)
Example #31
    def flush(self) -> None:
        buf = self.line_buffer.getvalue()
        if not buf:
            return
        fact = logginglib.getLogRecordFactory()
        root = logging.getLogger()

        if not root.filters:
            root.addFilter(ExcludeDemandPrinterFilter("root"))

        for line in buf.splitlines(keepends=False):
            record = fact(
                name="demandprinter",
                level=logging.INFO,
                pathname=__file__,
                lineno=1,
                msg=line,
                args=(),
                exc_info=None,
            )
            root.handle(record)

        self.line_buffer = io.StringIO()
Example #32
    def test_custom_record_factory(self):
        """
        Test that custom LogRecord factories are supported.

        This test is a bit convoluted because the logging module suppresses
        exceptions. We monkey patch the method suspected of encountering
        exceptions so that we can tell after it was called whether any
        exceptions occurred (despite the exceptions not propagating).
        """
        if not hasattr(logging, 'getLogRecordFactory'):
            return self.skipTest("this test requires Python >= 3.2")

        exceptions = []
        original_method = ColoredFormatter.format
        original_factory = logging.getLogRecordFactory()

        def custom_factory(*args, **kwargs):
            record = original_factory(*args, **kwargs)
            record.custom_attribute = 0xdecafbad
            return record

        def custom_method(*args, **kw):
            try:
                return original_method(*args, **kw)
            except Exception as e:
                exceptions.append(e)
                raise

        with PatchedAttribute(ColoredFormatter, 'format', custom_method):
            logging.setLogRecordFactory(custom_factory)
            try:
                demonstrate_colored_logging()
            finally:
                logging.setLogRecordFactory(original_factory)

        # Ensure that no exceptions were triggered.
        assert not exceptions
Example #33
    def test_unknown(self):
        from django.conf import settings
        from automated_logging.settings import settings as conf

        logger = logging.getLogger(__name__)

        default_factory = logging.getLogRecordFactory()

        def factory(*args, **kwargs):
            """
            force setting the pathname and module
            wrong so that we can pretend to exclude unknowns
            """

            record = default_factory(*args, **kwargs)

            record.pathname = '/example.py'
            record.module = 'default'
            return record

        self.clear()
        logging.setLogRecordFactory(factory=factory)

        settings.AUTOMATED_LOGGING['unspecified']['exclude']['unknown'] = True
        conf.load.cache_clear()

        logger.info(random_string())
        self.assertEqual(UnspecifiedEvent.objects.count(), 0)

        settings.AUTOMATED_LOGGING['unspecified']['exclude']['unknown'] = False
        conf.load.cache_clear()

        logger.info(random_string())
        self.assertEqual(UnspecifiedEvent.objects.count(), 1)

        logging.setLogRecordFactory(default_factory)
Example #34
def setup_logging():

    logging.basicConfig(
        format="%(levelname)s:%(name)s:%(asctime)s: %(message)s",
        datefmt="%I:%M:%S:%p",
        level=logging.ERROR,
    )

    old_record_factory = logging.getLogRecordFactory()

    def record_factory(*args, **kwargs) -> logging.LogRecord:
        record: logging.LogRecord = old_record_factory(*args, **kwargs)
        level = record.levelno
        _format = None
        if level <= logging.INFO:
            _format = crayons.green
        elif level <= logging.WARNING:
            _format = crayons.yellow
        else:
            _format = crayons.red
        record.msg = _format(record.msg)
        return record

    logging.setLogRecordFactory(record_factory)
Example #35
    def __init__(self, max_length=40):
        self.max_length = max_length
        self.origin_factory = logging.getLogRecordFactory()
Example #36
    def __init__(self):
        self.previous = logging.getLogRecordFactory()
Example #37
def init_logger():
    global _logger_init
    if _logger_init:
        return
    _logger_init = True

    old_factory = logging.getLogRecordFactory()

    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.perf_info = ""
        return record

    logging.setLogRecordFactory(record_factory)

    logging.addLevelName(25, "INFO")
    logging.captureWarnings(True)

    from .tools.translate import resetlocale
    resetlocale()

    # create a format for log messages and dates
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s %(perf_info)s'
    # Normal Handler on stderr
    handler = logging.StreamHandler()

    if tools.config['syslog']:
        # SysLog Handler
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler(
                "%s %s" % (release.description, release.version))
        elif platform.system() == 'Darwin':
            handler = logging.handlers.SysLogHandler('/var/run/log')
        else:
            handler = logging.handlers.SysLogHandler('/dev/log')
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'

    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # We check we have the right location for the log files
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if tools.config['logrotate'] is not False:
                if tools.config['workers'] and tools.config['workers'] > 1:
                    # TODO: fallback to regular file logging in master for safe(r) defaults?
                    #
                    # Doing so here would be a good idea but also might break
                    # situations where people do log-shipping of rotated data?
                    _logger.warning(
                        "WARNING: built-in log rotation is not reliable in multi-worker scenarios and may incur significant data loss. "
                        "It is strongly recommended to use an external log rotation utility or use system loggers (--syslog) instead."
                    )
                handler = logging.handlers.TimedRotatingFileHandler(
                    filename=logf, when='D', interval=1, backupCount=30)
            elif os.name == 'posix':
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.FileHandler(logf)
        except Exception:
            sys.stderr.write(
                "ERROR: couldn't create the logfile directory. Logging to the standard output.\n"
            )

    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    def is_a_tty(stream):
        return hasattr(stream, 'fileno') and os.isatty(stream.fileno())

    if os.name == 'posix' and isinstance(
            handler, logging.StreamHandler) and is_a_tty(handler.stream):
        formatter = ColoredFormatter(format)
        perf_filter = ColoredPerfFilter()
    else:
        formatter = DBFormatter(format)
        perf_filter = PerfFilter()
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    logging.getLogger('werkzeug').addFilter(perf_filter)

    if tools.config['log_db']:
        db_levels = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL,
        }
        postgresqlHandler = PostgreSQLHandler()
        postgresqlHandler.setLevel(
            int(
                db_levels.get(tools.config['log_db_level'],
                              tools.config['log_db_level'])))
        logging.getLogger().addHandler(postgresqlHandler)

    # Configure loggers levels
    pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])

    logconfig = tools.config['log_handler']

    logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
    for logconfig_item in logging_configurations:
        loggername, level = logconfig_item.split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)

    for logconfig_item in logging_configurations:
        _logger.debug('logger level set: "%s"', logconfig_item)
Example #38
File: netsvc.py Project: GSLabIt/odoo
def init_logger():
    global _logger_init
    if _logger_init:
        return
    _logger_init = True

    old_factory = logging.getLogRecordFactory()
    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.perf_info = ""
        return record
    logging.setLogRecordFactory(record_factory)

    # enable deprecation warnings (disabled by default)
    warnings.simplefilter('default', category=DeprecationWarning)
    # ignore deprecation warnings from invalid escape (there's a ton and it's
    # pretty likely a super low-value signal)
    warnings.filterwarnings('ignore', r'^invalid escape sequence \'?\\.', category=DeprecationWarning)
    # recordsets are both sequence and set so trigger warning despite no issue
    warnings.filterwarnings('ignore', r'^Sampling from a set', category=DeprecationWarning, module='odoo')
    # ignore a bunch of warnings we can't really fix ourselves
    for module in [
        'babel.util', # deprecated parser module, no release yet
        'zeep.loader',# zeep using defusedxml.lxml
        'reportlab.lib.rl_safe_eval',# reportlab importing ABC from collections
        'ofxparse',# ofxparse importing ABC from collections
        'astroid',  # deprecated imp module (fixed in 2.5.1)
        'requests_toolbelt', # importing ABC from collections (fixed in 0.9)
    ]:
        warnings.filterwarnings('ignore', category=DeprecationWarning, module=module)

    # the SVG guesser thing always compares str and bytes, ignore it
    warnings.filterwarnings('ignore', category=BytesWarning, module='odoo.tools.image')
    # reportlab does a bunch of bytes/str mixing in a hashmap
    warnings.filterwarnings('ignore', category=BytesWarning, module='reportlab.platypus.paraparser')

    from .tools.translate import resetlocale
    resetlocale()

    # create a format for log messages and dates
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s %(perf_info)s'
    # Normal Handler on stderr
    handler = logging.StreamHandler()

    if tools.config['syslog']:
        # SysLog Handler
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
        elif platform.system() == 'Darwin':
            handler = logging.handlers.SysLogHandler('/var/run/log')
        else:
            handler = logging.handlers.SysLogHandler('/dev/log')
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'

    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # We check we have the right location for the log files
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if os.name == 'posix':
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.FileHandler(logf)
        except Exception:
            sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")

    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    def is_a_tty(stream):
        return hasattr(stream, 'fileno') and os.isatty(stream.fileno())

    if os.name == 'posix' and isinstance(handler, logging.StreamHandler) and (is_a_tty(handler.stream) or os.environ.get("ODOO_PY_COLORS")):
        formatter = ColoredFormatter(format)
        perf_filter = ColoredPerfFilter()
    else:
        formatter = DBFormatter(format)
        perf_filter = PerfFilter()
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    logging.getLogger('werkzeug').addFilter(perf_filter)

    if tools.config['log_db']:
        db_levels = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL,
        }
        postgresqlHandler = PostgreSQLHandler()
        postgresqlHandler.setLevel(int(db_levels.get(tools.config['log_db_level'], tools.config['log_db_level'])))
        logging.getLogger().addHandler(postgresqlHandler)

    # Configure loggers levels
    pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])

    logconfig = tools.config['log_handler']

    logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
    for logconfig_item in logging_configurations:
        loggername, level = logconfig_item.strip().split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)

    for logconfig_item in logging_configurations:
        _logger.debug('logger level set: "%s"', logconfig_item)
Example #39
    try:
        # logger.setLevel(level.upper())
        logger.setLevel(level)
    except ValueError:
        pass # let it default to warning

    # check for handlers, or we could get one logger spitting out
    # dozens of duplicate messages every time it's called
    if not logger.hasHandlers():
        logger.addHandler(q_handler)

    return logger


old_factory = logging.getLogRecordFactory()
def recordFactory(name, level, fn, lno, msg, args, exc_info, func=None, sinfo=None, **kwargs):
    """
    intercept log record creation to clean up output a bit, as well as to
    correct the information the << overload was giving us (it was reporting
    module name and line numbers as coming from the skylog module, rather than
    from where the debug call originated)
    """
    _name=name.split(".")[-1]
    if func and func != "__lshift":
        func = _name + "." + func
        return old_factory(name, level, fn, lno, msg, args, exc_info, func, sinfo, **kwargs)

    ## get info for actual calling function
    ## (value of 5 determined by trial and error)
    f = sys._getframe(5)
    # pathname = f.f_code.co_filename
    # lineno = f.f_lineno
    funcName=_name + "." + f.f_code.co_name
Example #40
    def __init__(self, config=None, **inline):
        # configure loggers first
        cls = self.__class__
        self.logger = get_logger("%s.%s" % (cls.__module__, cls.__name__))
        self.error_logger = get_logger("elasticapm.errors")

        self.tracer = None
        self.processors = []
        self.filter_exception_types_dict = {}
        self._service_info = None

        config = Config(config, inline_dict=inline)
        if config.errors:
            for msg in config.errors.values():
                self.error_logger.error(msg)
            config.disable_send = True
        self.config = VersionedConfig(config, version=None)

        # Insert the log_record_factory into the logging library
        # The LogRecordFactory functionality is only available on python 3.2+
        if compat.PY3 and not self.config.disable_log_record_factory:
            record_factory = logging.getLogRecordFactory()
            # Only way to know if it's wrapped is to create a log record
            throwaway_record = record_factory(__name__, logging.DEBUG, __file__, 252, "dummy_msg", [], None)
            if not hasattr(throwaway_record, "elasticapm_labels"):
                self.logger.debug("Inserting elasticapm log_record_factory into logging")

                # Late import due to circular imports
                import elasticapm.handlers.logging as elastic_logging

                new_factory = elastic_logging.log_record_factory(record_factory)
                logging.setLogRecordFactory(new_factory)

        headers = {
            "Content-Type": "application/x-ndjson",
            "Content-Encoding": "gzip",
            "User-Agent": "elasticapm-python/%s" % elasticapm.VERSION,
        }

        if self.config.secret_token:
            headers["Authorization"] = "Bearer %s" % self.config.secret_token
        transport_kwargs = {
            "metadata": self._build_metadata(),
            "headers": headers,
            "verify_server_cert": self.config.verify_server_cert,
            "server_cert": self.config.server_cert,
            "timeout": self.config.server_timeout,
            "max_flush_time": self.config.api_request_time / 1000.0,
            "max_buffer_size": self.config.api_request_size,
            "processors": self.load_processors(),
        }
        self._api_endpoint_url = compat.urlparse.urljoin(
            self.config.server_url if self.config.server_url.endswith("/") else self.config.server_url + "/",
            constants.EVENTS_API_PATH,
        )
        self._transport = import_string(self.config.transport_class)(self._api_endpoint_url, **transport_kwargs)

        for exc_to_filter in self.config.filter_exception_types or []:
            exc_to_filter_type = exc_to_filter.split(".")[-1]
            exc_to_filter_module = ".".join(exc_to_filter.split(".")[:-1])
            self.filter_exception_types_dict[exc_to_filter_type] = exc_to_filter_module

        if platform.python_implementation() == "PyPy":
            # PyPy introduces a `_functools.partial.__call__` frame due to our use
            # of `partial` in AbstractInstrumentedModule
            skip_modules = ("elasticapm.", "_functools")
        else:
            skip_modules = ("elasticapm.",)

        self.tracer = Tracer(
            frames_collector_func=lambda: list(
                stacks.iter_stack_frames(
                    start_frame=inspect.currentframe(), skip_top_modules=skip_modules, config=self.config
                )
            ),
            frames_processing_func=lambda frames: self._get_stack_info_for_trace(
                frames,
                library_frame_context_lines=self.config.source_lines_span_library_frames,
                in_app_frame_context_lines=self.config.source_lines_span_app_frames,
                with_locals=self.config.collect_local_variables in ("all", "transactions"),
                locals_processor_func=lambda local_var: varmap(
                    lambda k, v: shorten(
                        v,
                        list_length=self.config.local_var_list_max_length,
                        string_length=self.config.local_var_max_length,
                        dict_length=self.config.local_var_dict_max_length,
                    ),
                    local_var,
                ),
            ),
            queue_func=self.queue,
            config=self.config,
            agent=self,
        )
        self.include_paths_re = stacks.get_path_regex(self.config.include_paths) if self.config.include_paths else None
        self.exclude_paths_re = stacks.get_path_regex(self.config.exclude_paths) if self.config.exclude_paths else None
        self._metrics = MetricsRegistry(
            self.config.metrics_interval / 1000.0, self.queue, ignore_patterns=self.config.disable_metrics
        )
        for path in self.config.metrics_sets:
            self._metrics.register(path)
        if self.config.breakdown_metrics:
            self._metrics.register("elasticapm.metrics.sets.breakdown.BreakdownMetricSet")
        compat.atexit_register(self.close)
        if self.config.central_config:
            self._config_updater = IntervalTimer(
                update_config, 1, "eapm conf updater", daemon=True, args=(self,), evaluate_function_interval=True
            )
            self._config_updater.start()
        else:
            self._config_updater = None
Example #41
MDContext = new_log_context
MDC = new_log_context

LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.ERROR)


def patch(old_factory):
    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)

        for key, value in get_mdc_fields().items():
            setattr(record, key, value)

        return record

    return record_factory


try:
    logging.setLogRecordFactory(patch(logging.getLogRecordFactory()))
except AttributeError:
    logging.LogRecord = patch(logging.LogRecord)


# legacy handler to avoid breaking existing implementations, this will be removed with 2.x
class MDCHandler(logging.StreamHandler):
    def __init__(self, *args, **kwargs):
        logging.StreamHandler.__init__(self, *args, **kwargs)
        self.setFormatter(jsonlogger.JsonFormatter())
Example #42
File: _sync.py Project: knobix/OCRmyPDF
log = logging.getLogger(__name__)


class PageResult(NamedTuple):  # pylint: disable=inherit-non-class
    pageno: int
    pdf_page_from_image: Optional[Path]
    ocr: Optional[Path]
    text: Optional[Path]
    orientation_correction: int


tls = threading.local()
tls.pageno = None


old_factory = logging.getLogRecordFactory()


def record_factory(*args, **kwargs):
    record = old_factory(*args, **kwargs)
    if hasattr(tls, 'pageno'):
        record.pageno = tls.pageno
    return record


logging.setLogRecordFactory(record_factory)
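A hedged sketch of how the thread-local page number surfaces; the %(pageno)s format field is an assumption, the project's real formatter is not shown here:

handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("page %(pageno)s: %(message)s"))
log.addHandler(handler)

tls.pageno = 3
log.warning("preprocessing")  # -> "page 3: preprocessing"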


def preprocess(
    page_context: PageContext,
    image: Path,
Example #43
    def __init__(self):
        self.default_logrecord_factory = logging.getLogRecordFactory()
Example #44
File: netsvc.py Project: akretion/odoo
def init_logger():
    global _logger_init
    if _logger_init:
        return
    _logger_init = True

    old_factory = logging.getLogRecordFactory()
    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.perf_info = ""
        return record
    logging.setLogRecordFactory(record_factory)

    logging.addLevelName(25, "INFO")
    logging.captureWarnings(True)

    from .tools.translate import resetlocale
    resetlocale()

    # create a format for log messages and dates
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s %(perf_info)s'
    # Normal Handler on stderr
    handler = logging.StreamHandler()

    if tools.config['syslog']:
        # SysLog Handler
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
        elif platform.system() == 'Darwin':
            handler = logging.handlers.SysLogHandler('/var/run/log')
        else:
            handler = logging.handlers.SysLogHandler('/dev/log')
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'

    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # We check we have the right location for the log files
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if tools.config['logrotate'] is not False:
                if tools.config['workers'] and tools.config['workers'] > 1:
                    # TODO: fallback to regular file logging in master for safe(r) defaults?
                    #
                    # Doing so here would be a good idea but also might break
                    # situations where people do log-shipping of rotated data?
                    _logger.warning("WARNING: built-in log rotation is not reliable in multi-worker scenarios and may incur significant data loss. "
                                    "It is strongly recommended to use an external log rotation utility or use system loggers (--syslog) instead.")
                handler = logging.handlers.TimedRotatingFileHandler(filename=logf, when='D', interval=1, backupCount=30)
            elif os.name == 'posix':
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.FileHandler(logf)
        except Exception:
            sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")

    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    def is_a_tty(stream):
        return hasattr(stream, 'fileno') and os.isatty(stream.fileno())

    if os.name == 'posix' and isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
        formatter = ColoredFormatter(format)
        perf_filter = ColoredPerfFilter()
    else:
        formatter = DBFormatter(format)
        perf_filter = PerfFilter()
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    logging.getLogger('werkzeug').addFilter(perf_filter)

    if tools.config['log_db']:
        db_levels = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL,
        }
        postgresqlHandler = PostgreSQLHandler()
        postgresqlHandler.setLevel(int(db_levels.get(tools.config['log_db_level'], tools.config['log_db_level'])))
        logging.getLogger().addHandler(postgresqlHandler)

    # Configure loggers levels
    pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])

    logconfig = tools.config['log_handler']

    logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
    for logconfig_item in logging_configurations:
        loggername, level = logconfig_item.split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)

    for logconfig_item in logging_configurations:
        _logger.debug('logger level set: "%s"', logconfig_item)
Example #45
File: netsvc.py Project: Tecnativa/odoo
def init_logger():
    global _logger_init
    if _logger_init:
        return
    _logger_init = True

    old_factory = logging.getLogRecordFactory()
    def record_factory(*args, **kwargs):
        record = old_factory(*args, **kwargs)
        record.perf_info = ""
        return record
    logging.setLogRecordFactory(record_factory)

    logging.addLevelName(25, "INFO")
    logging.captureWarnings(True)

    from .tools.translate import resetlocale
    resetlocale()

    # create a format for log messages and dates
    format = '%(asctime)s %(pid)s %(levelname)s %(dbname)s %(name)s: %(message)s %(perf_info)s'
    # Normal Handler on stderr
    handler = logging.StreamHandler()

    if tools.config['syslog']:
        # SysLog Handler
        if os.name == 'nt':
            handler = logging.handlers.NTEventLogHandler("%s %s" % (release.description, release.version))
        elif platform.system() == 'Darwin':
            handler = logging.handlers.SysLogHandler('/var/run/log')
        else:
            handler = logging.handlers.SysLogHandler('/dev/log')
        format = '%s %s' % (release.description, release.version) \
                + ':%(dbname)s:%(levelname)s:%(name)s:%(message)s'

    elif tools.config['logfile']:
        # LogFile Handler
        logf = tools.config['logfile']
        try:
            # We check we have the right location for the log files
            dirname = os.path.dirname(logf)
            if dirname and not os.path.isdir(dirname):
                os.makedirs(dirname)
            if os.name == 'posix':
                handler = logging.handlers.WatchedFileHandler(logf)
            else:
                handler = logging.FileHandler(logf)
        except Exception:
            sys.stderr.write("ERROR: couldn't create the logfile directory. Logging to the standard output.\n")

    # Check that handler.stream has a fileno() method: when running OpenERP
    # behind Apache with mod_wsgi, handler.stream will have type mod_wsgi.Log,
    # which has no fileno() method. (mod_wsgi.Log is what is being bound to
    # sys.stderr when the logging.StreamHandler is being constructed above.)
    def is_a_tty(stream):
        return hasattr(stream, 'fileno') and os.isatty(stream.fileno())

    if os.name == 'posix' and isinstance(handler, logging.StreamHandler) and is_a_tty(handler.stream):
        formatter = ColoredFormatter(format)
        perf_filter = ColoredPerfFilter()
    else:
        formatter = DBFormatter(format)
        perf_filter = PerfFilter()
    handler.setFormatter(formatter)
    logging.getLogger().addHandler(handler)
    logging.getLogger('werkzeug').addFilter(perf_filter)

    if tools.config['log_db']:
        db_levels = {
            'debug': logging.DEBUG,
            'info': logging.INFO,
            'warning': logging.WARNING,
            'error': logging.ERROR,
            'critical': logging.CRITICAL,
        }
        postgresqlHandler = PostgreSQLHandler()
        postgresqlHandler.setLevel(int(db_levels.get(tools.config['log_db_level'], tools.config['log_db_level'])))
        logging.getLogger().addHandler(postgresqlHandler)

    # Configure loggers levels
    pseudo_config = PSEUDOCONFIG_MAPPER.get(tools.config['log_level'], [])

    logconfig = tools.config['log_handler']

    logging_configurations = DEFAULT_LOG_CONFIGURATION + pseudo_config + logconfig
    for logconfig_item in logging_configurations:
        loggername, level = logconfig_item.split(':')
        level = getattr(logging, level, logging.INFO)
        logger = logging.getLogger(loggername)
        logger.setLevel(level)

    for logconfig_item in logging_configurations:
        _logger.debug('logger level set: "%s"', logconfig_item)