def __init__(self,
                 stream_type="stdout",
                 filename=None,
                 loglevel=None,
                 assertion_log=False,
                 options=None):
        if loglevel is None:
            loglevel = logging.INFO

        self.shouldStop = False
        self.failFast = self.failfast = False
        self.pausable_runner = None

        logging_manager = logging.Manager(
            picklablelogger.RootLogger(logging.WARNING))
        logging_manager.setLoggerClass(picklablelogger.Logger)
        self.logger = logging_manager.getLogger("pausable_unittest")
        self.logger.setLevel(loglevel)
        self.logger.addHandler(
            picklablelogger.PicklableStreamHandler(stream_type))
        if filename is not False:
            self.logger.addHandler(
                picklablelogger.PicklableFileHandler(filename))

        self.assertion_log = assertion_log

        self._stream_type = stream_type
        self._results = []
        self._file = None
        self._running_test = None
        self._total_start_time = None
        self._options = options
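
This constructor routes everything through a private logging.Manager instead of the process-wide logging.manager, so its loggers never collide with ones other code obtains via logging.getLogger. A minimal sketch of that isolation pattern, using plain standard-library classes in place of the picklablelogger ones (which this snippet does not define):

import logging
import sys

# A private root and manager: loggers created here are invisible to
# logging.getLogger(), which consults the global logging.Logger.manager.
private_root = logging.RootLogger(logging.WARNING)
private_manager = logging.Manager(private_root)
private_manager.setLoggerClass(logging.Logger)

log = private_manager.getLogger("pausable_unittest")
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(sys.stdout))

log.info("routed through the private manager only")
assert logging.getLogger("pausable_unittest") is not log
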
Example 2
def test_logger_adapter():
    stream = io.StringIO()
    stream_handler = logging.StreamHandler(stream)
    stream_handler.setFormatter(
        ExtraAppendingFormatter(
            '%(asctime)s %(levelname)-8s %(processName)-5s %(threadName)-5s %(name)-12s %(message)s <%(extra_str)s>'
        ))

    root_logger = logging.RootLogger(logging.DEBUG)
    root_logger.addHandler(stream_handler)
    manager = logging.Manager(root_logger)

    logger = manager.getLogger('dummy')
    logger.info('Some message n=%d m=%s', 111, 'qqq1', extra={'www1': 'eee1'})

    logger2 = InjectExtraLoggerAdapter(logger, {'a': 'hello'})
    logger2.info('Some message n=%d m=%s', 111, 'qqq1', extra={'www1': 'eee1'})

    logger3 = InjectExtraLoggerAdapter(logger2, {'b': 'world'})
    logger3.info('Some message n=%d m=%s', 111, 'qqq1', extra={'www1': 'eee1'})

    log = stream.getvalue()
    assert 'INFO     MainProcess MainThread dummy        Some message n=111 m=qqq1 <www1=eee1>' in log
    assert 'INFO     MainProcess MainThread dummy        Some message n=111 m=qqq1 <www1=eee1 a=hello>' in log
    assert 'INFO     MainProcess MainThread dummy        Some message n=111 m=qqq1 <www1=eee1 b=world a=hello>' in log
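
The test above exercises InjectExtraLoggerAdapter and ExtraAppendingFormatter without showing them. A plausible minimal adapter, assuming (as the expected output suggests) that stacked adapters should merge their extra dicts rather than replace them the way logging.LoggerAdapter does by default:

import logging

class InjectExtraLoggerAdapter(logging.LoggerAdapter):
    """Merge this adapter's extra dict into each call's extra= mapping."""

    def process(self, msg, kwargs):
        merged = dict(kwargs.get("extra") or {})
        merged.update(self.extra)
        kwargs["extra"] = merged
        return msg, kwargs

Because each adapter forwards through self.logger, process() runs outermost-first, which is why the innermost adapter's key (a=hello) lands last in the expected <www1=eee1 b=world a=hello> output.
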
Example 3
class Logger(logging.Logger):

    __author__ = "Vladya"

    root = logging.RootLogger(logging.CRITICAL)
    manager = logging.Manager(root)

    @classmethod
    def getLogger(cls, name):

        cls.manager.setLoggerClass(cls)
        _logger = cls.manager.getLogger(name)
        if _logger.handlers:
            # Already configured; avoid stacking duplicate handlers on
            # repeated getLogger() calls for the same name.
            return _logger

        _formatter = logging.Formatter(
            "%(asctime)s %(levelname)s:%(name)s\n%(message)s\n")

        utils.create_dir_for_file(_paths._logfile)
        _handler = logging.FileHandler(_paths._logfile)
        _handler.setFormatter(_formatter)
        _logger.addHandler(_handler)

        if not utils.renpy:
            _handler = logging.StreamHandler()
            _handler.setFormatter(_formatter)
            _logger.addHandler(_handler)

        return _logger
Example 4
def setup_logging(logname, logdir, args, monkey=False):
    """Given an existing logdir (set up with setup_logdir above), set up
    the logging streams for this process.
    """
    log = logging.getLogger(logname)
    log.setLevel(logging.INFO)
    log_fmt = logging.Formatter(LOG_FRMT)
    if args.verbose:
        log.setLevel(logging.DEBUG)
    # stream handler to stdout unless a logfile was requested
    #
    if args.logfile is None:
        stream_hndlr = logging.StreamHandler(sys.stdout)
        stream_hndlr.setFormatter(log_fmt)
        log.addHandler(stream_hndlr)

    else:
        logfile = os.path.join(logdir, args.logfile)
        file_hndlr = logging.handlers.RotatingFileHandler(
            logfile, 'a', LOG_SIZE_MAX, LOG_COUNT_MAX)
        file_hndlr.setFormatter(log_fmt)
        log.addHandler(file_hndlr)

    if monkey:
        logging.root = log
        logging.Logger.root = logging.root 
        logging.manager = logging.Manager(logging.Logger.root)
    return log
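
When monkey=True this permanently replaces the process-wide root; callers that need to undo the swap must stash the old objects themselves, as the oldLoggerRoot bookkeeping in the Thespian examples below does by hand. A sketch of that save/restore pattern (not part of the original):

import logging

def monkeypatch_root(new_root):
    """Install new_root as the global root logger; returns an undo callable."""
    old_root, old_manager = logging.root, logging.Logger.manager

    logging.root = new_root
    logging.Logger.root = new_root
    logging.Logger.manager = logging.Manager(new_root)

    def restore():
        logging.root = old_root
        logging.Logger.root = old_root
        logging.Logger.manager = old_manager

    return restore
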
Example 5
def _override_root_logger():
    """
    Override the root logger with a `StructuredRootLogger`.
    """

    logging.root = StructuredRootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)
Example 6
def _cleanupAdmin(self):
    if getattr(self, 'asLogger', None):
        if hasattr(self, 'oldLoggerRoot'):
            logging.root = self.oldLoggerRoot
            logging.Logger.root = self.oldLoggerRoot
            logging.Logger.manager = logging.Manager(logging.Logger.root)
        self.transport.scheduleTransmit(
            None, TransmitIntent(self.asLogger, LoggerExitRequest()))
        self.transport.run(TransmitOnly)
Example 7
def setup_stdlib_logging(level, fmt):
    logging.root = logging.RootLogger(level)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)
    stream = sys.stderr
    hdlr = logging.StreamHandler(stream)
    fmt = logging.Formatter(fmt, None)
    hdlr.setFormatter(fmt)
    logging.root.addHandler(hdlr)
Example 8
def logging_sandbox():
    # Monkeypatch a replacement root logger, so that our changes to logging
    # configuration don't persist outside of the test
    root_logger = logging.RootLogger(logging.WARNING)

    with mock.patch.object(logging, 'root', root_logger):
        with mock.patch.object(logging.Logger, 'root', root_logger):
            with mock.patch.object(logging.Logger, 'manager',
                                   logging.Manager(root_logger)):
                yield
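
The bare yield means logging_sandbox is meant to be wrapped; a usage sketch, assuming contextlib.contextmanager (a pytest yield-fixture would work the same way):

import contextlib
import io
import logging

sandbox = contextlib.contextmanager(logging_sandbox)

with sandbox():
    logging.basicConfig(stream=io.StringIO(), level=logging.DEBUG)
    logging.getLogger("scratch").debug("stays inside the sandbox")
# On exit the original root, manager, and their handlers are back in place.
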
Example 9
def patch_logging_for_tracking():
    import logging
    import six
    if six.PY2:
        log_fn = _log_py2
    else:
        log_fn = _log
    setattr(logging.RootLogger, "_log", log_fn)
    logging.root = logging.RootLogger(logging.WARNING)
    logging.Logger.manager = logging.Manager(logging.root)
    setattr(logging.Logger, "_log", log_fn)
Example 10
    def __init__(self, loglevel):
        # initialize a new manager rooted at an isolated custom Logger
        # subclass, so records never leak into the global logging tree
        class _IsolatedLogger(logging.Logger):
            pass
        self.logger = logging.RootLogger(loglevel)
        _IsolatedLogger.root = self.logger
        _IsolatedLogger.manager = logging.Manager(self.logger)

        self.handler = QueueForIteratorHandler()
        self.handler.setFormatter(JSONFormatter())
        self.handler.setLevel(loglevel)
        self.logger.addHandler(self.handler)
        self.logger.setLevel(loglevel)
Example 11
def _cleanupAdmin(self):
    if getattr(self, 'asLogger', None):
        if hasattr(self, 'oldLoggerRoot'):
            logging.root = self.oldLoggerRoot
            logging.Logger.root = self.oldLoggerRoot
            logging.Logger.manager = logging.Manager(logging.Logger.root)
        self.transport.scheduleTransmit(
            None, TransmitIntent(self.asLogger, LoggerExitRequest()))
        self.transport.run(TransmitOnly)
        if getattr(self, 'asLogProc', None):
            if self._checkChildLiveness(self.asLogProc):
                import time
                time.sleep(0.02)  # wait a little to allow logger to exit
            self._checkChildLiveness(
                self.asLogProc)  # cleanup defunct proc
Example 12
def startChild(childClass, endpoint, transportClass,
               sourceHash, sourceToLoad,
               parentAddr, adminAddr, notifyAddr, loggerAddr,
               childRequirements, currentSystemCapabilities,
               fileNumsToClose):

    closeFileNums(fileNumsToClose)

    # Dirty trick here to workaround multiprocessing trying to impose
    # an unnecessary restriction.  A process should be set daemonic
    # before start() if the parent shouldn't track it (and, specifically,
    # shouldn't automatically join() the subprocess on exit).  For Actors, the
    # parent exists independently of the child and the ActorSystem
    # manages them, so daemonic processes are desired.  However,
    # multiprocessing imposes a restriction that daemonic processes
    # cannot create more processes.  The following reaches deep into
    # the implementation of the multiprocessing module to override
    # that restriction.  This process was already started as daemonic,
    # and it's detached from its parent.  The following simply clears
    # that flag locally so that other processes can be created from
    # this one.
    multiprocessing.process._current_process._daemonic = False

    transport = transportClass(endpoint)
    #closeUnusedFiles(transport)

    # Dirty trick here to completely re-initialize logging in this
    # process... something the standard Python logging interface does
    # not allow via the API.  We also do not want to run
    # logging.shutdown() because (a) that does not do enough to reset,
    # and (b) it shuts down handlers, but we want to leave the parent's
    # handlers alone.
    logging.root = ThespianLogForwarder(loggerAddr, transport)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)

    logger = logging.getLogger('Thespian.ActorManager')

    am = MultiProcManager(childClass, transport,
                          sourceHash, sourceToLoad,
                          parentAddr, adminAddr,
                          childRequirements, currentSystemCapabilities)
    am.asLogger = loggerAddr
    am.transport.scheduleTransmit(None,
                                  TransmitIntent(notifyAddr,
                                                 EndpointConnected(endpoint.addrInst)))
    setProcName(getattr(childClass, '__name__', str(childClass)), am.transport.myAddress)
    am.run()
Example 13
def enable_confidential_logging(prefix: str = "SystemLog:", **kwargs) -> None:
    """
    The default format is `logging.BASIC_FORMAT` (`%(levelname)s:%(name)s:%(message)s`).
    All other kwargs are passed to `logging.basicConfig`. Sets the default
    logger class and root logger to be confidential. This means the format
    string `%(prefix)s` will work.

    Set the format using the `format` kwarg.

    If running in Python >= 3.8, will attempt to add `force=True` to the kwargs
    for logging.basicConfig.

    After calling this method, use the kwarg `category` to pass in a value of
    `DataCategory` to denote data category. The default is `PRIVATE`. That is,
    if no changes are made to an existing set of log statements, the log output
    should be the same.

    The standard implementation of the logging API is a good reference:
    https://github.com/python/cpython/blob/3.9/Lib/logging/__init__.py
    """
    set_prefix(prefix)

    if "format" not in kwargs:
        kwargs["format"] = f"%(prefix)s{logging.BASIC_FORMAT}"

    # Ensure that all loggers created via `logging.getLogger` are instances of
    # the `ConfidentialLogger` class.
    logging.setLoggerClass(ConfidentialLogger)

    if len(logging.root.handlers) > 0:
        p = get_prefix()
        for line in _logging_basic_config_set_warning.splitlines():
            print(f"{p}{line}", file=sys.stderr)

    if "force" not in kwargs and sys.version_info >= (3, 8):
        kwargs["force"] = True

    old_root = logging.root

    root = ConfidentialLogger(logging.root.name)
    root.handlers = old_root.handlers

    logging.root = root
    logging.Logger.root = root  # type: ignore
    logging.Logger.manager = logging.Manager(root)  # type: ignore

    # https://github.com/kivy/kivy/issues/6733
    logging.basicConfig(**kwargs)
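
A hypothetical usage sketch, assuming the DataCategory enum and the category kwarg described in the docstring above (neither is defined in this snippet):

enable_confidential_logging(prefix="SystemLog:")

log = logging.getLogger(__name__)
log.info("service started")                                # defaults to DataCategory.PRIVATE
log.info("request_count=%d", 42, category=DataCategory.PUBLIC)
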
Example 14
def _cleanupAdmin(self):
    if getattr(self, 'asLogger', None):
        if hasattr(self, 'oldLoggerRoot'):
            logging.root = self.oldLoggerRoot
            logging.Logger.root = self.oldLoggerRoot
            logging.Logger.manager = logging.Manager(logging.Logger.root)
        self.transport.run(TransmitOnly, maximumDuration=timedelta(milliseconds=250))
        import time
        time.sleep(0.05)  # allow children to exit and log their exit
        self.transport.scheduleTransmit(
            None, TransmitIntent(self.asLogger, LoggerExitRequest()))
        self.transport.run(TransmitOnly)
        if getattr(self, 'asLogProc', None):
            if self._checkChildLiveness(self.asLogProc):
                time.sleep(0.02)  # wait a little to allow logger to exit
            self._checkChildLiveness(self.asLogProc)  # cleanup defunct proc
Example 15
def test_basic_output():
    stream = io.StringIO()
    formatter = logging.Formatter(
        "%(asctime)s %(levelname)-8s %(processName)-5s %(threadName)-5s %(name)-12s %(message)s"
    )
    stream_handler = logging.StreamHandler(stream)
    stream_handler.setFormatter(formatter)

    root_logger = logging.RootLogger(logging.DEBUG)
    root_logger.addHandler(stream_handler)
    manager = logging.Manager(root_logger)

    logger = manager.getLogger("the-logger-1")
    logger.info("I am the log message n=%d m=%s", 123, "qwerty")

    assert "INFO     MainProcess MainThread the-logger-1 I am the log message n=123 m=qwerty\n" in stream.getvalue(
    )
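
The isolation works because Manager._fixupParents parents every logger it creates to the manager's own root rather than to the process-wide logging.root, so records propagate only into root_logger's handlers. A quick check of that wiring, under the same setup as the test above:

lg = manager.getLogger("isolated")
assert lg.parent is root_logger        # propagation targets the private root
assert lg.parent is not logging.root   # the global root's handlers see nothing
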
Example 16
def resetLogging():
    """ Reset the handlers and loggers so that we
    can rerun the tests starting from a blank slate.
    """
    __pragma__("skip")
    logging._handlerList = []

    import weakref
    logging._handlers = weakref.WeakValueDictionary()

    logging.root = logging.RootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.root)
    logging.root.manager = logging.Logger.manager
    __pragma__("noskip")

    if __envir__.executor_name == __envir__.transpiler_name:
        logging._resetLogging()
Example 17
    def h_LoggerConnected(self, envelope):
        self.asLogger = envelope.sender
        # Dirty trick here to completely re-initialize logging in this
        # process... something the standard Python logging interface does
        # not allow via the API.
        self.oldLoggerRoot = logging.root
        logging.root = ThespianLogForwarder(self.asLogger, self.transport)
        logging.Logger.root = logging.root
        logging.Logger.manager = logging.Manager(logging.Logger.root)
        # logging.getLogger('Thespian.Admin') \
        #        .info('ActorSystem Administrator startup @ %s', self.myAddress)

        # Now that logging is started, Admin startup can be confirmed
        self.transport.scheduleTransmit(
            None, TransmitIntent(self.addrOfStarter, EndpointConnected(0)))

        self._activate()
        return True
Example 18
    def setup(self):
        """Configure the :class:`LoggingMixin`.

        This method adds the :option:`-l`, :option:`-q`,
        :option:`-s` and :option:`-v` parameters to the
        application and instantiates the :attr:`log` attribute.
        """
        # Add logging-related options.
        self.add_param("-l",
                       "--logfile",
                       default=self.logfile,
                       help="log to file (default: log to stdout)")
        self.add_param("-q",
                       "--quiet",
                       default=0,
                       help="decrease the verbosity",
                       action="count")
        self.add_param("-s",
                       "--silent",
                       default=False,
                       help="only log warnings",
                       action="store_true")
        self.add_param("-v",
                       "--verbose",
                       default=0,
                       help="raise the verbosity",
                       action="count")

        # Create logger.
        logging.setLoggerClass(CommandLineLogger)
        self.log = logging.getLogger(self.name)
        self.formatter = Formatter(fmt=self.message_format,
                                   datefmt=self.date_format)

        self.log.level = self.log.default_level

        # If requested, make our logger the root.
        if self.root:
            logging.root = self.log
            logging.Logger.root = self.log
            logging.Logger.manager = logging.Manager(self.log)
Example 19
def test_my_json_output():
    stream = io.StringIO()
    formatter = MyJsonFormatter()
    stream_handler = logging.StreamHandler(stream)
    stream_handler.setFormatter(formatter)

    root_logger = logging.RootLogger(logging.DEBUG)
    root_logger.addHandler(stream_handler)
    manager = logging.Manager(root_logger)

    logger = manager.getLogger("the-logger-1")
    logger.info("I am the log message n=%d m=%s",
                123,
                "qwerty",
                extra={'someExtra': 'hi there!'})

    obj = json.loads(stream.getvalue())
    assert obj['timestamp'] != ''
    assert obj['name'] == 'the-logger-1'
    assert obj['level'] == 'INFO'
    assert obj['message'] == 'I am the log message n=123 m=qwerty'
    assert obj['someExtra'] == 'hi there!'
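
MyJsonFormatter is not shown in this snippet; a minimal sketch that would satisfy the assertions above, treating every non-standard LogRecord attribute as an extra= field:

import json
import logging

class MyJsonFormatter(logging.Formatter):
    # Attributes present on every LogRecord; anything else arrived via extra=.
    _STANDARD = set(
        logging.LogRecord("", 0, "", 0, "", (), None).__dict__
    ) | {"message", "asctime"}

    def format(self, record):
        payload = {
            "timestamp": self.formatTime(record),
            "name": record.name,
            "level": record.levelname,
            "message": record.getMessage(),
        }
        for key, value in record.__dict__.items():
            if key not in self._STANDARD:
                payload[key] = value
        return json.dumps(payload)
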
Example 20
def startChild(childClass, globalName, endpoint, transportClass, sourceHash,
               sourceToLoad, parentAddr, adminAddr, notifyAddr, loggerAddr,
               childRequirements, currentSystemCapabilities, fileNumsToClose,
               concurrency_context):

    closeFileNums(fileNumsToClose)

    # Dirty trick here to workaround multiprocessing trying to impose
    # an unnecessary restriction.  A process should be set daemonic
    # before start() if the parent shouldn't track it (and, specifically,
    # shouldn't automatically join() the subprocess on exit).  For Actors, the
    # parent exists independently of the child and the ActorSystem
    # manages them, so daemonic processes are desired.  However,
    # multiprocessing imposes a restriction that daemonic processes
    # cannot create more processes.  The following reaches deep into
    # the implementation of the multiprocessing module to override
    # that restriction.  This process was already started as daemonic,
    # and it's detached from its parent.  The following simply clears
    # that flag locally so that other processes can be created from
    # this one.
    multiprocessing.process._current_process._daemonic = False

    transport = transportClass(endpoint)
    #closeUnusedFiles(transport)

    # Dirty trick here to completely re-initialize logging in this
    # process... something the standard Python logging interface does
    # not allow via the API.  We also do not want to run
    # logging.shutdown() because (a) that does not do enough to reset,
    # and (b) it shuts down handlers, but we want to leave the parent's
    # handlers alone.
    logging.root = ThespianLogForwarder(loggerAddr, transport)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)

    logger = logging.getLogger('Thespian.ActorManager')

    am = MultiProcManager(childClass, globalName, transport, sourceHash,
                          sourceToLoad, parentAddr, adminAddr,
                          childRequirements, currentSystemCapabilities,
                          concurrency_context)
    am.asLogger = loggerAddr
    am.transport.scheduleTransmit(
        None,
        TransmitIntent(notifyAddr, EndpointConnected(
            endpoint.addrInst)).addCallback(onFailure=am.actor_send_fail))
    setProcName(
        getattr(childClass, '__name__', str(childClass)).rpartition('.')[-1],
        am.transport.myAddress)

    sighandler = signal_detector(
        getattr(childClass, '__name__', str(childClass)),
        am.transport.myAddress, am)
    sigexithandler = shutdown_signal_detector(
        getattr(childClass, '__name__', str(childClass)),
        am.transport.myAddress, am)

    for each in range(1, signal.NSIG):
        # n.b. normally Python intercepts SIGINT to turn it into a
        # KeyboardInterrupt exception.  However, these Actors should
        # be detached from the keyboard, so revert to normal SIGINT
        # behavior.
        if each not in uncatchable_signals:
            if each in child_exit_signals:
                set_signal_handler(each, am.signalChildDied)
                continue
            try:
                set_signal_handler(
                    each,
                    sigexithandler if each in exit_signals else sighandler)
            except (RuntimeError, ValueError, EnvironmentError) as ex:
                # OK, this signal can't be caught for this
                # environment.  We did our best.
                pass

    am.run()
Example 21
def _reset_logging(self):
    if hasattr(self, 'oldLoggerRoot'):
        logging.root = self.oldLoggerRoot
        logging.Logger.root = self.oldLoggerRoot
        logging.Logger.manager = logging.Manager(logging.Logger.root)
        delattr(self, 'oldLoggerRoot')
Example 22
def startupASLogger(addrOfStarter, logEndpoint, logDefs, transportClass,
                    aggregatorAddress):
    # Dirty trick here to completely re-initialize logging in this
    # process... something the standard Python logging interface does
    # not allow via the API.  We also do not want to run
    # logging.shutdown() because (a) that does not do enough to reset,
    # and (b) it shuts down handlers, but we want to leave the
    # parent's handlers alone.
    logging.root = logging.RootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)
    if logDefs:
        dictConfig(logDefs)
    else:
        logging.basicConfig()
    # Disable thesplog from within the logging process (by setting the
    # logfile size to zero) to try to avoid recursive logging loops.
    thesplog_control(logging.WARNING, False, 0)
    #logging.info('ActorSystem Logging Initialized')
    transport = transportClass(logEndpoint)
    setProcName('logger', transport.myAddress)
    transport.scheduleTransmit(
        None, TransmitIntent(addrOfStarter, LoggerConnected()))
    fdup = None
    last_exception_time = None
    exception_count = 0
    while True:
        try:
            r = transport.run(None)
            if isinstance(r, Thespian__UpdateWork):
                transport.scheduleTransmit(
                    None, TransmitIntent(transport.myAddress, r))
                continue
            logrecord = r.message
            if isinstance(logrecord, LoggerExitRequest):
                logging.info('ActorSystem Logging Shutdown')
                return
            elif isinstance(logrecord, LoggerFileDup):
                fdup = getattr(logrecord, 'fname', None)
            elif isinstance(logrecord, LogAggregator):
                aggregatorAddress = logrecord.aggregatorAddress
            elif isinstance(logrecord, logging.LogRecord):
                logging.getLogger(logrecord.name).handle(logrecord)
                if fdup:
                    with open(fdup, 'a') as ldf:
                        ldf.write('%s\n' % str(logrecord))
                if aggregatorAddress and \
                   logrecord.levelno >= logging.WARNING:
                    transport.scheduleTransmit(
                        None, TransmitIntent(aggregatorAddress, logrecord))
            else:
                logging.warning('Unknown message rcvd by logger: %s' %
                                str(logrecord))
        except Exception as ex:
            thesplog('Thespian Logger aborting (#%d) with error %s',
                     exception_count,
                     ex,
                     exc_info=True)
            if last_exception_time is None or \
               last_exception_time.view().expired():
                last_exception_time = ExpirationTimer(timedelta(seconds=1))
                exception_count = 0
            else:
                exception_count += 1
                if exception_count >= MAX_LOGGING_EXCEPTIONS_PER_SECOND:
                    thesplog(
                        'Too many Thespian Logger exceptions (#%d in %s); exiting!',
                        exception_count,
                        timedelta(seconds=1) -
                        last_exception_time.view().remaining())
                    return
Example 23
def get_logger(name: str = None):
    manager = logging.Manager(logging.getLogger())
    manager.setLoggerClass(Logger)
    return manager.getLogger(name)
Example 24
        s = sio.getvalue()
        sio.close()

        return s


class ProSuiteRootLogger(logging.Logger):
    """
    """
    def getChild(self, suffix):
        return self.manager.getLogger(suffix)


# Create unique root logger
manager = logging.Manager(None)
manager.setLoggerClass(ProSuiteRootLogger)

root_logger = manager.getLogger("System")
root_logger.setLevel(settings.logging["level"])
root_logger.manager = manager

manager.root = weakref.proxy(root_logger)
manager.setLoggerClass(logging.Logger)

# create rotating logs handler
logs_path = application.storage.abs_path(settings.logging["file"])
logs_dir = os.path.dirname(logs_path)

try:
    os.stat(logs_dir)
Example 25
    # Production logging setup
    url = os.environ.get('SEQ_URL')
    key = os.environ.get('SEQ_BOT_KEY')

    if not key:
        raise Exception('SEQ_BOT_KEY not found but SEQ_URL was specified')

    seqlog.log_to_seq(
        # Initialize the seq logging url before the secrets are loaded
        # this is ok because seq logging only happens in prod
        server_url=url,
        api_key=key,
        level=logging.INFO,
        batch_size=5,
        auto_flush_timeout=10,  # seconds
        override_root_logger=False,
    )
else:
    # Development logging setup
    logging.setLoggerClass(StructuredLogger)

    logging.root = StructuredRootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)

    logging.basicConfig(
        format='%(asctime)s %(levelname)s %(message)s',
        handlers=[ConsoleStructuredLogHandler()],
        level=logging.INFO,
    )
Example 26
def _reset_logging():
    logging.root = logging.RootLogger(logging.WARNING)
    logging.Logger.root = logging.root
    logging.Logger.manager = logging.Manager(logging.Logger.root)
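
_reset_logging discards the old root outright; when a caller needs the reset undone afterwards (as the oldLoggerRoot examples above arrange by hand), a context-manager wrapper is one option. A sketch, not part of the original:

import contextlib
import logging

@contextlib.contextmanager
def fresh_logging():
    old_root, old_manager = logging.root, logging.Logger.manager
    _reset_logging()
    try:
        yield logging.root
    finally:
        logging.root = old_root
        logging.Logger.root = old_root
        logging.Logger.manager = old_manager
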
Example 27
class MockLogger(logging.Logger):
    root = logging.RootLogger(logging.WARNING)
    manager = logging.Manager(root)
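
One gotcha with this pattern: logging.Manager(root) starts with no logger class of its own and falls back to the module-wide default (normally plain logging.Logger), so the manager must be told about the class before getLogger() will return MockLogger instances:

MockLogger.manager.setLoggerClass(MockLogger)
mock_log = MockLogger.manager.getLogger("under-test")
assert isinstance(mock_log, MockLogger)
assert logging.getLogger("under-test") is not mock_log  # global tree untouched
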
Example 28
            raise ValueError('msg is not allowed in kwargs')
        _args = kwargs.pop('_args', [])
        _kwargs = kwargs.pop('_kwargs', {})
        if msg is not None:
            kwargs['msg'] = msg
        msg = format_msg(kwargs)
        return msg, _args, _kwargs

    def addHandler(self, hdlr):
        if hdlr.formatter is not None and not isinstance(hdlr.formatter, JsonFormatter):
            raise ValueError('Please use JsonFormatter!')
        super().addHandler(hdlr)


root = JsonLogger(logging.INFO)
json_manager = logging.Manager(root)
json_manager.loggerClass = JsonLogger

JsonLogger.manager = json_manager
JsonLogger.root = root


def get_json_logger(name=None):
    if name:
        return JsonLogger.manager.getLogger(name)
    else:
        return root


def format_msg(msg):
    """
Example 29
def __init__(self, level):
    super().__init__(level)
    self.propagate = False
    _RootLogger.manager = logging.Manager(self)
Example 30
def reset_logging_manager():
    _m = logging.Logger.manager
    logging.Logger.manager = logging.Manager(logging.root)
    yield
    _m.loggerClass = None
    logging.Logger.manager = _m
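
The bare yield marks this as fixture material; a sketch of how it might be consumed, assuming pytest:

import logging
import pytest

@pytest.fixture
def manager_sandbox():
    yield from reset_logging_manager()

def test_logger_class_change_is_contained(manager_sandbox):
    logging.Logger.manager.setLoggerClass(logging.Logger)
    assert logging.getLogger("scratch").name == "scratch"
    # Teardown restores the original manager and its loggerClass.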