Code Example #1
File: __init__.py, Project: dockeroo/dockeroo
 def save_logger(self, logger):
     logging._acquireLock() # pylint: disable=protected-access
     self._save_logging['{}_handlers'.format(
         logger.name)] = list(logger.handlers)
     logging._releaseLock() # pylint: disable=protected-access
     self._save_logging['{}_propagate'.format(
         logger.name)] = logger.propagate
Code Example #2
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
    """
    Read the logging configuration from a ConfigParser-format file.

    This can be called several times from an application, allowing an end user
    the ability to select from various pre-canned configurations (if the
    developer provides a mechanism to present the choices and load the chosen
    configuration).
    """
    import configparser

    cp = configparser.ConfigParser(defaults)
    if hasattr(fname, 'readline'):
        cp.read_file(fname)
    else:
        cp.read(fname)

    formatters = _create_formatters(cp)

    # critical section
    logging._acquireLock()
    try:
        logging._handlers.clear()
        del logging._handlerList[:]
        # Handlers add themselves to logging._handlers
        handlers = _install_handlers(cp, formatters)
        _install_loggers(cp, handlers, disable_existing_loggers)
    finally:
        logging._releaseLock()
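
For context, fileConfig() expects the standard ConfigParser layout with [loggers], [handlers] and [formatters] sections, plus one section per named logger, handler and formatter. A minimal self-contained sketch of driving it follows; the file name, levels and format string are illustrative and not taken from any project quoted here.

import logging.config

# A deliberately small config: one root logger, one console handler, one formatter.
CONFIG = """
[loggers]
keys=root

[handlers]
keys=console

[formatters]
keys=simple

[logger_root]
level=INFO
handlers=console

[handler_console]
class=StreamHandler
level=INFO
formatter=simple
args=(sys.stderr,)

[formatter_simple]
format=%(levelname)s %(name)s: %(message)s
"""

with open("logging.ini", "w") as f:
    f.write(CONFIG)

logging.config.fileConfig("logging.ini", disable_existing_loggers=False)
logging.getLogger("demo").info("configured from logging.ini")
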
Code Example #3
    def getLogger(self, name, default_log=None):
        """
        This getLogger method allows the application to pass in a name,
        a default log, and a logging level. These values are passed to
        the InstallLogger to set up a custom default log file.

        The placeholder code is an adjunct to the python logging module.
        It is used to manage the logging hierarchy. Because this getLogger
        method interfaces with the logging hierarchy, it is necessary to
        comply with that structure.
        """
        logger_name = None
        logging._acquireLock()
        try:
            if name in logging.Logger.manager.loggerDict:
                logger_name = logging.Logger.manager.loggerDict[name]
                if isinstance(logger_name, logging.PlaceHolder):
                    placeholder_for_fixup = logger_name
                    logger_name = \
                        logging._loggerClass(name, default_log)
                    logger_name.manager = self
                    logging.Logger.manager.loggerDict[name] = logger_name
                    self._fixupChildren(placeholder_for_fixup, logger_name)
                    self._fixupParents(logger_name)
            else:
                logger_name = logging._loggerClass(name, default_log)
                logger_name.manager = self
                logging.Logger.manager.loggerDict[name] = logger_name
                self._fixupParents(logger_name)
        finally:
            logging._releaseLock()
        return logger_name
Code Example #4
File: initialize.py, Project: adamsxu/commons
def init(filebase):
  """
    Set up default logging using:
      {--log_dir}/filebase.{INFO,WARNING,...}
  """
  logging._acquireLock()

  # set up permissive logger
  root_logger = logging.getLogger()
  root_logger.setLevel(logging.DEBUG)

  # clear existing handlers
  teardown_stderr_logging()
  teardown_disk_logging()

  # setup INFO...FATAL handlers
  num_disk_handlers = 0
  for handler in _setup_disk_logging(filebase):
    root_logger.addHandler(handler)
    _DISK_LOGGERS.append(handler)
  for handler in _setup_stderr_logging():
    root_logger.addHandler(handler)
    _STDERR_LOGGERS.append(handler)

  logging._releaseLock()

  if len(_DISK_LOGGERS) > 0 and LogOptions.stderr_log_level() != LogOptions.LOG_LEVEL_NONE:
    print('Writing log files to disk in %s' % LogOptions.log_dir(), file=sys.stderr)

  return root_logger
Code Example #5
def _check_logger_class():
    """
    Make sure process name is recorded when loggers are used
    """
    # XXX This function is unnecessary once logging is patched
    import logging

    if hasattr(logging, "multiprocessing"):
        return

    logging._acquireLock()
    try:
        OldLoggerClass = logging.getLoggerClass()
        if not getattr(OldLoggerClass, "_process_aware", False):

            class ProcessAwareLogger(OldLoggerClass):
                _process_aware = True

                def makeRecord(self, *args, **kwds):
                    record = OldLoggerClass.makeRecord(self, *args, **kwds)
                    record.processName = current_process()._name
                    return record

            logging.setLoggerClass(ProcessAwareLogger)
    finally:
        logging._releaseLock()
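
On modern CPython the standard library already records processName on every LogRecord, so patches like the one above matter mainly on older interpreters; the effect they provide can still be observed with a format string that includes the field. A small illustrative sketch, with an arbitrary process name and message:

import logging
import multiprocessing

def worker():
    # %(processName)s is the attribute the patched makeRecord() populates.
    logging.basicConfig(
        level=logging.INFO,
        format="%(processName)s %(levelname)s %(message)s")
    logging.getLogger(__name__).info("hello from a child process")

if __name__ == "__main__":
    p = multiprocessing.Process(target=worker, name="worker-1")
    p.start()
    p.join()
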
Code Example #6
File: preforking.py, Project: msolo-dropbox/msolo
  def post_fork_reinit(self):
    """Run anything that reinitializes the python process.

    This is a catch-all for patching modules that assume they
    are running in a single process environment and get confused
    by forking."""

    # quickly register our own signals
    self.install_child_signals()

    # remove any exit handlers - anything registered at this point is not
    # relevant. register a wiseguy-specific exit handler instead
    del atexit._exithandlers[:]
    
    # you don't want all your workers to have the same pseudo-random sequence,
    # so go ahead and reseed it.
    random.seed()

    # the logging module is fun too - it has locks that might be held by a
    # thread in the parent process. to prevent intermittent deadlock, you need
    # to reset the locks. this just feels dirty.
    logging._lock = None
    logging._acquireLock()
    for handler in logging._handlers:
      # this will overwrite the individual locks on each handler
      handler.createLock()
    logging._releaseLock()
Code Example #7
File: logger.py, Project: wuyue92tree/crwy
def fileConfigWithLogPath(fname=BASE_LOGGER_CONF,
                          log_path=None,
                          defaults=None,
                          disable_existing_loggers=True):
    """
    Pass log_path in by intercepting and overriding the handlers, so that the log file location can be changed.
    """
    if not log_path:
        raise CrwyException('Please setup <log_path> first!')

    cp = configparser.ConfigParser(defaults)
    if hasattr(fname, 'readline'):
        cp.read_file(fname)
    else:
        cp.read(fname)
    try:
        formatters = logging.config._create_formatters(cp)
    except configparser.NoSectionError:
        raise CrwyException('Please make sure fname: "%s" exists.' % fname)

    logging._acquireLock()
    try:
        logging._handlers.clear()
        del logging._handlerList[:]
        # Handlers add themselves to logging._handlers
        handlers = _install_handlers_custom(cp, formatters, log_path)
        logging.config._install_loggers(cp, handlers, disable_existing_loggers)
    finally:
        logging._releaseLock()
Code Example #8
File: util.py, Project: IronLanguages/ironpython2
def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging, atexit

    logging._acquireLock()
    try:
        if not _logger:

            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0
            logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
            logging.addLevelName(SUBWARNING, 'SUBWARNING')

            # XXX multiprocessing should cleanup before logging
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))

    finally:
        logging._releaseLock()

    return _logger
Code Example #9
File: logger.py, Project: anizami/astropy
def _teardown_log():
    """Shut down exception and warning logging (if enabled) and clear all
    Astropy loggers from the logging module's cache.

    This involves poking some logging module internals, so much of it is 'at
    your own risk' and is allowed to pass silently if any exceptions occur.
    """

    global log

    if log.exception_logging_enabled():
        log.disable_exception_logging()

    if log.warnings_logging_enabled():
        log.disable_warnings_logging()

    del log

    # Now for the fun stuff...
    try:
        logging._acquireLock()
        try:
            loggerDict = logging.Logger.manager.loggerDict
            for key in loggerDict.keys():
                if key == 'astropy' or key.startswith('astropy.'):
                    del loggerDict[key]
        finally:
            logging._releaseLock()
    except:
        pass
Code Example #10
def fileConfig(fname, defaults=None, disable_existing_loggers=1):
    """
    Read the logging configuration from a ConfigParser-format file.

    This can be called several times from an application, allowing an end user
    the ability to select from various pre-canned configurations (if the
    developer provides a mechanism to present the choices and load the chosen
    configuration).
    In versions of ConfigParser which have the readfp method [typically
    shipped in 2.x versions of Python], you can pass in a file-like object
    rather than a filename, in which case the file-like object will be read
    using readfp.
    """
    import configparser

    cp = configparser.ConfigParser(defaults)
    if hasattr(cp, 'readfp') and hasattr(fname, 'readline'):
        cp.readfp(fname)
    else:
        cp.read(fname)

    formatters = _create_formatters(cp)

    # critical section
    logging._acquireLock()
    try:
        logging._handlers.clear()
        del logging._handlerList[:]
        # Handlers add themselves to logging._handlers
        handlers = _install_handlers(cp, formatters)
        _install_loggers(cp, handlers, disable_existing_loggers)
    finally:
        logging._releaseLock()
Code Example #11
 def serve(rcvr, hdlr, port):
     server = rcvr(port=port, handler=hdlr)
     global _listener
     logging._acquireLock()
     _listener = server
     logging._releaseLock()
     server.serve_until_stopped()
Code Example #12
File: IRCLogging.py, Project: DarkMio/Renol-IRC
 def __prependHandler__(self, logger,  hdlr):
     logging._acquireLock()
     try:
         if not (hdlr in logger.handlers):
             logger.handlers.insert(0, hdlr)
     finally:
         logging._releaseLock()
Code Example #13
File: test_log.py, Project: JIVS/qutebrowser
 def tearDown(self):
     """Restore the original logging configuration."""
     while self.root_logger.handlers:
         h = self.root_logger.handlers[0]
         self.root_logger.removeHandler(h)
         h.close()
     self.root_logger.setLevel(self.original_logging_level)
     for h in self.root_handlers:
         self.root_logger.addHandler(h)
     logging._acquireLock()
     try:
         logging._levelToName.clear()
         logging._levelToName.update(self.saved_level_to_name)
         logging._nameToLevel.clear()
         logging._nameToLevel.update(self.saved_name_to_level)
         logging._handlers.clear()
         logging._handlers.update(self.saved_handlers)
         logging._handlerList[:] = self.saved_handler_list
         logger_dict = logging.getLogger().manager.loggerDict
         logger_dict.clear()
         logger_dict.update(self.saved_loggers)
         logger_states = self.logger_states
         for name in self.logger_states:
             if logger_states[name] is not None:
                 self.saved_loggers[name].disabled = logger_states[name]
     finally:
         logging._releaseLock()
Code Example #14
def test5():
    loggerDict = logging.getLogger().manager.loggerDict
    logging._acquireLock()
    try:
        saved_handlers = logging._handlers.copy()
        saved_handler_list = logging._handlerList[:]
        saved_loggers = loggerDict.copy()
    finally:
        logging._releaseLock()
    try:
        fn = tempfile.mktemp(".ini")
        f = open(fn, "w")
        f.write(test5_config)
        f.close()
        logging.config.fileConfig(fn)
        try:
            raise KeyError
        except KeyError:
            logging.exception("just testing")
        os.remove(fn)
        hdlr = logging.getLogger().handlers[0]
        logging.getLogger().handlers.remove(hdlr)
    finally:
        logging._acquireLock()
        try:
            logging._handlers.clear()
            logging._handlers.update(saved_handlers)
            logging._handlerList[:] = saved_handler_list
            loggerDict = logging.getLogger().manager.loggerDict
            loggerDict.clear()
            loggerDict.update(saved_loggers)
        finally:
            logging._releaseLock()
Code Example #15
File: agent.py, Project: uzzz/cleverdb-agent
def setup_logging(cp, syslog_level, facility):
    from logging import config as logging_config

    if cp.has_section("logging"):
        # This branch is bit modified version of logging.config.fileConfig
        # critical section
        logging._acquireLock()
        try:
            logging._handlers.clear()
            del logging._handlerList[:]
            # Handlers add themselves to logging._handlers
            disable_existing_loggers = True
            formatters = logging_config._create_formatters(cp)
            handlers = logging_config._install_handlers(cp, formatters)
            logging_config._install_loggers(cp, handlers,
                                            disable_existing_loggers)
        finally:
            logging._releaseLock()
    else:
        try:
            syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                    facility=facility)
            syslog.setLevel(syslog_level)
            logger.addHandler(syslog)
        except socket.error, e:
            pass
Code Example #16
def get_logger():
    """
    Returns logger used by multiprocessing
    """
    global _logger
    import logging

    logging._acquireLock()
    try:
        if not _logger:

            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0

            # XXX multiprocessing should cleanup before logging
            if hasattr(atexit, "unregister"):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))

    finally:
        logging._releaseLock()

    return _logger
Code Example #17
def fix_logging_module():
    logging = sys.modules.get("logging")
    # Prevent fixing multiple times as that would cause a deadlock.
    if logging and getattr(logging, "fixed_for_atfork", None):
        return
    if logging:
        warnings.warn("logging module already imported before fixup.")
    import logging

    if logging.getLogger().handlers:
        # We could register each lock with atfork for these handlers, but if
        # these exist, other loggers or not-yet-added handlers could exist as
        # well. It's safer to insist that this fix is applied before logging
        # has been configured.
        warnings.warn("logging handlers already registered.")
    # raise Error('logging handlers already registered.')

    logging._acquireLock()
    try:

        def fork_safe_createLock(self):
            self._orig_createLock()
            atfork(self.lock.acquire, self.lock.release, self.lock.release)

        # Fix the logging.Handler lock (a major source of deadlocks).
        logging.Handler._orig_createLock = logging.Handler.createLock
        logging.Handler.createLock = fork_safe_createLock

        # Fix the module level lock.
        atfork(logging._acquireLock, logging._releaseLock, logging._releaseLock)

        logging.fixed_for_atfork = True
    finally:
        logging._releaseLock()
Code Example #18
 def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
              handler=None):
     ThreadingTCPServer.__init__(self, (host, port), handler)
     logging._acquireLock()
     self.abort = 0
     logging._releaseLock()
     self.timeout = 1
Code Example #19
File: log.py, Project: Aliced3645/celery
def ensure_process_aware_logger(force=False):
    """Make sure process name is recorded when loggers are used."""
    global _process_aware
    if force or not _process_aware:
        logging._acquireLock()
        try:
            _process_aware = True
            Logger = logging.getLoggerClass()
            if getattr(Logger, '_process_aware', False):  # pragma: no cover
                return

            class ProcessAwareLogger(Logger):
                _signal_safe = True
                _process_aware = True

                def makeRecord(self, *args, **kwds):
                    record = Logger.makeRecord(self, *args, **kwds)
                    record.processName = current_process()._name
                    return record

                def log(self, *args, **kwargs):
                    if _in_sighandler:
                        return
                    return Logger.log(self, *args, **kwargs)
            logging.setLoggerClass(ProcessAwareLogger)
        finally:
            logging._releaseLock()
Code Example #20
File: bash.py, Project: FedericoRessi/devstack-ws
    def use_logging(self):
        logging._acquireLock()
        try:
            root = logging.root
            if len(root.handlers) == 0:
                root.setLevel(logging.DEBUG)
                logging.getLogger('sh').setLevel(logging.WARNING)

                formatter = logging.Formatter(self.log_format)

                stream_handler = logging.StreamHandler()
                stream_handler.setLevel(self.log_level)
                stream_handler.setFormatter(formatter)
                LOG.addHandler(stream_handler)

                log_path = self.new_log_path()
                while path.isfile(log_path):
                    time.sleep(.1)
                    log_path = self.new_log_path()

                if path.isdir(path.dirname(log_path)):
                    file_handler = logging.FileHandler(log_path + '.ansi', 'wt')
                    file_handler.setLevel(logging.DEBUG)
                    file_handler.setFormatter(formatter)
                    root.addHandler(file_handler)

                    html_handler = HtmlFileHandler(log_path + '.html', 'wt')
                    html_handler.setLevel(logging.DEBUG)
                    html_handler.setFormatter(formatter)
                    root.addHandler(html_handler)

                    txt_handler = TxtFileHandler(log_path + '.txt', 'wt')
                    txt_handler.setLevel(logging.DEBUG)
                    txt_handler.setFormatter(formatter)
                    root.addHandler(txt_handler)

                else:
                    log_path = file_handler = html_handler = txt_handler = None
                self.log_path = log_path
        finally:
            self.output_logger = logging.getLogger('out')
            self.error_logger = logging.getLogger('err')
            logging._releaseLock()

        try:
            yield log_path

        finally:
            if file_handler:
                file_handler.close()
            if html_handler:
                html_handler.close()
            if txt_handler:
                txt_handler.close()

            if log_path:
                for file_name in glob.glob(log_path + '.*'):
                    new_file_name = file_name.replace(
                        '_RUNNING.', '_' + self.status + '.')
                    os.rename(file_name, new_file_name)
Code Example #21
File: patch.py, Project: Aaron1011/oh-mainline
def _patch_logger_class():
    """Make sure process name is recorded when loggers are used."""

    try:
        from multiprocessing.process import current_process
    except ImportError:
        current_process = None  # noqa

    logging._acquireLock()
    try:
        OldLoggerClass = logging.getLoggerClass()
        if not getattr(OldLoggerClass, '_process_aware', False):

            class ProcessAwareLogger(OldLoggerClass):
                _process_aware = True

                def makeRecord(self, *args, **kwds):
                    record = OldLoggerClass.makeRecord(self, *args, **kwds)
                    if current_process:
                        record.processName = current_process()._name
                    else:
                        record.processName = ""
                    return record
            logging.setLoggerClass(ProcessAwareLogger)
    finally:
        logging._releaseLock()
Code Example #22
    def setUp(self):
        """Setup the default logging stream to an internal StringIO instance,
        so that we can examine log output as we want."""
        logger_dict = logging.getLogger().manager.loggerDict
        logging._acquireLock()
        try:
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = logger_dict.copy()
            self.saved_level_names = logging._levelNames.copy()
        finally:
            logging._releaseLock()

        # Set two unused loggers: one non-ASCII and one Unicode.
        # This is to test correct operation when sorting existing
        # loggers in the configuration code. See issues 8201, 9310.
        logging.getLogger("\xab\xd7\xbb")
        logging.getLogger(u"\u013f\u00d6\u0047")

        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()

        self.stream = cStringIO.StringIO()
        self.root_logger.setLevel(logging.DEBUG)
        self.root_hdlr = logging.StreamHandler(self.stream)
        self.root_formatter = logging.Formatter(self.log_format)
        self.root_hdlr.setFormatter(self.root_formatter)
        self.root_logger.addHandler(self.root_hdlr)
Code Example #23
File: __init__.py, Project: dockeroo/dockeroo
 def restore_logger(self, logger):
     logging._acquireLock() # pylint: disable=protected-access
     logger.handlers = self._save_logging.pop(
         '{}_handlers'.format(logger.name))
     logging._releaseLock() # pylint: disable=protected-access
     logger.propagate = self._save_logging.pop(
         '{}_propagate'.format(logger.name))
Code Example #24
 def __exit__(self, etype, evalue, etraceback):
     if hasattr(logging, '_acquireLock'):
         logging._acquireLock()
     try:
         self._mock_logger.handlers = self._orig_handlers
     finally:
         if hasattr(logging, '_acquireLock'):
             logging._releaseLock()
Code Example #25
File: __init__.py, Project: dockeroo/dockeroo
 def setup_logging(self):
     self._save_logging = {}
     for logger in [logging.getLogger(x) for x in self.loggers]:
         self.save_logger(logger)
         logging._acquireLock() # pylint: disable=protected-access
         logger.handlers = [self.log_handler]
         logging._releaseLock() # pylint: disable=protected-access
         logger.propagate = False
Code Example #26
File: log.py, Project: ArtRichards/Limnoria
 def disable(self):
     self.setLevel(sys.maxsize) # Just in case.
     _logger.removeHandler(self)
     logging._acquireLock()
     try:
         del logging._handlers[self]
     finally:
         logging._releaseLock()
Code Example #27
    def _addLevelName(level, levelName):
        '''
        Registers a new verbosity level in the internal structures of the logging module.
        This is needed to turn the numeric level constants into text when the log is written.
        It also adds a message-creating function to the logging module, analogous to the built-in ones (debug(), error(), etc.).
        See http://mail.python.org/pipermail/tutor/2007-August/056247.html for details.

        @param level        Numeric constant for the new verbosity level (internal representation of the logging module) [int]
        @param levelName    Name of the new verbosity level (used in the output) [string]
        '''

        # Acquire the logging module lock
        logging._acquireLock()

        try:  # unlikely to cause an exception, but you never know...

            logging._levelNames[level] = levelName
            logging._levelNames[levelName] = level

            lowerName = levelName.lower()

            def Logger_func(self, msg, *args, **kwargs):
                '''
                Logger function for the registered verbosity level, analogous to debug(), error(), etc., to be added to the Logger class.

                @param self         Reference to the class instance
                @param msg          Message text
                @param *args        Remaining positional arguments [tuple]
                @param *kwargs      Remaining keyword arguments [dict]
                '''

                if self.manager.disable >= level:
                    return
                if level >= self.getEffectiveLevel():
                    self._log(level, msg, args, **kwargs)

            # Attach the function to the logging.Logger class
            setattr(logging.Logger, lowerName, Logger_func)

            # define a new root level logging function
            # this is like existing info, critical, debug...etc
            def root_func(msg, *args, **kwargs):
                '''
                Module-level logger function for the registered verbosity level, analogous to logging.debug(), logging.error(), etc.

                @param msg          Message text
                @param *args        Remaining positional arguments [tuple]
                @param *kwargs      Remaining keyword arguments [dict]
                '''

                if len(logging.root.handlers) == 0:
                    logging.basicConfig()
                Logger_func(logging.root, msg, *args, **kwargs)

            # Attach the function to the logging module
            setattr(logging, lowerName, root_func)
        finally:
            logging._releaseLock()  # release the logging module lock
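
A similar result can be had with only documented stdlib calls; the following stand-alone sketch is not part of the code above, and the NOTICE level number and name are purely illustrative.

import logging

NOTICE = 25
logging.addLevelName(NOTICE, "NOTICE")

def _notice(self, msg, *args, **kwargs):
    # Mirror the enabled/level checks that the built-in helpers perform.
    if self.isEnabledFor(NOTICE):
        self._log(NOTICE, msg, args, **kwargs)

logging.Logger.notice = _notice
logging.basicConfig(level=NOTICE)
logging.getLogger("demo").notice("custom level works: %s", 42)
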
Code Example #28
File: config.py, Project: connoryang/dec-eve-serenity
def stopListening():
    global _listener
    logging._acquireLock()
    try:
        if _listener:
            _listener.abort = 1
            _listener = None
    finally:
        logging._releaseLock()
Code Example #29
File: logging.py, Project: fordhurley/python-colorlog
def basicConfig():
    """This calls basicConfig() and then overrides the formatter it creates"""
    logging.basicConfig()
    logging._acquireLock()
    try:
        stream = logging.root.handlers[0]
        stream.setFormatter(ColoredFormatter(BASIC_FORMAT))
    finally:
        logging._releaseLock()
Code Example #30
def _acquireMultiProcLock():
    """
    Acquire the module-level lock for serializing access to shared data at a multiprocess level.

    This should be released with _releaseLock().
    """
    if _multi_proc_lock:
        _multi_proc_lock.acquire()
    logging._acquireLock()
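
The matching release helper is not included in this excerpt; a plausible, hypothetical counterpart would release the locks in the reverse order of acquisition:

def _releaseMultiProcLock():
    """Hypothetical counterpart: release the locks taken by _acquireMultiProcLock()."""
    logging._releaseLock()
    if _multi_proc_lock:
        _multi_proc_lock.release()
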
Code Example #31
File: config.py, Project: Pydiderot/pydiderotIDE
 def run(self):
     server = self.rcvr(port=self.port,
                        handler=self.hdlr,
                        ready=self.ready,
                        verify=self.verify)
     if self.port == 0:
         self.port = server.server_address[1]
     self.ready.set()
     global _listener
     logging._acquireLock()
     _listener = server
     logging._releaseLock()
     server.serve_until_stopped()
Code Example #32
File: config.py, Project: Pydiderot/pydiderotIDE
 def __init__(self,
              host='localhost',
              port=DEFAULT_LOGGING_CONFIG_PORT,
              handler=None,
              ready=None,
              verify=None):
     ThreadingTCPServer.__init__(self, (host, port), handler)
     logging._acquireLock()
     self.abort = 0
     logging._releaseLock()
     self.timeout = 1
     self.ready = ready
     self.verify = verify
Code Example #33
        def serve_until_stopped(self):
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()], [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()

            self.socket.close()
Code Example #34
File: common.py, Project: chy4412312/MyCT
def printToStdout(data, color=None, on_color=None, bold=False):
    if bold:
        msg = colored(text=data,
                      color=color,
                      on_color=on_color,
                      attrs=['bold'])
    else:
        msg = colored(text=data, color=color, on_color=on_color, attrs=None)
    if runtime['engineMode'] == ENGINE_MODE.THREAD:
        logging._acquireLock()
    sys.stdout.write(msg)
    sys.stdout.flush()
    if runtime['engineMode'] == ENGINE_MODE.THREAD:
        logging._releaseLock()
Code Example #35
def setup_multiprocessing_logging(queue=None):
    '''
    This code should be called from within a running multiprocessing
    process instance.
    '''
    from salt.utils.platform import is_windows

    global __MP_LOGGING_CONFIGURED
    global __MP_LOGGING_QUEUE_HANDLER

    if __MP_IN_MAINPROCESS is True and not is_windows():
        # We're in the MainProcess, return! No multiprocessing logging setup shall happen
        # Windows is the exception where we want to set up multiprocessing
        # logging in the MainProcess.
        return

    try:
        logging._acquireLock()  # pylint: disable=protected-access

        if __MP_LOGGING_CONFIGURED is True:
            return

        # Let's set it to true as fast as possible
        __MP_LOGGING_CONFIGURED = True

        if __MP_LOGGING_QUEUE_HANDLER is not None:
            return

        # The temp null and temp queue logging handlers will store messages.
        # Since no one will process them, memory usage will grow. If they
        # exist, remove them.
        __remove_null_logging_handler()
        __remove_queue_logging_handler()

        # Let's add a queue handler to the logging root handlers
        __MP_LOGGING_QUEUE_HANDLER = SaltLogQueueHandler(queue or get_multiprocessing_logging_queue())
        logging.root.addHandler(__MP_LOGGING_QUEUE_HANDLER)
        # Set the logging root level to the lowest to get all messages
        logging.root.setLevel(logging.GARBAGE)
        logging.getLogger(__name__).debug(
            'Multiprocessing queue logging configured for the process running '
            'under PID: %s', os.getpid()
        )
        # The above logging call will create, in some situations, a futex wait
        # lock condition, probably due to the multiprocessing Queue's internal
        # lock and semaphore mechanisms.
        # A small sleep will allow us not to hit that futex wait lock condition.
        time.sleep(0.0001)
    finally:
        logging._releaseLock()  # pylint: disable=protected-access
Code Example #36
File: config.py, Project: spirit0801/core-framework
def _config(props, category=None, disable_existing_loggers=1):
    logging.shutdown()

    # critical section
    # patterned after logging.config...
    logging._acquireLock()
    try:
        logging._handlers.clear()
        del logging._handlerList[:]
        # Handlers add themselves to logging._handlers
        handlers = _install_handlers(props)
        _install_loggers(props, handlers, category, disable_existing_loggers)
    finally:
        logging._releaseLock()
Code Example #37
File: __init__.py, Project: dakrauth/prolog
def basic_config(loggers=None,
                 level=None,
                 handlers=None,
                 propagate=None,
                 reset_handlers=None,
                 cfg=None):
    '''
    Configure and initialize loggers and handlers, based on the following
    options::

        ``loggers``
            The desired loggers to configure; can be either a comma-separated
            string of logger names, a list of ``Logger`` instances, or ``None``
            for the root logger.

        ``level``
            Specify the logging level for all loggers and handlers. Can be
            either the numeric value or string name for the desired level.

        ``handlers``
            The handlers to add to the given ``loggers``; can be a comma-separated
            string of shortcut names ('stream' or 'file', by default) or a list
            of ``Handler`` instances.
        
        ``propagate``
            Indicates whether each ``logger`` instance will be set to propagate.
        
        ``reset_handlers``
            If True, force a reset of all currently configured handlers.
        
        ``cfg``
            The ``prolog.config.PrologConfig`` instance to use. If not given,
            the default will be used (``prolog.config.config``). For any of the
            preceding parameters (except ``loggers``) that are left as None (the
            default), ``cfg`` will be used to determine the appropriate setting.
    '''
    cfg = cfg or config
    level = cfg.resolve('LEVEL', level, 'INFO')
    logging._acquireLock()
    try:
        handlers = get_handlers(cfg.resolve('HANDLERS', handlers), level,
                                cfg.resolve('RESET_HANDLERS', reset_handlers))
        for name in config.logger_names(loggers):
            logger = logging.getLogger(name)
            logger.setLevel(level)
            logger.propagate = cfg.resolve('PROPAGATE', propagate)
            for h in handlers:
                logger.addHandler(h)
    finally:
        logging._releaseLock()
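
A hedged usage sketch, based solely on the docstring above; the logger names and the 'stream' handler shortcut are illustrative:

import logging

basic_config(loggers="myapp,myapp.db", level="DEBUG", handlers="stream")
logging.getLogger("myapp").debug("configured via prolog basic_config")
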
Code Example #38
 def tearDown(self):
     self.root_logger.setLevel(self.original_logging_level)
     logging._acquireLock()
     try:
         logging._levelNames.clear()
         logging._levelNames.update(self.saved_level_names)
         logging._handlers.clear()
         logging._handlers.update(self.saved_handlers)
         logging._handlerList[:] = self.saved_handler_list
         loggerDict = logging.getLogger().manager.loggerDict
         loggerDict.clear()
         loggerDict.update(self.saved_loggers)
     finally:
         logging._releaseLock()
Code Example #39
File: setup.py, Project: viq/salt
def shutdown_logfile_logging():
    global __LOGFILE_CONFIGURED
    global __LOGGING_LOGFILE_HANDLER

    if not __LOGFILE_CONFIGURED or not __LOGGING_LOGFILE_HANDLER:
        return

    try:
        logging._acquireLock()
        logging.root.removeHandler(__LOGGING_LOGFILE_HANDLER)
        __LOGGING_LOGFILE_HANDLER = None
        __LOGFILE_CONFIGURED = False
    finally:
        logging._releaseLock()
Code Example #40
def stopListening():
    """
    Stop the listening server which was created with a call to listen().
    """
    global _listener
    logging._acquireLock()
    try:
        if _listener:
            _listener.abort = 1
            _listener = None
    finally:
        logging._releaseLock()

    return
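
For reference, stopListening() pairs with logging.config.listen(), which returns a thread serving configuration requests; a typical sequence might look like this (the port number and run_application() are illustrative):

import logging.config

t = logging.config.listen(9030)
t.start()
try:
    run_application()  # hypothetical application entry point
finally:
    logging.config.stopListening()
    t.join()
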
Code Example #41
 def serve_until_stopped(self):
     if sys.platform.startswith('java'):
         from select import cpython_compatible_select as select
     else:
         from select import select
     abort = 0
     while not abort:
         rd, wr, ex = select([self.socket.fileno()], [], [],
                             self.timeout)
         if rd:
             self.handle_request()
         logging._acquireLock()
         abort = self.abort
         logging._releaseLock()
     self.socket.close()
Code Example #42
    def matchLoggers(self, pattern):
        """
            Finds all the loggers whose names match the given pattern.
        """
        logger_dict = {}

        logging._acquireLock()
        try:
            for lname in self.manager.loggerDict.keys():
                if fnmatch.fnmatch(lname, pattern):
                    logger_dict[lname] = self.manager.loggerDict[lname]
        finally:
            logging._releaseLock()

        return logger_dict
Code Example #43
def shutdown_multiprocessing_logging():
    global __MP_LOGGING_CONFIGURED
    global __MP_LOGGING_QUEUE_HANDLER

    if not __MP_LOGGING_CONFIGURED or not __MP_LOGGING_QUEUE_HANDLER:
        return

    try:
        logging._acquireLock()
        # Let's remove the queue handler from the logging root handlers
        logging.root.removeHandler(__MP_LOGGING_QUEUE_HANDLER)
        __MP_LOGGING_QUEUE_HANDLER = None
        __MP_LOGGING_CONFIGURED = False
    finally:
        logging._releaseLock()
Code Example #44
File: configuration.py, Project: yakkle/loopchain
    def _update_handlers(self, logger):
        logging._acquireLock()
        try:
            for handler in logger.handlers[:]:
                logger.removeHandler(handler)

            handlers = []

            partial_new_excepthook = new_excepthook
            partial_new_print_exception = new_print_exception

            if self.log_output_type & conf.LogOutputType.console:
                stream_handler = self._create_stream_handler()
                handlers.append(stream_handler)

                partial_new_excepthook = partial(partial_new_excepthook,
                                                 console=True)
                partial_new_print_exception = partial(
                    partial_new_print_exception, console=True)
            else:
                partial_new_excepthook = partial(partial_new_excepthook,
                                                 console=False)
                partial_new_print_exception = partial(
                    partial_new_print_exception, console=False)

            if self.log_output_type & conf.LogOutputType.file and self.log_file_location:
                self._update_log_file_path()

                file_handler = self._create_file_handler()
                handlers.append(file_handler)

                sys.excepthook = partial(partial_new_excepthook,
                                         output_file=file_handler.stream)
                traceback.print_exception = partial(
                    partial_new_print_exception,
                    output_file=file_handler.stream)
            else:
                sys.excepthook = partial(partial_new_excepthook,
                                         output_file=None)
                traceback.print_exception = partial(
                    partial_new_print_exception, output_file=None)

            logging.basicConfig(handlers=handlers,
                                format=self._log_format,
                                datefmt="%Y-%m-%d %H:%M:%S",
                                level=self._log_level)
        finally:
            logging._releaseLock()
Code Example #45
File: log.py, Project: 5l1v3r1/python-tinyscript
def delLevelName(level):
    """
    Remove association of 'levelName' with 'level'.
    """
    logging._acquireLock()
    if isinstance(level, int):
        levelName = logging._levelToName[level] if PY3 else \
                    logging._levelNames[level]
    else:
        levelName = level.upper()
        level = logging._nameToLevel.get(levelName) if PY3 else \
                {v: k for k, v in logging._levelNames.items()}.get(levelName)
    __del(getattr(logging, "_levelToName", None), level)
    __del(getattr(logging, "_levelNames", None), level)
    __del(getattr(logging, "_nameToLevel", None), levelName)
    logging._releaseLock()
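
The __del helper used above is not part of the excerpt; a plausible, hypothetical implementation simply drops a key from a mapping when both exist:

def __del(mapping, key):
    # Hypothetical helper: tolerate a missing table as well as a missing key.
    if mapping is not None and key in mapping:
        del mapping[key]
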
Code Example #46
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
    import ConfigParser
    cp = ConfigParser.ConfigParser(defaults)
    if hasattr(fname, 'readline'):
        cp.readfp(fname)
    else:
        cp.read(fname)
    formatters = _create_formatters(cp)
    logging._acquireLock()
    try:
        logging._handlers.clear()
        del logging._handlerList[:]
        handlers = _install_handlers(cp, formatters)
        _install_loggers(cp, handlers, disable_existing_loggers)
    finally:
        logging._releaseLock()
Code Example #47
    def setUp(self):
        self.logger = l = logging.getLogger()
        self.adapter = LoggerAdapter(l, {})

        logger_dict = logging.getLogger().manager.loggerDict
        logging._acquireLock()
        try:
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = logger_dict.copy()
            self.saved_level_names = logging._levelNames.copy()
        finally:
            logging._releaseLock()

        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()
Code Example #48
File: __init__.py, Project: tmdoit/microcule
def hookioLoggingConfig(level=None, format=None, datefmt=None):
    logging._acquireLock()
    try:
        if level is not None:
            logging.root.setLevel(level)
        if len(logging.root.handlers) > 0:
            return
        hdlr = logging.StreamHandler(sys.stderr)
        if format is None:
            fmt = FullMicroculeJSONFormatter()
        else:
            fmt = SimpleMicroculeJSONFormatter(format, datefmt)
        hdlr.setFormatter(fmt)
        logging.root.addHandler(hdlr)
    finally:
        logging._releaseLock()
Code Example #49
 def setFormatter(self, fmt):
     """
     Set the message formatter.
     """
     super(TableStorageHandler, self).setFormatter(fmt)
     if self.extra_properties:
         logging._acquireLock()
         try:
             for extra in self.extra_property_formatters.values():
                 extra.converter = fmt.converter
                 extra.datefmt = fmt.datefmt
                 if _PY3:
                     extra.default_time_format = fmt.default_time_format
                     extra.default_msec_format = fmt.default_msec_format
         finally:
             logging._releaseLock()
Code Example #50
File: loggex.py, Project: jack51706/rpc2socks
def install():
    global _INSTALLED

    if (not hasattr(_mod_logging, "root")
            or not hasattr(_mod_logging.root, "handlers")
            or not hasattr(_mod_logging.Logger, "manager")
            or not hasattr(_mod_logging, "_lock")
            or not hasattr(_mod_logging, "_defaultFormatter")):
        raise RuntimeError(
            "internals of the standard logging package changed, this module "
            "must be updated accordingly, please contact the developers")

    addLevelName(PPRINT, "PPRINT")
    addLevelName(HINFO, "HINFO")
    addLevelName(ASSERTION, "ASSERTION")

    setLogRecordFactory(LogRecord)
    setLoggerClass(Logger)

    # gentle but intrusive patching...

    if _mod_logging.Logger.manager.logRecordFactory is not None:
        _mod_logging.Logger.manager.setLogRecordFactory(LogRecord)

    if _mod_logging.Logger.manager.loggerClass is not None:
        _mod_logging.Logger.manager.setLoggerClass(Logger)

    _mod_logging._defaultFormatter = _defaultFormatter
    _mod_logging.lastResort = lastResort

    # patch root logger if needed
    _mod_logging._acquireLock()
    try:
        if not hasattr(_mod_logging.root, "hinfo"):
            patch_with_extra_methods(_mod_logging.root)

        globvars = globals()
        for method_name in ("pprint", "hinfo", "assertion"):
            globvars[method_name] = \
                getattr(_mod_logging.root, method_name)
    finally:
        _mod_logging._releaseLock()
        _INSTALLED = True

    # this force re-creates handlers by default
    basicConfig(force=True)
Code Example #51
File: log.py, Project: septimius/nova
def setup():
    """Setup nova logging."""
    if not isinstance(logging.root, NovaRootLogger):
        logging._acquireLock()
        for handler in logging.root.handlers:
            logging.root.removeHandler(handler)
        logging.root = NovaRootLogger("nova")
        NovaLogger.root = logging.root
        NovaLogger.manager.root = logging.root
        for logger in NovaLogger.manager.loggerDict.itervalues():
            logger.root = logging.root
            if isinstance(logger, logging.Logger):
                NovaLogger.manager._fixupParents(logger)
        NovaLogger.manager.loggerDict["nova"] = logging.root
        logging._releaseLock()
        sys.excepthook = handle_exception
        reset()
Code Example #52
def get_logger(name: str = "laia") -> Logger:
    """Create/Get the Laia logger.
    The logger is an object of the class :class:`~.Logger`
    which uses the new string formatting, and accepts keyword arguments.

    Args:
        name: name of the logger to get.
    Returns:
        A :obj:`~.Logger` object.
    """
    logging._acquireLock()  # noqa
    backup_class = logging.getLoggerClass()
    logging.setLoggerClass(Logger)
    logger = logging.getLogger(name)
    logging.setLoggerClass(backup_class)
    logging._releaseLock()  # noqa
    return logger
Code Example #53
def SetLogPrefix(prefix):
    """Adds a prefix to the log handler to identify the process.

  Args:
    prefix: The prefix string to prepend to each log line.
  """
    formatter = logging.Formatter(
        str(prefix) + ' [%(filename)s:%(lineno)d] %(levelname)s %(message)s')
    logging._acquireLock()
    try:
        for handler in logging._handlerList:
            if isinstance(handler, weakref.ref):
                handler = handler()
            if handler:
                handler.setFormatter(formatter)
    finally:
        logging._releaseLock()
Code Example #54
File: setup.py, Project: lvg01/salt.old
def shutdown_multiprocessing_logging():
    global __MP_LOGGING_CONFIGURED
    global __MP_LOGGING_QUEUE_HANDLER

    if __MP_IN_MAINPROCESS is True:
        # We're in the MainProcess, return! No multiprocessing logging shutdown shall happen
        return

    try:
        logging._acquireLock()
        if __MP_LOGGING_CONFIGURED is True:
            # Let's remove the queue handler from the logging root handlers
            logging.root.removeHandler(__MP_LOGGING_QUEUE_HANDLER)
            __MP_LOGGING_QUEUE_HANDLER = None
            __MP_LOGGING_CONFIGURED = False
    finally:
        logging._releaseLock()
Code Example #55
File: test_log.py, Project: rahonen/qutebrowser
    def setUp(self):
        """Save the old logging configuration."""
        logger_dict = logging.getLogger().manager.loggerDict
        logging._acquireLock()
        try:
            self.saved_handlers = logging._handlers.copy()
            self.saved_handler_list = logging._handlerList[:]
            self.saved_loggers = saved_loggers = logger_dict.copy()
            self.logger_states = {}
            for name in saved_loggers:
                self.logger_states[name] = getattr(saved_loggers[name],
                                                   'disabled', None)
        finally:
            logging._releaseLock()

        self.root_logger = logging.getLogger("")
        self.original_logging_level = self.root_logger.getEffectiveLevel()
Code Example #56
def stopServer():
    """
    Stop the listening server which was created with a call to listen().
    """
    global _listener, _server, abort

    logging._acquireLock()
    abort = True
    if _listener:
        _listener = None
    if _server:
        _server = None
        try:
            os.unlink(unixDomainAddr)
        except:
            pass
    logging._releaseLock()
Code Example #57
File: loggex.py, Project: jack51706/rpc2socks
    def _stdBasicConfigWrapper(**kwargs):
        # We cannot just blindly rely on the implementation of the standard
        # basicConfig() because it instantiates a standard Formatter class
        # instead of relying on its module-level *_defaultFormatter* value,
        # which we patched in install() to use our own PerLevelFormatter class.
        #
        # Same goes for FileHandler and StreamHandler classes.
        #
        # This is not a desired behavior in our case, hence the temporary
        # monkey-patching below, which helps us avoid having to re-implement
        # basicConfig() ourselves.

        old_formatter = _mod_logging.Formatter
        old_filehandler = _mod_logging.FileHandler
        old_streamhandler = _mod_logging.StreamHandler
        old_basicformat = _mod_logging.BASIC_FORMAT
        old_styles = _mod_logging._STYLES

        _mod_logging._acquireLock()
        try:
            _mod_logging.Formatter = PerLevelFormatter
            _mod_logging.FileHandler = FileHandler
            _mod_logging.StreamHandler = StreamHandler
            _mod_logging.BASIC_FORMAT = BASIC_FORMAT
            _mod_logging._STYLES = _STYLES

            # backward compatibility for *force* arg (3.8+)
            if not _PYTHON38:
                if kwargs.pop("force", False):
                    for hdlr in _mod_logging.root.handlers[:]:
                        _mod_logging.root.removeHandler(hdlr)
                        hdlr.close()

            # CAUTION: do not call "_mod_logging.basicConfig" here because we
            # may have monkey-patched it already :)
            res = stdBasicConfig(**kwargs)
        finally:
            _mod_logging.Formatter = old_formatter
            _mod_logging.FileHandler = old_filehandler
            _mod_logging.StreamHandler = old_streamhandler
            _mod_logging.BASIC_FORMAT = old_basicformat
            _mod_logging._STYLES = old_styles

            _mod_logging._releaseLock()

        return res
Code Example #58
File: utils.py, Project: qu574/augpt
def setup_logging(level=logging.INFO):
    from tqdm import tqdm

    def is_console_handler(handler):
        return isinstance(handler, logging.StreamHandler) and handler.stream in {sys.stdout, sys.stderr}

    class TqdmLoggingHandler(logging.StreamHandler):
        def emit(self, record):
            try:
                msg = self.format(record)
                tqdm.write(msg)
                self.flush()
            except (KeyboardInterrupt, SystemExit):
                raise
            except:  # noqa pylint: disable=bare-except
                self.handleError(record)

    logging.basicConfig(stream=sys.stdout, level=level)
    handler = TqdmLoggingHandler(sys.stdout)
    try:
        import colorlog
        formatter = colorlog.LevelFormatter(fmt={
            'DEBUG': '%(log_color)sdebug: %(message)s (%(module)s:%(lineno)d)%(reset)s',
            'INFO': '%(log_color)sinfo%(reset)s: %(message)s',
            'WARNING': '%(log_color)swarning%(reset)s: %(message)s (%(module)s:%(lineno)d)',
            'ERROR': '%(log_color)serror%(reset)s: %(message)s (%(module)s:%(lineno)d)',
            'CRITICAL': '%(log_color)scritical: %(message)s (%(module)s:%(lineno)d)%(reset)s',
        }, log_colors={
            'DEBUG': 'white',
            'INFO': 'bold_green',
            'WARNING': 'bold_yellow',
            'ERROR': 'bold_red',
            'CRITICAL': 'bold_red',
        })
        handler.setFormatter(formatter)
    except ModuleNotFoundError:
        # We do not require colorlog to be present
        pass
    logging._acquireLock()
    orig_handlers = logging.root.handlers
    try:
        logging.root.handlers = [x for x in orig_handlers if not is_console_handler(x)] + [handler]
    except Exception:
        logging.root.handlers = orig_handlers
    finally:
        logging._releaseLock()
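
A short usage sketch (the loop contents are arbitrary): with the tqdm-aware handler installed by setup_logging(), log lines are routed through tqdm.write() and do not corrupt an active progress bar.

import logging
import time
from tqdm import tqdm

setup_logging()
log = logging.getLogger(__name__)
for i in tqdm(range(3)):
    log.info("step %d", i)
    time.sleep(0.1)
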
Code Example #59
File: mylogging.py, Project: qsdj/ver-observer
def basicConfig(
    level=logging.INFO,
    color=False,
    handler=None,
    formatter=None,
    logfile=None,
    file_level=None,
    maxBytes=0,
    backupCount=0,
    file_format=FILE_LOG_FORMAT,
    multi_process=False,
):
    _install_custom_levels()

    logging._acquireLock()
    try:
        if len(logging.root.handlers) != 0:
            return
        handler = handler or logging.StreamHandler()
        formatter = formatter or logzero.LogFormatter(color=color)
        handler.setFormatter(formatter)
        logging.root.addHandler(handler)

        if logfile:
            if multi_process:
                file_handler_class = MultiprocessRotatingFileHandler
            else:
                file_handler_class = logging.handlers.RotatingFileHandler
            file_handler = file_handler_class(logfile,
                                              maxBytes=maxBytes,
                                              backupCount=backupCount)
            file_formatter = logging.Formatter(file_format)
            file_handler.setFormatter(file_formatter)
            logging.root.addHandler(file_handler)

            if file_level is not None:
                file_handler.setLevel(file_level)
                _root_level = _lower_level(level, file_level)
                handler.setLevel(level)
                logging.root.setLevel(_root_level)

        if file_level is None:
            logging.root.setLevel(level)

    finally:
        logging._releaseLock()
Code Example #60
File: log.py, Project: wendy-king/x7_venv
def setup():
    """Setup engine logging."""
    if not isinstance(logging.root, EngineRootLogger):
        logging._acquireLock()
        for handler in logging.root.handlers:
            logging.root.removeHandler(handler)
        logging.root = EngineRootLogger("engine")
        EngineLogger.root = logging.root
        EngineLogger.manager.root = logging.root
        for logger in EngineLogger.manager.loggerDict.itervalues():
            logger.root = logging.root
            if isinstance(logger, logging.Logger):
                EngineLogger.manager._fixupParents(logger)
        EngineLogger.manager.loggerDict["engine"] = logging.root
        logging._releaseLock()
        sys.excepthook = handle_exception
        reset()