Code Example #1
File: reporters.py Project: sgill2/ncmc
def addLoggingLevel(levelName, levelNum, methodName=None):
    """
    Comprehensively adds a new logging level to the `logging` module and the
    currently configured logging class.

    `levelName` becomes an attribute of the `logging` module with the value
    `levelNum`. `methodName` becomes a convenience method for both `logging`
    itself and the class returned by `logging.getLoggerClass()` (usually just
    `logging.Logger`). If `methodName` is not specified, `levelName.lower()` is
    used.

    To avoid accidentally clobbering existing attributes, this method logs a
    warning if the level name is already an attribute of the `logging` module
    or if the method name is already present.

    Parameters
    ----------
    levelName : str
        The new level name to be added to the `logging` module.
    levelNum : int
        The level number indicated for the logging module.
    methodName : str, default=None
        The method name to attach to the logging module for the new level.
        For example, if 'trace' is provided, you would call `logging.trace()`.

    Example
    -------
    >>> addLoggingLevel('TRACE', logging.DEBUG - 5)
    >>> logging.getLogger(__name__).setLevel("TRACE")
    >>> logging.getLogger(__name__).trace('that worked')
    >>> logging.trace('so did this')
    >>> logging.TRACE
    5

    """
    if not methodName:
        methodName = levelName.lower()

    if hasattr(logging, levelName):
        logging.warning('{} already defined in logging module'.format(levelName))
    if hasattr(logging, methodName):
        logging.warning('{} already defined in logging module'.format(methodName))
    if hasattr(logging.getLoggerClass(), methodName):
        logging.warning('{} already defined in logger class'.format(methodName))

    # This method was inspired by the answers to Stack Overflow post
    # http://stackoverflow.com/q/2183233/2988730, especially
    # http://stackoverflow.com/a/13638084/2988730
    def logForLevel(self, message, *args, **kwargs):
        if self.isEnabledFor(levelNum):
            self._log(levelNum, message, args, **kwargs)

    def logToRoot(message, *args, **kwargs):
        logging.log(levelNum, message, *args, **kwargs)

    logging.addLevelName(levelNum, levelName)
    setattr(logging, levelName, levelNum)
    setattr(logging.getLoggerClass(), methodName, logForLevel)
    setattr(logging, methodName, logToRoot)
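
A minimal usage sketch of the helper above (the logger name and message text are illustrative):

import logging

# assumes addLoggingLevel from the snippet above is in scope
addLoggingLevel('TRACE', logging.DEBUG - 5)

logging.basicConfig(level='TRACE')          # the new name is now resolvable
log = logging.getLogger('demo')
log.trace('instance method, routed through logForLevel')
logging.trace('module-level helper, routed through logToRoot')
assert logging.TRACE == logging.DEBUG - 5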
Code Example #2
File: decorators.py Project: aglavic/quicknxs
 def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func=None, extra=None):
   if extra is None:
     return logging.getLoggerClass().makeRecord(self, name, lvl, fn, lno,
                        msg, args, exc_info, func=func, extra=None)
   else:
     # take the record's origin (file, line number, function) from the extra dict
     return logging.getLoggerClass().makeRecord(self, name, lvl, extra['name'], extra['lno'],
                        msg, args, exc_info, func=extra['func'], extra=None)
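
The snippet uses the Python 2-era makeRecord signature; a self-contained Python 3 sketch of the same idea (the class name, format string, and extra field values are assumptions):

import logging

class RelocatingLogger(logging.Logger):
    # same idea as the snippet: take the record's origin from `extra`;
    # Python 3 adds the trailing `sinfo` parameter
    def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info,
                   func=None, extra=None, sinfo=None):
        if extra is None:
            return super().makeRecord(name, lvl, fn, lno, msg, args,
                                      exc_info, func=func, sinfo=sinfo)
        return super().makeRecord(name, lvl, extra['name'], extra['lno'],
                                  msg, args, exc_info,
                                  func=extra['func'], sinfo=sinfo)

logging.setLoggerClass(RelocatingLogger)
logging.basicConfig(format='%(pathname)s:%(lineno)d in %(funcName)s: %(message)s',
                    level=logging.INFO)
log = logging.getLogger('relocated')
log.info('reported from elsewhere',
         extra={'name': 'original.py', 'lno': 120, 'func': 'wrapped_fn'})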
Code Example #3
File: log.py Project: ajdiaz/whistler
 def __init__(self, level=logging.INFO):
     logging.getLoggerClass().__init__(self, None)
     self.handler = logging.StreamHandler()
     self.formatter = logging.Formatter(self.message_format,
             self.datetime_format)
     self.handler.setFormatter(self.formatter)
     self.addHandler(self.handler)
     self.setLevel(level)
Code Example #4
File: test_log.py Project: EzyInsights/celery
    def test_patches(self):
        _patch_logger_class()
        self.assertTrue(logging.getLoggerClass()._signal_safe)
        _patch_logger_class()
        self.assertTrue(logging.getLoggerClass()._signal_safe)

        with in_sighandler():
            logging.getLoggerClass().log(get_logger('test'))
Code Example #5
File: factorytest.py Project: pombr/conary
    def testLocalFactoryWithLocalRecipe(self):
        os.chdir(self.workDir)
        self.newpkg("foo", factory = "test")
        os.chdir("foo")
        self.writeFile("factory-test.recipe",
"""
class TestFactory(Factory):

    name = "factory-test"
    version = "1.0"

    def getRecipeClass(self):
        class TestSubclass(PackageRecipe):
            name = "testsubclass"
            version = "1.0"
            internalAbstractBaseClass = True
            clearBuildReqs()

        return TestSubclass
""")

        self.writeFile("foo.recipe",
"""
# CNY-2813. importing log inside a recipe used to reset the loglevel
from conary.lib import log
class FooRecipe(FactoryRecipeClass):

    name = "foo"
    version = "1.1"

    def setup(self):
        self.Create("/etc/foo", "foo")
""")

        self.addfile("foo.recipe")
        repos = self.openRepository()
        cstate = state.ConaryStateFromFile('CONARY')
        level = log.getVerbosity()
        try:
            log.setVerbosity(log.INFO)
            klass = logging.getLoggerClass()
            self.discardOutput(cook.cookCommand, self.cfg, [cstate], False, {})
            self.assertEquals(klass, logging.getLoggerClass())
        finally:
            log.setVerbosity(level)
        ccs = changeset.ChangeSetFromFile(os.path.join(self.workDir,
                'foo', 'foo-1.1.ccs'))
        trvs = [trove.Trove(x) for x in ccs.iterNewTroveList()]
        trv = [x for x in trvs if x.getName() == 'foo:debuginfo'][0]
        files = [x for x in trv.iterFileList() if \
                x[1] == '/usr/src/debug/buildlogs/foo-1.1-log.bz2']
        fileId, path, pathId, ver = files[0]
        fileInfo, fileObj = ccs.getFileContents(fileId, pathId)
        decomp = bz2.BZ2Decompressor()
        data = decomp.decompress(fileObj.f.read())
        self.assertFalse("+ Processing" not in data,
                "build log data appears to be incomplete")
Code Example #6
File: log.py Project: gaetano-guerriero/eyeD3-debian
def getLogger(name):
    og_class = logging.getLoggerClass()
    try:
        logging.setLoggerClass(Logger)
        return logging.getLogger(name)
    finally:
        logging.setLoggerClass(og_class)
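
The save/swap/restore idiom above works because getLogger caches instances: the custom class matters only the first time a given name is created. A small demonstration sketch (the subclass and names are illustrative):

import logging

class VerboseLogger(logging.Logger):     # hypothetical subclass
    pass

og_class = logging.getLoggerClass()
logging.setLoggerClass(VerboseLogger)
first = logging.getLogger('eyed3.demo')  # created now, with VerboseLogger
logging.setLoggerClass(og_class)

assert isinstance(first, VerboseLogger)
# getLogger caches: asking again returns the same instance
assert logging.getLogger('eyed3.demo') is first
# loggers created after the restore use the default class again
assert not isinstance(logging.getLogger('eyed3.other'), VerboseLogger)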
Code Example #7
File: __init__.py Project: mdevaev/contextlog
def patch_logging():
    """
        This hack is used to log the context inside standard and thirdparty libraries which
        uses usually python logging. The context inherits from caller using contextlog.
    """
    if logging.getLoggerClass() != _SlaveContextLogger:
        logging.setLoggerClass(_SlaveContextLogger)
Code Example #8
File: gui_logging.py Project: JeanBilheux/RefRed
def setup_system():
  logger=logging.getLogger()#logging.getLogger('quicknxs')
  logger.setLevel(min(FILE_LEVEL, CONSOLE_LEVEL, GUI_LEVEL))
  if not sys.platform.startswith('win'):
    # no console logger for windows (py2exe)
    console=logging.StreamHandler(sys.__stdout__)
    formatter=logging.Formatter('%(levelname) 7s: %(message)s')
    console.setFormatter(formatter)
    console.setLevel(CONSOLE_LEVEL)
    logger.addHandler(console)

  logfile=logging.FileHandler(paths.LOG_FILE, 'w')
  formatter=logging.Formatter('[%(levelname)s] - %(asctime)s - %(filename)s:%(lineno)i:%(funcName)s %(message)s', '')
  logfile.setFormatter(formatter)
  logfile.setLevel(FILE_LEVEL)
  logger.addHandler(logfile)

  logging.info('*** QuickNXS %s Logging started ***'%str_version)

  # define numpy warning behavior
  global nplogger
  old_class=logging.getLoggerClass()
  logging.setLoggerClass(NumpyLogger)
  nplogger=logging.getLogger('numpy')
  nplogger.setLevel(logging.DEBUG)
  null_handler=logging.StreamHandler(StringIO())
  null_handler.setLevel(logging.CRITICAL)
  nplogger.addHandler(null_handler)
  logging.setLoggerClass(old_class)
  seterr(divide='call', over='call', under='ignore', invalid='call')
  seterrcall(numpy_logger)

  # write information on program exit
  sys.excepthook=excepthook_overwrite
  atexit.register(goodby)
Code Example #9
    def test_initialiazation(self):
        # Ensure metric data reset in __init__
        self.assertTrue(not self.connection.metric_data,
                        "Metric data is not empty")
        # Ensure the state dict is reset in __init__
        for state in self.connection.state.values():
            if not state['current_state'] == component_states.unknown \
                    or not state['reason'] == '' or state['metrics']:
                self.state_reset = False

        self.assertTrue(self.state_reset,
                        "State data is not empty")

        # Ensure the latency dict is reset in __init__
        for value in self.connection.latency.values():
            if value > 0:
                self.latency_reset = False
                break

        self.assertTrue(self.latency_reset,
                        "Latency data is not empty")
        # Ensure logger set to valid logger object in __init__
        self.assertIsInstance(self.connection.logger, logging.getLoggerClass(),
                              "Invalid logger object")
        # Ensure cache file path is not empty
        self.assertIsNotNone(self.connection.cache_file_path)
Code Example #10
File: logging.py Project: Amazeotron/Garagey
def create_logger(name, debug=False, format=None):
        Logger = getLoggerClass()

        class DebugLogger(Logger):
            def getEffectiveLevel(x):
                if x.level == 0 and debug:
                    return DEBUG
                else:
                    return Logger.getEffectiveLevel(x)

        class DebugHandler(StreamHandler):
            def emit(x, record):
                StreamHandler.emit(x, record) if debug else None

        handler = DebugHandler()
        handler.setLevel(DEBUG)

        if format:
            handler.setFormatter(Formatter(format))

        logger = getLogger(name)
        del logger.handlers[:]
        logger.__class__ = DebugLogger
        logger.addHandler(handler)

        return logger
Code Example #11
    def test_neutron_command_meta_defines_log(self):
        class FakeCommand(neutronV20.NeutronCommand):
            pass

        self.assertTrue(helpers.safe_hasattr(FakeCommand, 'log'))
        self.assertIsInstance(FakeCommand.log, logging.getLoggerClass())
        self.assertEqual(__name__ + ".FakeCommand", FakeCommand.log.name)
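
Asserting against logging.getLoggerClass() rather than logging.Logger keeps the test valid even when a framework installs its own Logger subclass before the test runs. A minimal illustration sketch (the subclass is hypothetical):

import logging

class FrameworkLogger(logging.Logger):   # hypothetical stand-in for a framework's class
    pass

logging.setLoggerClass(FrameworkLogger)
log = logging.getLogger('framework.cmd')
# the assertion tracks whatever class is currently installed, so the check
# keeps passing even when the logging setup changes underneath it
assert isinstance(log, logging.getLoggerClass())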
Code Example #12
File: common.py Project: NextGenIntelligence/main
@contextmanager
def catch_logmsg(pattern, level=logging.WARNING, count=None):
    '''Catch (and ignore) log messages matching *pattern*

    *pattern* is matched against the *unformatted* log message, i.e. before any
    arguments are merged.

    If *count* is not None, raise an exception unless exactly *count* matching
    messages are caught.
    '''

    logger_class = logging.getLoggerClass()
    handle_orig = logger_class.handle
    caught = [0]

    @wraps(handle_orig)
    def handle_new(self, record):
        if (record.levelno != level
            or not re.search(pattern, record.msg)):
            return handle_orig(self, record)
        caught[0] += 1

    logger_class.handle = handle_new
    try:
        yield

    finally:
        logger_class.handle = handle_orig

        if count is not None and caught[0] != count:
            raise AssertionError('Expected to catch %d log messages, but got only %d'
                                 % (count, caught[0]))
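
A usage sketch, assuming the function is wrapped with contextlib.contextmanager as its yield implies (the pattern and message are illustrative):

import logging

log = logging.getLogger('demo')
with catch_logmsg(r'disk .* full', level=logging.WARNING, count=1):
    log.warning('disk %s full', 0)   # matched against the *unformatted* msg
# the message was swallowed; any other number of matches raises AssertionError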
Code Example #13
File: logging.py Project: comick/barduino
def create_logger():
    """Creates a logger for the given application.
    
    This logger works similar to a regular Python logger but changes the
    effective logging level based on the application's debug flag.  Furthermore
    this function also removes all attached handlers in case there was a logger
    with the log name before.
    """
    Logger = getLoggerClass()

    class DebugLogger(Logger):
        def getEffectiveLevel(self):
            if self.level == 0:
                return DEBUG if web.config.DEBUG else INFO
            return super(DebugLogger, self).getEffectiveLevel()

    class DebugHandler(StreamHandler):
        def emit(x, record):
            StreamHandler.emit(x, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(web.config.LOG_FORMAT))
    logger = getLogger(web.config.LOGGER_NAME)
    # just in case that was not a new logger, get rid of all the handlers
    # already attached to it.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    if web.config.LOG_ENABLE:
        logger.addHandler(handler)
    return logger
Code Example #14
File: logsetup.py Project: Huskyeder/augustus
def setupLogging(config_options=[]):
    """Set up overall logging for the program given the config options.

    Arguments:

        config_options (List of XML objects):
            Empty if the user did not elect to use logging.
            Otherwise could contain either or both of the
            <Logging>...</Logging> and <Metadata>...</Metadata>
            elements.
    """
    # There must be a metadata logger regardless of its level,
    # so that its internal functions may be called from any other
    # module without error.
    OldClass = logging.getLoggerClass()
    logging.setLoggerClass(MetaDataLogger)
    logging.getLogger('metadata')
    logging.setLoggerClass(OldClass)

    # this will be reset below if the loggers are configured.
    logging.getLogger().setLevel(level=logging.CRITICAL)
    logging.getLogger("metadata").setLevel(level=logging.CRITICAL)

    if not config_options:
        logging.disable(level=logging.CRITICAL)
    else:
        for child in config_options:
            if child.tag == "Logging":
                _setupLogging(child)
            elif child.tag == "Metadata":
                _setupLogging(child, "metadata")
            else:
                logging.getLogger('').critical("Logging configuration attempted for an object that is not a logger: %s" % str(child.tag))
                sys.exit()
Code Example #15
File: chatlog.py Project: NURDspace/jsonbot
def enablelogging(botname, channel):
    """ set loglevel to level_name. """
    global loggers
    global logfiles
    LOGDIR = initlog(getdatadir())
    logging.warn("enabling on (%s,%s)" % (botname, channel))
    channel = stripname(channel)
    logname = "%s_%s" % (botname, channel)
    #if logname in loggers: logging.warn("there is already a logger for %s" % logname) ; return
    logfile = LOGDIR + os.sep + logname + ".log"
    try:
        filehandler = logging.handlers.TimedRotatingFileHandler(logfile, 'midnight')
        formatter = logging.Formatter(format)
        filehandler.setFormatter(formatter)
        logfiles[logfile] = time.time()
    except IOError:
        filehandler = None
    chatlogger = logging.getLoggerClass()(logname)
    chatlogger.setLevel(logging.INFO)
    if chatlogger.handlers:
        for handler in list(chatlogger.handlers):  # iterate a copy: removeHandler mutates the list
            chatlogger.removeHandler(handler)
    if filehandler:
        chatlogger.addHandler(filehandler)
        logging.warning("%s - logging enabled on %s" % (botname, channel))
    else:
        logging.error("no file handler found - not enabling logging.")
    global lastlogger
    lastlogger = chatlogger
    loggers[logname] = lastlogger
Code Example #16
File: log.py Project: nzlosh/st2
def setup(config_file, redirect_stderr=True, excludes=None, disable_existing_loggers=False):
    """
    Configure logging from file.
    """
    try:
        logging.config.fileConfig(config_file,
                                  defaults=None,
                                  disable_existing_loggers=disable_existing_loggers)
        handlers = logging.getLoggerClass().manager.root.handlers
        _add_exclusion_filters(handlers=handlers, excludes=excludes)
        if redirect_stderr:
            _redirect_stderr()
    except Exception as exc:
        exc_cls = type(exc)
        tb_msg = traceback.format_exc()

        msg = str(exc)
        msg += '\n\n' + tb_msg

        # revert stderr redirection since there is no logger in place.
        sys.stderr = sys.__stderr__

        # No logger yet therefore write to stderr
        sys.stderr.write('ERROR: %s' % (msg))

        raise exc_cls(six.text_type(msg))
Code Example #17
    def __init__(self, driver=None, request=None, trg=None, *args, **kwargs):
        """Create an eval controller
        @param driver {Driver} The OOP driver instance to communicate via
        @param request {Request} The request causing the evaluation
        """
        log.debug("__init__")
        EvalController.__init__(self, *args, **kwargs)

        self.driver = driver
        self.request = request
        self.trg = trg
        self.silent = request.get("silent", False)
        self.keep_existing = request.get("keep_existing", self.keep_existing)

        # Set up a *new* logger to record any errors
        # Note that the output will be discarded if there is no error
        self.log_stream = cStringIO.StringIO()
        self.log_hndlr = logging.StreamHandler(self.log_stream)
        loggerClass = logging.Logger.manager.loggerClass
        if not loggerClass:
            loggerClass = logging.getLoggerClass()
        self.log = loggerClass("codeintel.evaluator")
        self.log.setLevel(logging.WARN) # Only log warnings and worse
        self.log.manager = logging.Logger.manager
        self.log.propagate = False
        self.log.addHandler(self.log_hndlr)
        self.best_msg = (0, "")
        self.has_sent_response = False
Code Example #18
File: SetLogger_New.py Project: Loreton/LnFunctions3
def setLogger(gv, logFile=None, pkgName=None):

        # ------------------------------------------------
        # - From packageHier we take only
        # - the last two qualifiers.
        # ------------------------------------------------
    packageHier = pkgName.split('.')
    loggerName  = ('.'.join(packageHier[-2:]))

    if logFile:
        try:
            logging.config.fileConfig(logFile, disable_existing_loggers=False)
        except Exception as why:
            gv.LN.sys.exit(gv, 2001, "{} - ERROR in file: {}".format(str(why), logFile), console=True)

        logger = logging.getLogger(loggerName)
        savedLevel = logger.getEffectiveLevel()
        logger.setLevel(logging.INFO)
        for i in range(1,10):   logger.info(' ')
        for i in range(1,5):    logger.info('-'*40 + 'Start LOGging' + '-'*20)
        logger.setLevel(savedLevel)
        logFileName = logging.getLoggerClass().root.handlers[0].baseFilename
        return logFileName

    logger = logging.getLogger(loggerName)
    return logger
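
The expression logging.getLoggerClass().root.handlers[0].baseFilename, which also appears in examples #28 and #32 below, reaches the root logger through a class attribute; logging.root (or logging.getLogger()) names the same object. A sketch (the file path is illustrative):

import logging

logging.basicConfig(filename='/tmp/app.log')    # illustrative path

# Logger.root is a class attribute holding the root logger, so these are
# all the same FileHandler:
h1 = logging.getLoggerClass().root.handlers[0]
h2 = logging.root.handlers[0]
h3 = logging.getLogger().handlers[0]
assert h1 is h2 is h3
print(h1.baseFilename)                          # absolute path of the log file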
Code Example #19
File: bootstrap.py Project: UfSoft/dac
    def postOptions(self):
        from baca.application import app
        if self.opts['config'] == "~/.ilog":
            self.opt_config(self.opts['config'])


        if not isfile(join(app.config.dir, app.config.file)):
            app.config_initial_populate()
            app.config_save()
        app.config_load()

        # Setup logging
        from baca.utils.logger import Logging
        if logging.getLoggerClass() is not Logging:
            logging.config.fileConfig(
                usefull_path(str(app.config.logging_config_file))
            )
            logging.setLoggerClass(Logging)

            twisted_logging = PythonLoggingObserver('twisted')
            twisted_logging.start()

#        self._setup_database()
        app.setup_log()

        if not self.subCommand:
            self.opt_help()
Code Example #20
File: TrLogger.py Project: utsdab/usr
def TrLogger(logger_name, options):
    """
    Factory for the actual logging-derived logger
    """
    try:
        trEmbraceAndExtendErrno()

        old = logging.getLoggerClass()  # save old setting
        logging.setLoggerClass(TrLoggerClass)

        #
        # Provide a logging configuration backdoor for sites that want
        # to do something sophisticated.
        #
        if options.logconfig:
            if os.path.exists(options.logconfig):
                logging.config.fileConfig(options.logconfig)
            else:
                options.logconfig = None

        logger = logging.getLogger(logger_name)

        logging.setLoggerClass(old)  # restore

        if not options.logconfig:
            # In the typical case that there is no logging config file,
            # apply our usual handlers.
            logger.trBasicConfig(options)

    except Exception:
        logger = TrDesperationLogger(logger_name)
        logger.exception("logging configuration failed")

    return logger
Code Example #21
def _check_logger_class():
    """
    Make sure process name is recorded when loggers are used
    """
    # XXX This function is unnecessary once logging is patched
    import logging

    if hasattr(logging, "multiprocessing"):
        return

    logging._acquireLock()
    try:
        OldLoggerClass = logging.getLoggerClass()
        if not getattr(OldLoggerClass, "_process_aware", False):

            class ProcessAwareLogger(OldLoggerClass):
                _process_aware = True

                def makeRecord(self, *args, **kwds):
                    record = OldLoggerClass.makeRecord(self, *args, **kwds)
                    record.processName = current_process()._name
                    return record

            logging.setLoggerClass(ProcessAwareLogger)
    finally:
        logging._releaseLock()
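
As the XXX comment suggests, this patch is largely unnecessary on modern Python: LogRecord fills in processName by itself whenever multiprocessing is importable. A sketch demonstrating the built-in behaviour (the format string and process name are illustrative):

import logging
import multiprocessing

def work():
    # each process configures its own logging in this sketch
    logging.basicConfig(format='%(processName)s: %(message)s', level=logging.INFO)
    logging.getLogger(__name__).info('hello')    # -> "worker-1: hello"

if __name__ == '__main__':
    p = multiprocessing.Process(target=work, name='worker-1')
    p.start()
    p.join()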
Code Example #22
def _initialize():
  """Initializes loggers and handlers."""
  global _absl_logger, _absl_handler

  if _absl_logger:
    return

  original_logger_class = logging.getLoggerClass()
  logging.setLoggerClass(ABSLLogger)
  _absl_logger = logging.getLogger('absl')
  logging.setLoggerClass(original_logger_class)

  python_logging_formatter = PythonFormatter()
  _absl_handler = ABSLHandler(python_logging_formatter)

  # The absl handler logs to stderr by default. To prevent double logging to
  # stderr, the following code tries its best to remove other handlers that emit
  # to stderr. Those handlers are most commonly added when logging.info/debug is
  # called before importing this module.
  handlers = [
      h for h in logging.root.handlers
      if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr]
  for h in handlers:
    logging.root.removeHandler(h)

  # The absl handler will always be attached to root, not the absl logger.
  if not logging.root.handlers:
    # Attach the absl handler at import time when there are no other handlers.
    # Otherwise it means users have explicitly configured logging, and the absl
    # handler will only be attached later in app.run(). For App Engine apps,
    # the absl handler is not used.
    logging.root.addHandler(_absl_handler)
Code Example #23
def setup_logging(increase_padding=False):
    """
    Setup overall logging engine and add 2 more levels of logging lower than
    DEBUG, TRACE and GARBAGE.
    """
    import logging

    if increase_padding and logging.getLoggerClass() is not Logging:
        logging.setLoggerClass(Logging)

    if not hasattr(LoggingLoggerClass, 'trace'):
        def trace(cls, msg, *args, **kwargs):
            return cls.log(5, msg, *args, **kwargs)

        logging.addLevelName(5, 'TRACE')
        LoggingLoggerClass.trace = new.instancemethod(
            trace, None, LoggingLoggerClass
        )

    if not hasattr(LoggingLoggerClass, 'garbage'):
        def garbage(cls, msg, *args, **kwargs):
            return cls.log(1, msg, *args, **kwargs)

        logging.addLevelName(1, 'GARBAGE')
        LoggingLoggerClass.garbage = new.instancemethod(
            garbage, None, LoggingLoggerClass
        )

    # Set the root logger at the lowest level possible
    logging.getLogger().setLevel(1)
Code Example #24
File: log.py Project: Aliced3645/celery
def ensure_process_aware_logger(force=False):
    """Make sure process name is recorded when loggers are used."""
    global _process_aware
    if force or not _process_aware:
        logging._acquireLock()
        try:
            _process_aware = True
            Logger = logging.getLoggerClass()
            if getattr(Logger, '_process_aware', False):  # pragma: no cover
                return

            class ProcessAwareLogger(Logger):
                _signal_safe = True
                _process_aware = True

                def makeRecord(self, *args, **kwds):
                    record = Logger.makeRecord(self, *args, **kwds)
                    record.processName = current_process()._name
                    return record

                def log(self, *args, **kwargs):
                    if _in_sighandler:
                        return
                    return Logger.log(self, *args, **kwargs)
            logging.setLoggerClass(ProcessAwareLogger)
        finally:
            logging._releaseLock()
Code Example #25
File: patch.py Project: Aaron1011/oh-mainline
def _patch_logger_class():
    """Make sure process name is recorded when loggers are used."""

    try:
        from multiprocessing.process import current_process
    except ImportError:
        current_process = None  # noqa

    logging._acquireLock()
    try:
        OldLoggerClass = logging.getLoggerClass()
        if not getattr(OldLoggerClass, '_process_aware', False):

            class ProcessAwareLogger(OldLoggerClass):
                _process_aware = True

                def makeRecord(self, *args, **kwds):
                    record = OldLoggerClass.makeRecord(self, *args, **kwds)
                    if current_process:
                        record.processName = current_process()._name
                    else:
                        record.processName = ""
                    return record
            logging.setLoggerClass(ProcessAwareLogger)
    finally:
        logging._releaseLock()
Code Example #26
    def test_tacker_command_meta_defines_log(self):
        class FakeCommand(tackerV10.TackerCommand):
            pass

        self.assertTrue(helpers.safe_hasattr(FakeCommand, 'log'))
        self.assertIsInstance(FakeCommand.log, logging.getLoggerClass())
        self.assertEqual(FakeCommand.log.name, __name__ + ".FakeCommand")
Code Example #27
File: exceptions.py Project: Goggin/vsc-base
def get_callers_logger():
    """
    Get logger defined in caller's environment
    @return: logger instance (or None if none was found)
    """
    logger_cls = logging.getLoggerClass()
    frame = inspect.currentframe()
    logger = None

    # frame may be None, see https://docs.python.org/2/library/inspect.html#inspect.currentframe
    if frame is not None:
        try:
            # consider calling stack in reverse order, i.e. most inner frame (closest to caller) first
            for frameinfo in inspect.getouterframes(frame)[::-1]:
                bindings = inspect.getargvalues(frameinfo[0]).locals
                for val in bindings.values():
                    if isinstance(val, logger_cls):
                        logger = val
                        break
        finally:
            # make very sure that reference to frame object is removed, to avoid reference cycles
            # see https://docs.python.org/2/library/inspect.html#the-interpreter-stack
            del frame

    return logger
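
A sketch of the frame walk in action, assuming get_callers_logger is in scope (the logger name is illustrative; because the inner break only exits the inner loop, the innermost matching binding wins):

import logging

def caller():
    log = logging.getLogger('vsc.demo')   # a local the frame walk can discover
    return get_callers_logger()

assert caller() is logging.getLogger('vsc.demo')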
Code Example #28
File: subwindows.py Project: jcornford/pyecog
    def run_pred_feature_extraction(self):
        # grab the settings...

        if self.extraction_thread.isRunning():
            QtGui.QMessageBox.information(self, "Not implemented, lazy!", "Worker thread still running, please wait for previous orders to be finished!")
            return 0

        chunk_len   = int(self.chunk_len_box.text())
        ncores = self.cores_to_use.text()
        use_peaks_bool = self.run_peakdet_checkBox.isChecked()

        if ncores == 'all':
            ncores = -1
        else:
            ncores = int(ncores)

        try:
            logfilepath = logging.getLoggerClass().root.handlers[0].baseFilename
            self.logpath_dsplay.setText(str(logfilepath))
        except:
            print('couldnt get logpath')



        self.extraction_thread.set_params_for_extraction(h5_folder=self.h5directory,
                                            timewindow = chunk_len,
                                            run_peakdet_flag = use_peaks_bool,
                                            n_cores=ncores)
        self.extraction_thread.start()
Code Example #29
def create_logger(app):
    """Creates a logger for the given application.  This logger works
    similar to a regular Python logger but changes the effective logging
    level based on the application's debug flag.  Furthermore this
    function also removes all attached handlers in case there was a
    logger with the log name before.
    """
    Logger = getLoggerClass()

    class DebugLogger(Logger):
        def getEffectiveLevel(x):
            if x.level == 0 and app.debug:
                return DEBUG
            return Logger.getEffectiveLevel(x)

    class DebugHandler(StreamHandler):
        def emit(x, record):
            StreamHandler.emit(x, record) if app.debug else None

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(app.debug_log_format))
    logger = getLogger(app.logger_name)
    # just in case that was not a new logger, get rid of all the handlers
    # already attached to it.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    logger.addHandler(handler)
    return logger
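
This is essentially Flask 0.x's create_logger; examples #10, #13, and #30 are lightly adapted copies of the same pattern. A runnable approximation with the app object stubbed out (the stub class is an assumption; the bare names come from `from logging import ...` in the original module):

from logging import DEBUG, Formatter, StreamHandler, getLogger, getLoggerClass

class FakeApp:                      # minimal stand-in for the Flask app
    debug = True
    logger_name = 'demo'
    debug_log_format = '%(levelname)s in %(module)s: %(message)s'

app = FakeApp()
logger = create_logger(app)
logger.debug('emitted: app.debug is True')
app.debug = False
logger.debug('dropped: effective level reverts and the handler mutes itself')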
Code Example #30
File: base.py Project: sharoonthomas/PyUPS
    def create_logger(self):
        """Creates a logger.  This logger works similar to a regular Python
        logger but changes the effective logging level based on the API's
        sandbox flag.
        Furthermore this function also removes all attached handlers in case
        there was a logger with the log name before.

        :copyright: (c) 2010 by Armin Ronacher.
        """
        Logger = getLoggerClass()

        class DebugLogger(Logger):
            def getEffectiveLevel(x):
                return DEBUG if self.sandbox else Logger.getEffectiveLevel(x)

        class DebugHandler(StreamHandler):
            def emit(x, record):
                StreamHandler.emit(x, record) if self.sandbox else None

        handler = DebugHandler()
        handler.setLevel(DEBUG)
        handler.setFormatter(Formatter(self.debug_log_format))
        logger = getLogger(self.logger_name)
        # just in case that was not a new logger, get rid of all the handlers
        # already attached to it.
        del logger.handlers[:]
        logger.__class__ = DebugLogger
        logger.addHandler(handler)
        return logger
Code Example #31
 def test_patches(self):
     ensure_process_aware_logger()
     with in_sighandler():
         logging.getLoggerClass().log(get_logger('test'))
Code Example #32
def runFullChain(opt, Params, point=None, NRgridPoint=-1, extraLabel=''):
  #print 'Running: ', sys._getframe().f_code.co_name, " Node=",point
  # print sys._getframe().f_code
  PID = os.getpid()

  if opt.verb==0:
    logLvl = logging.ERROR
  elif opt.verb==1:
    logLvl = logging.INFO
  else:
    logLvl = logging.DEBUG

  LTDir_type  = os.getenv("CMSSW_BASE")+Params['LTDIR']
  if '/tmp' in Params['LTDIR'] or '/store' in Params['LTDIR'] or '/afs' in Params['LTDIR']:
    LTDir_type = Params['LTDIR']
    if '/store' in Params['LTDIR']:
      LTDir_type = '/eos/cms'+Params['LTDIR']

  signalModelCard = os.getenv("CMSSW_BASE")+Params['signal']['signalModelCard']
  lumi = 35.87 # Only used for plot produced by bbgg2Dfitter
  energy = str(Params['other']["energy"])
  mass   = Params['other']["higgsMass"]
  addHiggs   = Params['other']["addHiggs"]
  scaleSingleHiggs = Params['other']["scaleSingleHiggs"]
  doBlinding = Params['other']["doBlinding"]
  doBands = Params['other']["doBands"]
  NCAT    = Params['other']["ncat"]
  doBrazilianFlag = Params['other']["doBrazilianFlag"]
  Combinelxbatch = Params['other']['Combinelxbatch']
  doSingleLimit = Params['other']['doSingleLimit']
  drawSignalFit = Params['other']['drawSignalFit']
  doCombine       = Params['other']["runCombine"]
  useSigTheoryUnc = Params['other']["useSigTheoryUnc"]
  HH   = Params['other']["HH"]
  base = Params['other']["base"]
  low  = Params['other']["low"]
  obs  = Params['other']["obs"]
  twotag=Params['other']["twotag"]
  dataName = Params['data']['name']
  combineOpt = Params['other']['combineOption']
  doBias = Params['other']['doBias']
  biasConfig = Params['other']['biasConfig']
  doDoubleSidedCB = Params['other']['doDoubleSidedCB']
  fitStrategy = Params['other']['fitStrategy']

  massCuts = [Params['other']["minMggMassFit"], Params['other']["maxMggMassFit"],
              Params['other']["minMjjMassFit"], Params['other']["maxMjjMassFit"],
              Params['other']["minSigFitMgg"],  Params['other']["maxSigFitMgg"],
              Params['other']["minSigFitMjj"],  Params['other']["maxSigFitMjj"],
              Params['other']["minHigMggFit"],  Params['other']["maxHigMggFit"],
              Params['other']["minHigMjjFit"],  Params['other']["maxHigMjjFit"]]

  if NCAT > 3:
    procLog.error("Error NCAT>3!")
    return __BAD__

  signalTypes = Params['signal']['types']

  if point!=None and NRgridPoint!=-1:
    print 'WARning: cannot have both the Node and grid Point. Choose one and try again'
    return __BAD__
  elif opt.analyticalRW==True:
    Label = "_ARW_"
  elif point!=None:
    Label = "_Node_"+str(point)
  elif NRgridPoint!=-1:
    Label = "_gridPoint_"+str(NRgridPoint)
  else:
    print 'WARning: using list of nodes from the json input file'
    return __BAD__

  sigCat = 0
  isRes = 0
  if point==None:
    sigCat = -1
  elif point == 'SM':
    sigCat = 0
  elif point == 'box':
    sigCat = 1
  elif int(point) > 15:
    sigCat = int(point)
    isRes = 1
    Label.replace("Node", "Mass")
  else:
    sigCat = int(point)

  Label +=  extraLabel

  print "Label=",Label


  if opt.outDir:
    baseFolder="./"+opt.outDir
  else:
    baseFolder="./bbggToolsResults"

  # Create PID file to track the job:
  pidfile = "/tmp/"+__username__+"/PIDs/PoolWorker"+Label+".pid"
  file(pidfile, 'w').write(str(PID))

  procName = current_process().name
  try:
    logging.basicConfig(level=logLvl,
                        format='%(asctime)s PID:%(process)d %(name)-12s %(levelname)-8s %(message)s',
                        datefmt='%m-%d %H:%M',
                        filename=baseFolder+'/logs/processLog_'+str(procName)+Label+'.log',
                        filemode='w')
  except:
    print 'I got excepted!'
    return __BAD__

  procLog = logging.getLogger('Process.Log')

  procLog.info('\n\n New process Log started. PID = %d,  job label: %r\n', PID, Label)
  procLog.info("This log filename = "+logging.getLoggerClass().root.handlers[0].baseFilename)
  procLog.info('Node or Mass=%r  gridPoint=%r  PID=%r \n Options: %s',point, NRgridPoint, PID, pformat(opt))

  start = time.time()


  # For now the mass cuts are all the same, but can be changed in future.
  # ParamsForFits = {'SM': massCuts, 'box': massCuts}

  SignalFile = "/LT_output_GluGluToHHTo2B2G_node_"+str(point)+"_13TeV-madgraph.root"
  if "LT_StrikeBack" in LTDir_type or "MadMax" in LTDir_type or "ttH" in LTDir_type:
      SignalFile = "/LT_output_GluGluToHHTo2B2G_node_"+str(point)+"_13TeV-madgraph_0.root"
  if isRes:
    SignalFile = "/LT_output_GluGluToTYPEToHHTo2B2G_M-"+str(point)+"_narrow_13TeV-madgraph.root"
    if "RES_Mar21" in LTDir_type:
      SignalFile = "/LT_output_GluGluToTYPEToHHTo2B2G_M-"+str(point)+"_narrow_13TeV-madgraph_0.root"
    
  if NRgridPoint >= 0:
    SignalFile = "/LT_NR_Nodes_2to13_merged.root"

  if opt.analyticalRW == True:
    pointStr = "_".join(['kl',str(opt.ARW_kl),'kt',str(opt.ARW_kt),'cg',str(opt.ARW_cg),'c2',str(opt.ARW_c2),'c2g',str(opt.ARW_c2g)]).replace('.', 'p').replace('-', 'm')
    SignalFile="/LT_NR_Nodes_All_merged_"+pointStr+".root"

  procLog.debug('%s, %s', SignalFile, pformat(signalTypes))


  for t in signalTypes:
    newFolder = baseFolder+ str('/'+t+Label)
    thisSignalFile = SignalFile.replace("TYPE", t)

    procLog.info('Type = %s, %s', t, newFolder)

    createDir(newFolder,procLog)

    HLFactoryname= str(t+Label)
    hlf = RooStats.HLFactory(HLFactoryname, signalModelCard, False)
    w = hlf.GetWs()

    theFitter = bbgg2DFitter()
    theStyle = theFitter.style()
    gROOT.SetStyle('hggPaperStyle')

    theFitter.Initialize( w, sigCat, lumi, newFolder, energy, doBlinding, NCAT, addHiggs,
                          massCuts[0],massCuts[1],massCuts[2],
                          massCuts[3],massCuts[4],massCuts[5],
                          massCuts[6],massCuts[7],massCuts[8],
                          massCuts[9],massCuts[10],massCuts[11], NRgridPoint,
                          logging.getLoggerClass().root.handlers[0].baseFilename+'.bbgg2D', opt.analyticalRW)

    theFitter.SetVerbosityLevel(opt.verb)


    if opt.ttHTaggerCut!=None:
      theFitter.SetCut("ttHTagger > "+str(opt.ttHTaggerCut))
      if opt.verb>0:
        procLog.info('Apply the cut on ttHTagger: ' + str(opt.ttHTaggerCut))
        
    if 'HighMass' in t:
      theFitter.SetNCat0(2)
    else:
      theFitter.SetNCat0(0)


    # Fit strategies. 1: 1D - m(gg); 2: 2D - m(gg),m(jj)
    if fitStrategy not in [1,2]:
      print "Fit strategy is not supported:", fitStrategy
      return __BAD__
    else:
      procLog.info('Setting fit strategy to: %r', fitStrategy)
      theFitter.SetFitStrategy(fitStrategy)
    if fitStrategy==1:
      theFitter.SetCut("mjj > 100 && mjj < 140")

    if opt.verb>0:
      procLog.info('Using Double Sided Crystal Ball as Signal Model: %r', doDoubleSidedCB)
    if doDoubleSidedCB: theFitter.UseDoubleSidedCB()

    LTDir = LTDir_type.replace('TYPE', t)

    mass = 125.0
    if opt.verb>0:
      procLog.info('Signal File:\n'+LTDir+thisSignalFile)

    if not os.path.isfile(LTDir+thisSignalFile):
      print 'File does not exist: ', LTDir+thisSignalFile
      return __BAD__

    openStatus = theFitter.AddSigData( mass, str(LTDir+thisSignalFile))
    if openStatus==-1:
      procLog.error('There is a problem with openStatus')
      return __BAD__
    procLog.info("\t SIGNAL ADDED. Node=%r, GridPoint=%r, type=%r", point,NRgridPoint,t)
    if opt.verb>0: p1 = printTime(start, start, procLog)

    createDir(newFolder+'/workspaces',procLog)
    createDir(newFolder+'/datacards',procLog)

    theFitter.SigModelFit(mass)
    procLog.info("\t SIGNAL FITTED. Node=%r, GridPoint=%r, type=%r", point,NRgridPoint,t)
    if opt.verb>0: p2 = printTime(p1,start, procLog)

    fileBaseName = "hhbbgg.mH"+str(mass)[0:3]+"_13TeV"
    theFitter.MakeSigWS( fileBaseName)
    procLog.info("\t SIGNAL'S WORKSPACE DONE. Node=%r, GridPoint=%r, type=%r", point,NRgridPoint,t)
    if opt.verb>0: p3 = printTime(p2,start,procLog)

    if drawSignalFit:
      theFitter.MakePlots( mass)
      procLog.info("\t SIGNAL'S PLOT DONE. Node=%r, GridPoint=%r, type=%r", point,NRgridPoint,t)
      if opt.verb>0: p4 = printTime(p3,start,procLog)

    if addHiggs:
      higTypes = Params['higgs']['type']
      if opt.verb>1:
        procLog.debug('Here will add SM Higgs contributions \n Higgs types: '+ pformat(higTypes))
      higgsExp = {}
      for iht,HT in enumerate(higTypes):
        higgsExp[HT] = [0,0]
        ht = higTypes[HT]
        if opt.verb>1:
          procLog.debug('iht = %r, ht = %r, HT = %r' % (iht,ht,HT))
        higFileName = str(LTDir)+"/LT_output_"+str(ht)+".root"

        exphig = theFitter.AddHigData( mass,higFileName,iht, str(HT))
        theFitter.HigModelFit(mass,iht, str(HT) )
        theFitter.MakeHigWS(str('hhbbgg.')+str(HT), iht, str(HT))

        higgsExp[HT] = [exphig[0], exphig[1]]

      if opt.verb>1:
        procLog.debug("Done SM Higgs bzz")

    ddata = str(LTDir + '/LT_'+dataName+'.root')
    ddata = ddata.replace("%MASS%", str(point))

    theFitter.AddBkgData(ddata)
    procLog.info("\t BKG ADDED. Node=%r, GridPoint=%r, type=%r, data file=%s", point,NRgridPoint,t,ddata)
    if opt.verb>0: p4 = printTime(p3,start, procLog)

    if opt.verb>1:
      theFitter.PrintWorkspace();

    fitresults = theFitter.BkgModelFit( doBands, addHiggs)
    procLog.info("\t BKG FITTED. Node=%r, GridPoint=%r, type=%r", point,NRgridPoint,t)
    if opt.verb>0: p5 = printTime(p4,start,procLog)
    if fitresults==None:
      procLog.error("PROBLEM with fitresults !!")
      return __BAD__

    if opt.verb>1:
      procLog.debug("\n Fit Results: \n\n"+pformat(fitresults.Print()))

    wsFileBkgName = "hhbbgg.inputbkg_13TeV"
    theFitter.MakeBkgWS( wsFileBkgName);
    procLog.info("\t BKG'S WORKSPACE DONE. Node=%r, GridPoint=%r, type=%r", point,NRgridPoint,t)
    if opt.verb>0: p6 = printTime(p5,start,procLog)

    ##do fits for bias study, if needed

    procLog.info("\t Making Fits and WS for Bias Study? * %r *  Node=%r, GridPoint=%r, type=%r", doBias, point,NRgridPoint,t)
    if doBias:
      createDir(newFolder+'/bias',procLog)
      theFitter.MakeFitsForBias(str(os.getenv("CMSSW_BASE")+'/src/HiggsAnalysis/bbggLimits/'+biasConfig), str(newFolder+'/bias/biasWorkspace.root'))

    procLog.info("\t BIAS FITS DONE. Node=%r, GridPoint=%r, type=%r", point,NRgridPoint,t)
    if opt.verb>0: p7 = printTime(p6,start,procLog)

    # print PID, "IM HERE"

    sigExp = []
    bkgObs = []
    for cc in xrange(NCAT):
      sigExp.append(-1)
      bkgObs.append(-1)

    sigExpStr = ''
    bkgObsStr = ''
    for cc in xrange(NCAT):
      sigExp[cc] = theFitter.GetSigExpectedCats(cc);
      if not doBlinding:
        bkgObs[cc] = theFitter.GetObservedCats(cc);

      sigExpStr += "%f" % sigExp[cc]
      bkgObsStr += "%f" % bkgObs[cc]
      if cc < NCAT-1:
        sigExpStr += ","
        bkgObsStr += ","

    # print PID, "IM HERE2"

    # Make datacards:
    myLoc = os.getenv("CMSSW_BASE") + '/src/HiggsAnalysis/bbggLimits/'+newFolder
    if isRes==1:
      DataCardMaker(str(myLoc), NCAT, sigExpStr, bkgObsStr, isRes)
    elif addHiggs == 0:
      DataCardMaker(str(myLoc), NCAT, sigExpStr, bkgObsStr, isRes, t)
    else:
      DataCardMaker_wHiggs(str(myLoc), NCAT, sigExpStr, bkgObsStr, higgsExp, t)

    procLog.info("\t DATACARD DONE. Node/Mass=%r, GridPoint=%r, type=%r", point,NRgridPoint,t)
    if opt.verb>0: p8 = printTime(p7,start,procLog)

    # print PID, "IM HERE3"
    # Limits by type:
    if doSingleLimit or isRes:
      # print PID, "IM HERE4"
      if doCombine:
        # print PID, "IM HERE5"
        if Combinelxbatch:
          # print PID, "IM HERE6"
          runCombineOnLXBatch(myLoc+"/datacards/", doBlinding, procLog, combineOpt, t+Label)
        else:
          # print PID, "IM HERE7"
          runCombine(newFolder+"/datacards/", doBlinding, procLog, combineOpt, Combinelxbatch, t+Label)



    # End of loop over Types
  ## <-- indent

  #Nonresonant data card massaging...
  if not isRes:
    # Here we merge datacards of all categories (in this case two)
    cardsToMerge = ''
    for t in signalTypes:
      cardsToMerge += baseFolder+'/'+t+Label+'/datacards/hhbbgg_13TeV_DataCard.txt '

    newDir = baseFolder+'/CombinedCard'+Label
    createDir(newDir,procLog)

    combCard = newDir+'/hhbbgg_13TeV_DataCard.txt'
    os.system("combineCards.py "+ cardsToMerge + " > " + combCard)

    # Now we actually need to fix the combined card
    for t in signalTypes:
      strReplace = baseFolder+'/'+t+Label+'/datacards/'+os.getenv("CMSSW_BASE")+'/src/HiggsAnalysis/bbggLimits/'
      os.system("sed -i 's|"+strReplace+"||g' "+combCard)
      procLog.info("String to replace: "+ strReplace)

    if opt.analyticalRW:
      # Make another datacard, with entries for kt-reweighting of single Higgs background
      ktScaled = newDir+'/kt_scaled_hhbbgg_13TeV_DataCard.txt'
      copy2(combCard, ktScaled)
      with open(ktScaled, "a") as myfile:
        HigScale = opt.ARW_kt*opt.ARW_kt
        appendString = '\n \n'
        appendString+= 'h_norm_ggh rateParam * ggh %.4f \n' % HigScale
        appendString+= 'nuisance edit freeze h_norm_ggh \n'
        appendString+= 'h_norm_tth rateParam * tth %.4f \n' % HigScale
        appendString+= 'nuisance edit freeze h_norm_tth \n'
        myfile.write(appendString)
        
    if doCombine:
      if Combinelxbatch:
        # print PID, "IM HERE6"
        myLoc = os.getenv("CMSSW_BASE") + '/src/HiggsAnalysis/bbggLimits/' + newDir
        runCombineOnLXBatch(myLoc+"/", doBlinding, procLog, combineOpt, "CombinedCard"+Label)
      else:
        for method in [1,2,3]:
          # If options 1,2,3 are provided - run the corresponding limits:
          # 1 - asymptotic, 2 - asymptotic with adaptive Asimov option; 3 - hybridnew
          # If combineOpt==4: run all of them at once
          if combineOpt!=4 and method!=combineOpt: continue
          try:
            combStatus = runCombine(newDir, doBlinding, procLog, method, Combinelxbatch, Label, scaleSingleHiggs)
          except:
            return __BAD__
          procLog.info("\t COMBINE with Option=%r is DONE. Node=%r, GridPoint=%r, type=%r \n \t Status = %r",
                       method, point,NRgridPoint,t, combStatus)
          if combStatus!=0:
            procLog.error('Combine failed...')
            # return __BAD__


  if opt.verb>0: p9 = printTime(p8,start,procLog)
  os.remove(pidfile)
  # procLog.handlers = []
  procLog.info('This process has ended. Label=%r', Label)
  return 42
Code Example #33
                             attrs=attr)
            if hasattr(record, 'highlight') and record.highlight:
                record.msg = colored(record.msg,
                                     color,
                                     attrs=['bold', 'reverse'])
        else:
            prefix = str('[' + record.levelname + ']').ljust(18)

        record.msg = prefix + record.msg

        logging.StreamHandler.emit(self, record)


class ConsoleLogger(logging.Logger):
    """Log to the console with some color decorations."""
    def __init__(self, name):
        super(ConsoleLogger, self).__init__(name)
        self.setLevel(logging.DEBUG)
        self.addHandler(ColorStreamHandler(sys.stdout))


# Save the current logger
default_logger_class = logging.getLoggerClass()

# Console logging for CLI
logging.setLoggerClass(ConsoleLogger)
console = logging.getLogger('zap')

# Restore the previous logger
logging.setLoggerClass(default_logger_class)
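
Only the 'zap' logger was created while ConsoleLogger was installed; loggers created after the restore get the saved default class. A brief usage sketch:

import logging

console.info('colored, decorated output on stdout')
assert isinstance(console, ConsoleLogger)
assert not isinstance(logging.getLogger('elsewhere'), ConsoleLogger)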
Code Example #34
File: log.py Project: msimonin/execo
# Copyright 2009-2017 INRIA Rhone-Alpes, Service Experimentation et
# Developpement
#
# This file is part of Execo.
#
# Execo is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Execo is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Execo.  If not, see <http://www.gnu.org/licenses/>

import logging
import execo.log  #IGNORE:W0611 #@PydevCodeAnalysisIgnore #@UnusedImport
__default_logger = logging.getLoggerClass()
logging.setLoggerClass(execo.log.Logger)
logger = logging.getLogger("execo.engine")
logging.setLoggerClass(__default_logger)
Code Example #35
File: test_logging.py Project: Ginobilium/dax
 def test_get_logger_class(self):
     dax.util.logging.decorate_logger_class(logging.getLoggerClass())
     self.assertTrue(is_rpc_logger(logging.getLoggerClass()))
Code Example #36
class TraceLogger(logging.getLoggerClass()):
    def trace(self, msg, *args, **kwargs):
        self.log(TRACE, msg, *args, **kwargs)
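
The class assumes a module-level TRACE constant; a wiring sketch (the value 5 matches the convention in example #23 but is an assumption here):

import logging

TRACE = 5                                # assumed value, as in example #23
logging.addLevelName(TRACE, 'TRACE')
logging.setLoggerClass(TraceLogger)
logging.basicConfig(level=TRACE)

logging.getLogger('app.trace').trace('fine-grained message')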
Code Example #37
class Logger(logging.getLoggerClass()):
    """
    Core logger for all apps and scripts. This logger adds the following features
    to the default:

    - Colors!
    - Cooler line format
    - Function is included in the log format
    - File log
    - File rotation every 5M with 100 files kept
    """

    LOGDIR = "/var/log"
    """Log directory, it will be created if it does not exist"""

    MAXBYTES = 10000000
    """Default maximum file size allowed, after that rotation is taking place"""

    BACKUPCOUNT = 20
    """Default number of backup files to keep"""

    DATEFORMAT = "%d-%m-%Y %H:%M:%S"
    """Date output format"""

    LOGFORMAT = "%(asctime)s.%(msecs)03d %(process)s:%(thread)u %(levelname)-8s %(module)15.15s %(lineno)-4s: %(message)s"
    """Default log format for all handlers. This can change in :py:meth:`init`"""

    @classmethod
    def _chkdir(cls):
        """
        Check that the logging dir exists and create it if not
        """
        if not os.path.isdir(cls.LOGDIR):
            try:
                mkdir_p(cls.LOGDIR)
            except BaseException:
                # Do not log... logging here enables the
                # root logger...
                pass

    @classmethod
    def addFileLogger(cls, specs):
        """
        Add a file logger with the given specs. Specs::

            {
                'filename': Filename (under LOGDIR)
                'level': Logging level for this file
                'format': [ 'json' | 'console' | 'default' ] # TODO: CSV
                'backupCount': Number of files to keep
                'maxBytes': Maximum file size
            }

        If format is set to "console", then ColorFormatter options are also
        supported.
        """

        root = logging.getLogger()

        if "filename" not in specs:
            raise RuntimeError('"filename" missing from file specs... skipping!')

        if "level" not in specs:
            specs["level"] = logging.INFO

        filePath = cls.LOGDIR + "/" + specs["filename"]

        # Test that we can access the file
        try:
            # Make sure the file is there
            fhandle = open(filePath, "a")
            fhandle.close()
        except BaseException:
            logging.error(
                "NOT Initializing File Logging: File-system permissions error: \n\t%s"
                % filePath
            )
            # No point doing anything else! BUT do not raise exception
            return

        # If we just created it, set correct permissions so that it is group accessible.
        # This often crashes apps in multiuser environments
        try:
            # Correct permissions
            perms = (
                stat.S_IREAD
                | stat.S_IWRITE
                | stat.S_IWOTH
                | stat.S_IROTH
                | stat.S_IWGRP
                | stat.S_IRGRP
            )
            os.chmod(filePath, perms)
        except Exception as e:
            # Error here can appear if another user owns the file...
            pass

        # Register the rotating file handler
        rotFileH = logging.handlers.RotatingFileHandler(
            filePath,
            backupCount=specs.get("backupCount", cls.BACKUPCOUNT),
            maxBytes=specs.get("maxBytes", cls.MAXBYTES),
        )

        fmt = specs.get("fmt", cls.USER_LOGFORMAT)
        datefmt = specs.get("datefmt", cls.USER_DATEFORMAT)

        formatter = logging.Formatter(fmt, datefmt=datefmt)
        if "format" not in specs or specs["format"] == "console":
            specs = ColorFormatter.parseSpecs(specs, ColorFormatter.FILEDEFAULTS)
            formatter = ColorFormatter(
                fmt,
                datefmt=datefmt,
                color=False,
                pretty=specs["pretty"],
                splitLines=specs["splitLines"],
            )

        elif specs["format"] == "default":
            pass
        elif specs["format"] == "json":
            formatter = JSONFormatter(specs.get("fields", JSONFormatter.FIELDS))

        rotFileH.setFormatter(formatter)
        rotFileH.setLevel(specs["level"])
        rotFileH.propagate = False
        root.addHandler(rotFileH)

    @classmethod
    def init(
        cls,
        folder="/var/log/lazylog",
        termSpecs={},
        fileSpecs=None,
        fmt=None,
        datefmt=None,
    ):
        """
        Initialize logging based on the requested fileName. This
        will create one logger (global) that writes in both file
        and terminal

        :param str fileSpecs: A dict with 'filename', 'level', etc. See addFileLogger
                              for details
        :param int termSpecs: A dict with boolean values for 'color' and 'splitLines'
        """

        logging.setLoggerClass(cls)

        cls.LOGDIR = folder

        if fmt is None:
            fmt = cls.LOGFORMAT

        if datefmt is None:
            datefmt = cls.DATEFORMAT

        # Store those statically as the last user supplied format. This will be
        # used from now on if format is not specified
        cls.USER_LOGFORMAT = fmt
        cls.USER_DATEFORMAT = datefmt

        # Check folder and create if needed
        cls._chkdir()

        # Merge with defaults...
        termSpecs = ColorFormatter.parseSpecs(termSpecs, ColorFormatter.TERMDEFAULTS)

        # Disable default logger
        root = logging.getLogger()
        root.setLevel(logging.DEBUG)
        root.handlers = []

        # Console logger
        console = logging.StreamHandler()
        console.setLevel(termSpecs["level"])
        if "format" not in termSpecs:
            formatter = ColorFormatter(
                fmt,
                datefmt=datefmt,
                color=termSpecs["color"],
                splitLines=termSpecs["splitLines"],
                pretty=termSpecs["pretty"],
            )
        elif termSpecs["format"] == "json":
            formatter = JSONFormatter(termSpecs.get("fields", JSONFormatter.FIELDS))
        console.setFormatter(formatter)
        console.propagate = False
        root.addHandler(console)

        # File logger
        if fileSpecs is not None:
            for specs in fileSpecs:
                cls.addFileLogger(specs)

    @staticmethod
    def logFun():
        """Print one message in each level to demo the colours"""
        logging.debug("All set in logger!")
        logging.info("This is how INFO lines are")
        logging.warning("Warnings here")
        logging.critical("Critical stuff appear like this")
        logging.error("Look out for ERRORs")

    @staticmethod
    def setConsoleLevel(level):
        """
        In this logger, by convention, handler 0 is always the console handler.
        """
        logging.getLogger().handlers[0].setLevel(level)

    @staticmethod
    def setFileLevel(filenum, level):
        """
        Set a file logger log level. Filenum is 1,2,3,.. in the order the
        files have been added.
        """
        if len(logging.getLogger().handlers) < filenum + 1:
            return

        logging.getLogger().handlers[filenum].setLevel(level)

    @staticmethod
    def mockHandler(index):
        if len(logging.getLogger().handlers) < index + 1:
            return

        h = logging.getLogger().handlers[index]

        h.old_stream = h.stream
        h.stream = StringIO()
        return h.stream

    @staticmethod
    def restoreHandler(index):
        if len(logging.getLogger().handlers) < index + 1:
            return

        h = logging.getLogger().handlers[index]

        if not hasattr(h, "old_stream"):
            return

        h.stream = h.old_stream
        del h.old_stream
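
A minimal initialization sketch for the class above; it assumes the ColorFormatter and JSONFormatter helpers it references are importable, and the folder and levels are illustrative:

import logging

Logger.init(
    folder='/tmp/lazylog',                                   # illustrative directory
    termSpecs={'color': True, 'splitLines': True, 'level': logging.DEBUG},
    fileSpecs=[{'filename': 'app.log', 'level': logging.INFO}],
)
Logger.logFun()                            # one demo message per level
Logger.setConsoleLevel(logging.WARNING)    # handler 0 is the console handler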
Code Example #38
File: service_setup.py Project: tzmvp/st2
def setup(service,
          config,
          setup_db=True,
          register_mq_exchanges=True,
          register_signal_handlers=True,
          register_internal_trigger_types=False,
          run_migrations=True,
          register_runners=True,
          config_args=None):
    """
    Common setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Set log level for all the loggers to DEBUG if --debug flag is present or
       if system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Register internal trigger types
    7. Register all the runners which are installed inside StackStorm virtualenv.

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    """
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)

    # Parse args to setup config.
    if config_args:
        config.parse_args(config_args)
    else:
        config.parse_args()

    version = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1],
                            sys.version_info[2])
    LOG.debug('Using Python: %s (%s)' % (version, sys.executable))

    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))

    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)

    LOG.debug('Using logging config: %s', logging_config_path)

    is_debug_enabled = (cfg.CONF.debug or cfg.CONF.system.debug)

    try:
        logging.setup(logging_config_path,
                      redirect_stderr=cfg.CONF.log.redirect_stderr,
                      excludes=cfg.CONF.log.excludes)
    except KeyError as e:
        tb_msg = traceback.format_exc()
        if 'log.setLevel' in tb_msg:
            msg = 'Invalid log level selected. Log level names need to be all uppercase.'
            msg += '\n\n' + getattr(e, 'message', str(e))
            raise KeyError(msg)
        else:
            raise e

    exclude_log_levels = [stdlib_logging.AUDIT]
    handlers = stdlib_logging.getLoggerClass().manager.root.handlers

    for handler in handlers:
        # If log level is not set to DEBUG we filter out "AUDIT" log messages. This way we avoid
        # duplicate "AUDIT" messages in production deployments where default service log level is
        # set to "INFO" and we already log messages with level AUDIT to a special dedicated log
        # file.
        ignore_audit_log_messages = (handler.level >= stdlib_logging.INFO
                                     and handler.level < stdlib_logging.AUDIT)
        if not is_debug_enabled and ignore_audit_log_messages:
            LOG.debug(
                'Excluding log messages with level "AUDIT" for handler "%s"' %
                (handler))
            handler.addFilter(LogLevelFilter(log_levels=exclude_log_levels))

    if not is_debug_enabled:
        # NOTE: statsd logger logs everything by default under INFO so we ignore those log
        # messages unless verbose / debug mode is used
        logging.ignore_statsd_log_messages()

    logging.ignore_lib2to3_log_messages()

    if is_debug_enabled:
        enable_debugging()
    else:
        # Add global ignore filters, such as "heartbeat_tick" messages which are logged every 2
        # ms which cause too much noise
        add_global_filters_for_all_loggers()

    if cfg.CONF.profile:
        enable_profiling()

    # All other setup which requires config to be parsed and logging to be correctly setup.
    if setup_db:
        db_setup()

    if register_mq_exchanges:
        register_exchanges_with_retry()

    if register_signal_handlers:
        register_common_signal_handlers()

    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()

    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()

    if register_runners:
        runnersregistrar.register_runners()

    register_kombu_serializers()

    metrics_initialize()
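
A hedged sketch of how a StackStorm service entry point might call this function; the config import path and service name are illustrative, not taken from this excerpt:

from st2common import config  # assumption: the service's config wrapper module

setup(service='api',
      config=config,
      setup_db=True,
      register_mq_exchanges=True,
      register_signal_handlers=True,
      register_internal_trigger_types=False)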
Code example #39
class ChangeLogger(logging.getLoggerClass()):
    def change(self, msg, *args, **kwargs):
        if self.isEnabledFor(logging.CHANGE):
            self._log(logging.CHANGE, msg, args, **kwargs)
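
logging.CHANGE is not a standard level, so ChangeLogger only works once the level has been registered; a minimal wiring sketch, where the value 25 is an assumed choice between INFO and WARNING:

import logging

logging.CHANGE = 25                            # assumed value, between INFO (20) and WARNING (30)
logging.addLevelName(logging.CHANGE, "CHANGE")
logging.setLoggerClass(ChangeLogger)           # loggers created after this expose .change()

logging.basicConfig(level=logging.CHANGE)
logging.getLogger("demo").change("configuration value updated")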
Code example #40
File: networking.py Project: Vikash84/barapost
def wait_for_align(rid, rtoe, pack_to_send, filename):
    # Function waits until the BLAST server completes the request.
    #
    # :param rid: Request ID to wait for;
    # :type rid: str;
    # :param rtoe: time in seconds estimated by BLAST server needed to accomplish the request;
    # :type rtoe: int;
    # :param pack_to_send: current packet (id) number to send;
    # :type pack_to_send: int;
    # :param filename: basename of current FASTA file;
    # :type filename: str
    #
    # Returns XML response ('str').

    print()
    print("Requesting for current query status. Request ID: {}".format(rid))
    print(" `{}`; Submission #{}".format(filename, pack_to_send[0]))
    log_info("Requesting for current query status.")
    log_info("Request ID: {}; `{}`; Submission #{}".format(
        rid,
        filename,
        pack_to_send[0],
    ))
    # RTOE can be zero at the very beginning of resumption
    if rtoe > 0:

        printlog_info_time(
            "BLAST server estimates that alignment will be accomplished in {} seconds"
            .format(rtoe))
        printlog_info_time(
            "Waiting for {}+3 (+3 extra) seconds...".format(rtoe))
        # Server might be wrong -- we will give it 3 extra seconds
        sleep(rtoe + 3)
        printlog_info_time(
            "{} seconds have passed. Checking if alignment is accomplished...".
            format(rtoe + 3))
    # end if

    server = "blast.ncbi.nlm.nih.gov"
    wait_url = "/blast/Blast.cgi?CMD=Get&FORMAT_OBJECT=SearchInfo&RID=" + rid

    whtspc_len = 6 + len("(requesting)")

    while True:
        resp_content = lingering_https_get_request(server, wait_url,
                                                   "BLAST response")

        # if server asks to wait
        if "Status=WAITING" in resp_content:
            printn("\r{} - The request is being processed. Waiting{}{}".format(
                getwt(), ' ' * whtspc_len, "\033[%dD" % whtspc_len))
            # print a dot every 10 seconds
            for i in range(1, 7):
                sleep(10)
                printn(
                    "\r{} - The request is being processed. Waiting{}".format(
                        getwt(), '.' * i))
            # end for
            printn("(requesting)")
            continue
        elif "Status=FAILED" in resp_content:
            # if job failed
            print()
            printlog_info_time("Job failed\a\n")
            printlog_info("Resending this packet.")
            return None, BlastError(2)
        elif "Status=UNKNOWN" in resp_content:
            # if job expired
            print()
            printlog_info_time("Job expired\a\n")
            printlog_info("Resending this packet.")
            return None, BlastError(1)
        # if results are ready
        elif "Status=READY" in resp_content:
            print()
            printlog_info("Result for query `{}` #{} is ready!".format(
                filename, pack_to_send[0]))
            # if there are hits
            if "ThereAreHits=yes" in resp_content:
                for i in range(15, 0, -5):
                    print('-' * i)
                # end for
                print("-\nRetrieving results...")

                # Retrieve human-readable text and put it into result directory
                retrieve_text_url = "/Blast.cgi?CMD=Get&FORMAT_TYPE=Text&DESCRIPTIONS=1&ALIGNMENTS=1&RID=" + rid
                txt_align_res = lingering_https_get_request(
                    server, retrieve_text_url,
                    "text version of BLAST response")

                # Count already existing plain text files in outdir:
                is_txt_response = lambda f: re.search(
                    r"prober_blast_response_[0-9]+\.txt", f) is not None
                # Infer the output directory from the root log file's location
                outdir_path = os.path.dirname(
                    logging.getLoggerClass().root.handlers[0].baseFilename)
                response_num = len(
                    tuple(filter(is_txt_response, os.listdir(outdir_path))))

                # Current txt response file will have number `response_num+1`
                txt_hpath = os.path.join(
                    outdir_path,
                    "prober_blast_response_{}.txt".format(response_num + 1))
                # Write text result for a human to read
                with open(txt_hpath, 'w') as txt_file:
                    txt_file.write(txt_align_res)
                # end with
            elif "ThereAreHits=no" in resp_content:
                # if there are no hits
                printlog_info_time("There are no hits. It happens.\n")
            else:
                # the job has probably failed if execution reaches here
                print()
                printlog_info_time("Job failed\a\n")
                printlog_info("Resending this packet.")
                return None, BlastError(2)
            # end if
            break
        # end if
        # Execution should not reach here
        printlog_error_time(
            "Fatal error (-122). Please contact the developer.\a\n")
        platf_depend_exit(-122)
    # end while

    # Retrieve XML result
    retrieve_xml_url = "/Blast.cgi?CMD=Get&FORMAT_TYPE=XML&ALIGNMENTS=1&RID=" + rid
    xml_text = lingering_https_get_request(server, retrieve_xml_url,
                                           "XML BLAST response")

    if "Bad Gateway" in xml_text:
        print()
        printlog_info_time("Bad Gateway. Data from last packet has been lost.")
        printlog_info("Resending this packet.")
        return None, BlastError(1)

    elif "Status=FAILED" in xml_text:
        print()
        printlog_info_time("BLAST error: request failed")
        printlog_info("Resending this packet.")
        return None, BlastError(2)

    elif "to start it again" in xml_text:
        print()
        printlog_info_time("BLAST error")
        printlog_info("Resending this packet.")
        return None, BlastError(2)

    elif "[blastsrv4.REAL]" in xml_text:
        blastsrv4_match = re.search(r"(\[blastsrv4\.REAL\].*\))", xml_text)
        blastsrv4_str = "" if blastsrv4_match is None else ": {}".format(
            blastsrv4_match.group(1))
        printlog_info_time("BLAST server error{}".format(blastsrv4_str))
        # Error code 2 indicates that we need to split the packet and resubmit
        return None, BlastError(2)
    # end if

    return xml_text, BlastError(0)
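
A hedged usage sketch with illustrative values; the request ID and time estimate would come from the submission step elsewhere in barapost, and resend_packet is a hypothetical caller-side helper:

# pack_to_send is a one-element list used as a mutable submission counter
xml_text, blast_error = wait_for_align("ABC123XYZ", 30, [1], "reads.fasta")
if xml_text is None:
    resend_packet()  # hypothetical: a non-zero BlastError asks the caller to resend or split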
Code example #41
File: test_stdlib.py Project: if-fi/structlog
 def setup_method(self, method):
     """
     The stdlib logger factory modifies global state to fix caller
     identification.
     """
     self.original_logger = logging.getLoggerClass()
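
The matching teardown is not shown in this excerpt; it would undo the global modification by restoring the saved class, roughly:

 def teardown_method(self, method):
     """
     Restore the logger class saved in setup_method so the factory's
     global change does not leak into other tests.
     """
     logging.setLoggerClass(self.original_logger)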
Code example #42
File: test_stdlib.py Project: if-fi/structlog
 def test_sets_correct_logger(self):
     assert logging.getLoggerClass() is logging.Logger
     LoggerFactory()
     assert logging.getLoggerClass() is _FixedFindCallerLogger
Code example #43
 def dbnd_airflow_handler(self):
     return DbndAirflowHandler(
         logger=Mock(logging.getLoggerClass()),
         local_base="/logger_base",
         log_file_name_factory=airflow_log_factory,
     )
Code example #44
def _parse_args_and_run_subcommand(argv):
    parser = ArgumentParser(
        prog="anaconda-project",
        description="Actions on projects (runnable projects).")

    subparsers = parser.add_subparsers(help="Sub-commands")

    parser.add_argument('-v', '--version', action='version', version=version)
    parser.add_argument('--verbose',
                        action='store_true',
                        default=False,
                        help="show verbose debugging details")

    def add_directory_arg(preset):
        preset.add_argument(
            '--directory',
            metavar='PROJECT_DIR',
            default='.',
            help=
            "Project directory containing anaconda-project.yml (defaults to current directory)"
        )

    def add_env_spec_arg(preset):
        preset.add_argument(
            '--env-spec',
            metavar='ENVIRONMENT_SPEC_NAME',
            default=None,
            action='store',
            help="An environment spec name from anaconda-project.yml")

    def add_prepare_args(preset, include_command=True):
        add_directory_arg(preset)
        add_env_spec_arg(preset)
        all_supported_modes = list(_all_ui_modes)
        # we don't support "ask about every single thing" mode yet.
        all_supported_modes.remove(UI_MODE_TEXT_ASK_QUESTIONS)
        preset.add_argument('--mode',
                            metavar='MODE',
                            default=UI_MODE_TEXT_DEVELOPMENT_DEFAULTS_OR_ASK,
                            choices=_all_ui_modes,
                            action='store',
                            help="One of " + ", ".join(_all_ui_modes))
        if include_command:
            preset.add_argument(
                '--command',
                metavar='COMMAND_NAME',
                default=None,
                action='store',
                help=
                "A command name from anaconda-project.yml (env spec for this command will be used)"
            )

    def add_env_spec_name_arg(preset, required):
        preset.add_argument(
            '-n',
            '--name',
            metavar='ENVIRONMENT_SPEC_NAME',
            required=required,
            action='store',
            help="Name of the environment spec from anaconda-project.yml")

    preset = subparsers.add_parser(
        'init',
        help="Initialize a directory with default project configuration")
    add_directory_arg(preset)
    preset.add_argument(
        '--with-anaconda-package',
        action='store_true',
        help="Add the 'anaconda' metapackage to the packages list.",
        default=None)
    preset.add_argument(
        '--empty-environment',
        action='store_true',
        help=
        "[DEPRECATED] Do not add the default package set to the environment.",
        default=None)
    preset.add_argument('-y',
                        '--yes',
                        action='store_true',
                        help="Assume yes to all confirmation prompts",
                        default=None)
    preset.set_defaults(main=init.main)

    preset = subparsers.add_parser(
        'run', help="Run the project, setting up requirements first")
    add_prepare_args(preset, include_command=False)
    preset.add_argument('command',
                        metavar='COMMAND_NAME',
                        default=None,
                        nargs='?',
                        help="A command name from anaconda-project.yml")
    preset.add_argument('extra_args_for_command',
                        metavar='EXTRA_ARGS_FOR_COMMAND',
                        default=None,
                        nargs=REMAINDER)
    preset.set_defaults(main=run.main)

    preset = subparsers.add_parser(
        'prepare',
        help="Set up the project requirements, but does not run the project")
    preset.add_argument('--all',
                        action='store_true',
                        help="Prepare all environments",
                        default=None)
    preset.add_argument('--refresh',
                        action='store_true',
                        help='Remove and recreate the environment',
                        default=None)
    add_prepare_args(preset)
    preset.set_defaults(main=prepare.main)

    preset = subparsers.add_parser(
        'clean',
        help=
        "Removes generated state (stops services, deletes environment files, etc)"
    )
    add_directory_arg(preset)
    preset.set_defaults(main=clean.main)

    if not anaconda_project._beta_test_mode:
        preset = subparsers.add_parser(
            'activate',
            help=
            "Set up the project and output shell export commands reflecting the setup"
        )
        add_prepare_args(preset)
        preset.set_defaults(main=activate.main)

    preset = subparsers.add_parser(
        'archive',
        help=
        "Create a .zip, .tar.gz, or .tar.bz2 archive with project files in it")
    add_directory_arg(preset)
    preset.add_argument('filename', metavar='ARCHIVE_FILENAME')
    preset.add_argument('--pack-envs',
                        action='store_true',
                        help='Experimental: Package env_specs into the archive'
                        ' using conda-pack')

    preset.set_defaults(main=archive.main)

    preset = subparsers.add_parser(
        'unarchive',
        help=
        "Unpack a .zip, .tar.gz, or .tar.bz2 archive with project files in it")
    preset.add_argument('filename', metavar='ARCHIVE_FILENAME')
    preset.add_argument('directory',
                        metavar='DESTINATION_DIRECTORY',
                        default=None,
                        nargs='?')

    preset.set_defaults(main=unarchive.main)

    preset = subparsers.add_parser('upload',
                                   help="Upload the project to Anaconda Cloud")
    add_directory_arg(preset)
    preset.add_argument('-p',
                        '--private',
                        action='store_true',
                        help="Upload a private project",
                        default=None)
    preset.add_argument('-s',
                        '--site',
                        metavar='SITE',
                        help='Select site to use')
    preset.add_argument(
        '-t',
        '--token',
        metavar='TOKEN',
        help='Auth token or a path to a file containing a token')
    preset.add_argument('-u',
                        '--user',
                        metavar='USERNAME',
                        help='User account, defaults to the current user')
    preset.add_argument(
        '--suffix',
        metavar='SUFFIX',
        help='Project archive suffix (.tar.gz, .tar.bz2, .zip)',
        default='.tar.bz2',
        choices=['.tar.gz', '.tar.bz2', '.zip'])
    preset.set_defaults(main=upload.main)

    preset = subparsers.add_parser(
        'download', help="Download the project from Anaconda Cloud")
    add_directory_arg(preset)
    preset.add_argument(
        'project',
        help='The project to download as <username>/<projectname>. '
        'If <projectname> has spaces, enclose everything in quotes: '
        '"<username>/<project name>". If specified as <projectname>, '
        'then the logged-in username is used.')
    preset.add_argument('--no-unpack',
                        action='store_true',
                        help='Do not unpack the project archive.')
    preset.add_argument(
        '--parent_dir',
        default=None,
        help=
        'Download archive to specific directory, otherwise downloaded to current working directory.'
    )
    preset.add_argument('-s',
                        '--site',
                        metavar='SITE',
                        help='Select site to use')
    preset.add_argument(
        '-t',
        '--token',
        metavar='TOKEN',
        help='Auth token or a path to a file containing a token')
    preset.add_argument('-u',
                        '--user',
                        metavar='USERNAME',
                        help='User account, defaults to the current user')
    preset.set_defaults(main=download.main)

    preset = subparsers.add_parser(
        'dockerize', help="Build a docker image of the Anaconda Project.")
    add_directory_arg(preset)
    preset.add_argument(
        '-t',
        '--tag',
        default=None,
        help='Tag of the output docker image in the format name:tag. '
        'Default: "<project-name>:latest", where <project-name> is taken from '
        'the name tag in the anaconda-project.yml file.')
    preset.add_argument(
        '--command',
        default='default',
        help=
        'Select the command to run. If unspecified the "default" command is run.\nThe default command '
        'is defined as either the command named "default" (if any) or (otherwise)  '
        'the first command specified in the anaconda-project.yml file.')
    preset.add_argument('--builder-image',
                        default='{}:latest'.format(DEFAULT_BUILDER_IMAGE),
                        help='The s2i builder image')
    preset.add_argument(
        'build_args',
        default=None,
        nargs="*",
        help='Optional arguments for the s2i build command. '
        'See the output of "s2i build --help" for the available arguments. '
        'It is recommended to include a -- separator before supplying these arguments.'
    )
    preset.set_defaults(main=dockerize.main)

    preset = subparsers.add_parser(
        'add-variable',
        help="Add a required environment variable to the project")
    add_env_spec_arg(preset)
    preset.add_argument('vars_to_add',
                        metavar='VARS_TO_ADD',
                        default=None,
                        nargs=REMAINDER)
    preset.add_argument('--default',
                        metavar='DEFAULT_VALUE',
                        default=None,
                        help='Default value if environment variable is unset')
    add_directory_arg(preset)
    preset.set_defaults(main=variable_commands.main_add)

    preset = subparsers.add_parser(
        'remove-variable',
        help="Remove an environment variable from the project")
    add_env_spec_arg(preset)
    add_directory_arg(preset)
    preset.add_argument('vars_to_remove',
                        metavar='VARS_TO_REMOVE',
                        default=None,
                        nargs=REMAINDER)
    preset.set_defaults(main=variable_commands.main_remove)

    preset = subparsers.add_parser('list-variables',
                                   help="List all variables on the project")
    add_env_spec_arg(preset)
    add_directory_arg(preset)
    preset.set_defaults(main=variable_commands.main_list)

    preset = subparsers.add_parser(
        'set-variable',
        help="Set an environment variable value in anaconda-project-local.yml")
    add_env_spec_arg(preset)
    preset.add_argument('vars_and_values',
                        metavar='VARS_AND_VALUES',
                        default=None,
                        nargs=REMAINDER)
    add_directory_arg(preset)
    preset.set_defaults(main=variable_commands.main_set)

    preset = subparsers.add_parser(
        'unset-variable',
        help=
        "Unset an environment variable value from anaconda-project-local.yml")
    add_env_spec_arg(preset)
    add_directory_arg(preset)
    preset.add_argument('vars_to_unset',
                        metavar='VARS_TO_UNSET',
                        default=None,
                        nargs=REMAINDER)
    preset.set_defaults(main=variable_commands.main_unset)

    preset = subparsers.add_parser(
        'add-download',
        help="Add a URL to be downloaded before running commands")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.add_argument('filename_variable',
                        metavar='ENV_VAR_FOR_FILENAME',
                        default=None)
    preset.add_argument('download_url', metavar='DOWNLOAD_URL', default=None)
    preset.add_argument(
        '--filename',
        help="The name to give the file/folder after downloading it",
        default=None)
    preset.add_argument('--hash-algorithm',
                        help="Defines which hash algorithm to use",
                        default=None,
                        choices=_hash_algorithms)
    preset.add_argument(
        '--hash-value',
        help="The expected checksum hash of the downloaded file",
        default=None)
    preset.set_defaults(main=download_commands.main_add)

    preset = subparsers.add_parser(
        'remove-download',
        help="Remove a download from the project and from the filesystem")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.add_argument('filename_variable',
                        metavar='ENV_VAR_FOR_FILENAME',
                        default=None)
    preset.set_defaults(main=download_commands.main_remove)

    preset = subparsers.add_parser('list-downloads',
                                   help="List all downloads on the project")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.set_defaults(main=download_commands.main_list)

    service_types = RequirementsRegistry().list_service_types()
    service_choices = list(map(lambda s: s.name, service_types))

    def add_service_variable_name(preset):
        preset.add_argument('--variable',
                            metavar='ENV_VAR_FOR_SERVICE_ADDRESS',
                            default=None)

    preset = subparsers.add_parser(
        'add-service',
        help="Add a service to be available before running commands")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    add_service_variable_name(preset)
    preset.add_argument('service_type',
                        metavar='SERVICE_TYPE',
                        default=None,
                        choices=service_choices)
    preset.set_defaults(main=service_commands.main_add)

    preset = subparsers.add_parser('remove-service',
                                   help="Remove a service from the project")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.add_argument('variable', metavar='SERVICE_REFERENCE', default=None)
    preset.set_defaults(main=service_commands.main_remove)

    preset = subparsers.add_parser('list-services',
                                   help="List services present in the project")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.set_defaults(main=service_commands.main_list)

    def add_package_args(preset):
        preset.add_argument('--pip',
                            action='store_true',
                            help='Install the requested packages using pip.')
        preset.add_argument('-c',
                            '--channel',
                            metavar='CHANNEL',
                            action='append',
                            help='Channel to search for packages')
        preset.add_argument('packages',
                            metavar='PACKAGES',
                            default=None,
                            nargs=REMAINDER)

    preset = subparsers.add_parser(
        'add-env-spec', help="Add a new environment spec to the project")
    add_directory_arg(preset)
    add_package_args(preset)
    add_env_spec_name_arg(preset, required=True)
    preset.set_defaults(main=environment_commands.main_add)

    preset = subparsers.add_parser(
        'remove-env-spec', help="Remove an environment spec from the project")
    add_directory_arg(preset)
    add_env_spec_name_arg(preset, required=True)
    preset.set_defaults(main=environment_commands.main_remove)

    preset = subparsers.add_parser(
        'list-env-specs', help="List all environment specs for the project")
    add_directory_arg(preset)
    preset.set_defaults(main=environment_commands.main_list_env_specs)

    preset = subparsers.add_parser(
        'export-env-spec',
        help="Save an environment spec as a conda environment file")
    add_directory_arg(preset)
    add_env_spec_name_arg(preset, required=False)
    preset.add_argument('filename', metavar='ENVIRONMENT_FILE')
    preset.set_defaults(main=environment_commands.main_export)

    preset = subparsers.add_parser(
        'lock', help="Lock all packages at their current versions")
    add_directory_arg(preset)
    add_env_spec_name_arg(preset, required=False)
    preset.set_defaults(main=environment_commands.main_lock)

    preset = subparsers.add_parser('unlock',
                                   help="Remove locked package versions")
    add_directory_arg(preset)
    add_env_spec_name_arg(preset, required=False)
    preset.set_defaults(main=environment_commands.main_unlock)

    preset = subparsers.add_parser(
        'update', help="Update all packages to their latest versions")
    add_directory_arg(preset)
    add_env_spec_name_arg(preset, required=False)
    preset.set_defaults(main=environment_commands.main_update)

    preset = subparsers.add_parser(
        'add-packages', help="Add packages to one or all project environments")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    add_package_args(preset)
    preset.set_defaults(main=environment_commands.main_add_packages)

    preset = subparsers.add_parser(
        'remove-packages',
        help="Remove packages from one or all project environments")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.add_argument('--pip',
                        action='store_true',
                        help='Uninstall the requested packages using pip.')
    preset.add_argument('packages',
                        metavar='PACKAGE_NAME',
                        default=None,
                        nargs='+')
    preset.set_defaults(main=environment_commands.main_remove_packages)

    preset = subparsers.add_parser(
        'list-packages',
        help="List packages for an environment on the project")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.set_defaults(main=environment_commands.main_list_packages)

    def add_platforms_list(preset):
        preset.add_argument('platforms',
                            metavar='PLATFORM_NAME',
                            default=None,
                            nargs='+')

    preset = subparsers.add_parser(
        'add-platforms',
        help="Add platforms to one or all project environments")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    add_platforms_list(preset)
    preset.set_defaults(main=environment_commands.main_add_platforms)

    preset = subparsers.add_parser(
        'remove-platforms',
        help="Remove platforms from one or all project environments")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    add_platforms_list(preset)
    preset.set_defaults(main=environment_commands.main_remove_platforms)

    preset = subparsers.add_parser(
        'list-platforms',
        help="List platforms for an environment on the project")
    add_directory_arg(preset)
    add_env_spec_arg(preset)
    preset.set_defaults(main=environment_commands.main_list_platforms)

    def add_command_name_arg(preset):
        preset.add_argument('name',
                            metavar="NAME",
                            help="Command name used to invoke it")

    preset = subparsers.add_parser('add-command',
                                   help="Add a new command to the project")
    add_directory_arg(preset)
    command_choices = list(ALL_COMMAND_TYPES) + ['ask']
    command_choices.remove(
        "conda_app_entry")  # conda_app_entry is sort of silly and may go away
    preset.add_argument('--type',
                        action="store",
                        choices=command_choices,
                        help="Command type to add")
    add_command_name_arg(preset)
    add_env_spec_arg(preset)
    preset.add_argument(
        '--supports-http-options',
        dest='supports_http_options',
        action="store_true",
        help="The command supports project's HTTP server options")
    preset.add_argument(
        '--no-supports-http-options',
        dest='supports_http_options',
        action="store_false",
        help=" The command does not support project's HTTP server options")
    preset.add_argument('command',
                        metavar="COMMAND",
                        help="Command line or app filename to add")
    preset.set_defaults(main=command_commands.main, supports_http_options=None)

    preset = subparsers.add_parser('remove-command',
                                   help="Remove a command from the project")
    add_directory_arg(preset)
    add_command_name_arg(preset)
    preset.set_defaults(main=command_commands.main_remove)

    preset = subparsers.add_parser(
        'list-default-command',
        help="List only the default command on the project")
    add_directory_arg(preset)
    preset.set_defaults(main=command_commands.main_default)

    preset = subparsers.add_parser('list-commands',
                                   help="List the commands on the project")
    add_directory_arg(preset)
    preset.set_defaults(main=command_commands.main_list)

    # argparse doesn't do this for us for whatever reason
    if len(argv) < 2:
        print("Must specify a subcommand.", file=sys.stderr)
        parser.print_usage(file=sys.stderr)
        return 2  # argparse exits with 2 on bad args, copy that

    try:
        args = parser.parse_args(argv[1:])
    except SystemExit as e:
        return e.code

    if args.verbose:
        logger = (logging.getLoggerClass())(name="anaconda_project_verbose")
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(stream=sys.stderr)
        logger.addHandler(handler)
        push_verbose_logger(logger)

    try:
        # '--directory' is used for most subcommands; for unarchive,
        # args.directory is positional and may be None
        if 'directory' in args and args.directory is not None:
            args.directory = os.path.abspath(args.directory)
        return args.main(args)
    finally:
        if args.verbose:
            pop_verbose_logger()
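
A minimal sketch of the typical entry point for a parser function like this one; the real module's main() may differ:

import sys

def main():
    # the function skips argv[0] itself, so pass the full argv
    sys.exit(_parse_args_and_run_subcommand(sys.argv))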
Code example #45
File: log.py Project: zhangjm12/integrations-core
class AgentLogger(logging.getLoggerClass()):
    def trace(self, msg, *args, **kwargs):
        if self.isEnabledFor(TRACE_LEVEL):
            self._log(TRACE_LEVEL, msg, args, **kwargs)
Code example #46
class CustomLevelLogger(logging.getLoggerClass()):
    """ Custom Logger
    Logs messages directly through shell.
    """
    def __init__(self, name, level=logging.NOTSET):
        """ Constructor.

        :param name:  Name of the logger.
        :type  name:  string
        :param level: Initial logging level for the logger. By default, NOTSET.
        :type level:  string
        """
        # Use old-class initialization for compatibility with Python 2.6
        # and add logging.getLoggerClass() check to avoid recursion issues.
        # super(CustomLevelLogger, self).__init__(name, level)
        if logging.getLoggerClass() != CustomLevelLogger:
            logging.getLoggerClass().__init__(self, name, level)
        else:
            logging.Logger.__init__(self, name, level)
        logging.addLevelName(STEP_LOG_LEVEL_VALUE, STEP_LOG_LEVEL_NAME)

    def step(self, msg, *args, **kwargs):
        """Log message of STEP severity."""
        self.log(STEP_LOG_LEVEL_VALUE, msg, *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        """Log message of DEBUG severity (function overwrite)."""
        self.log(logging.DEBUG, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        """Log message of INFO severity (function overwrite)."""
        self.log(logging.INFO, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        """Log message of WARNING severity (function overwrite)."""
        self.log(logging.WARNING, msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        """Log message of ERROR severity (function overwrite)."""
        self.log(logging.ERROR, msg, *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        """Log message of CRITICAL severity (function overwrite)."""
        self.log(logging.CRITICAL, msg, *args, **kwargs)

    def log(self, level, msg, *args, **kwargs):
        """ Log message of any severity (function overwrite). """
        if kwargs:
            raise GadgetLogError("Invalid use of logging parameters. kwargs "
                                 "not supported for log(): "
                                 "{0}".format(kwargs))
        levellog = levelname = logging.getLevelName(level)

        if levelname == "CRITICAL":
            levellog = levelname = "ERROR"
        if levelname == STEP_LOG_LEVEL_NAME:
            levellog = "INFO"

        formatted_msg = msg % args

        if self.isEnabledFor(level):
            if level >= logging.ERROR:
                output = sys.stderr
            else:
                output = sys.stdout
            output.write(
                u"%(type)s: %(msg)s\n" % {
                    "type": levelname,
                    "msg":
                    formatted_msg.encode('utf-8') if PY2 else formatted_msg
                })
        shell.log(levellog, "mysqlprovision: " + formatted_msg)
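
The class leans on several module-level names (STEP_LOG_LEVEL_VALUE, STEP_LOG_LEVEL_NAME, PY2, GadgetLogError, shell) that live elsewhere in the real module; a hedged sketch of the wiring needed to exercise it, with assumed values and a stand-in shell object (these definitions only take effect if they sit in the same module as the class):

import logging

STEP_LOG_LEVEL_NAME = "STEP"   # assumed name and value; the real constants are
STEP_LOG_LEVEL_VALUE = 25      # defined in the surrounding module, not shown here
PY2 = False

class shell:                   # stand-in for the mysqlsh global `shell`
    @staticmethod
    def log(level, msg):
        pass

logging.setLoggerClass(CustomLevelLogger)
log = logging.getLogger("provision")
log.setLevel(logging.INFO)
log.step("Checking instance configuration")   # printed to stdout, mirrored to shell.log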
Code example #47
File: log.py Project: vitorarins/salt
    'debug': logging.DEBUG,
    'error': logging.ERROR,
    'garbage': GARBAGE,
    'info': logging.INFO,
    'quiet': 1000,
    'trace': TRACE,
    'warning': logging.WARNING,
}

# Make a list of log level names sorted by log level
SORTED_LEVEL_NAMES = [
    l[0] for l in sorted(LOG_LEVELS.iteritems(), key=lambda x: x[1])
]

# Store an instance of the current logging logger class
LoggingLoggerClass = logging.getLoggerClass()

MODNAME_PATTERN = re.compile(r'(?P<name>%%\(name\)(?:\-(?P<digits>[\d]+))?s)')

__CONSOLE_CONFIGURED = False
__LOGFILE_CONFIGURED = False


def is_console_configured():
    global __CONSOLE_CONFIGURED
    return __CONSOLE_CONFIGURED


def is_logfile_configured():
    global __LOGFILE_CONFIGURED
    return __LOGFILE_CONFIGURED
Code example #48
File: log.py Project: m4c3/checkMK
# looks wrong.

# Users should be able to set log levels without importing "logging"

CRITICAL = _logging.CRITICAL
ERROR = _logging.ERROR
WARNING = _logging.WARNING
INFO = _logging.INFO
DEBUG = _logging.DEBUG

# We need an additional log level between INFO and DEBUG to reflect the
# verbose() and vverbose() mechanisms of Check_MK.

VERBOSE = 15

_logger_class = _logging.getLoggerClass()  # type: Any


class CMKLogger(_logger_class):
    def __init__(self, name, level=_logging.NOTSET):
        super(CMKLogger, self).__init__(name, level)

        _logging.addLevelName(VERBOSE, "VERBOSE")

    def verbose(self, msg, *args, **kwargs):
        if self.is_verbose():
            self._log(VERBOSE, msg, args, **kwargs)

    def is_verbose(self):
        return self.isEnabledFor(VERBOSE)
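
A short usage sketch; VERBOSE (15) sits between DEBUG (10) and INFO (20), so verbose() output appears only when the level is lowered below INFO:

_logging.setLoggerClass(CMKLogger)
_logging.basicConfig(level=VERBOSE)    # attach a root handler so records are visible
logger = _logging.getLogger("cmk.demo")
logger.setLevel(VERBOSE)

assert logger.is_verbose()
logger.verbose("between DEBUG and INFO")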
Code example #49
class Logger(logging.getLoggerClass()):
    logging.captureWarnings(True)
    logging.getLogger().addHandler(logging.NullHandler())

    def __init__(self,
                 name="sickrage",
                 consoleLogging=True,
                 fileLogging=False,
                 debugLogging=False,
                 logFile=None,
                 logSize=1048576,
                 logNr=5):
        super(Logger, self).__init__(name)
        self.propagate = False

        self.consoleLogging = consoleLogging
        self.fileLogging = fileLogging
        self.debugLogging = debugLogging

        self.logFile = logFile
        self.logSize = logSize
        self.logNr = logNr

        self.CRITICAL = CRITICAL
        self.DEBUG = DEBUG
        self.ERROR = ERROR
        self.WARNING = WARNING
        self.INFO = INFO
        self.DB = 5

        self.CENSORED_ITEMS = {}

        self.logLevels = {
            'CRITICAL': self.CRITICAL,
            'ERROR': self.ERROR,
            'WARNING': self.WARNING,
            'INFO': self.INFO,
            'DEBUG': self.DEBUG,
            'DB': 5
        }

        # list of allowed loggers
        self.loggers = {
            'sickrage': self,
            'tornado.general': logging.getLogger('tornado.general'),
            'tornado.application': logging.getLogger('tornado.application'),
            'apscheduler.jobstores':
            logging.getLogger('apscheduler.jobstores'),
            'apscheduler.scheduler': logging.getLogger('apscheduler.scheduler')
        }

        # set custom level for database logging
        logging.addLevelName(self.logLevels['DB'], 'DB')
        self.setLevel(self.logLevels['DB'])

        # viewers
        self.warning_viewer = WarningViewer()
        self.error_viewer = ErrorViewer()

        # start logger
        self.start()

    def start(self):
        # remove all handlers
        self.handlers = []

        # sentry log handler
        sentry_client = raven.Client(
            'https://[email protected]/2?verify_ssl=0',
            release=sickrage.version(),
            repos={'sickrage': {
                'name': 'sickrage/sickrage'
            }})

        sentry_ignore_exceptions = [
            'KeyboardInterrupt',
            'PermissionError',
            'FileNotFoundError',
        ]

        sentry_tags = {
            'platform': platform.platform(),
            'locale': sys.getdefaultencoding(),
            'python': platform.python_version()
        }

        if sickrage.app.config and sickrage.app.config.sub_id:
            sentry_tags.update({'sub_id': sickrage.app.config.sub_id})
        if sickrage.app.config and sickrage.app.config.app_id:
            sentry_tags.update({'app_id': sickrage.app.config.app_id})

        sentry_handler = SentryHandler(
            client=sentry_client,
            ignore_exceptions=sentry_ignore_exceptions,
            tags=sentry_tags)

        sentry_handler.setLevel(self.logLevels['ERROR'])
        sentry_handler.set_name('sentry')
        self.addHandler(sentry_handler)

        # console log handler
        if self.consoleLogging:
            console_handler = logging.StreamHandler()
            formatter = logging.Formatter(
                '%(asctime)s %(levelname)s::%(threadName)s::%(message)s',
                '%H:%M:%S')

            console_handler.setFormatter(formatter)
            console_handler.setLevel(self.logLevels['INFO'] if not self.
                                     debugLogging else self.logLevels['DEBUG'])
            self.addHandler(console_handler)

        # file log handlers
        if self.logFile:
            # make logs folder if it doesn't exist
            if not os.path.exists(os.path.dirname(self.logFile)):
                if not make_dir(os.path.dirname(self.logFile)):
                    return

            if sickrage.app.developer:
                rfh = FileHandler(filename=self.logFile, )
            else:
                rfh = RotatingFileHandler(filename=self.logFile,
                                          maxBytes=self.logSize,
                                          backupCount=self.logNr)

            rfh_errors = RotatingFileHandler(filename=self.logFile.replace(
                '.log', '.error.log'),
                                             maxBytes=self.logSize,
                                             backupCount=self.logNr)

            formatter = logging.Formatter(
                '%(asctime)s %(levelname)s::%(threadName)s::%(message)s',
                '%Y-%m-%d %H:%M:%S')

            rfh.setFormatter(formatter)
            rfh.setLevel(self.logLevels['INFO']
                         if not self.debugLogging else self.logLevels['DEBUG'])
            self.addHandler(rfh)

            rfh_errors.setFormatter(formatter)
            rfh_errors.setLevel(self.logLevels['ERROR'])
            self.addHandler(rfh_errors)

    def makeRecord(self,
                   name,
                   level,
                   fn,
                   lno,
                   msg,
                   args,
                   exc_info,
                   func=None,
                   extra=None,
                   sinfo=None):
        if name in self.loggers:
            record = super(Logger,
                           self).makeRecord(name, level, fn, lno, msg, args,
                                            exc_info, func, extra, sinfo)

            try:
                record.msg = re.sub(
                    r"(.*)\b({})\b(.*)".format('|'.join(
                        [x for x in self.CENSORED_ITEMS.values() if len(x)])),
                    r"\1\3", record.msg)

                # needed because Newznab apikey isn't stored as key=value in a section.
                record.msg = re.sub(
                    r"([&?]r|[&?]apikey|[&?]api_key)=[^&]*([&\w]?)",
                    r"\1=**********\2", record.msg)
                record.msg = unidecode(record.msg)
            except:
                pass

            # sending record to UI
            if record.levelno in [WARNING, ERROR]:
                (self.warning_viewer,
                 self.error_viewer)[record.levelno == ERROR].add(
                     "{}::{}".format(record.threadName, record.msg), True)

            return record

    def set_level(self):
        self.debugLogging = sickrage.app.config.debug
        level = DEBUG if self.debugLogging else INFO
        for __, logger in self.loggers.items():
            logger.setLevel(level)
            for handler in logger.handlers:
                if not handler.name == 'sentry':
                    handler.setLevel(level)

    def list_modules(self, package):
        """Return all sub-modules for the specified package.

        :param package:
        :type package: module
        :return:
        :rtype: list of str
        """
        return [
            modname for importer, modname, ispkg in pkgutil.walk_packages(
                path=package.__path__,
                prefix=package.__name__ + '.',
                onerror=lambda x: None)
        ]

    def get_loggers(self, package):
        """Return all loggers for package and sub-packages.

        :param package:
        :type package: module
        :return:
        :rtype: list of logging.Logger
        """
        return [
            logging.getLogger(modname)
            for modname in self.list_modules(package)
        ]

    def log(self, level, msg, *args, **kwargs):
        super(Logger, self).log(level, msg, *args, **kwargs)

    def db(self, msg, *args, **kwargs):
        super(Logger, self).log(self.logLevels['DB'], msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        super(Logger, self).info(msg, *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        super(Logger, self).debug(msg, *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        super(Logger, self).critical(msg, *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        super(Logger, self).exception(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        super(Logger, self).error(msg, exc_info=1, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        super(Logger, self).warning(msg, *args, **kwargs)

    def fatal(self, msg, *args, **kwargs):
        super(Logger, self).fatal(msg, *args, **kwargs)
        sys.exit(1)

    def close(self, *args, **kwargs):
        logging.shutdown()
Code example #50
File: gui_logging.py Project: llohse/genx_cicd
class NumpyLogger(logging.getLoggerClass()):
    '''
    A logger that makes sure the actual function definition filename, lineno
    and function name are used for logging numpy floating point errors, not
    the numpy_logger function.
    '''

    if sys.version_info[0:2] >= (3, 2):  #sinfo was introduced in python 3.2

        def makeRecord(self,
                       name,
                       lvl,
                       fn,
                       lno,
                       msg,
                       args,
                       exc_info,
                       func=None,
                       extra=None,
                       sinfo=None):
            curframe = inspect.currentframe()
            calframes = inspect.getouterframes(curframe, 2)
            # stack starts with:
            # (this method, debug call, debug call rootlogger, numpy_logger, actual function, ...)
            ignore, fname, lineno, func, ignore, ignore = calframes[4]
            return logging.getLoggerClass().makeRecord(self,
                                                       name,
                                                       lvl,
                                                       fname,
                                                       lineno,
                                                       msg,
                                                       args,
                                                       exc_info,
                                                       func=func,
                                                       extra=extra,
                                                       sinfo=sinfo)
    else:

        def makeRecord(self,
                       name,
                       lvl,
                       fn,
                       lno,
                       msg,
                       args,
                       exc_info,
                       func=None,
                       extra=None):
            curframe = inspect.currentframe()
            calframes = inspect.getouterframes(curframe, 2)
            # stack starts with:
            # (this method, debug call, debug call rootlogger, numpy_logger, actual function, ...)
            ignore, fname, lineno, func, ignore, ignore = calframes[4]
            return logging.getLoggerClass().makeRecord(self,
                                                       name,
                                                       lvl,
                                                       fname,
                                                       lineno,
                                                       msg,
                                                       args,
                                                       exc_info,
                                                       func=func,
                                                       extra=extra)
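
The class exists to attribute numpy floating point warnings to the code that triggered them rather than to the logging callback; a hedged sketch of how it might be wired up (the numpy_logger name is taken from the stack layout described in the comments above):

import logging
import numpy

logging.setLoggerClass(NumpyLogger)
nplogger = logging.getLogger("numpy")
logging.setLoggerClass(logging.Logger)   # restore the default class for other loggers

logging.basicConfig(level=logging.DEBUG,
                    format="%(filename)s:%(lineno)d %(message)s")

def numpy_logger(err, flag):
    # numpy invokes this with the error kind ('divide', 'overflow', ...) and a status flag
    nplogger.debug("numpy floating point error (%s), flag %s", err, flag)

numpy.seterrcall(numpy_logger)
numpy.seterr(all="log")                  # route FP errors through the callback above

numpy.float64(1.0) / numpy.float64(0.0)  # reported with this file and line, not numpy_logger's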
Code example #51
 def __init__(self, logger_or_log_name=None):
     if isinstance(logger_or_log_name, logging.getLoggerClass()):
         self.logger = logger_or_log_name
     else:
         self.logger = logging.getLogger(logger_or_log_name)
     EvalController.__init__(self)
Code example #52
def patching_logger_class():
    logger_class = logging.getLoggerClass()
    original_log = logger_class._log
    original_makeRecord = logger_class.makeRecord

    try:

        def wrap_log(original_func):
            @wraps(original_func)
            def _log(self, *args, **kwargs):
                attachment = kwargs.pop('attachment', None)
                if attachment is not None:
                    kwargs.setdefault('extra',
                                      {}).update({'attachment': attachment})
                return original_func(self, *args, **kwargs)

            return _log

        def wrap_makeRecord(original_func):
            @wraps(original_func)
            def makeRecord(self,
                           name,
                           level,
                           fn,
                           lno,
                           msg,
                           args,
                           exc_info,
                           func=None,
                           extra=None,
                           sinfo=None):
                if extra is not None:
                    attachment = extra.pop('attachment', None)
                else:
                    attachment = None
                try:
                    # Python 3.5
                    record = original_func(self,
                                           name,
                                           level,
                                           fn,
                                           lno,
                                           msg,
                                           args,
                                           exc_info,
                                           func=func,
                                           extra=extra,
                                           sinfo=sinfo)
                except TypeError:
                    # Python 2.7
                    record = original_func(self,
                                           name,
                                           level,
                                           fn,
                                           lno,
                                           msg,
                                           args,
                                           exc_info,
                                           func=func,
                                           extra=extra)
                record.attachment = attachment
                return record

            return makeRecord

        if not hasattr(logger_class, "_patched"):
            logger_class._log = wrap_log(logger_class._log)
            logger_class.makeRecord = wrap_makeRecord(logger_class.makeRecord)
            logger_class._patched = True

        yield

    finally:
        logger_class._log = original_log
        logger_class.makeRecord = original_makeRecord
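
The body reads like a generator-based context manager, so this sketch assumes the function is decorated with contextlib.contextmanager in the real module (the decorator is not visible in this excerpt):

import logging
from contextlib import contextmanager

patching = contextmanager(patching_logger_class)  # assumption, see above

with patching():
    # `attachment` is popped by the wrapped _log and re-attached to the record
    logging.getLogger("rp").warning("step finished", attachment={"name": "log.txt"})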
Code example #53
class FancyLogger(logging.getLoggerClass()):
    """
    This is a custom Logger class that uses the FancyLogRecord
    and has extra log methods raiseException and deprecated, and
    streaming versions for debug, info, warning and error.
    """
    # this attribute can be checked to know if the logger is thread aware
    _thread_aware = True

    # default class for raiseException method, that can be redefined by deriving loggers
    RAISE_EXCEPTION_CLASS = Exception

    def log_method(self, msg):
        self.warning(msg)

    RAISE_EXCEPTION_LOG_METHOD = log_method

    # method definition as it is in logging, can't change this
    # pylint: disable=unused-argument
    def makeRecord(self,
                   name,
                   level,
                   pathname,
                   lineno,
                   msg,
                   args,
                   excinfo,
                   func=None,
                   extra=None,
                   sinfo=None):
        """
        overwrite make record to use a fancy record (with more options)
        """
        logrecordcls = logging.LogRecord
        if hasattr(self, 'fancyrecord') and self.fancyrecord:
            logrecordcls = FancyLogRecord
        try:
            new_msg = str(msg)
        except UnicodeEncodeError:
            new_msg = msg.encode('utf8', 'replace')
        return logrecordcls(name, level, pathname, lineno, new_msg, args,
                            excinfo)

    def fail(self, message, *args):
        """Log error message and raise exception."""
        formatted_message = message % args
        self.RAISE_EXCEPTION_LOG_METHOD(formatted_message)
        raise self.RAISE_EXCEPTION_CLASS(formatted_message)

    def raiseException(self, message, exception=None, catch=False):
        """
        logs message and raises an exception (since it can be caught higher up and handled)
        and raises it afterwards
        @param exception: subclass of Exception to use for raising
        @param catch: boolean, try to catch raised exception and add relevant info to message
                      (this will also happen if exception is not specified)
        """
        fullmessage = message
        tb = None

        if catch or exception is None:
            # assumes no control by codemonkey
            # lets see if there is something more to report on
            exc, detail, tb = sys.exc_info()
            if exc is not None:
                if exception is None:
                    exception = exc
                # extend the message with the traceback and some more details
                # or use self.exception() instead of self.warning()?
                tb_text = "\n".join(traceback.format_tb(tb))
                message += " (%s)" % detail
                fullmessage += " (%s\n%s)" % (detail, tb_text)

        if exception is None:
            exception = self.RAISE_EXCEPTION_CLASS

        self.RAISE_EXCEPTION_LOG_METHOD(fullmessage)
        raise_with_traceback(exception(message))

    # pylint: disable=unused-argument
    def deprecated(self,
                   msg,
                   cur_ver,
                   max_ver,
                   depth=2,
                   exception=None,
                   log_callback=None,
                   *args,
                   **kwargs):
        """
        Log deprecation message, throw error if current version is passed given threshold.

        Checks only major/minor version numbers (MAJ.MIN.x) by default, controlled by 'depth' argument.
        """
        if log_callback is None:
            log_callback = self.warning

        loose_cv = LooseVersion(cur_ver)
        loose_mv = LooseVersion(max_ver)

        loose_cv.version = loose_cv.version[:depth]
        loose_mv.version = loose_mv.version[:depth]

        if loose_cv >= loose_mv:
            self.raiseException(
                "DEPRECATED (since v%s) functionality used: %s" %
                (max_ver, msg),
                exception=exception)
        else:
            deprecation_msg = "Deprecated functionality, will no longer work in v%s: %s" % (
                max_ver, msg)
            log_callback(deprecation_msg)

    def _handleFunction(self, function, levelno, **kwargs):
        """
        Walk over all handlers like callHandlers and execute function on each handler
        """
        c = self
        found = 0
        while c:
            for hdlr in c.handlers:
                found = found + 1
                if levelno >= hdlr.level:
                    function(hdlr, **kwargs)
            if not c.propagate:
                c = None  # break out
            else:
                c = c.parent

    def setLevelName(self, level_name):
        """Set the level by name."""
        # This is supported in py27 setLevel code, but not in py24
        self.setLevel(getLevelInt(level_name))

    def streamLog(self, levelno, data):
        """
        Add (continuous) data to an existing message stream (eg a stream after a logging.info()
        """
        if is_string(levelno):
            levelno = getLevelInt(levelno)

        def write_and_flush_stream(hdlr, data=None):
            """Write to stream and flush the handler"""
            if (not hasattr(hdlr, 'stream')) or hdlr.stream is None:
                # no stream or not initialised.
                raise Exception(
                    "write_and_flush_stream failed. No active stream attribute."
                )
            if data is not None:
                hdlr.stream.write(data)
                hdlr.flush()

        # only log when appropriate (see logging.Logger.log())
        if self.isEnabledFor(levelno):
            self._handleFunction(write_and_flush_stream, levelno, data=data)

    def streamDebug(self, data):
        """Get a DEBUG loglevel streamLog"""
        self.streamLog('DEBUG', data)

    def streamInfo(self, data):
        """Get a INFO loglevel streamLog"""
        self.streamLog('INFO', data)

    def streamError(self, data):
        """Get a ERROR loglevel streamLog"""
        self.streamLog('ERROR', data)

    def _get_parent_info(self, verbose=True):
        """Return some logger parent related information"""
        def info(x):
            res = [
                x, x.name,
                logging.getLevelName(x.getEffectiveLevel()),
                logging.getLevelName(x.level), x.disabled
            ]
            if verbose:
                res.append([(h, logging.getLevelName(h.level))
                            for h in x.handlers])
            return res

        parentinfo = []
        logger = self
        parentinfo.append(info(logger))
        while logger.parent is not None:
            logger = logger.parent
            parentinfo.append(info(logger))
        return parentinfo

    def get_parent_info(self, prefix, verbose=True):
        """Return pretty text version"""
        rev_parent_info = self._get_parent_info(verbose=verbose)
        return [
            "%s %s%s" % (prefix, " " * 4 * idx, info)
            for idx, info in enumerate(rev_parent_info)
        ]

    def __copy__(self):
        """Return shallow copy, in this case reference to current logger"""
        return getLogger(self.name, fname=False, clsname=False)

    def __deepcopy__(self, memo):
        """This behaviour is undefined, fancylogger will return shallow copy, instead just crashing."""
        return self.__copy__()
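
A short usage sketch for the logger above; hypothetical, assuming fancylogger's module-level getLogger() (vsc-base) returns an instance of this class:

from vsc.utils.fancylogger import getLogger  # assumption: vsc-base install

log = getLogger("demo")

# deprecated(): warns while cur_ver is below max_ver, raises once it is reached
log.deprecated("use new_api() instead", cur_ver="1.2.1", max_ver="2.0")

# streamLog()/streamInfo(): append raw data to the handlers' streams,
# continuing an earlier message instead of starting a new record
log.streamInfo("progress: ")
for pct in (25, 50, 75, 100):
    log.streamInfo("%d%% " % pct)

# raiseException(): log the message plus traceback details, then re-raise;
# keep it last, since the ValueError propagates from here
try:
    int("not a number")
except ValueError:
    log.raiseException("parsing failed")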
コード例 #54
0
ファイル: logging.py プロジェクト: sgeiser/urlscraper
        logging.basicConfig(level=default_level)

LOGGING_WRAPPER_NAME = '__logging_method_wrapper'
def logging_method(func):
    @wraps(func)
    def __logging_method_wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    
    __logging_method_wrapper.__name__ = LOGGING_WRAPPER_NAME
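    # @wraps copied func.__name__ onto the wrapper above, so the marker name is
    # reset explicitly; presumably findCaller() below uses it to skip these
    # wrapper frames when attributing the call site (an assumption, since that
    # part of the source is truncated).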
    return __logging_method_wrapper

TRACE_LEVEL_NUM = 5
DEVELOP_LEVEL_NUM = 60

# noinspection PyTypeChecker
_logger_class: Type[logging.Logger] = getLoggerClass()
class ExtendedLogger(_logger_class):
    def __init__(self, name, level=NOTSET):
        super().__init__(name, level)
        
        addLevelName(TRACE_LEVEL_NUM, "TRACE")
        addLevelName(DEVELOP_LEVEL_NUM, "DEVELOP")
    
    # noinspection PyMethodOverriding
    def findCaller(self, stack_info=False):
        """
        Find the stack frame of the caller so that we can note the source
        file name, line number and function name.
        """
        
        _frame_object = logging.currentframe()
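
The findCaller() body is truncated above; separately, here is a hedged sketch of how such a subclass is typically installed, assuming the snippet's ExtendedLogger and level constants are importable:

import logging

logging.setLoggerClass(ExtendedLogger)  # must happen before getLogger() calls
logger = logging.getLogger("scraper")   # now an ExtendedLogger instance

logger.setLevel(TRACE_LEVEL_NUM)
logger.addHandler(logging.StreamHandler())
logger.log(TRACE_LEVEL_NUM, "finest-grained message")    # shown as TRACE
logger.log(DEVELOP_LEVEL_NUM, "developer-only message")  # shown as DEVELOP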
コード例 #55
0
        'TRACE': TextFormat('magenta'),
        'GARBAGE': TextFormat('blue'),
        'NOTSET': TextFormat('reset'),
    },
    'name': TextFormat('bold', 'green'),
    'process': TextFormat('bold', 'blue'),
}


# Make a list of log level names sorted by log level
SORTED_LEVEL_NAMES = [
    l[0] for l in sorted(six.iteritems(LOG_LEVELS), key=lambda x: x[1])
]

# Store an instance of the current logging logger class
LOGGING_LOGGER_CLASS = logging.getLoggerClass()

MODNAME_PATTERN = re.compile(r'(?P<name>%%\(name\)(?:\-(?P<digits>[\d]+))?s)')

__CONSOLE_CONFIGURED = False
__LOGFILE_CONFIGURED = False
__TEMP_LOGGING_CONFIGURED = False
__EXTERNAL_LOGGERS_CONFIGURED = False
__MP_LOGGING_LISTENER_CONFIGURED = False
__MP_LOGGING_CONFIGURED = False
__MP_LOGGING_QUEUE = None
__MP_LOGGING_QUEUE_PROCESS = None
__MP_LOGGING_QUEUE_HANDLER = None


def is_console_configured():
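
For comparison, the same level-name sorting without six on Python 3 (LOG_LEVELS below is an assumed stand-in for the mapping used above):

import logging

LOG_LEVELS = {  # assumed stand-in
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
}

# dict.items() replaces six.iteritems() on Python 3
SORTED_LEVEL_NAMES = [
    name for name, _ in sorted(LOG_LEVELS.items(), key=lambda kv: kv[1])
]
print(SORTED_LEVEL_NAMES)  # ['debug', 'info', 'warning', 'error']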
コード例 #56
0
class srLogger(logging.getLoggerClass()):
    logging.captureWarnings(True)
    logging.getLogger().addHandler(logging.NullHandler())

    def __init__(self, name="sickrage"):
        super(srLogger, self).__init__(name)
        self.propagate = False

        self.consoleLogging = True
        self.fileLogging = False
        self.debugLogging = False

        self.logFile = None
        self.logSize = 1048576
        self.logNr = 5

        self.submitter_running = False

        self.CRITICAL = CRITICAL
        self.DEBUG = DEBUG
        self.ERROR = ERROR
        self.WARNING = WARNING
        self.INFO = INFO
        self.DB = 5

        self.logLevels = {
            'CRITICAL': self.CRITICAL,
            'ERROR': self.ERROR,
            'WARNING': self.WARNING,
            'INFO': self.INFO,
            'DEBUG': self.DEBUG,
            'DB': self.DB
        }

        self.logNameFilters = {
            '': 'No Filter',
            'DAILYSEARCHER': 'Daily Searcher',
            'BACKLOG': 'Backlog',
            'SHOWUPDATER': 'Show Updater',
            'VERSIONUPDATER': 'Check Version',
            'SHOWQUEUE': 'Show Queue',
            'SEARCHQUEUE': 'Search Queue',
            'FINDPROPERS': 'Find Propers',
            'POSTPROCESSOR': 'Postprocessor',
            'SUBTITLESEARCHER': 'Find Subtitles',
            'TRAKTSEARCHER': 'Trakt Checker',
            'EVENT': 'Event',
            'ERROR': 'Error',
            'TORNADO': 'Tornado',
            'Thread': 'Thread',
            'MAIN': 'Main',
        }

        # list of allowed loggers
        self.allowedLoggers = [
            'sickrage', 'tornado.general', 'tornado.application',
            'apscheduler.jobstores', 'apscheduler.scheduler'
        ]

        # set custom level for database logging
        logging.addLevelName(self.logLevels['DB'], 'DB')
        logging.getLogger("sickrage").setLevel(self.logLevels['DB'])

        # start logger
        self.start()

    def start(self):
        # remove all handlers
        self.handlers = []

        # console log handler
        if self.consoleLogging:
            console = logging.StreamHandler()
            console.setFormatter(
                logging.Formatter(
                    '%(asctime)s %(levelname)s::%(threadName)s::%(message)s',
                    '%H:%M:%S'))
            console.setLevel(self.logLevels['INFO'] if not self.debugLogging
                             else self.logLevels['DEBUG'])
            self.addHandler(console)

        # file log handlers
        if self.logFile and makeDir(os.path.dirname(self.logFile)):
            if sickrage.DEVELOPER:
                rfh = FileHandler(filename=self.logFile)
            else:
                rfh = RotatingFileHandler(filename=self.logFile,
                                          maxBytes=self.logSize,
                                          backupCount=self.logNr)

            rfh_errors = RotatingFileHandler(filename=self.logFile.replace(
                '.log', '.error.log'),
                                             maxBytes=self.logSize,
                                             backupCount=self.logNr)

            rfh.setFormatter(
                logging.Formatter(
                    '%(asctime)s %(levelname)s::%(threadName)s::%(message)s',
                    '%Y-%m-%d %H:%M:%S'))
            rfh.setLevel(self.logLevels['INFO']
                         if not self.debugLogging else self.logLevels['DEBUG'])
            self.addHandler(rfh)

            rfh_errors.setFormatter(
                logging.Formatter(
                    '%(asctime)s %(levelname)s::%(threadName)s::%(message)s',
                    '%Y-%m-%d %H:%M:%S'))
            rfh_errors.setLevel(self.logLevels['ERROR'])
            self.addHandler(rfh_errors)

    def makeRecord(self,
                   name,
                   level,
                   fn,
                   lno,
                   msg,
                   args,
                   exc_info,
                   func=None,
                   extra=None):
        # only build records for whitelisted loggers; all others are dropped
        if name in self.allowedLoggers:
            record = super(srLogger,
                           self).makeRecord(name, level, fn, lno, msg, args,
                                            exc_info, func, extra)

            try:
                record.msg = re.sub(
                    r"(.*)\b({})\b(.*)".format('|'.join([
                        x for x in
                        sickrage.srCore.srConfig.CENSORED_ITEMS.values()
                        if len(x)
                    ])), r"\1\3", record.msg)

                # needed because Newznab apikey isn't stored as key=value in a section.
                record.msg = re.sub(
                    r"([&?]r|[&?]apikey|[&?]api_key)=[^&]*([&\w]?)",
                    r"\1=**********\2", record.msg)
            except Exception:
                pass

            # sending record to UI
            if record.levelno in [WARNING, ERROR]:
                from sickrage.core.classes import WarningViewer
                from sickrage.core.classes import ErrorViewer
                if record.levelno == ERROR:
                    ErrorViewer().add(record.msg, True)
                else:
                    WarningViewer().add(record.msg, True)

            return record

    def log(self, level, msg, *args, **kwargs):
        super(srLogger, self).log(level, msg, *args, **kwargs)

    def db(self, msg, *args, **kwargs):
        super(srLogger, self).log(self.logLevels['DB'], msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        super(srLogger, self).info(msg, *args, **kwargs)

    def debug(self, msg, *args, **kwargs):
        super(srLogger, self).debug(msg, *args, **kwargs)

    def critical(self, msg, *args, **kwargs):
        super(srLogger, self).critical(msg, *args, **kwargs)

    def exception(self, msg, *args, **kwargs):
        super(srLogger, self).exception(msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        super(srLogger, self).error(msg, exc_info=1, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        super(srLogger, self).warning(msg, *args, **kwargs)

    def log_error_and_exit(self, msg, *args, **kwargs):
        if self.consoleLogging:
            super(srLogger, self).error(msg, *args, **kwargs)
        sys.exit(1)

    def submit_errors(self):  # Too many local variables, too many branches, pylint: disable=R0912,R0914
        import sickrage

        submitter_result = None
        issue_id = None

        from sickrage.core.classes import ErrorViewer
        if not (sickrage.srCore.srConfig.GIT_USERNAME
                and sickrage.srCore.srConfig.GIT_PASSWORD and sickrage.DEBUG
                and len(ErrorViewer.errors) > 0):
            submitter_result = 'Please set your GitHub username and password in the config and enable debug. Unable to submit issue ticket to GitHub!'
            return submitter_result, issue_id

        try:
            from version_updater import srVersionUpdater

            sickrage.srCore.VERSIONUPDATER.check_for_new_version()
        except Exception:
            submitter_result = 'Could not check if your SiCKRAGE is updated, unable to submit issue ticket to GitHub!'
            return submitter_result, issue_id

        if self.submitter_running:
            submitter_result = 'Issue submitter is running, please wait for it to complete'
            return submitter_result, issue_id

        self.submitter_running = True

        gh_org = sickrage.srCore.srConfig.GIT_ORG or 'SiCKRAGETV'
        gh_repo = 'sickrage-issues'

        import github
        gh = github.Github(
            login_or_token=sickrage.srCore.srConfig.GIT_USERNAME,
            password=sickrage.srCore.srConfig.GIT_PASSWORD,
            user_agent="SiCKRAGE")

        try:
            # read log file
            log_data = []

            if os.path.isfile(self.logFile):
                with io.open(self.logFile, 'r') as f:
                    log_data = f.readlines()

            for i in range(1, int(sickrage.srCore.srConfig.LOG_NR)):
                if os.path.isfile(self.logFile +
                                  ".%i" % i) and (len(log_data) <= 500):
                    with io.open(self.logFile + ".%i" % i, 'r') as f:
                        log_data += f.readlines()

            log_data = list(reversed(log_data))

            # parse and submit errors to issue tracker
            for curError in sorted(ErrorViewer.errors,
                                   key=lambda error: error.time,
                                   reverse=True)[:500]:

                try:
                    title_Error = "[APP SUBMITTED]: {}".format(curError.title)
                    if not len(title_Error) or title_Error == 'None':
                        title_Error = re.match(r"^[A-Z0-9\-\[\] :]+::\s*(.*)$",
                                               curError.message).group(1)

                    if len(title_Error) > 1000:
                        title_Error = title_Error[0:1000]
                except Exception as e:
                    super(srLogger, self).error(
                        "Unable to get error title : {}".format(e.message))

                gist = None
                regex = r"^({})\s+([A-Z]+)\s+([0-9A-Z\-]+)\s*(.*)$".format(
                    curError.time)
                for i, x in enumerate(log_data):
                    match = re.match(regex, x)
                    if match:
                        level = match.group(2)
                        # if level == srCore.LOGGER.ERROR:
                        # paste_data = "".join(log_data[i:i + 50])
                        # if paste_data:
                        #    gist = gh.get_user().create_gist(True, {"sickrage.log": InputFileContent(paste_data)})
                        # break
                    else:
                        gist = 'No ERROR found'

                message = "### INFO\n"
                message += "Python Version: **" + sys.version[:120].replace(
                    '\n', '') + "**\n"
                message += "Operating System: **" + platform.platform(
                ) + "**\n"
                try:
                    message += "Locale: " + locale.getdefaultlocale()[1] + "\n"
                except Exception:
                    message += "Locale: unknown" + "\n"
                message += "Version: **" + sickrage.srCore.VERSIONUPDATER.updater.version + "**\n"
                if hasattr(gist, 'html_url'):
                    message += "Link to Log: " + gist.html_url + "\n"
                else:
                    message += "No Log available with ERRORS: " + "\n"
                message += "### ERROR\n"
                message += "```\n"
                message += curError.message + "\n"
                message += "```\n"
                message += "---\n"
                message += "_STAFF NOTIFIED_: @SiCKRAGETV/owners @SiCKRAGETV/moderators"

                reports = gh.get_organization(gh_org).get_repo(
                    gh_repo).get_issues(state="all")

                def is_ascii_error(title):
                    return re.search(
                        r".* codec can't .*code .* in position .*:",
                        title) is not None

                def is_malformed_error(title):
                    return re.search(
                        r".* not well-formed \(invalid token\): line .* column .*",
                        title) is not None

                ascii_error = is_ascii_error(title_Error)
                malformed_error = is_malformed_error(title_Error)

                issue_found = False
                for report in reports:
                    if title_Error.rsplit(' :: ')[-1] in report.title or \
                            (malformed_error and is_malformed_error(report.title)) or \
                            (ascii_error and is_ascii_error(report.title)):

                        issue_id = report.number
                        if not report.raw_data['locked']:
                            if report.create_comment(message):
                                submitter_result = 'Commented on existing issue #%s successfully!' % issue_id
                            else:
                                submitter_result = 'Failed to comment on found issue #%s!' % issue_id
                        else:
                            submitter_result = 'Issue #%s is locked, check github to find info about the error.' % issue_id

                        issue_found = True
                        break

                if not issue_found:
                    issue = gh.get_organization(gh_org).get_repo(
                        gh_repo).create_issue(title_Error, message)
                    if issue:
                        issue_id = issue.number
                        submitter_result = 'Your issue ticket #%s was submitted successfully!' % issue_id
                    else:
                        submitter_result = 'Failed to create a new issue!'

                if issue_id and curError in ErrorViewer.errors:
                    # clear error from error list
                    ErrorViewer.errors.remove(curError)

        except Exception as e:
            super(srLogger, self).error(traceback.format_exc())
            submitter_result = 'Exception generated in issue submitter, please check the log'
        finally:
            self.submitter_running = False

        return submitter_result, issue_id

    @staticmethod
    def shutdown():
        logging.shutdown()
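
The makeRecord() override above censors secrets with regular expressions; a standard logging.Filter can achieve the same effect, sketched here with an illustrative pattern that is not taken from the project:

import logging
import re

class RedactApiKeyFilter(logging.Filter):
    """Mask apikey-style query parameters before a record is emitted."""
    _pattern = re.compile(r"([&?](?:r|apikey|api_key))=[^&\s]*")

    def filter(self, record):
        record.msg = self._pattern.sub(r"\1=**********", str(record.msg))
        return True  # never drop the record, only rewrite it

handler = logging.StreamHandler()
handler.addFilter(RedactApiKeyFilter())
log = logging.getLogger("redacted")
log.addHandler(handler)
log.warning("fetching http://indexer/api?t=search&apikey=s3cr3t&q=show")
# emits: fetching http://indexer/api?t=search&apikey=**********&q=show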
コード例 #57
0
codeparsercache = CodeParserCache()


def parser_cache_init(d):
    codeparsercache.init_cache(d)


def parser_cache_save():
    codeparsercache.save_extras()


def parser_cache_savemerge():
    codeparsercache.save_merge()


Logger = logging.getLoggerClass()


class BufferedLogger(Logger):
    def __init__(self, name, level=0, target=None):
        Logger.__init__(self, name)
        self.setLevel(level)
        self.buffer = []
        self.target = target

    def handle(self, record):
        self.buffer.append(record)

    def flush(self):
        for record in self.buffer:
            if self.target.isEnabledFor(record.levelno):
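
Example #57 is cut off mid-flush; below is a self-contained sketch of the same buffering technique, assuming flush() forwards enabled records to the target logger and then clears the buffer (the assumed part is marked):

import logging

class BufferedLoggerSketch(logging.Logger):
    """Collect records locally, replay them into a target logger on flush()."""
    def __init__(self, name, level=logging.NOTSET, target=None):
        super().__init__(name, level)
        self.buffer = []
        self.target = target

    def handle(self, record):
        # intercept instead of emitting immediately
        self.buffer.append(record)

    def flush(self):
        for record in self.buffer:
            if self.target.isEnabledFor(record.levelno):
                self.target.handle(record)  # assumed body of the truncated loop
        self.buffer = []

target = logging.getLogger("real")
target.addHandler(logging.StreamHandler())
buffered = BufferedLoggerSketch("staging", target=target)
buffered.warning("queued until flush")
buffered.flush()  # prints: queued until flush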
コード例 #58
0
ファイル: storage.py プロジェクト: nathanmlim/blues
def addLoggingLevel(levelName, levelNum, methodName=None):
    """
    Comprehensively adds a new logging level to the `logging` module and the
    currently configured logging class.

    `levelName` becomes an attribute of the `logging` module with the value
    `levelNum`. `methodName` becomes a convenience method for both `logging`
    itself and the class returned by `logging.getLoggerClass()` (usually just
    `logging.Logger`). If `methodName` is not specified, `levelName.lower()` is
    used.

    To avoid accidentally clobbering existing attributes, this function logs a
    warning if the level name is already an attribute of the `logging` module
    or if the method name is already present.

    Parameters
    ----------
    levelName : str
        The new level name to be added to the `logging` module.
    levelNum : int
        The level number indicated for the logging module.
    methodName : str, default=None
        The method to call on the logging module for the new level name.
        For example if provided 'trace', you would call `logging.trace()`.

    Example
    -------
    >>> addLoggingLevel('TRACE', logging.DEBUG - 5)
    >>> logging.getLogger(__name__).setLevel("TRACE")
    >>> logging.getLogger(__name__).trace('that worked')
    >>> logging.trace('so did this')
    >>> logging.TRACE
    5

    """
    if not methodName:
        methodName = levelName.lower()

    if hasattr(logging, levelName):
        logging.warning(
            '{} already defined in logging module'.format(levelName))
    if hasattr(logging, methodName):
        logging.warning(
            '{} already defined in logging module'.format(methodName))
    if hasattr(logging.getLoggerClass(), methodName):
        logging.warning(
            '{} already defined in logger class'.format(methodName))

    # This method was inspired by the answers to Stack Overflow post
    # http://stackoverflow.com/q/2183233/2988730, especially
    # http://stackoverflow.com/a/13638084/2988730
    def logForLevel(self, message, *args, **kwargs):
        if self.isEnabledFor(levelNum):
            self._log(levelNum, message, args, **kwargs)

    def logToRoot(message, *args, **kwargs):
        logging.log(levelNum, message, *args, **kwargs)

    logging.addLevelName(levelNum, levelName)
    setattr(logging, levelName, levelNum)
    setattr(logging.getLoggerClass(), methodName, logForLevel)
    setattr(logging, methodName, logToRoot)
コード例 #59
0
class NAASLogger(logging.getLoggerClass()):
    @staticmethod
    def makeRecord(name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
        return NAASLogRecord(name, level, fn, lno, msg, args, exc_info, func)
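
NAASLogRecord is defined elsewhere in that project; here is a hedged, self-contained sketch of the same technique in its conventional Python 3 instance-method form, where makeRecord() also receives a sinfo argument (all names below are illustrative):

import logging

class TaggedLogRecord(logging.LogRecord):
    """LogRecord carrying an extra attribute for formatters to use."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.tag = "naas"  # illustrative extra field

class TaggedLogger(logging.getLoggerClass()):
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
                   func=None, extra=None, sinfo=None):
        # substitute the custom record class for the default one
        return TaggedLogRecord(name, level, fn, lno, msg, args, exc_info,
                               func, sinfo)

logging.setLoggerClass(TaggedLogger)  # must run before getLogger()
log = logging.getLogger("tagged")
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("[%(tag)s] %(message)s"))
log.addHandler(handler)
log.warning("custom record in use")  # emits: [naas] custom record in use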
コード例 #60
0
class MyLogger(logging.getLoggerClass()):

    myLog = {}
    logging.raiseExceptions = True

    # CONSTRUCTORS

    # TODO
    # Make sure logName differs across loggers, unless you want to log to both?

    @classmethod
    def addFileLogger(cls, logFileName, logName, logFileMode="w", logLevel="ERROR"):
        """
        Create a new file logger. Params, in signature order:
        1) logFileName: name of the log file
        2) logName: name for this logger
        3) logFileMode: file write mode, default is 'w' to truncate an existing log
        4) logLevel: level to filter the log, order is DEBUG, INFO, WARNING, ERROR (default)
        """

        if cls.myLog.get("File",None) is None:
            # create logger
            myFileLog = logging.getLogger(logName)
            myFileLog.setLevel(logLevel)           

            # create formatter
            myLogFormat = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        
            # create file handler for logger.
            myLogFH = MyFileHandler(logFileName, logFileMode)
            myLogFH.setLevel(logLevel)
            myLogFH.setFormatter(myLogFormat)

            # add the file handler to the logger
            myFileLog.addHandler(myLogFH)

            # add file logger to dict of loggers to use
            cls.myLog["File"] = myFileLog

    @classmethod
    def addDBLogger(cls, logName, logConnString, logDBName, logTableName, logLevel="ERROR"):
        """
        Create new Database logger. Optional params by keyword:
        1) logName: name for this logger
        2) logConnString: connection string to database
        3) logDBName: name of the database to log to 
        4) logTableName: name of the table to log to
        5) logLevel: level to filter log, order is DEBUG,INFO,WARNING,ERROR (default)
        """

        if cls.myLog.get("DB",None) is None:
            # create logger
            myDBLog = logging.getLogger(logName)
            myDBLog.setLevel(logLevel)

            # create formatter
            myLogFormat = logging.Formatter("{'TimeStamp':'%(asctime)s', 'LogName':'%(name)s', 'LogLevel':'%(levelname)s', 'Message':'%(message)s'}")
        
            # create database handler for logger
            myLogDH = MongoDatabaseHandler(logConnString, logDBName, logTableName)
            myLogDH.setLevel(logLevel)
            myLogDH.setFormatter(myLogFormat)
            myLogDH.addFilter(FilterNoQuotes())

            # add the db handler to the logger
            myDBLog.addHandler(myLogDH)

            # add file logger to dict of loggers to use
            cls.myLog["DB"] = myDBLog

    # LOG METHODS

    @classmethod
    def logDebug(cls, loggers, msg):
        """
        Log a DEBUG level msg
        """
        availLoggers = cls.__checkForLogger(loggers)

        try:
            for logger in availLoggers:
                cls.myLog[logger].debug(msg)
            return True
        except Exception:
            return False

    @classmethod
    def logInfo(cls, loggers, msg):
        """
        Log an INFO level msg
        """
        availLoggers = cls.__checkForLogger(loggers)

        try:
            for logger in availLoggers:
                cls.myLog[logger].info(msg)
            return True
        except Exception:
            return False

    @classmethod
    def logWarn(cls, loggers, msg):
        """
        Log a WARNING level msg
        """
        availLoggers = cls.__checkForLogger(loggers)

        try:
            for logger in availLoggers:
                cls.myLog[logger].warning(msg)
            return True
        except Exception:
            return False

    @classmethod
    def logError(cls, loggers, msg):
        """
        Log an ERROR level msg
        """
        availLoggers = cls.__checkForLogger(loggers)

        try:
            for logger in availLoggers:
                cls.myLog[logger].error(msg)
            return True
        except Exception:
            return False

    @classmethod
    def logException(cls, loggers, Ex):
        """
        Log an ERROR level msg and the exception Ex
        """
        availLoggers = cls.__checkForLogger(loggers)

        try:
            for logger in availLoggers:
                cls.myLog[logger].exception(Ex)
            return True
        except Exception:
            return False

    # DECORATOR

    def log(loggers):
        """
        Logging decorator for entry and exit of methods; catches all unknown exceptions (explicit exceptions should be handled by the caller).
        Must be placed below the @classmethod decorator (decorators are applied bottom-up), otherwise accessing the __name__ attribute fails for class methods.
        """
        def log_decorator(func):
            @functools.wraps(func) # preserves calling func info, not sure if needed
            def wrapper(*args,**kwargs):
                funcName = func.__qualname__ if func.__qualname__ else func.__name__
                args_repr = [repr(a) for a in args]                      
                kwargs_repr = [f"{k}={v!r}" for k, v in kwargs.items()]  # !r uses the print-friendlier __repr__ instead of __str__
                signature = ", ".join(args_repr + kwargs_repr)           

                # TODO 
                # DONT PRINT MyLogger stack trace, see Handler.handleError()

                with TraceLog(loggers, funcName, signature):
                    try:
                        return func(*args,**kwargs)
                    # Handle all unknown exceptions here
                    except Exception as Ex:
                        strOutputError = "Unhandled exception occurred."
                        print(strOutputError)

                        # Log and Quit
                        MyLogger.logException(["DB"], Ex)
                        raise
            return wrapper
        return log_decorator

    # PRIVATE METHODS

    @classmethod
    def __checkForLogger(cls, loggers):
        availLoggers = []
        for logger in loggers:
            if logger in cls.myLog:
                availLoggers.append(logger)

        if len(availLoggers) == 0:
            cls.__defaultFileLogger()
            availLoggers.append("File")

        return availLoggers

    # Create a default logger if one has not been created
    @classmethod
    def __defaultFileLogger(cls):
        logFileName = "GameCenter.log"
        logName = "GameCenterLogFile"

        cls.addFileLogger(logFileName, logName)

        strOutput = "MyLogger not initialized. Had to create file logger using default params."
        cls.logWarn(["File"],strOutput)