Beispiel #1
0
def addLoggingLevel(levelName, levelNum, methodName=None):
    """
    Comprehensively adds a new logging level to the `logging` module and the
    currently configured logging class.

    `levelName` becomes an attribute of the `logging` module with the value
    `levelNum`. `methodName` becomes a convenience method for both `logging`
    itself and the class returned by `logging.getLoggerClass()` (usually just
    `logging.Logger`). If `methodName` is not specified, `levelName.lower()` is
    used.

    If the level name or method name is already present on the `logging`
    module or the logger class, a warning is logged and the attribute is
    overwritten anyway (no exception is raised).

    Parameters
    ----------
    levelName : str
        The new level name to be added to the `logging` module.
    levelNum : int
        The level number indicated for the logging module.
    methodName : str, default=None
        The method to call on the logging module for the new level name.
        For example if provided 'trace', you would call `logging.trace()`.

    Example
    -------
    >>> addLoggingLevel('TRACE', logging.DEBUG - 5)
    >>> logging.getLogger(__name__).setLevel("TRACE")
    >>> logging.getLogger(__name__).trace('that worked')
    >>> logging.trace('so did this')
    >>> logging.TRACE
    5

    """
    if not methodName:
        methodName = levelName.lower()

    # logging.warn() is a deprecated alias of logging.warning(); use the
    # supported spelling.  Note the function only warns on clobbering (the
    # previous docstring promised an AttributeError that was never raised).
    if hasattr(logging, levelName):
        logging.warning('{} already defined in logging module'.format(levelName))
    if hasattr(logging, methodName):
        logging.warning('{} already defined in logging module'.format(methodName))
    if hasattr(logging.getLoggerClass(), methodName):
        logging.warning('{} already defined in logger class'.format(methodName))

    # This method was inspired by the answers to Stack Overflow post
    # http://stackoverflow.com/q/2183233/2988730, especially
    # http://stackoverflow.com/a/13638084/2988730
    def logForLevel(self, message, *args, **kwargs):
        # Mirror Logger.debug()/info(): check the level before building a
        # record so disabled levels stay cheap.
        if self.isEnabledFor(levelNum):
            self._log(levelNum, message, args, **kwargs)

    def logToRoot(message, *args, **kwargs):
        # Module-level convenience, analogous to logging.debug()/info().
        logging.log(levelNum, message, *args, **kwargs)

    logging.addLevelName(levelNum, levelName)
    setattr(logging, levelName, levelNum)
    setattr(logging.getLoggerClass(), methodName, logForLevel)
    setattr(logging, methodName, logToRoot)
Beispiel #2
0
 def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func=None, extra=None):
     """Build a LogRecord, optionally overriding the call-site fields.

     When *extra* is given it must carry 'name', 'lno' and 'func' keys,
     which replace the reported pathname, line number and function name;
     otherwise the record is built from the arguments unchanged.  The
     record itself is produced by the currently configured logger class.
     """
     base = logging.getLoggerClass()
     if extra is None:
         return base.makeRecord(self, name, lvl, fn, lno,
                                msg, args, exc_info, func=func, extra=None)
     # Report the caller's location from the supplied mapping instead of
     # the real call site.
     return base.makeRecord(self, name, lvl, extra['name'], extra['lno'],
                            msg, args, exc_info, func=extra['func'], extra=None)
Beispiel #3
0
    def test_patches(self):
        """Applying the logger-class patch twice must be idempotent."""
        # Patch, verify, patch again: the configured class must stay
        # signal safe after repeated patching.
        for _ in range(2):
            _patch_logger_class()
            self.assertTrue(logging.getLoggerClass()._signal_safe)

        # Inside a signal handler the class-level log() must be callable
        # with just a logger instance (presumably a no-op there -- see
        # the signal-safe logger implementation).
        with in_sighandler():
            logging.getLoggerClass().log(get_logger('test'))
Beispiel #4
0
 def __init__(self, level=logging.INFO):
     """Initialise this logger with a stream handler (stderr by default).

     The handler is formatted with the instance's ``message_format`` and
     ``datetime_format`` attributes, and the logger level is set to
     *level* (default ``logging.INFO``).
     """
     # Initialise via whatever Logger class is currently configured;
     # the logger name is deliberately None.
     logging.getLoggerClass().__init__(self, None)
     stream_handler = logging.StreamHandler()
     record_formatter = logging.Formatter(self.message_format,
                                          self.datetime_format)
     stream_handler.setFormatter(record_formatter)
     self.handler = stream_handler
     self.formatter = record_formatter
     self.addHandler(stream_handler)
     self.setLevel(level)
Beispiel #5
0
    def testLocalFactoryWithLocalRecipe(self):
        """Cook a checkout that uses a local factory recipe and verify that
        cooking neither changes the configured logger class nor truncates
        the captured build log (CNY-2813 regression area).
        """
        # Create a new package using the "test" factory and enter it.
        os.chdir(self.workDir)
        self.newpkg("foo", factory = "test")
        os.chdir("foo")
        # The factory produces an abstract PackageRecipe subclass.
        self.writeFile("factory-test.recipe",
"""
class TestFactory(Factory):

    name = "factory-test"
    version = "1.0"

    def getRecipeClass(self):
        class TestSubclass(PackageRecipe):
            name = "testsubclass"
            version = "1.0"
            internalAbstractBaseClass = True
            clearBuildReqs()

        return TestSubclass
""")

        self.writeFile("foo.recipe",
"""
# CNY-2813. importing log inside a recipe used to reset the loglevel
from conary.lib import log
class FooRecipe(FactoryRecipeClass):

    name = "foo"
    version = "1.1"

    def setup(self):
        self.Create("/etc/foo", "foo")
""")

        self.addfile("foo.recipe")
        repos = self.openRepository()
        cstate = state.ConaryStateFromFile('CONARY')
        # Remember verbosity and the logger class so we can assert that
        # cooking leaves the logger class untouched, and restore settings.
        level = log.getVerbosity()
        try:
            log.setVerbosity(log.INFO)
            klass = logging.getLoggerClass()
            self.discardOutput(cook.cookCommand, self.cfg, [cstate], False, {})
            self.assertEquals(klass, logging.getLoggerClass())
        finally:
            log.setVerbosity(level)
        # Inspect the resulting changeset: the debuginfo component must
        # carry a complete (bzip2-compressed) build log.
        ccs = changeset.ChangeSetFromFile(os.path.join(self.workDir,
                'foo', 'foo-1.1.ccs'))
        trvs = [trove.Trove(x) for x in ccs.iterNewTroveList()]
        trv = [x for x in trvs if x.getName() == 'foo:debuginfo'][0]
        files = [x for x in trv.iterFileList() if \
                x[1] == '/usr/src/debug/buildlogs/foo-1.1-log.bz2']
        fileId, path, pathId, ver = files[0]
        fileInfo, fileObj = ccs.getFileContents(fileId, pathId)
        decomp = bz2.BZ2Decompressor()
        data = decomp.decompress(fileObj.f.read())
        # "+ Processing" only appears once the recipe body ran, so its
        # absence means the log was cut short.
        self.assertFalse("+ Processing" not in data,
                "build log data appears to be incomplete")
Beispiel #6
0
def patch_logging():
    """Install ``_SlaveContextLogger`` as the global logger class.

    This hack is used to log the context inside standard and third-party
    libraries that use plain python logging; the context is inherited
    from the caller using contextlog.
    """
    # Compare classes by identity, not equality: ``is not`` is the
    # idiomatic check here and cannot be fooled by a custom __eq__.
    if logging.getLoggerClass() is not _SlaveContextLogger:
        logging.setLoggerClass(_SlaveContextLogger)
Beispiel #7
0
def setup_system():
  """Configure application-wide logging: console + file handlers on the
  root logger, a muted 'numpy' logger for floating point warnings, and
  process-exit hooks.
  """
  logger=logging.getLogger()#logging.getLogger('quicknxs')
  # Root must be permissive enough for the most verbose destination.
  logger.setLevel(min(FILE_LEVEL, CONSOLE_LEVEL, GUI_LEVEL))
  if not sys.platform.startswith('win'):
    # no console logger for windows (py2exe)
    console=logging.StreamHandler(sys.__stdout__)
    formatter=logging.Formatter('%(levelname) 7s: %(message)s')
    console.setFormatter(formatter)
    console.setLevel(CONSOLE_LEVEL)
    logger.addHandler(console)

  # Detailed file log with source location; mode 'w' truncates the file
  # on every program start.
  logfile=logging.FileHandler(paths.LOG_FILE, 'w')
  formatter=logging.Formatter('[%(levelname)s] - %(asctime)s - %(filename)s:%(lineno)i:%(funcName)s %(message)s', '')
  logfile.setFormatter(formatter)
  logfile.setLevel(FILE_LEVEL)
  logger.addHandler(logfile)

  logging.info('*** QuickNXS %s Logging started ***'%str_version)

  # define numpy warning behavior
  # The 'numpy' logger is created while NumpyLogger is temporarily the
  # global logger class; the previous class is restored right after.
  global nplogger
  old_class=logging.getLoggerClass()
  logging.setLoggerClass(NumpyLogger)
  nplogger=logging.getLogger('numpy')
  nplogger.setLevel(logging.DEBUG)
  # Handler writes to an in-memory buffer and only accepts CRITICAL
  # records, so sub-critical numpy messages are not emitted by it.
  null_handler=logging.StreamHandler(StringIO())
  null_handler.setLevel(logging.CRITICAL)
  nplogger.addHandler(null_handler)
  logging.setLoggerClass(old_class)
  # Route numpy floating point errors through the logger callback.
  seterr(divide='call', over='call', under='ignore', invalid='call')
  seterrcall(numpy_logger)

  # write information on program exit
  sys.excepthook=excepthook_overwrite
  atexit.register(goodby)
Beispiel #8
0
    def postOptions(self):
        """Post-process parsed options: load (creating if needed) the app
        configuration, install the custom logging setup exactly once, and
        fall back to help output when no subcommand was given.
        """
        from baca.application import app
        # Expand the default config location before it is used.
        if self.opts['config'] == "~/.ilog":
            self.opt_config(self.opts['config'])


        # First run: write out an initial configuration file.
        if not isfile(join(app.config.dir, app.config.file)):
            app.config_initial_populate()
            app.config_save()
        app.config_load()

        # Setup logging
        from baca.utils.logger import Logging
        # Guard keeps this idempotent: a second call would otherwise
        # re-read the file config and start another twisted observer.
        if logging.getLoggerClass() is not Logging:
            logging.config.fileConfig(
                usefull_path(str(app.config.logging_config_file))
            )
            logging.setLoggerClass(Logging)

            twisted_logging = PythonLoggingObserver('twisted')
            twisted_logging.start()

#        self._setup_database()
        app.setup_log()

        # Without a subcommand there is nothing to do but print usage.
        if not self.subCommand:
            self.opt_help()
    def test_tacker_command_meta_defines_log(self):
        """A TackerCommand subclass must automatically carry a ``log``
        attribute: a logger named after its module and class."""
        class FakeCommand(tackerV10.TackerCommand):
            pass

        # 'log' is presumably injected by machinery on TackerCommand --
        # TODO confirm against tackerV10.
        self.assertTrue(helpers.safe_hasattr(FakeCommand, 'log'))
        self.assertIsInstance(FakeCommand.log, logging.getLoggerClass())
        self.assertEqual(FakeCommand.log.name, __name__ + ".FakeCommand")
Beispiel #10
0
    def create_logger(self):
        """Build the instance's logger, honouring the API sandbox flag.

        Works like a normal Python logger, but the effective level becomes
        DEBUG whenever ``self.sandbox`` is true, and the attached handler
        only emits while in sandbox mode.  Handlers already attached to a
        logger of the same name are discarded first.

        :copyright: (c) 2010 by Armin Ronacher.
        """
        base_cls = getLoggerClass()

        class DebugLogger(base_cls):
            # Closure over ``self``: the sandbox flag is consulted on
            # every call, not captured once.
            def getEffectiveLevel(inner):
                if self.sandbox:
                    return DEBUG
                return base_cls.getEffectiveLevel(inner)

        class DebugHandler(StreamHandler):
            def emit(inner, record):
                if self.sandbox:
                    StreamHandler.emit(inner, record)

        handler = DebugHandler()
        handler.setLevel(DEBUG)
        handler.setFormatter(Formatter(self.debug_log_format))
        logger = getLogger(self.logger_name)
        # Not necessarily a brand-new logger: drop any stale handlers.
        del logger.handlers[:]
        logger.__class__ = DebugLogger
        logger.addHandler(handler)
        return logger
Beispiel #11
0
def ensure_process_aware_logger(force=False):
    """Make sure process name is recorded when loggers are used."""
    global _process_aware
    # Nothing to do unless forced or not yet applied.
    if not force and _process_aware:
        return
    logging._acquireLock()
    try:
        _process_aware = True
        base = logging.getLoggerClass()
        if getattr(base, '_process_aware', False):  # pragma: no cover
            return

        class ProcessAwareLogger(base):
            _signal_safe = True
            _process_aware = True

            def makeRecord(self, *args, **kwds):
                rec = base.makeRecord(self, *args, **kwds)
                rec.processName = current_process()._name
                return rec

            def log(self, *args, **kwargs):
                # Logging from inside a signal handler is unsafe: drop it.
                if _in_sighandler:
                    return
                return base.log(self, *args, **kwargs)

        logging.setLoggerClass(ProcessAwareLogger)
    finally:
        logging._releaseLock()
Beispiel #12
0
def catch_logmsg(pattern, level=logging.WARNING, count=None):
    '''Catch (and ignore) log messages matching *pattern*

    *pattern* is matched against the *unformatted* log message, i.e. before any
    arguments are merged.

    If *count* is not None, raise an exception unless exactly *count* matching
    messages are caught.
    '''

    logger_class = logging.getLoggerClass()
    original_handle = logger_class.handle
    hit_count = [0]

    @wraps(original_handle)
    def intercepting_handle(self, record):
        # Swallow records matching both the level and the pattern;
        # everything else passes through untouched.
        if record.levelno == level and re.search(pattern, record.msg):
            hit_count[0] += 1
            return
        return original_handle(self, record)

    logger_class.handle = intercepting_handle
    try:
        yield

    finally:
        # Always restore the original handler, then enforce the count.
        logger_class.handle = original_handle

        if count is not None and hit_count[0] != count:
            raise AssertionError('Expected to catch %d log messages, but got only %d'
                                 % (count, hit_count[0]))
Beispiel #13
0
def TrLogger(logger_name, options):
    """
    Factory for the actual logging-derived logger
    """
    try:
        trEmbraceAndExtendErrno()

        old = logging.getLoggerClass()  # save old setting
        logging.setLoggerClass(TrLoggerClass)

        #
        # Provide a logging configuration backdoor for sites that want
        # to do something sophisticated.
        #
        if options.logconfig:
            if os.path.exists(options.logconfig):
                logging.config.fileConfig(options.logconfig)
            else:
                # Missing file: fall back to the built-in configuration.
                options.logconfig = None

        # Created while TrLoggerClass is installed, so the returned
        # logger is an instance of it.
        logger = logging.getLogger(logger_name)

        logging.setLoggerClass(old)  # restore

        if not options.logconfig:
            # In the typical case that there is no logging config file,
            # apply our usual handlers.
            logger.trBasicConfig(options)

    except Exception:
        # Any failure above leaves us with a minimal last-resort logger.
        logger = TrDesperationLogger(logger_name)
        logger.exception("logging configuration failed")

    return logger
Beispiel #14
0
def enablelogging(botname, channel):
    """Enable chat logging for (botname, channel).

    Creates a midnight-rotating file handler under the log directory,
    attaches it to a fresh logger and registers that logger in the global
    ``loggers`` map (and as ``lastlogger``).
    """
    global loggers
    global logfiles
    LOGDIR = initlog(getdatadir())
    # logging.warn is a deprecated alias - use logging.warning instead.
    logging.warning("enabling on (%s,%s)" % (botname, channel))
    channel = stripname(channel)
    logname = "%s_%s" % (botname, channel)
    logfile = LOGDIR + os.sep + logname + ".log"
    try:
        filehandler = logging.handlers.TimedRotatingFileHandler(logfile, 'midnight')
        formatter = logging.Formatter(format)
        filehandler.setFormatter(formatter)
        logfiles[logfile] = time.time()
    except IOError:
        # Log file not writable: fall through and report below.
        filehandler = None
    # Instantiate via the currently configured logger class.
    chatlogger = logging.getLoggerClass()(logname)
    chatlogger.setLevel(logging.INFO)
    # Drop any handlers left over from a previous enable call.
    if chatlogger.handlers:
        for handler in chatlogger.handlers:
            chatlogger.removeHandler(handler)
    if filehandler:
        chatlogger.addHandler(filehandler)
        logging.warning("%s - logging enabled on %s" % (botname, channel))
    else:
        logging.error("no file handler found - not enabling logging.")
    global lastlogger
    lastlogger = chatlogger
    loggers[logname] = lastlogger
Beispiel #15
0
Datei: log.py Projekt: nzlosh/st2
def setup(config_file, redirect_stderr=True, excludes=None, disable_existing_loggers=False):
    """
    Configure logging from file.
    """
    try:
        logging.config.fileConfig(config_file,
                                  defaults=None,
                                  disable_existing_loggers=disable_existing_loggers)
        # Reach the root logger's handlers via the logger class's manager
        # and attach exclusion filters to each of them.
        handlers = logging.getLoggerClass().manager.root.handlers
        _add_exclusion_filters(handlers=handlers, excludes=excludes)
        if redirect_stderr:
            _redirect_stderr()
    except Exception as exc:
        exc_cls = type(exc)
        tb_msg = traceback.format_exc()

        msg = str(exc)
        msg += '\n\n' + tb_msg

        # revert stderr redirection since there is no logger in place.
        sys.stderr = sys.__stderr__

        # No logger yet therefore write to stderr
        sys.stderr.write('ERROR: %s' % (msg))

        # Re-raise as the same exception type with the traceback text
        # folded into the message (the original traceback object is lost).
        raise exc_cls(six.text_type(msg))
Beispiel #16
0
    def __init__(self, driver=None, request=None, trg=None, *args, **kwargs):
        """Create an eval controller
        @param driver {Driver} The OOP driver instance to communicate via
        @param request {Request} The request causing the evaluation
        @param trg The trigger associated with this evaluation
        """
        log.debug("__init__")
        EvalController.__init__(self, *args, **kwargs)

        self.driver = driver
        self.request = request
        self.trg = trg
        self.silent = request.get("silent", False)
        self.keep_existing = request.get("keep_existing", self.keep_existing)

        # Set up a *new* logger to record any errors
        # Note that the output will be discarded if there is no error
        self.log_stream = cStringIO.StringIO()
        self.log_hndlr = logging.StreamHandler(self.log_stream)
        # Instantiate via the configured logger class (falling back to
        # the module default) WITHOUT registering it in the manager's
        # logger dict, so it does not collide with real loggers.
        loggerClass = logging.Logger.manager.loggerClass
        if not loggerClass:
            loggerClass = logging.getLoggerClass()
        self.log = loggerClass("codeintel.evaluator")
        self.log.setLevel(logging.WARN) # Only log warnings and worse
        # Give it the shared manager; propagation is disabled so records
        # stay in our private stream handler only.
        self.log.manager = logging.Logger.manager
        self.log.propagate = False
        self.log.addHandler(self.log_hndlr)
        self.best_msg = (0, "")
        self.has_sent_response = False
Beispiel #17
0
def setLogger(gv, logFile=None, pkgName=None):
    """Configure logging for *pkgName* from the config file *logFile*.

    NOTE(review): the two branches return different types -- the log file
    name (str) when *logFile* is given, otherwise a Logger -- callers
    must know which mode they invoked.
    """

        # ------------------------------------------------
        # - from the package hierarchy we keep only the
        # - last two qualifiers as the logger name.
        # ------------------------------------------------
    packageHier = pkgName.split('.')
    loggerName  = ('.'.join(packageHier[-2:]))

    if logFile:
        try:
            logging.config.fileConfig(logFile, disable_existing_loggers=False)
        except Exception as why:
            gv.LN.sys.exit(gv, 2001, "{} - ERROR in file: {}".format(str(why), logFile), console=True)

        logger = logging.getLogger(loggerName)
        # Temporarily raise to INFO to emit a visual start-of-log banner,
        # then restore the configured level.
        savedLevel = logger.getEffectiveLevel()
        logger.setLevel(logging.INFO)
        for i in range(1,10):   logger.info(' ')
        for i in range(1,5):    logger.info('-'*40 + 'Start LOGging' + '-'*20)
        logger.setLevel(savedLevel)
        # Recover the log file path from the root logger's first handler
        # (assumes it is a file handler -- TODO confirm configuration).
        logFileName = logging.getLoggerClass().root.handlers[0].baseFilename
        return logFileName

    logger = logging.getLogger(loggerName)
    return logger
Beispiel #18
0
def getLogger(name):
    """Return the logger *name*, instantiated with our custom Logger class.

    The global logger class is swapped in only for the duration of the
    lookup and restored afterwards, so other modules are unaffected.
    """
    previous = logging.getLoggerClass()
    logging.setLoggerClass(Logger)
    try:
        return logging.getLogger(name)
    finally:
        logging.setLoggerClass(previous)
def _initialize():
  """Initializes loggers and handlers."""
  global _absl_logger, _absl_handler

  # Already initialized: nothing to do.
  if _absl_logger:
    return

  # Create the 'absl' logger as an ABSLLogger without permanently
  # changing the global logger class.
  original_logger_class = logging.getLoggerClass()
  logging.setLoggerClass(ABSLLogger)
  _absl_logger = logging.getLogger('absl')
  logging.setLoggerClass(original_logger_class)

  python_logging_formatter = PythonFormatter()
  _absl_handler = ABSLHandler(python_logging_formatter)

  # The absl handler logs to stderr by default. To prevent double logging to
  # stderr, the following code tries its best to remove other handlers that emit
  # to stderr. Those handlers are most commonly added when logging.info/debug is
  # called before importing this module.
  handlers = [
      h for h in logging.root.handlers
      if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr]
  for h in handlers:
    logging.root.removeHandler(h)

  # The absl handler will always be attached to root, not the absl logger.
  if not logging.root.handlers:
    # Attach the absl handler at import time when there are no other handlers.
    # Otherwise it means users have explicitly configured logging, and the absl
    # handler will only be attached later in app.run(). For App Engine apps,
    # the absl handler is not used.
    logging.root.addHandler(_absl_handler)
def _check_logger_class():
    """
    Make sure process name is recorded when loggers are used
    """
    # XXX This function is unnecessary once logging is patched
    import logging

    # A multiprocessing-aware logging module needs no patching.
    if hasattr(logging, "multiprocessing"):
        return

    logging._acquireLock()
    try:
        base = logging.getLoggerClass()
        # Already patched: keep the existing class.
        if getattr(base, "_process_aware", False):
            return

        class ProcessAwareLogger(base):
            _process_aware = True

            def makeRecord(self, *args, **kwds):
                rec = base.makeRecord(self, *args, **kwds)
                # current_process is expected to come from multiprocessing,
                # imported elsewhere in this module -- TODO confirm.
                rec.processName = current_process()._name
                return rec

        logging.setLoggerClass(ProcessAwareLogger)
    finally:
        logging._releaseLock()
Beispiel #21
0
def setupLogging(config_options=[]):
    """Set up overall logging for the program given the config options.

    Arguments:

        config_options (List of XML objects):
            Empty if the user did not elect to use logging.
            Otherwise could contain either or both of the
            <Logging>...</Logging> and <Metadata>...</Metadata>
            elements.

    Note: the mutable default argument is only safe because this
    function never mutates config_options.
    """
    # There must be a metadata logger regardless of its level,
    # so that its internal functions may be called from any other
    # module without error.
    OldClass = logging.getLoggerClass()
    logging.setLoggerClass(MetaDataLogger)
    logging.getLogger('metadata')
    logging.setLoggerClass(OldClass)

    # this will be reset below if the loggers are configured.
    logging.getLogger().setLevel(level=logging.CRITICAL)
    logging.getLogger("metadata").setLevel(level=logging.CRITICAL)

    if not config_options:
        # No logging requested: silence everything up to CRITICAL.
        logging.disable(level=logging.CRITICAL)
    else:
        # Dispatch each element to the matching logger configuration.
        for child in config_options:
            if child.tag == "Logging":
                _setupLogging(child)
            elif child.tag == "Metadata":
                _setupLogging(child, "metadata")
            else:
                logging.getLogger('').critical("Logging configuration attempted for an object that is not a logger: %s" % str(child.tag))
                sys.exit()
Beispiel #22
0
def setup_logging(increase_padding=False):
    """
    Setup overall logging engine and add 2 more levels of logging lower than
    DEBUG, TRACE and GARBAGE.
    """
    import logging

    # Swap in the padded Logging class only on request, and only once.
    if increase_padding and logging.getLoggerClass() is not Logging:
        logging.setLoggerClass(Logging)

    # NOTE(review): new.instancemethod exists only on Python 2; this code
    # cannot run unchanged on Python 3.
    if not hasattr(LoggingLoggerClass, 'trace'):
        def trace(cls, msg, *args, **kwargs):
            return cls.log(5, msg, *args, **kwargs)

        logging.addLevelName(5, 'TRACE')
        LoggingLoggerClass.trace = new.instancemethod(
            trace, None, LoggingLoggerClass
        )

    if not hasattr(LoggingLoggerClass, 'garbage'):
        def garbage(cls, msg, *args, **kwargs):
            return cls.log(1, msg, *args, **kwargs)

        logging.addLevelName(1, 'GARBAGE')
        LoggingLoggerClass.garbage = new.instancemethod(
            garbage, None, LoggingLoggerClass
        )

    # Set the root logger at the lowest level possible
    logging.getLogger().setLevel(1)
Beispiel #23
0
def create_logger():
    """Build the application logger.

    Behaves like a regular Python logger, but while its own level is
    unset the effective level follows ``web.config.DEBUG``.  Handlers
    left over from a previous logger of the same name are removed.
    """
    base_cls = getLoggerClass()

    class DebugLogger(base_cls):
        def getEffectiveLevel(self):
            # Unset level: derive it from the application debug flag.
            if self.level == 0:
                return DEBUG if web.config.DEBUG else INFO
            return super(DebugLogger, self).getEffectiveLevel()

    class DebugHandler(StreamHandler):
        def emit(self, record):
            StreamHandler.emit(self, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(web.config.LOG_FORMAT))
    logger = getLogger(web.config.LOGGER_NAME)
    # Start clean in case the named logger already existed.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    if web.config.LOG_ENABLE:
        logger.addHandler(handler)
    return logger
    def test_neutron_command_meta_defines_log(self):
        """A NeutronCommand subclass must automatically carry a ``log``
        attribute: a logger named after its module and class."""
        class FakeCommand(neutronV20.NeutronCommand):
            pass

        # 'log' is presumably attached by machinery on NeutronCommand --
        # TODO confirm against neutronV20.
        self.assertTrue(helpers.safe_hasattr(FakeCommand, 'log'))
        self.assertIsInstance(FakeCommand.log, logging.getLoggerClass())
        self.assertEqual(__name__ + ".FakeCommand", FakeCommand.log.name)
Beispiel #25
0
def _patch_logger_class():
    """Make sure process name is recorded when loggers are used."""

    try:
        from multiprocessing.process import current_process
    except ImportError:
        current_process = None  # noqa

    logging._acquireLock()
    try:
        OldLoggerClass = logging.getLoggerClass()
        if not getattr(OldLoggerClass, '_process_aware', False):

            class ProcessAwareLogger(OldLoggerClass):
                _process_aware = True

                def makeRecord(self, *args, **kwds):
                    record = OldLoggerClass.makeRecord(self, *args, **kwds)
                    if current_process:
                        record.processName = current_process()._name
                    else:
                        record.processName = ""
                    return record
            logging.setLoggerClass(ProcessAwareLogger)
    finally:
        logging._releaseLock()
Beispiel #26
0
def create_logger(name, debug=False, format=None):
    """Return a logger wired up for optional debug output.

    While *debug* is true the logger reports DEBUG as its effective
    level (as long as its own level is unset) and its stream handler
    emits; otherwise the handler stays silent.  Handlers already present
    on the named logger are removed first.
    """
    base_cls = getLoggerClass()

    class DebugLogger(base_cls):
        def getEffectiveLevel(self):
            if self.level == 0 and debug:
                return DEBUG
            return base_cls.getEffectiveLevel(self)

    class DebugHandler(StreamHandler):
        def emit(self, record):
            if debug:
                StreamHandler.emit(self, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)

    if format:
        handler.setFormatter(Formatter(format))

    logger = getLogger(name)
    # The name may refer to an existing logger: start from a clean slate.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    logger.addHandler(handler)

    return logger
Beispiel #27
0
def get_callers_logger():
    """
    Get logger defined in caller's environment
    @return: logger instance (or None if none was found)
    """
    logger_cls = logging.getLoggerClass()
    frame = inspect.currentframe()
    found = None

    # currentframe() may legitimately return None (interpreters without
    # stack-frame support), in which case we simply give up.
    if frame is not None:
        try:
            # Walk the call stack outermost-first so that a logger bound
            # in the frame closest to the caller wins.
            for frameinfo in reversed(inspect.getouterframes(frame)):
                for candidate in inspect.getargvalues(frameinfo[0]).locals.values():
                    if isinstance(candidate, logger_cls):
                        found = candidate
                        break
        finally:
            # Make very sure the frame reference is dropped to avoid
            # reference cycles, see
            # https://docs.python.org/2/library/inspect.html#the-interpreter-stack
            del frame

    return found
Beispiel #28
0
    def test_initialiazation(self):
        """Verify __init__ resets metrics, state, latency and assigns a
        valid logger and cache file path."""
        # Ensure metric data reset in __init__
        self.assertTrue(not self.connection.metric_data,
                        "Metric data is not empty")
        # Ensure the state dict is reset in __init__
        # (unknown state, empty reason, no metrics for every component)
        for state in self.connection.state.values():
            if not state['current_state'] == component_states.unknown \
                    or not state['reason'] == '' or state['metrics']:
                self.state_reset = False

        self.assertTrue(self.state_reset,
                        "State data is not empty")

        # Ensure the latency dict is reset in __init__
        for value in self.connection.latency.values():
            if value > 0:
                self.latency_reset = False
                break

        self.assertTrue(self.latency_reset,
                        "Latency data is not empty")
        # Ensure logger set to valid logger object in __init__
        self.assertIsInstance(self.connection.logger, logging.getLoggerClass(),
                              "Invalid logger object")
        # Ensure cache file path is not empty
        self.assertIsNotNone(self.connection.cache_file_path)
def create_logger(app):
    """Creates a logger for the given application.  This logger works
    similar to a regular Python logger but changes the effective logging
    level based on the application's debug flag.  Furthermore this
    function also removes all attached handlers in case there was a
    logger with the log name before.
    """
    base_cls = getLoggerClass()

    class DebugLogger(base_cls):
        def getEffectiveLevel(self):
            # Unset level + app in debug mode -> force DEBUG.
            if self.level == 0 and app.debug:
                return DEBUG
            return base_cls.getEffectiveLevel(self)

    class DebugHandler(StreamHandler):
        def emit(self, record):
            if app.debug:
                StreamHandler.emit(self, record)

    handler = DebugHandler()
    handler.setLevel(DEBUG)
    handler.setFormatter(Formatter(app.debug_log_format))
    logger = getLogger(app.logger_name)
    # The logger may pre-exist: discard any handlers already attached.
    del logger.handlers[:]
    logger.__class__ = DebugLogger
    logger.addHandler(handler)
    return logger
Beispiel #30
0
    def run_pred_feature_extraction(self):
        """Collect UI settings and launch feature extraction on the worker
        thread; refuses to start while a previous run is still active."""
        # grab the settings...

        if self.extraction_thread.isRunning():
            QtGui.QMessageBox.information(self, "Not implemented, lazy!", "Worker thread still running, please wait for previous orders to be finished!")
            return 0

        chunk_len   = int(self.chunk_len_box.text())
        ncores = self.cores_to_use.text()
        use_peaks_bool = self.run_peakdet_checkBox.isChecked()

        # 'all' is mapped to -1 for the extraction backend (presumably
        # meaning "use every available core" -- TODO confirm).
        if ncores == 'all':
            ncores = -1
        else:
            ncores = int(ncores)

        # Best effort: show the current log file path in the UI, taken
        # from the root logger's first handler.
        try:
            logfilepath = logging.getLoggerClass().root.handlers[0].baseFilename
            self.logpath_dsplay.setText(str(logfilepath))
        except:
            print('couldnt get logpath')



        self.extraction_thread.set_params_for_extraction(h5_folder=self.h5directory,
                                            timewindow = chunk_len,
                                            run_peakdet_flag = use_peaks_bool,
                                            n_cores=ncores)
        self.extraction_thread.start()