Exemplo n.º 1
0
    def postOptions(self):
        """Finish option parsing: load (or create) the config file, set up
        logging once, and fall back to help when no sub-command was given."""
        # Imported here, not at module level — presumably to avoid an
        # import cycle with the application object; confirm before moving.
        from baca.application import app
        # When the config path is still the default, run the normal config
        # option handler so it is processed like an explicit --config.
        if self.opts['config'] == "~/.ilog":
            self.opt_config(self.opts['config'])


        # First run: write an initial config file before loading it.
        if not isfile(join(app.config.dir, app.config.file)):
            app.config_initial_populate()
            app.config_save()
        app.config_load()

        # Setup logging
        from baca.utils.logger import Logging
        # Configure only once: skip if our Logging class is already installed.
        if logging.getLoggerClass() is not Logging:
            logging.config.fileConfig(
                usefull_path(str(app.config.logging_config_file))
            )
            logging.setLoggerClass(Logging)

            # Route Twisted's log events into the stdlib logging tree.
            twisted_logging = PythonLoggingObserver('twisted')
            twisted_logging.start()

#        self._setup_database()
        app.setup_log()

        # No sub-command given: print usage instead of doing nothing.
        if not self.subCommand:
            self.opt_help()
Exemplo n.º 2
0
def bootstrap():
    """Install the custom logger class and configure the 'engineer' logger."""
    # Register the project logger class and the extra CONSOLE level name.
    logging.setLoggerClass(CustomLogger)
    logging.addLevelName(CONSOLE, 'CONSOLE')

    # The package logger captures everything; the console handler filters.
    engineer_log = logging.getLogger('engineer')
    engineer_log.setLevel(logging.DEBUG)
    engineer_log.addHandler(get_console_handler(logging.WARNING))
Exemplo n.º 3
0
def _patch_logger_class():
    """Make sure process name is recorded when loggers are used."""

    try:
        from multiprocessing.process import current_process
    except ImportError:
        current_process = None  # noqa

    logging._acquireLock()
    try:
        OldLoggerClass = logging.getLoggerClass()
        if not getattr(OldLoggerClass, '_process_aware', False):

            class ProcessAwareLogger(OldLoggerClass):
                _process_aware = True

                def makeRecord(self, *args, **kwds):
                    record = OldLoggerClass.makeRecord(self, *args, **kwds)
                    if current_process:
                        record.processName = current_process()._name
                    else:
                        record.processName = ""
                    return record
            logging.setLoggerClass(ProcessAwareLogger)
    finally:
        logging._releaseLock()
    def tearDown(self):
        """Reset the global logging/engine state mutated by the tests.

        The install engine and InstallLogger patch module-level logging
        internals, so each piece is restored by hand here.
        """
        self.eng = None
        TestInstallEngine._instance = None
        InstallLogger.DEFAULTFILEHANDLER = None
        # Forget every named logger and restore the stock logger class.
        logging.Logger.manager.loggerDict = {}
        logging.setLoggerClass(logging.Logger)
        # Scrub the shared test logger back to a pristine state.
        self.test_logger.name = None
        self.test_logger.destination = None
        self.test_logger.level = None
        self.test_logger.parent = None
        self.test_logger.propagate = 1
        self.test_logger.handlers = []
        self.test_logger.disabled = 0
        self.callfunction = None
        self.list = []
        # Reset the module default formatter a test may have replaced.
        logging._defaultFormatter = logging.Formatter()

        # Best-effort cleanup of log files created during the test.
        try:
            os.remove(solaris_install.logger.DEFAULTLOG)
        except OSError:
            # File doesn't exist
            pass

        try:
            os.remove("simplelog")
        except OSError:
            # File doesn't exist
            pass
Exemplo n.º 5
0
    def setup(cls, **kw):
        lbl = kw.get('label', "")
        if lbl:
            lbl = '(' + lbl + ')'
        lprm = {'datefmt': "%Y-%m-%d %H:%M:%S",
                'format': "[%(asctime)s.%(nsecs)d] %(lvlnam)s [%(module)s" +
                lbl + ":%(lineno)s:%(funcName)s] %(ctx)s: %(message)s"}
        lprm.update(kw)
        lvl = kw.get('level', logging.INFO)
        lprm['level'] = lvl
        logging.root = cls("root", lvl)
        logging.setLoggerClass(cls)
        logging.getLogger().handlers = []
        logging.getLogger().setLevel(lprm['level'])

        if 'filename' in lprm:
            try:
                logging_handler = handlers.WatchedFileHandler(lprm['filename'])
                formatter = logging.Formatter(fmt=lprm['format'],
                                              datefmt=lprm['datefmt'])
                logging_handler.setFormatter(formatter)
                logging.getLogger().addHandler(logging_handler)
            except AttributeError:
                # Python version < 2.6 will not have WatchedFileHandler
                # so fallback to logging without any handler.
                # Note: logrotate will not work if Python version is < 2.6
                logging.basicConfig(**lprm)
        else:
            # If filename not passed(not available in lprm) then it may be
            # streaming.(Ex: {"stream": "/dev/stdout"})
            logging.basicConfig(**lprm)
Exemplo n.º 6
0
def init(name, level, colors=True):
    """Create the module-wide logger and export its level methods."""

    global logger, critical, fatal, warn, warning, info, skip, debug, error

    logging.setLoggerClass(SkipHandler)
    logger = logging.getLogger(name)

    term_handler = TerminalHandler()
    if colors:
        term_handler.setFormatter(ANSIFormatter('%(message)s'))
    logger.addHandler(term_handler)
    logger.setLevel(level)

    # Re-export bound logger methods as module-level shortcuts.
    critical = logger.critical
    fatal = logger.fatal
    error = logger.error
    warning = logger.warning
    warn = logger.warn
    info = logger.info
    debug = logger.debug
    skip = logger.skip

    # Surface Python warnings only when running at DEBUG verbosity.
    warnings.resetwarnings()
    warnings.showwarning = showwarning if level == DEBUG else (lambda *x: None)
Exemplo n.º 7
0
def ensure_process_aware_logger(force=False):
    """Make sure process name is recorded when loggers are used."""
    global _process_aware
    if _process_aware and not force:
        return
    logging._acquireLock()
    try:
        _process_aware = True
        base_cls = logging.getLoggerClass()
        if getattr(base_cls, '_process_aware', False):  # pragma: no cover
            return

        class ProcessAwareLogger(base_cls):
            _signal_safe = True
            _process_aware = True

            def makeRecord(self, *args, **kwds):
                # Stamp each record with the emitting process's name.
                record = base_cls.makeRecord(self, *args, **kwds)
                record.processName = current_process()._name
                return record

            def log(self, *args, **kwargs):
                # Logging from a signal handler is unsafe: drop the record.
                if _in_sighandler:
                    return
                return base_cls.log(self, *args, **kwargs)

        logging.setLoggerClass(ProcessAwareLogger)
    finally:
        logging._releaseLock()
def _initialize():
  """Initializes loggers and handlers."""
  global _absl_logger, _absl_handler

  if _absl_logger:
    return

  # Create the 'absl' logger with ABSLLogger as its class, then restore the
  # previous logger class so other libraries are unaffected.
  saved_class = logging.getLoggerClass()
  logging.setLoggerClass(ABSLLogger)
  _absl_logger = logging.getLogger('absl')
  logging.setLoggerClass(saved_class)

  _absl_handler = ABSLHandler(PythonFormatter())

  # The absl handler logs to stderr by default. To prevent double logging to
  # stderr, the following code tries its best to remove other handlers that emit
  # to stderr. Those handlers are most commonly added when logging.info/debug is
  # called before importing this module.
  stderr_handlers = [
      h for h in logging.root.handlers
      if isinstance(h, logging.StreamHandler) and h.stream == sys.stderr]
  for stderr_handler in stderr_handlers:
    logging.root.removeHandler(stderr_handler)

  # The absl handler will always be attached to root, not the absl logger.
  if not logging.root.handlers:
    # Attach the absl handler at import time when there are no other handlers.
    # Otherwise it means users have explicitly configured logging, and the absl
    # handler will only be attached later in app.run(). For App Engine apps,
    # the absl handler is not used.
    logging.root.addHandler(_absl_handler)
Exemplo n.º 9
0
def config_logger():
    """Configure the netlogger project logger (console, optional GN socket)."""
    nllog.PROJECT_NAMESPACE = NETLOGGER_NAMESPACE
    # nllog.PrettyBPLogger is an alternative, more verbose logger class.
    logging.setLoggerClass(nllog.BPLogger)
    log = logging.getLogger(nllog.PROJECT_NAMESPACE)

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter("%(message)s"))
    log.addHandler(stream_handler)

    if GN_ADDR:
        # Forward records to the global node (GN) over TCP as well.
        log.addHandler(logging.handlers.SocketHandler(
            GN_ADDR, logging.handlers.DEFAULT_TCP_LOGGING_PORT))

    # Pick the level: TRACE beats DEBUG beats CONSOLE (25) beats INFO.
    if TRACE:
        log_level = nllog.TRACE
    elif DEBUG:
        log_level = logging.DEBUG
    elif CONSOLE:
        log_level = 25
    else:
        log_level = logging.INFO
    log.setLevel(log_level)
Exemplo n.º 10
0
def getLogger(name):
    """
        Return a logger created with the project Logger class.

        The previously registered logger class is restored afterwards, so
        loggers created elsewhere are not silently converted too (same
        save/restore pattern used by the other getLogger helpers in this
        codebase).
    """
    previous_class = logging.getLoggerClass()
    logging.setLoggerClass(Logger)
    try:
        return logging.getLogger(name=name)
    finally:
        logging.setLoggerClass(previous_class)
Exemplo n.º 11
0
def setupLogging(config_options=()):
    """Set up overall logging for the program given the config options.

    Arguments:

        config_options (List of XML objects):
            Empty if the user did not elect to use logging.
            Otherwise could contain either or both of the
            <Logging>...</Logging> and <Metadata>...</Metadata>
            elements.
    """
    # NOTE: the default used to be the mutable literal [] — the classic
    # shared-mutable-default pitfall.  An empty tuple behaves identically
    # (falsy, iterable) and is immutable.
    #
    # There must be a metadata logger regardless of its level,
    # so that its internal functions may be called from any other
    # module without error.
    OldClass = logging.getLoggerClass()
    logging.setLoggerClass(MetaDataLogger)
    logging.getLogger('metadata')
    logging.setLoggerClass(OldClass)

    # this will be reset below if the loggers are configured.
    logging.getLogger().setLevel(level=logging.CRITICAL)
    logging.getLogger("metadata").setLevel(level=logging.CRITICAL)

    if not config_options:
        # Logging not requested: silence everything.
        logging.disable(level=logging.CRITICAL)
    else:
        for child in config_options:
            if child.tag == "Logging":
                _setupLogging(child)
            elif child.tag == "Metadata":
                _setupLogging(child, "metadata")
            else:
                logging.getLogger('').critical("Logging configuration attempted for an object that is not a logger: %s" % str(child.tag))
                sys.exit()
Exemplo n.º 12
0
def TrLogger(logger_name, options):
    """
    Factory for the actual logging-derived logger
    """
    try:
        trEmbraceAndExtendErrno()

        saved_class = logging.getLoggerClass()  # remember the old setting
        logging.setLoggerClass(TrLoggerClass)

        # Logging configuration backdoor for sites that want to do
        # something sophisticated: a config file named in the options.
        # A named-but-missing file is treated as absent.
        if options.logconfig and not os.path.exists(options.logconfig):
            options.logconfig = None
        if options.logconfig:
            logging.config.fileConfig(options.logconfig)

        logger = logging.getLogger(logger_name)

        logging.setLoggerClass(saved_class)  # restore

        if not options.logconfig:
            # In the typical case that there is no logging config file,
            # apply our usual handlers.
            logger.trBasicConfig(options)

    except Exception:
        # Any failure falls back to a minimal, always-working logger.
        logger = TrDesperationLogger(logger_name)
        logger.exception("logging configuration failed")

    return logger
Exemplo n.º 13
0
    def __init__(self, args):
        """Store parsed CLI *args* and configure Yum with custom logging.

        NBYumLogger (plus the 'progress'/'recap' level names) is registered
        before the Yum base object is created so Yum's own loggers pick up
        our class.
        """
        self.args = args

        # -- Hijack the Yum logging ------------------------------------------
        logging.setLoggerClass(NBYumLogger)
        logging.addLevelName(PROGRESS_LEVEL, "progress")
        logging.addLevelName(RECAP_LEVEL, "recap")

        self.base = NBYumBase()

        # -- Deal with the preconfig stuff -----------------------------------
        if not args.debug:
            self.base.preconf.debuglevel = 0
        else:
            self.base.preconf.debuglevel = 6

        if args.config:
            self.base.preconf.fn = args.config

        self.base.prerepoconf.progressbar = NBYumTextMeter()

        # This sets up a bunch of stuff
        self.base.conf

        # 'last_updated' only reads metadata, so the cache is good enough.
        if self.args.func == "last_updated":
            args.force_cache = True

        if args.force_cache:
            if self.args.func == "rebuild_cache":
                self.base.logger.warning("Ignoring --force-cache argument, as"
                                         " we are rebuilding the cache")

            else:
                self.base.conf.cache = 1
Exemplo n.º 14
0
Arquivo: log.py Projeto: catlee/balrog
def configure_logging(stream=sys.stdout, formatter=JsonLogFormatter, format_=log_format, level=logging.DEBUG):
    """Attach a stream handler with the given formatter to the root logger
    and make BalrogLogger the class for all subsequently created loggers."""
    logging.setLoggerClass(BalrogLogger)
    stream_handler = logging.StreamHandler(stream)
    stream_handler.setFormatter(formatter(fmt=format_))
    logging.root.addHandler(stream_handler)
    logging.root.setLevel(level)
Exemplo n.º 15
0
def getLogger(name):
    """Return a logger of the project Logger class, restoring the globally
    registered logger class afterwards so other callers are unaffected."""
    saved_class = logging.getLoggerClass()
    logging.setLoggerClass(Logger)
    try:
        return logging.getLogger(name)
    finally:
        logging.setLoggerClass(saved_class)
Exemplo n.º 16
0
def patch_logging():
    """Install _SlaveContextLogger globally so standard and third-party
    libraries using the stdlib logging inherit the caller's context
    (propagated via contextlog)."""
    if logging.getLoggerClass() == _SlaveContextLogger:
        return
    logging.setLoggerClass(_SlaveContextLogger)
Exemplo n.º 17
0
 def _enable_dev_mode(self):
     """Switch this component to development logging.

     Installs JSLogger as the global logger class, drops this logger to
     DEBUG, disables propagation and the last-resort stderr fallback so
     output only flows through our own handlers, then attaches every
     registered handler.
     """
     logging.setLoggerClass(JSLogger)
     self._logger.setLevel(logging.DEBUG)
     self._logger.propagate = False
     # No stderr fallback: our handlers fully cover output.
     logging.lastResort = None
     for h in self.handlers.values():
         self._logger.addHandler(h)
    def __init__(self, download_to_disk=True):
        """
        Initialises the DataLoader class

        Parameters
        ----------
        download_to_disk : boolean, default True
            Whether to download all source data to local
        """

        self.load_web_source = False  # for debug; always download website
        # MTA turnstile index page and the base URL its data links hang off.
        self.source_url = "http://web.mta.info/developers/turnstile.html"
        self.data_url_base = "http://web.mta.info/developers/"
        self.data_folder = "data/"
        # Every logger created from here on uses the project GoodLogger class.
        logging.setLoggerClass(GoodLogger)
        self.logger = logging.getLogger("2S_Turnstile")
        self.download_to_disk = download_to_disk
        self.data = None  # main data
        self.available_dates = []  # List of dates available for request
        # The start/end datetime of the data request
        self.request_start_dt = None
        self.request_end_dt = None
        # The start/end datetime of data pull from source
        self.data_start_dt = None
        self.data_end_dt = None

        # Window shift in days: a request for [A, B] also prepares data for
        # [A - N days, B - N days] so there are enough sample observations
        # for imputation.  NOTE(review): an earlier comment said the default
        # was N = 40, but the code sets 7 — confirm which is intended.
        self.N = 7

        # Parse links
        self.data_urls = self.parse_source_url()

        # Download the hash table for stations
        self.station_table = self.get_station_table()
Exemplo n.º 19
0
def setup_system():
  """Configure application logging (console + file), numpy error routing,
  and process exit hooks."""
  logger=logging.getLogger()#logging.getLogger('quicknxs')
  # Root level must be the minimum of all sink levels so no sink is starved.
  logger.setLevel(min(FILE_LEVEL, CONSOLE_LEVEL, GUI_LEVEL))
  if not sys.platform.startswith('win'):
    # no console logger for windows (py2exe)
    console=logging.StreamHandler(sys.__stdout__)
    formatter=logging.Formatter('%(levelname) 7s: %(message)s')
    console.setFormatter(formatter)
    console.setLevel(CONSOLE_LEVEL)
    logger.addHandler(console)

  # File log is re-opened fresh ('w') each run, with a detailed format.
  logfile=logging.FileHandler(paths.LOG_FILE, 'w')
  formatter=logging.Formatter('[%(levelname)s] - %(asctime)s - %(filename)s:%(lineno)i:%(funcName)s %(message)s', '')
  logfile.setFormatter(formatter)
  logfile.setLevel(FILE_LEVEL)
  logger.addHandler(logfile)

  logging.info('*** QuickNXS %s Logging started ***'%str_version)

  # define numpy warning behavior
  global nplogger
  # Create the 'numpy' logger with NumpyLogger as its class, restoring the
  # previous logger class afterwards so other loggers are unaffected.
  old_class=logging.getLoggerClass()
  logging.setLoggerClass(NumpyLogger)
  nplogger=logging.getLogger('numpy')
  nplogger.setLevel(logging.DEBUG)
  # Handler writes into an in-memory buffer at CRITICAL only, effectively
  # discarding direct output; numpy messages are delivered via numpy_logger.
  null_handler=logging.StreamHandler(StringIO())
  null_handler.setLevel(logging.CRITICAL)
  nplogger.addHandler(null_handler)
  logging.setLoggerClass(old_class)
  # Route numpy floating point errors through the logging callback.
  seterr(divide='call', over='call', under='ignore', invalid='call')
  seterrcall(numpy_logger)

  # write information on program exit
  sys.excepthook=excepthook_overwrite
  atexit.register(goodby)
Exemplo n.º 20
0
def getLogger(name, handler=None):
    """
    Returns the logger with the specified name.
    If the logger doesn't exist, it is created.
    If handler is specified, adds it to the logger. Otherwise a default handler
    that logs to standard output will be used.

    :param name: The name of the logger to retrieve
    :param handler: A handler to add to the logger. If the logger already exists,
                    and a handler is specified, an exception will be raised. To
                    add a handler to an existing logger, call that logger's
                    addHandler method.
    """
    setLoggerClass(MozLogger)

    # An existing logger is returned untouched; attaching a handler through
    # this call is only allowed at creation time.
    if name in Logger.manager.loggerDict:
        if handler:
            raise ValueError('The handler parameter requires that a logger '
                             'by this name does not already exist')
        return Logger.manager.loggerDict[name]

    logger = getSysLogger(name)
    logger.setLevel(_default_level)

    # Default handler: stdout with the Mozilla formatter.
    if handler is None:
        handler = StreamHandler()
        handler.setFormatter(MozFormatter())

    logger.addHandler(handler)
    logger.propagate = False
    return logger
Exemplo n.º 21
0
def getLogger(name, logfile=None):
    """
    Returns the logger with the specified name.
    If the logger doesn't exist, it is created.

    name       - The name of the logger to retrieve
    [filePath] - If specified, the logger will log to the specified filePath
                 Otherwise, the logger logs to stdout
                 This parameter only has an effect if the logger doesn't exist
    """
    setLoggerClass(_MozLogger)

    # Reuse an existing logger as-is; logfile is ignored in that case.
    if name in Logger.manager.loggerDict:
        return getSysLogger(name)

    logger = getSysLogger(name)
    logger.setLevel(_default_level)

    # File target when given, stdout otherwise.
    handler = FileHandler(logfile) if logfile else StreamHandler()
    handler.setFormatter(_MozFormatter())
    logger.addHandler(handler)
    return logger
Exemplo n.º 22
0
    def _init_logger(self):
        """Init logger to be able to intercept message from each command."""
        class HandlerWrapper(logging.Handler):
            def emit(self, record):
                # FIXME Configure logging on executor threads
                w = self.bgutil_workers.get(record.threadName, None)
                if not w:
                    return

                w["output"].append(
                    "{:<7} {:<25} :: {}".format(
                        record.levelname,
                        record.filename + ":" + str(record.lineno),
                        record.getMessage(),
                    )
                )

        class LoggerWrapper(logging.Logger):
            def __init__(self, name):
                super(LoggerWrapper, self).__init__(name)
                self.addHandler(HandlerWrapper())
                self.propagate = True

        logging.setLoggerClass(LoggerWrapper)
        logging.getLogger().propagate = True
        logging.getLogger().addHandler(HandlerWrapper())
Exemplo n.º 23
0
def setup_logging(increase_padding=False):
    """
    Setup overall logging engine and add 2 more levels of logging lower than
    DEBUG, TRACE and GARBAGE.
    """
    import logging

    if increase_padding and logging.getLoggerClass() is not Logging:
        logging.setLoggerClass(Logging)

    if not hasattr(LoggingLoggerClass, 'trace'):
        def trace(cls, msg, *args, **kwargs):
            return cls.log(5, msg, *args, **kwargs)

        logging.addLevelName(5, 'TRACE')
        # Assigning the function directly creates a method on both
        # Python 2 and 3; the previous `new.instancemethod(...)` call only
        # worked on Python 2 (the `new` module was removed in Python 3).
        LoggingLoggerClass.trace = trace

    if not hasattr(LoggingLoggerClass, 'garbage'):
        def garbage(cls, msg, *args, **kwargs):
            return cls.log(1, msg, *args, **kwargs)

        logging.addLevelName(1, 'GARBAGE')
        LoggingLoggerClass.garbage = garbage

    # Set the root logger at the lowest level possible
    logging.getLogger().setLevel(1)
Exemplo n.º 24
0
def setup_logging():
    """ Method to setup the logging options. """
    # These names are deliberately rebound at module level so other modules
    # can use brisa's log handles and helpers directly after setup.
    global debug, info, warning, critical, error, root_logger, set_level,\
           setLevel, filename, level

    level = log_dict.get(config.get_parameter('brisa', 'logging'),
                         logging.DEBUG)
    filename = config.get_parameter('brisa', 'logging_output')

    if filename == 'file':
        # Log to a file under the brisa home directory.
        filename = os.path.join(config.brisa_home, 'brisa.log')
        logging.basicConfig(level=level, filename=filename,
                            format='%(created)f %(levelname)s %(module)s:'\
                                   '%(lineno)d:%(funcName)s() %(message)s')
        root_logger = logging.getLogger('RootLogger')
    else:
        # Console output with the colored logger class.
        logging.setLoggerClass(ColoredLogger)
        root_logger = getLogger('RootLogger')
        root_logger.setLevel(level)

    # Because of the `global` statement above, these defs rebind the
    # *module-level* set_level/setLevel names (they are not locals).
    def set_level(level):
        """ Real implementation of the set level function. """
        root_logger.setLevel(log_dict.get(level))

    def setLevel(level):
        """ Method to set the log level. """
        set_level(level)
Exemplo n.º 25
0
def robottelo_logger(request, worker_id):
    """Set up a separate logger for each pytest-xdist worker
    if worker_id != 'master' then xdist is running in multi-threading so
    a logfile named 'robottelo_gw{worker_id}.log' will be created.
    """
    logger = logging.getLogger('robottelo')
    rp_ready = (hasattr(request.session.config, '_reportportal_configured')
                and request.session.config._reportportal_configured)
    if rp_ready:
        logging.setLoggerClass(RPLogger)

    # Nothing to do when this worker's handler exists or we run in-process.
    existing_names = [h.get_name() for h in logger.handlers]
    if '{0}'.format(worker_id) in existing_names or worker_id == 'master':
        return logger

    formatter = logging.Formatter(
        fmt='%(asctime)s - {0} - %(name)s - %(levelname)s -'
            ' %(message)s'.format(worker_id),
        datefmt='%Y-%m-%d %H:%M:%S'
    )
    file_handler = logging.FileHandler(
        'robottelo_{0}.log'.format(worker_id)
    )
    file_handler.set_name('{0}'.format(worker_id))
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    # Nailgun HTTP logs should also be included in gw* logs
    logging.getLogger('nailgun').addHandler(file_handler)

    if rp_ready:
        # Mirror everything to ReportPortal as well.
        rp_handler = RPLogHandler(request.node.config.py_test_service)
        rp_handler.set_name('{0}'.format(worker_id))
        rp_handler.setFormatter(formatter)
        logger.addHandler(rp_handler)
        logging.getLogger('nailgun').addHandler(rp_handler)
    return logger
Exemplo n.º 26
0
    def init_temp_log_service(self, msg_level, extra_log_id):
        """Create the temporary TEAL logging service.

        Records are buffered in a MemoryHandler until the real destination
        is known; the logger and configured level are then published in the
        service registry.
        """
        # Create and register the logger using the TEAL logger class.
        logging.setLoggerClass(TealLogger)
        logger = logging.getLogger('tealLogger')

        hdlr = logging.handlers.MemoryHandler(100, logging.NOTSET, target=None)

        # Optional extra id: first four characters, stripped, with a
        # trailing ':' so it reads as a tag in the output.
        use_eli = extra_log_id
        if use_eli:
            use_eli = extra_log_id[:4].strip() + ':'

        log_format = "%(asctime)-15s [%(process)d:%(thread)d] {0}%(module)s - %(levelname)s: %(message)s".format(use_eli)
        hdlr.setFormatter(logging.Formatter(log_format))
        logger.addHandler(hdlr)

        # Map the configured level name to the numeric logging level;
        # anything unrecognized falls back to NOTSET.
        levels = {'debug': logging.DEBUG,
                  'info': logging.INFO,
                  'warning': logging.WARNING,
                  'error': logging.ERROR,
                  'critical': logging.CRITICAL}
        logger.setLevel(levels.get(msg_level, logging.NOTSET))

        registry.register_service(SERVICE_LOGGER, logger)
        registry.register_service(SERVICE_MSG_LEVEL, msg_level)
def _check_logger_class():
    """
    Make sure process name is recorded when loggers are used
    """
    # XXX This function is unnecessary once logging is patched
    import logging

    # A multiprocessing-patched logging module already records the name.
    if hasattr(logging, "multiprocessing"):
        return

    logging._acquireLock()
    try:
        base_cls = logging.getLoggerClass()
        if getattr(base_cls, "_process_aware", False):
            return  # already patched; lock released by finally

        class ProcessAwareLogger(base_cls):
            _process_aware = True

            def makeRecord(self, *args, **kwds):
                # Stamp each record with the current process name.
                record = base_cls.makeRecord(self, *args, **kwds)
                record.processName = current_process()._name
                return record

        logging.setLoggerClass(ProcessAwareLogger)
    finally:
        logging._releaseLock()
Exemplo n.º 28
0
def getLogger(name, logfile=None):
    """
    Returns the logger with the specified name.
    If the logger doesn't exist, it is created.

    :param name: The name of the logger to retrieve
    :param logfile: If specified, the logger will log to the specified file.
                    Otherwise, the logger logs to stdout.
                    This parameter only has an effect if the logger doesn't already exist.
    """
    setLoggerClass(MozLogger)

    # An existing logger is returned as-is; logfile is ignored in that case.
    if name in Logger.manager.loggerDict:
        return getSysLogger(name)

    logger = getSysLogger(name)
    logger.setLevel(_default_level)

    # File target when given, stdout otherwise.
    handler = FileHandler(logfile) if logfile else StreamHandler()
    handler.setFormatter(_MozFormatter())
    logger.addHandler(handler)
    # Keep records out of ancestor handlers to avoid duplicate output.
    logger.propagate = False
    return logger
Exemplo n.º 29
0
def createLogger(name, console=True):
    """Return a ColoredLogger-based logger for *name* at LOGLEVEL.

    The `to_console` flag controls whether output also goes to the console.
    """
    logging.setLoggerClass(ColoredLogger)
    new_logger = logging.getLogger(name)
    new_logger.to_console = console
    new_logger.setLevel(LOGLEVEL)
    return new_logger
Exemplo n.º 30
0
Arquivo: log.py Projeto: cottrell/cli
    def setup(self):
        """Configure the :class:`LoggingMixin`.

        This method adds the :option:`-l`, :option:`q`,
        :option:`-s` and :option:`-v` parameters to the
        application and instantiates the :attr:`log` attribute.
        """
        # Add logging-related options.
        self.add_param("-l", "--logfile", default=self.logfile,
                       help="log to file (default: log to stdout)")
        self.add_param("-q", "--quiet", default=0, help="decrease the verbosity",
                       action="count")
        self.add_param("-s", "--silent", default=False, help="only log warnings",
                       action="store_true")
        self.add_param("-v", "--verbose", default=0, help="raise the verbosity",
                       action="count")

        # Create logger.
        # CommandLineLogger becomes the class of all loggers created from now on.
        logging.setLoggerClass(CommandLineLogger)
        self.log = logging.getLogger(self.name)
        self.formatter = Formatter(fmt=self.message_format, datefmt=self.date_format)

        self.log.level = self.log.default_level

        # If requested, make our logger the root.
        # NOTE(review): replacing logging.root / Logger.manager makes every
        # logger in the process a child of self.log — confirm that global
        # takeover is intended before relying on it.
        if self.root:
            logging.root = self.log
            logging.Logger.root = self.log
            logging.Logger.manager = logging.Manager(self.log)
Exemplo n.º 31
0
    (
        'en',
        'English',
    ),
    (
        'th',
        'Thai',
    ),
)
LANGUAGES = CMS_LANGUAGES

# Logging: silence noisy third-party loggers.
logging.getLogger('django.db.backends').setLevel(logging.ERROR)
logging.getLogger('PYREADLINE').setLevel(logging.ERROR)
logging.getLogger('south').setLevel(logging.ERROR)

# Install the project logger class, writing .log files under
# PROJECT_ROOT/logs.  NOTE(review): setLoggerClass expects a *class* while
# LoggerClass(...) is a call — presumably it returns a class; verify.
logging.setLoggerClass(
    LoggerClass(
        file_suffix='.log',
        default_level=logging.INFO,
        files_path=path.abspath(path.join(PROJECT_ROOT, 'logs')),
    ))

# Use this for settings that you need in all environments but that shouldn't be
# kept in public version control, like Twitter API keys, etc.
#
# This does not catch an ImportError because it *should* be a fatal error in
# production if it's missing (database credentials, etc.). It will be put in
# place by Chef in production.
from secrets import *
Exemplo n.º 32
0
def _setup_logging(color: bool = bool(ColoredFormatter)):
    """Register LoggerWithSuccess and quiet down noisy third-party loggers."""
    # Silence any noisy loggers here.
    logging.getLogger("urllib3.connectionpool").setLevel(logging.ERROR)
    logging.getLogger("watchdog.observers").setLevel(logging.INFO)
    logging.setLoggerClass(LoggerWithSuccess)
Exemplo n.º 33
0
# -*- coding: utf-8 -*-
# Author: Ztj

import logging
import logging.config
import os

from configfile import ConfigFile
from registry import Registry
from libs.logger import JsonLogger
# Global settings: default to DEBUG console output until a log config loads.
"""全局设置"""
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(levelname)s - %(message)s')
# Global variables: the service root is two directories above this file.
"""全局变量"""
service_root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Initialize configuration from the 'configs' directory.
"""初始化配置"""
GlobalConfigFile = ConfigFile(os.path.join(service_root_path, 'configs'))
# Load the application configuration.
"""加载配置文件"""
app_options = Registry(GlobalConfigFile.load_app('app'))
# Configure logging: when a 'log' config exists, switch the logger class to
# JsonLogger and apply the dictConfig from the file.
"""配置日志"""
log_options = GlobalConfigFile.load_app('log')
if log_options is not None:
    logging.setLoggerClass(JsonLogger)
    logging.config.dictConfig(log_options)

json_logger = logging.getLogger('json')
Exemplo n.º 34
0
# Log level names accepted on the command line.
CMDLINE_LOG_LEVELS = ['info', 'debug', 'debug_analyzer']

# Custom level between DEBUG (10) and INFO (20); also stored on the logging
# module so other modules can reference logging.DEBUG_ANALYZER.
DEBUG_ANALYZER = logging.DEBUG_ANALYZER = 15
logging.addLevelName(DEBUG_ANALYZER, 'DEBUG_ANALYZER')


class CCLogger(logging.Logger):
    """Logger with a helper for the custom DEBUG_ANALYZER (15) level."""

    def __init__(self, name, level=NOTSET):
        logging.Logger.__init__(self, name, level)

    def debug_analyzer(self, msg, *args, **kwargs):
        """Log *msg* at the DEBUG_ANALYZER level when it is enabled."""
        if self.isEnabledFor(logging.DEBUG_ANALYZER):
            self._log(logging.DEBUG_ANALYZER, msg, args, **kwargs)


# Make every logger created from here on a CCLogger.
logging.setLoggerClass(CCLogger)

package_root = os.environ.get('CC_PACKAGE_ROOT', '')
# Default logging config file shipped inside the package.
DEFAULT_LOG_CFG_FILE = os.path.join(package_root, 'config', 'logger.conf')

# Default config which can be used if reading log config from a
# file fails.
DEFAULT_LOG_CONFIG = '''{
  "version": 1,
  "disable_existing_loggers": false,
  "formatters": {
    "brief": {
      "format": "[%(asctime)s][%(levelname)s] - %(message)s",
      "datefmt": "%Y-%m-%d %H:%M"
    },
    "precise": {
Exemplo n.º 35
0
    if EXEC_PARAMS.command_mode:
        cmd_file_hndlr = logging.FileHandler(FILE_LOG_FILEPATH,
                                             mode='a',
                                             delay=True)
        logformat = LOG_REC_FORMAT_FILE_C.format(EXEC_PARAMS.command_name)
        formatter = logging.Formatter(logformat)
        cmd_file_hndlr.setFormatter(formatter)
        return cmd_file_hndlr
    else:
        return file_hndlr


# setting up public logger. this will be imported in with other modules -------
if not EXEC_PARAMS.doc_mode:
    logging.setLoggerClass(LoggerWrapper)

# Cache of loggers already created by get_logger(), keyed by logger name.
loggers = {}


def get_logger(logger_name):
    """Register and return a logger with given name.

    Caches all registered loggers and returns the same logger object on
    second call with the same logger name.

    Args:
        logger_name (str): logger name
        val (type): desc

    Returns:
Exemplo n.º 36
0
    messagesLogFilename = os.path.join(_logDir, 'messages.log')
    _handler = BetterFileHandler(messagesLogFilename)
except EnvironmentError, e:
    raise SystemExit, \
          'Error opening messages logfile (%s).  ' \
          'Generally, this is because you are running Supybot in a directory ' \
          'you don\'t have permissions to add files in, or you\'re running ' \
          'Supybot as a different user than you normal do.  The original ' \
          'error was: %s' % (messagesLogFilename, utils.gen.exnToString(e))

# These are public.
# Placeholder formats: the real format strings are installed later from the
# configuration; if these ever appear in output, formatter setup was skipped.
formatter = Formatter('NEVER SEEN; IF YOU SEE THIS, FILE A BUG!')
pluginFormatter = PluginFormatter('NEVER SEEN; IF YOU SEE THIS, FILE A BUG!')

# These are not.
# All supybot loggers use the project Logger class and one stdout handler.
logging.setLoggerClass(Logger)
_logger = logging.getLogger('supybot')
_stdoutHandler = StdoutStreamHandler(sys.stdout)


class ValidLogLevel(registry.String):
    """Invalid log level."""
    handler = None
    minimumLevel = -1

    def set(self, s):
        s = s.upper()
        try:
            level = logging._levelNames[s]
        except KeyError:
            try:
Exemplo n.º 37
0
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program.  If not, see <https://www.gnu.org/licenses/>.
#
###############################################################################
import signal

from common.logging_setup import VerboseLogger
from logging import setLoggerClass
# set custom logger class for VERBOSE log level
setLoggerClass(VerboseLogger)

import argparse
import sys
import multiprocessing

from __version import __version__


def parseArgs(argv=sys.argv[1:]):
    '''
    Parses command line arguments
    Returns parsed argument names and values in form {name: value}

    @return Returns parsed argument names and values [dict]
    '''
Exemplo n.º 38
0
import logging
try:
    from customlog import CustomLog
    logging.setLoggerClass(CustomLog)
# A failed import raises ImportError, not OSError -- the original
# `except OSError` would let the ImportError propagate uncaught.
except ImportError:
    print("Could not instantiate logger class!")
Exemplo n.º 39
0
#! /usr/bin/env python3
"""Handles the logging."""

import logging


class MyFilter(logging.Filter):
    """Pass only records originating from the application's own loggers."""

    # Logger names whose records are allowed through.
    _ALLOWED_NAMES = ('detsbot', 'jstris', 'database', 'main_model')

    def filter(self, record):
        """Return True when *record* was emitted by a whitelisted logger."""
        return record.name in self._ALLOWED_NAMES


class MyLogger(logging.Logger):
    """Logger subclass that attaches a MyFilter to every new instance."""

    def __init__(self, name):
        super().__init__(name)
        self.addFilter(MyFilter())


# Register MyLogger for all subsequent getLogger() calls, then apply the
# default stderr configuration at DEBUG level.
logging.setLoggerClass(MyLogger)
logging.basicConfig(level=logging.DEBUG)
Exemplo n.º 40
0
    def __init__(self, name, isStderr=False):
        """Build the logger with a single stream handler.

        :param name: logger name passed through to RunLogger
        :param isStderr: when True the handler writes to stderr,
            otherwise to stdout
        """
        RunLogger.__init__(self, name)
        target_stream = sys.stderr if isStderr else sys.stdout
        # Replace (not extend) the handler list so exactly one stream
        # handler is attached.
        self.handlers = [logging.StreamHandler(target_stream)]

    def addHandler(self, *args, **kwargs):
        """No-op override: keep the handler list exactly as configured."""
        return None


# Make RunLogger the default class for new loggers.  NOTE(review): the
# attribute injection into the logging module itself is unusual -- presumably
# so other modules can reach RunLogger via `logging.RunLogger`; confirm.
logging.RunLogger = RunLogger
logging.setLoggerClass(RunLogger)


# ============ begin logging support ============


def createLogDir(logDir: str = None) -> None:
    """A helper method to create the log directory"""
    # the usual case is the user does not pass in a log dir path, so we use the global one
    if logDir is None:
        logDir = LOG_DIR

    # create the directory
    if not os.path.exists(logDir):
        try:
            os.makedirs(logDir)
Exemplo n.º 41
0
def monkey_patched_getLogger(*args, **kwargs):
    """Call the original ``getLogger`` while preserving the logger class.

    The previously registered logger class is restored even when the
    wrapped call raises -- the original version skipped the restore on
    exception, leaving a foreign class installed globally.
    """
    orig_class = logging.getLoggerClass()
    try:
        return orig_getLogger(*args, **kwargs)
    finally:
        # Restore unconditionally.
        logging.setLoggerClass(orig_class)
Exemplo n.º 42
0
                file_handler = FileHandler(
                    filename=f"logs/{self.room_name}/{chosen_level[1]}.log")
                file_handler.setLevel(level)
                file_handler.setFormatter(file_formatter)

                # secondary log file that contains only messages.
                if self.level == self.CHAT:
                    self.add_chat_handler()
                    stream_handler.addFilter(ChatFilter())
                    stream_handler.setFormatter(terminal_formatter)

                else:
                    stream_handler.addFilter(DebugFilter())
                    stream_handler.setFormatter(terminal_formatter)

                    if self.chat_handler_enabled:
                        self.add_chat_handler()

                self.addHandler(file_handler)

                # log to the terminal.

                self.addHandler(stream_handler)
                return True
        # level was not set
        return False


# Every logging.getLogger() call from here on returns a QuantumLogger.
setLoggerClass(QuantumLogger)
Exemplo n.º 43
0
Arquivo: common.py Projeto: coyang/faf
        result = self.manager.getLogger(suffix)
        self._children.add(result)

        return result

    def setLevel(self, level):
        """Set the threshold of this logger and of every registered child.

        :param level: numeric logging level to apply recursively
        """
        self.level = level
        # Keep every child logger's threshold in sync with this one.
        for registered_child in self._children:
            registered_child.setLevel(level)

# Install FafLogger as the class for new loggers and apply the stdlib
# default configuration (stderr handler on the root logger).
logging.setLoggerClass(FafLogger)
logging.basicConfig()

# Invalid name "log" for type constant
# pylint: disable-msg=C0103
log = logging.getLogger(name="faf")
# pylint: enable-msg=C0103


def import_dir(module, dirname, prefix=None):
    """
    Imports python files from `dirname` into `module`.
    Ignores files whose name starts with underscore.
    """

    for filename in os.listdir(dirname):
Exemplo n.º 44
0
        logging.addLevelName(OUTPUT, "OUTPUT")

    def success(self, msg: str, *args: Any, **kwargs: Any) -> None:
        """Emit *msg* at the custom SUCCESS level when that level is enabled."""
        if not self.isEnabledFor(SUCCESS):  # pragma: no cover
            return
        self._log(SUCCESS, msg, args, **kwargs)

    def output(self, msg: str, *args: Any, **kwargs: Any) -> None:
        """Emit *msg* at the custom OUTPUT level when that level is enabled."""
        if not self.isEnabledFor(OUTPUT):  # pragma: no cover
            return
        self._log(OUTPUT, msg, args, **kwargs)


# Install the custom class, then fetch (and type-narrow via cast) the
# package logger so callers get the success()/output() helpers.
logging.setLoggerClass(LoggerWithSuccessAndOutput)
logger = cast(LoggerWithSuccessAndOutput, logging.getLogger("nox"))


def _get_formatter(color: bool, add_timestamp: bool) -> logging.Formatter:
    if color is True:
        return NoxColoredFormatter(
            reset=True,
            log_colors={
                "DEBUG": "cyan",
                "INFO": "blue",
                "WARNING": "yellow",
                "ERROR": "red",
                "CRITICAL": "red,bg_white",
                "SUCCESS": "green",
            },
Exemplo n.º 45
0
    def __init__(self, logger):
        """Keep a reference to the logger that will receive stray writes.

        :param logger: target logger instance
        """
        self.logger = logger

    def write(self, msg):
        """Forward *msg* to the wrapped logger at ERROR severity.

        :param msg: text captured from the redirected stream
        """
        formatted = 'STDERR %r' % msg
        self.logger.error(formatted)


# NOTE(review): OutputHandler's visible API (a wrapper holding a logger)
# does not look like a logging.Logger subclass, and setLoggerClass() raises
# TypeError for non-Logger classes -- confirm OutputHandler actually
# derives from logging.Logger.
logging.setLoggerClass(OutputHandler)


def getInstance(logfile='b3.log',
                loglevel=21,
                logsize=10485760,
                log2console=False):
    """
    Return a Logger instance.
    :param logfile: The logfile name.
    :param loglevel: The logging level.
    :param logsize: The size of the log file (in bytes)
    :param log2console: Whether or not to extend logging to the console.
    """
    global __output
Exemplo n.º 46
0
            'handlers': ['console'],
            'propagate': False,
        },
    }
}


class LoggerAutoInit(logging.getLoggerClass()):
    """Logger that lazily triggers logging setup on first use."""

    def handle(self, record):
        """Run deferred configuration if the root logger is still bare."""
        if not logging.root.handlers:
            # Re-enable this logger once configuration succeeds.
            if init_logger():
                self.disabled = 0
        return super(LoggerAutoInit, self).handle(record)


# All loggers created after this point auto-initialize on first use.
logging.setLoggerClass(LoggerAutoInit)


def gen_logger(name=None, options=None):
    """Return the stdlib logger registered under *name*.

    ``options`` is accepted for interface compatibility but is unused.
    """
    return logging.getLogger(name)


def init_logger(options=None):
    global logging_conf
    conf_fname = os.path.join('etc', 'embers', 'logging.conf')
    if not logging_conf:
        if os.path.exists(conf_fname):
            logging_conf = yaml.load(open(conf_fname))
        else:
            logging_conf = default_conf
Exemplo n.º 47
0
        except Exception, e:
            jobID = hst._options.jobID
            completionReason = ClientJobsDAO.CMPL_REASON_ERROR
            completionMsg = "ERROR: %s" % (e, )
            raise
        finally:
            if jobID is not None:
                cjDAO = ClientJobsDAO.get()
                cjDAO.jobSetCompleted(jobID=jobID,
                                      completionReason=completionReason,
                                      completionMsg=completionMsg)

    return jobID


#############################################################################

if __name__ == "__main__":
    # Install the prefixed logger before any logging happens so every line
    # carries the build/worker identification.
    logging.setLoggerClass(ExtendedLogger)
    buildID = Configuration.get('nupic.software.buildNumber', 'N/A')
    logPrefix = '<BUILDID=%s, WORKER=HS, WRKID=N/A, JOBID=N/A> ' % buildID
    ExtendedLogger.setLogPrefix(logPrefix)

    try:
        main(sys.argv)
    except:
        # Top-level boundary: record the failure with traceback, then
        # re-raise so the process exits with the original error.
        logging.exception(
            "HypersearchWorker is exiting with unhandled exception; "
            "argv=%r", sys.argv)
        raise
Exemplo n.º 48
0
            logging.root.removeHandler(handler)
        logging.root = NovaRootLogger("nova")
        NovaLogger.root = logging.root
        NovaLogger.manager.root = logging.root
        for logger in NovaLogger.manager.loggerDict.itervalues():
            logger.root = logging.root
            if isinstance(logger, logging.Logger):
                NovaLogger.manager._fixupParents(logger)
        NovaLogger.manager.loggerDict["nova"] = logging.root
        logging._releaseLock()
        sys.excepthook = handle_exception
        reset()


# Re-export the (replaced) root logger and make NovaLogger the class used
# for every logger created from here on.
root = logging.root
logging.setLoggerClass(NovaLogger)


def audit(msg, *args, **kwargs):
    """Shortcut for logging to the root log with severity 'AUDIT'."""
    logging.root.log(AUDIT, msg, *args, **kwargs)


class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.INFO):
        # Target logger and the severity used for each write() call.
        self.logger = logger
        self.level = level

    def write(self, msg):
Exemplo n.º 49
0
import requests
import requests.packages.urllib3.util.connection as urllib3_connection
from requests.packages.urllib3.util.connection import allowed_gai_family

from streamlink import __version__, plugins
from streamlink.compat import is_win32, lru_cache
from streamlink.exceptions import NoPluginError, PluginError
from streamlink.logger import Logger, StreamlinkLogger
from streamlink.options import Options
from streamlink.plugin import api
from streamlink.utils import update_scheme
from streamlink.utils.l10n import Localization

# Ensure that the logger class returned is Streamlink's, for backwards
# compatibility when third-party code uses the logging API directly.
logging.setLoggerClass(StreamlinkLogger)
log = logging.getLogger(__name__)


def print_small_exception(start_after):
    type, value, traceback_ = sys.exc_info()

    tb = traceback.extract_tb(traceback_)
    index = 0

    for i, trace in enumerate(tb):
        if trace[2] == start_after:
            index = i + 1
            break

    lines = traceback.format_list(tb[index:])
Exemplo n.º 50
0
def setLoggerClass():
    """Install hxtool_logger as the class used for all new loggers.

    NOTE: intentionally shadows ``logging.setLoggerClass`` in this module.
    """
    logging.setLoggerClass(hxtool_logger)
Exemplo n.º 51
0
            try:
                self.checkPos(len(msg))
                self.stream.write(msg)
            except (UnicodeDecodeError, UnicodeEncodeError):
                msg = msg.encode("UTF-8")
                self.checkPos(len(msg))
                self.stream.write(msg)

            StreamHandler.lastwrite = StatusHandler
            self.flush()
        except:
            self.handleError(record)


# Install the custom Logger class, then build the module's root logger.
# NOTE(review): Logger() is called with no name here -- presumably the
# custom class defaults it; confirm against the class definition.
setLoggerClass(Logger)

root = Logger()

# Registry of loggers handed out by getLogger(); None maps to the root.
_loggers = {None: root}


def getLogger(name=None):
    """
    Return a logger with the specified name, creating it if necessary.

    If no name is specified, return the root logger.
    """
    if name:
        logger = Logger.manager.getLogger(name)
        _loggers[name] = logger
Exemplo n.º 52
0
print = functools.partial(print, flush=True)


class TrackableLogger(logging.Logger):
    """Logger that stamps every record with a per-process tracking GUID."""

    # One GUID shared by all instances, generated once at import time.
    guid = str(uuid4())

    def _log(self, msg, *args, **kwargs):
        """Inject the GUID into the record's ``extra`` mapping and delegate.

        A merged copy of any caller-supplied ``extra`` dict is built so the
        caller's mapping is never mutated (the original updated it in
        place), and an explicit ``extra=None`` no longer crashes.

        Note: positionally the first argument is ``Logger._log``'s
        ``level``; everything is passed straight through, so the parameter
        name mismatch is harmless.
        """
        tracking = {'guid': self.guid}
        if kwargs.get('extra') is not None:
            kwargs['extra'] = {**kwargs['extra'], **tracking}
        else:
            kwargs['extra'] = tracking
        super()._log(msg, *args, **kwargs)


# Install the GUID-stamping class and create this script's logger.
logging.setLoggerClass(TrackableLogger)
logger = logging.getLogger('prebuild')


def headSha():
    if shutil.which('git') is None:
        logger.warning(
            "Unable to find git executable, can't caclulate commit ID")
        return '0xDEADBEEF'
    repo_dir = os.path.dirname(os.path.abspath(__file__))
    git = subprocess.Popen(
        'git rev-parse --short HEAD',
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        cwd=repo_dir,
Exemplo n.º 53
0
    COLOR_FORMAT = formatter_message(FORMAT, use_color)

    def __init__(self, name):
        """Create a DEBUG-level logger wired with console, history and file
        handlers.

        :param name: logger name passed to logging.Logger
        """
        logging.Logger.__init__(self, name, logging.DEBUG)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(
            ColoredFormatter(self.COLOR_FORMAT, use_color=self.use_color))

        # An embedding application may inject its own handler through
        # sys._kivy_logging_handler; prefer it over the console handler.
        _missing = object()
        injected = getattr(sys, '_kivy_logging_handler', _missing)
        if injected is _missing:
            self.addHandler(stream_handler)
        else:
            self.addHandler(injected)
        self.addHandler(HistoryHandler())
        self.addHandler(FileHandler())


if 'nosetests' not in sys.argv:
    # Presumably skipped under nosetests to keep test output free of the
    # custom colored/file handlers -- confirm.
    logging.setLoggerClass(ColoredLogger)

#: Kivy default logger instance
Logger = logging.getLogger('Kivy')
Logger.logfile_activated = False

# logging.TRACE is a project-added level; bind a convenience method for it.
Logger.trace = partial(Logger.log, logging.TRACE)

#: Kivy history handler
LoggerHistory = HistoryHandler
Exemplo n.º 54
0
    def remove_all_handlers(self):
        """Detach every handler currently attached to this logger.

        Iterates over a snapshot of ``self.handlers``: ``removeHandler``
        mutates that list, and removing while iterating the live list
        skips every other handler.
        """
        for handler in list(self.handlers):
            self.removeHandler(handler)

    @classmethod
    def _setup_console_handler(cls):
        """Create the shared console handler once (idempotent)."""
        if cls._console_handler is not None:
            return
        handler = logging.StreamHandler()
        handler.setFormatter(cls._formatter)
        cls._console_handler = handler

    @classmethod
    def _setup_file_handler(cls, filename):
        """Create the shared rotating file handler once (idempotent).

        Args:
            filename: path of the log file; parent directories are
                created on demand.
        """
        if cls._file_handler is not None:
            return

        # exist_ok avoids the check-then-create race of the original
        # os.path.exists()/makedirs() pair; the truthiness guard skips the
        # empty-string dirname of a bare filename, which makedirs rejects.
        log_directory = os.path.dirname(filename)
        if log_directory:
            os.makedirs(log_directory, exist_ok=True)

        cls._file_handler = logging.handlers.RotatingFileHandler(
            filename,
            maxBytes=consts.LOG_FILE_MAXBYTES,
            backupCount=consts.LOG_FILE_COUNT)
        cls._file_handler.setFormatter(cls._formatter)


# Make TunedLogger the class for all future getLogger() calls, and flush
# handlers cleanly when the interpreter exits.
logging.setLoggerClass(TunedLogger)
atexit.register(logging.shutdown)
Exemplo n.º 55
0
            new_msg = msg if isinstance(msg, str) else msg()
        except:
            locale_info = 'Unknown'
            try:
                locale_info = 'Terminal encoding=%s, LANG=%s, LANGUAGE=%s' % (
                    sys.stdout.encoding, os.getenv('LANG'), os.getenv('LANGUAGE'))
                logging.getLogger("variety").exception('Errors while logging. Locale info: %s' % locale_info)
                # TODO gather and log more info here
            except:
                pass
            new_msg = 'Errors while logging. Locale info: %s' % locale_info

        return super().makeRecord(name, level, fn, lno, new_msg, *args, **kwargs)


# Route all subsequently created loggers through SafeLogger.
logging.setLoggerClass(SafeLogger)

# # Change default encoding from ascii to UTF8 - works OK on Linux and prevents various UnicodeEncodeErrors/UnicodeDecodeErrors
# Still, it is generally considered bad practice and may cause deep hidden errors, as various Python internals depend on the default encoding
# reload(sys)
# sys.setdefaultencoding('UTF8')

import signal
import dbus, dbus.service, dbus.glib
import logging

import gi

gi.require_version('Gtk', '3.0')

from gi.repository import Gtk, Gdk, GObject  # pylint: disable=E0611
Exemplo n.º 56
0
import os

import logging
from covid.core.logger import CovidLogger

logging.setLoggerClass(CovidLogger)

# Local scratch directory used for downloaded/intermediate data files.
working_directory = 'data'

os.makedirs(working_directory, exist_ok=True)

# PostgreSQL connection settings, taken from the conventional PG*
# environment variables (the same ones libpq-based tooling reads).
DATABASE_USER = os.environ.get('PGUSER')
DATABASE_PASS = os.environ.get('PGPASSWORD')
DATABASE_HOST = os.environ.get('PGHOST')
DATABASE_PORT = os.environ.get('PGPORT', '5432')
DATABASE_NAME = os.environ.get('PGDATABASE')

# SQLAlchemy-style URL for the timeseries database; falls back to one built
# from the PG* pieces above.  NOTE(review): credentials end up embedded in
# this string -- make sure it is never logged verbatim.
DATABASE_CONNECTION_STRING = os.environ.get(
    'TimeseriesDatabase__ConnectionString_ETL',
    f'postgresql+pg8000://{DATABASE_USER}:{DATABASE_PASS}@{DATABASE_HOST}:{DATABASE_PORT}/{DATABASE_NAME}'
)

# Document-store settings for the country metadata collection.
# NOTE(review): COUNTRY_DATBASENAME is misspelled ("DATBASE"); renaming it
# would break importers, so it is left as-is.
COUNTRY_CONNECTION_STRING = os.environ.get('CountryDatabase__ConnectionString')
COUNTRY_DATBASENAME = os.environ.get('CountryDatabase__DatabaseName', 'covid')
COUNTRY_COLLECTIONNAME = os.environ.get('CountryDatabase__Collection',
                                        'countries')

# Document-store settings for the per-field metadata collection.
FIELDS_CONNECTION_STRING = os.environ.get('FieldsDatabase__ConnectionString')
FIELDS_DATABASENAME = os.environ.get('FieldsDatabase__DatabaseName', 'covid')
FIELDS_COLLECTIONNAME = os.environ.get('FieldsDatabase__Collection', 'fields')