Ejemplo n.º 1
0
def setup(path, name, debug):
    """Configure the 'task' logger with error, alert and flow handlers.

    path/name are forwarded to the handler factories; debug switches the
    flow handler between a verbose 'debug' one and the regular 'main' one.
    """
    # Register the custom level names (Python 2 dict-view iteration).
    for level_no, level_name in log_level.viewitems():
        logging.addLevelName(level_no, level_name)

    task_logger = logging.getLogger('task')

    handler = build_logger(path, name, 30, 'error')
    handler.setFormatter(base_format)
    handler.addFilter(ErrorFilter())
    task_logger.addHandler(handler)

    handler = build_alert_logger(path, name, 60, 'alert')
    handler.setFormatter(base_format)
    task_logger.addHandler(handler)

    if debug:
        handler = build_logger(path, name, 5, 'debug')
        handler.setFormatter(base_format)
        task_logger.addHandler(handler)
        task_logger.setLevel(10)
    else:
        handler = build_logger(path, name, 10, 'main')
        handler.setFormatter(base_format)
        handler.addFilter(MainFilter())
        task_logger.addHandler(handler)
        task_logger.setLevel(5)

    task_logger.log(0, 'Logger online')
Ejemplo n.º 2
0
 def __init__(self, _strName=None):
     """Initialise per-instance logging backed by shared class state.

     _strName: explicit logger id; defaults to the class name.
     """
     EDObject.__init__(self)
     strName = None
     if _strName is None:
         strName = self.getClassName()
     else:
         strName = _strName
     iId = self.getId()
     # NOTE(review): the per-instance id suffix was disabled, so `iId` is
     # now unused and all instances of a class share one logger id.
     #self.__loggingId = "%s-%d" % (strName, iId)
     self.__loggingId = strName
     # Serialise one-time configuration and logger-cache access across threads.
     EDLoggingPyLogging.__semaphoreLogging.acquire()
     if not EDLoggingPyLogging.__bInitisalised:
         # First instance ever: configure root logging and custom level names.
         logging.basicConfig(level=EDLoggingPyLogging.__logLevel, stream=sys.stdout)
         logging.addLevelName(EDLoggingPyLogging.UNIT_TEST_LEVEL, EDLoggingPyLogging.UNIT_TEST_NAME)
         logging.addLevelName(EDLoggingPyLogging.ASSERT_LEVEL, EDLoggingPyLogging.ASSERT_NAME)
         EDLoggingPyLogging.__bInitisalised = True
     # Reuse a cached logger for this id, otherwise create and cache one.
     if not self.__loggingId in EDLoggingPyLogging.__dictLoggers.keys():
         self.logger = logging.getLogger(self.__loggingId)
         self.logger.setLevel(EDLoggingPyLogging.__logLevel)
         EDLoggingPyLogging.__dictLoggers[self.__loggingId] = self.logger
     else:
         self.logger = EDLoggingPyLogging.__dictLoggers[self.__loggingId]
     EDLoggingPyLogging.__semaphoreLogging.release()
     # Per-instance flags controlling test mode, verbosity and file logging.
     self.__bIsTest = False
     self.__bIsVerboseDebug = False
     self.__strLogFileName = None
     self.__bIsLogFile = False
Ejemplo n.º 3
0
def get_logger():
    '''
    Returns logger used by multiprocessing

    Lazily creates the module-level ``_logger`` (non-propagating, named
    LOGGER_NAME) and registers the SUBDEBUG/SUBWARNING level names.
    '''
    global _logger
    import logging, atexit

    # Use logging's internal module lock so two threads cannot both run
    # the one-time initialisation below.
    logging._acquireLock()
    try:
        if not _logger:

            _logger = logging.getLogger(LOGGER_NAME)
            # Keep records out of the root logger's handlers.
            _logger.propagate = 0
            logging.addLevelName(SUBDEBUG, 'SUBDEBUG')
            logging.addLevelName(SUBWARNING, 'SUBWARNING')

            # XXX multiprocessing should cleanup before logging
            # Re-register _exit_function so it moves to the end of atexit's
            # list — presumably so it runs before logging's own shutdown
            # hook (atexit runs handlers in reverse registration order).
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                # Older Pythons lack unregister: poke the private list directly.
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))

    finally:
        logging._releaseLock()

    return _logger
Ejemplo n.º 4
0
    def journal_set(self, logfile=None):
        """Attach a bare file logger used to journal JSON objects to disk.

        Does nothing unless journalling is enabled in the config and a
        log file name is available.
        """
        if logfile is None:
            logfile = self.config.logfile
        if not self.config.journal or not logfile:
            return

        # Level 100 sits above CRITICAL (50) so journal records always pass.
        JOURNAL = 100
        logging.addLevelName(JOURNAL, "JOURNAL")

        self._journal_logger_name = "%s.journal" % self._logger_name
        journal_logger = logging.getLogger(self._journal_logger_name)
        journal_logger.handlers = []        # drop handlers from earlier calls
        journal_logger.setLevel(logging.DEBUG)
        journal_logger.propagate = False    # keep journal lines out of root

        # Message-only formatter: the journal file holds raw payloads.
        file_handler = logging.FileHandler(
            os.path.expanduser("%s.journal" % logfile))
        file_handler.setFormatter(logging.Formatter())
        journal_logger.addHandler(file_handler)

        self.journal = journal_logger
Ejemplo n.º 5
0
def bootstrap():
    """Install the custom logger class, name the CONSOLE level, and give
    the 'engineer' root logger a WARNING console handler."""
    logging.setLoggerClass(CustomLogger)
    logging.addLevelName(CONSOLE, 'CONSOLE')

    engineer_root = logging.getLogger('engineer')
    engineer_root.setLevel(logging.DEBUG)
    engineer_root.addHandler(get_console_handler(logging.WARNING))
Ejemplo n.º 6
0
def configure_logging():
    """Colourise the WARNING/ERROR level names and configure basic logging."""
    # Prefix the stock level names with colour escapes from Fore
    # (presumably colorama); WARNING first, then ERROR, as before.
    for level, colour in ((logging.WARNING, Fore.MAGENTA),
                          (logging.ERROR, Fore.RED)):
        logging.addLevelName(level, colour + logging.getLevelName(level))
    logging.basicConfig(level=loglevel(),
                        format="%(levelname)s: %(message)s")
Ejemplo n.º 7
0
def setupLogListener():
    """Start a QueueListener that funnels queued log records to the console."""
    global __logging_queue
    global __listener

    # rename the log levels with COLORFUL versions of themselves
    # because COLORS.
    colored_names = {
        50: "\033[1;31mCRITICAL\033[0m",
        40: "\033[0;31mERROR   \033[0m",
        30: "\033[0;33mWARNING \033[0m",
        20: "\033[0;37mINFO    \033[0m",
        10: "\033[0;34mDEBUG   \033[0m",
    }
    for level, name in colored_names.items():
        logging.addLevelName(level, name)

    record_queue = Queue()

    console_handler = logging.StreamHandler()
    # {}-style format; includes terminfo color codes for easier visual grepping
    console_handler.setFormatter(logging.Formatter(
        '{levelname}[{asctime}.{msecs:03.0f}] line \033[1m{lineno:4d}\033[0m in \033[1;30m{funcName:40}\033[0m: \033[0;37m{message}\033[0m',
        datefmt='%H:%M:%S', style='{'))

    listener = handlers.QueueListener(record_queue, console_handler)

    __logging_queue = record_queue
    __listener = listener

    listener.start()
Ejemplo n.º 8
0
def initialize(unit_test=False):
    """Prepare logging.

    Registers the custom TRACE/VERBOSE level names, configures warning
    filters, and (outside unit tests) installs a buffering handler that
    holds records until logging is properly started, plus a stream
    handler feeding `debug_buffer` for crash reports.  Idempotent.
    """
    global _logging_configured, _logging_started, _buff_handler

    # Only the first call does anything.
    if _logging_configured:
        return

    # Dev builds surface DeprecationWarnings from flexget modules.
    if 'dev' in __version__:
        warnings.filterwarnings('always', category=DeprecationWarning, module='flexget.*')
    warnings.simplefilter('once', append=True)
    logging.addLevelName(TRACE, 'TRACE')
    logging.addLevelName(VERBOSE, 'VERBOSE')
    _logging_configured = True

    # with unit test we want pytest to add the handlers
    if unit_test:
        _logging_started = True
        return

    # Store any log messages in a buffer until the `start` function is run
    logger = logging.getLogger()
    _buff_handler = logging.handlers.BufferingHandler(1000 * 1000)
    logger.addHandler(_buff_handler)
    logger.setLevel(logging.NOTSET)

    # Add a handler that stores recent debug lines in `debug_buffer` for use in crash reports
    crash_handler = logging.StreamHandler(debug_buffer)
    crash_handler.setLevel(logging.DEBUG)
    crash_handler.setFormatter(FlexGetFormatter())
    logger.addHandler(crash_handler)
Ejemplo n.º 9
0
def create_logger(ob_ctx):
    """Configure and return the root logger (Python 2).

    ob_ctx: context object providing `log_level` (numeric-ish) and
    `log_path` attributes.  Installs a rotating UTF-8 file handler,
    registers the DATADUMP (5) level and adds Logger.datadump().
    Returns None (or a partially configured logger) on failure.
    """
    logger = None
    try:
        logger = logging.getLogger()
        logger.setLevel(int(ob_ctx.log_level))

        # Rotate at ~50 MB, keeping a single backup file.
        handler = logging.handlers.RotatingFileHandler(
            ob_ctx.log_path,
            encoding='utf-8',
            maxBytes=50000000,
            backupCount=1
        )
        logFormat = logging.Formatter(
            u'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        handler.setFormatter(logFormat)
        logger.addHandler(handler)

        logging.addLevelName(5, "DATADUMP")

        def datadump(self, message, *args, **kwargs):
            # Mirror Logger.debug(): only emit when level 5 is enabled.
            if self.isEnabledFor(5):
                self._log(5, message, args, **kwargs)

        logging.Logger.datadump = datadump

    except Exception as e:
        # Best-effort: a logging failure must not abort startup (Py2 print).
        print "Could not setup logger, continuing: ", e.message
    return logger
Ejemplo n.º 10
0
Archivo: utils.py Proyecto: BoPeng/SOS
 def _set_logger(self, unused=None):
     """(Re)build the root logger used by this object.

     Installs a coloured console handler at the level mapped from
     self._verbosity, plus a DEBUG-level file handler when self._logfile
     is set.  `unused` is ignored — presumably a callback-style slot;
     TODO confirm with callers.
     """
     if not hasattr(logging, 'TRACE'):
         # Register the TRACE (5) level once per process.
         logging.TRACE = 5
         logging.addLevelName(logging.TRACE, "TRACE")
     # create a logger, we current use the regular logger but we should
     # switch to multiprocessing.get_logger if we notice trouble in, for example,
     # logging from multiple processes.
     self._logger = logging.getLogger()
     # clear previous handler
     while self._logger.hasHandlers():
         self._logger.removeHandler(self._logger.handlers[0])
     self._logger.setLevel(logging.DEBUG)
     # output to standard output
     cout = logging.StreamHandler()
     # Verbosity 0-4 maps to ERROR..TRACE; None means INFO.
     levels = {
         0: logging.ERROR,
         1: logging.WARNING,
         2: logging.INFO,
         3: logging.DEBUG,
         4: logging.TRACE,
         None: logging.INFO
     }
     #
     cout.setLevel(levels[self._verbosity])
     cout.setFormatter(ColoredFormatter('%(color_levelname)s: %(color_msg)s'))
     self._logger.addHandler(cout)
     # Convenience trace() bound onto the logger instance.
     self._logger.trace = lambda msg, *args: self._logger._log(logging.TRACE, msg, args)
     # output to a log file
     if self._logfile is not None:
         ch = logging.FileHandler(self._logfile, mode = 'a')
         # debug informaiton and time is always written to the log file
         ch.setLevel(logging.DEBUG)
         ch.setFormatter(logging.Formatter('%(asctime)s: %(levelname)s: %(message)s'))
         self._logger.addHandler(ch)
Ejemplo n.º 11
0
def main(project, env, action, verbose, format, template_path):
    """CLI entry point: configure logging, then render or apply a template.

    verbose: 0 -> INFO, 1 -> DEBUG, >=2 -> level 5 ("TRACE").
    action: 'apply' or 'template'; both require template_path.
    """
    if verbose >= 2:
        level = 5
    elif verbose == 1:
        level = logging.DEBUG
    else:
        # At default verbosity, silence the chatty Google client libraries.
        logging.getLogger('googleapiclient').setLevel(logging.ERROR)
        logging.getLogger('oauth2client').setLevel(logging.ERROR)
        level = logging.INFO

    logging.addLevelName(5, "TRACE")
    logging.basicConfig(format='%(asctime)s %(levelname)s:%(name)s:%(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S', level=level)

    logger.debug('Debug log enabled')
    logger.info("Log level: {}".format(level))

    if action in ['apply', 'template']:
        if template_path is None:
            logging.error('A path to a template file is required for {}'.format(action))
            sys.exit(1)
        template_class = load_template_module(template_path)
        template = template_class(project, env)

        if format == "json":
            template.formatter = template.asJSON

        if action == 'apply':
            # NOTE(review): __repr__() appears to render the template for
            # its side effects before deployment — confirm.
            template.__repr__()
            apply_deployment(project, template)
        elif action == 'template':
            t = template.__repr__()
            logger.info('Template successfully rendered, printing to stdout...')
            print(t)
            sys.exit(0)
Ejemplo n.º 12
0
def init_logging(prefix="", suffix=""):
	"""
	Initializes logging, sets custom logging format and adds one
	logging level with name and method to call.
	
	prefix and suffix arguments can be used to modify log level prefixes.
	"""
	logging.basicConfig(format=LOG_FORMAT)
	logger = logging.getLogger()	# NOTE(review): unused — kept as-is
	# Rename levels
	logging.addLevelName(10, prefix + "D" + suffix)	# Debug
	logging.addLevelName(20, prefix + "I" + suffix)	# Info
	logging.addLevelName(30, prefix + "W" + suffix)	# Warning
	logging.addLevelName(40, prefix + "E" + suffix)	# Error
	# Create additional, "verbose" level
	logging.addLevelName(15, prefix + "V" + suffix)	# Verbose
	# Add 'logging.verbose' method
	def verbose(self, msg, *args, **kwargs):
		return self.log(15, msg, *args, **kwargs)
	logging.Logger.verbose = verbose
	# Wrap Logger._log in something that can handle utf-8 exceptions
	# (Python 2: decode byte-string args/messages to unicode up front so
	# mixed str/unicode input cannot raise UnicodeDecodeError later).
	old_log = logging.Logger._log
	def _log(self, level, msg, args, exc_info=None, extra=None):
		args = tuple([
			(str(c).decode("utf-8") if type(c) is str else c)
			for c in args
		])
		msg = msg if type(msg) is unicode else str(msg).decode("utf-8")
		old_log(self, level, msg, args, exc_info, extra)
	logging.Logger._log = _log
Ejemplo n.º 13
0
 def add_debug_level(num, name):
     """Register a custom level and return a Logger-style method for it.

     Side effects: names the level in the logging module and exposes the
     numeric value as ``logging.<name>``.
     """
     logging.addLevelName(num, name)
     setattr(logging, name, num)

     def log_at_level(self, message, *args, **kwargs):
         # Mirror Logger.debug/info: honour the logger's effective level.
         if self.isEnabledFor(num):
             self._log(num, message, args, **kwargs)

     return log_at_level
Ejemplo n.º 14
0
def configureLogging(arguments=None, wrap=None):
    """Configure and return the 'critic' module logger (idempotent).

    arguments: optional namespace with .coverage/.debug/.quiet flags.
    wrap: optional callable applied to the logger before it is stored.
    """
    import logging
    import sys
    global logger, STREAM, STDOUT, STDERR
    if not logger:
        # Essentially same as DEBUG, used when logging the output from commands
        # run in the guest system.
        STDOUT = logging.DEBUG + 1
        STDERR = logging.DEBUG + 2
        logging.addLevelName(STDOUT, "STDOUT")
        logging.addLevelName(STDERR, "STDERR")
        # Coverage runs own stdout, so log to stderr instead.
        STREAM = sys.stderr if arguments and arguments.coverage else sys.stdout
        logging.basicConfig(
            format="%(asctime)-15s | %(levelname)-7s | %(message)s",
            stream=STREAM)
        logger = logging.getLogger("critic")
        level = logging.INFO
        if arguments:
            if getattr(arguments, "debug", False):
                level = logging.DEBUG
            elif getattr(arguments, "quiet", False):
                level = logging.WARNING
        logger.setLevel(level)
        if wrap:
            logger = wrap(logger)
    return logger
Ejemplo n.º 15
0
    def _init_logging(self):
        """Initiate the logging: file log via basicConfig plus a coloured
        console handler whose level follows the silent/quiet/verbose flags."""
        # Map the verbosity flags to (console, file) levels.
        if self.cmd_opts.silent:
            stdout_level, file_level = logging.CRITICAL, logging.INFO
        elif self.cmd_opts.quiet:
            stdout_level, file_level = logging.ERROR, logging.INFO
        elif self.cmd_opts.verbose:
            stdout_level, file_level = logging.DEBUG, logging.DEBUG
        else:
            stdout_level, file_level = logging.INFO, logging.INFO

        # Root/file logging appends timestamped lines to log.out.
        logging.basicConfig(level=file_level,
                            format='[%(asctime)s] %(levelname)s %(message)s',
                            datefmt='%Y%m%d %H:%M:%S',
                            filename="log.out",
                            filemode='a')

        # Compact two-character markers replace the stock level names.
        for value, marker in ((10, '--'), (20, '>>'), (30, '**'),
                              (40, '!!'), (50, 'XX')):
            logging.addLevelName(value, marker)

        console = ColouredConsoleHandler(sys.stdout)
        console.setLevel(stdout_level)
        console.setFormatter(logging.Formatter('%(levelname)s %(message)s'))
        logging.getLogger('').addHandler(console)
Ejemplo n.º 16
0
 def initLogging(self, consoleLogging=True):
     """Attach the configured file handler (and, optionally, a console
     handler) to the sickbeard/subliminal/imdbpy loggers.
     """

     self.log_file = os.path.join(sickbeard.LOG_DIR, self.log_file)

     self.cur_handler = self._config_handler()

     # Custom 'DB' level (5) for database chatter.
     logging.addLevelName(5,'DB')

     logging.getLogger('sickbeard').addHandler(self.cur_handler)
     logging.getLogger('subliminal').addHandler(self.cur_handler)
     logging.getLogger('imdbpy').addHandler(self.cur_handler)

     # define a Handler which writes INFO messages or higher to the sys.stderr
     if consoleLogging:
         console = logging.StreamHandler()

         console.setLevel(logging.INFO)

         # set a format which is simpler for console use; each library's
         # records get their own formatter, with a plain fallback.
         console.setFormatter(DispatchingFormatter({'sickbeard'  : logging.Formatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S'),
                                                    'subliminal' : logging.Formatter('%(asctime)s %(levelname)s::SUBLIMINAL :: %(message)s', '%H:%M:%S'),
                                                    'imdbpy'     : logging.Formatter('%(asctime)s %(levelname)s::IMDBPY :: %(message)s', '%H:%M:%S')
                                                    },
                                                    logging.Formatter('%(message)s'),))

         # add the handler to the root logger
         logging.getLogger('sickbeard').addHandler(console)
         logging.getLogger('subliminal').addHandler(console)
         logging.getLogger('imdbpy').addHandler(console)

     # DB is presumably the module-level constant for level 5 — confirm.
     logging.getLogger('sickbeard').setLevel(DB)
     logging.getLogger('subliminal').setLevel(logging.WARNING)
     logging.getLogger('imdbpy').setLevel(logging.WARNING)
Ejemplo n.º 17
0
    def setup (self):
        """Configure the root logger from this object's attributes.

        Clears any existing root handlers, registers the TRACE/MOREINFO
        level names, then (when self.enabled) installs a file handler or
        a devnull sink plus an optional console handler.  Python 2 only:
        uses `new.instancemethod` and `logging._levelNames`.
        """
        logging.getLogger('').handlers = []

        logging.addLevelName(TRACE, 'TRACE')
        logging.addLevelName(MOREINFO, 'MOREINFO')

        if self.enabled == True:

            # setup root logger
            self.root_logger = logging.getLogger('')
            if self.file is not None:
                formatter = logging.Formatter(self.format, self.datefmt)
                handler = MyFileHandler(self.file, 'a')
                handler.setFormatter(formatter)
                self.root_logger.addHandler(handler)
            else:
                # No file configured: swallow records instead of erroring.
                self.root_logger.addHandler(DevnullHandler())
            self.root_logger.setLevel(logging._levelNames[self.level])

            # Bind helper methods onto the root logger instance (Py2 `new` module).
            self.root_logger.trace = new.instancemethod(trace_method, self.root_logger, self.root_logger.__class__)
            self.root_logger.moreinfo = new.instancemethod(moreinfo_method, self.root_logger, self.root_logger.__class__)
            self.root_logger.__repr__ = new.instancemethod(repr_method, self.root_logger, self.root_logger.__class__)

            # setup a console logger, if enabled
            if self.console == True:
                console_fmtr = logging.Formatter(self.console_format, self.console_datefmt)
                console_hdlr = logging.StreamHandler()
                console_hdlr.setFormatter(console_fmtr)
                console_hdlr.setLevel(logging._levelNames[self.console_level])
                self.root_logger.addHandler(console_hdlr)

        self.is_setup = True
Ejemplo n.º 18
0
def setup_logging(args):
    """
    Intended to be called by any top-level module to set up "sensible" logging.

    args: namespace with .verbose/.quiet flags (quiet wins over verbose).
    """
    # use lowercase (and abbreviated to max 5 chars) level names
    for level, short in ((logging.DEBUG, "debug"),
                         (logging.INFO, "info"),
                         (logging.WARNING, "warn"),
                         (logging.ERROR, "error"),
                         (logging.CRITICAL, "crit")):
        logging.addLevelName(level, short)

    # Tag every line with the entry-point script's file name.
    tag = os.path.basename(__main__.__file__)

    loglevel = logging.INFO
    if args.verbose:
        loglevel = logging.DEBUG
    if args.quiet:
        loglevel = logging.WARN

    logging.basicConfig(
        format=('%(asctime)s.%(msecs).3d ' + tag +
                ' [%(process)d] %(levelname)5s: %(message)s'),
        datefmt='%b %d %H:%M:%S',
        level=loglevel)
    logging.debug("Initialised logging.")
Ejemplo n.º 19
0
def configure_logging(console_detail, file_detail):
    """Configure logging for the application.

    Configuration for both the console and file (via socket) logging for the application.

    Parameters
    ----------
    console_detail : int
        The requested detail level for the console logger.
    file_detail : int
        The requested detail level for the file (socket) logger.
    """
    # Root level must admit the more detailed of the two sinks —
    # presumably higher detail maps to a lower numeric level via
    # DETAIL_LEVEL; confirm against its definition.
    main_level = max(console_detail, file_detail)
    logging.basicConfig(level=DETAIL_LEVEL[main_level], format=CONSOLE_FORMAT)
    # Remove old console logger as it will double up messages when levels match.
    logging.getLogger().removeHandler(logging.getLogger().handlers[0])

    # Register the project's custom level names.
    for level in LoggingLevel:
        logging.addLevelName(level.value, level.name)

    ch = logging.StreamHandler()
    ch.setLevel(DETAIL_LEVEL[console_detail])
    ch.setFormatter(logging.Formatter(CONSOLE_FORMAT))
    logging.getLogger().addHandler(ch)

    # File logging is delegated to a socket listener on the default port.
    sh = logging.handlers.SocketHandler('localhost', logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    logging.getLogger().addHandler(sh)
Ejemplo n.º 20
0
def init_logging(input_loglevel, input_logfile):
    """Initialize the logging module.

    input_loglevel: level name ('debug', 'verbose', 'insane', ...);
    unknown names fall back to NOTSET.
    input_logfile: 'stdout', 'stderr', or a file path.
    Returns the numeric level that was configured.
    """
    # Register the two custom levels around the stock ones.
    logging.VERBOSE = 25
    logging.addLevelName(logging.VERBOSE, 'VERBOSE')
    logging.INSANE = 5
    logging.addLevelName(logging.INSANE, 'INSANE')

    level_by_name = {'debug': logging.DEBUG,
                     'info': logging.INFO,
                     'verbose': logging.VERBOSE,
                     'warning': logging.WARNING,
                     'error': logging.ERROR,
                     'insane': logging.INSANE,
                     'critical': logging.CRITICAL}
    stream_by_name = {'stdout': sys.stdout, 'stderr': sys.stderr}

    #NOTE: We're changing the options value from a string name to an int... this gets passed on to everything afterward!
    input_loglevel = level_by_name.get(input_loglevel, logging.NOTSET)

    # Route to a stream for stdout/stderr, otherwise to a file.
    if input_logfile in ('stdout', 'stderr'):
        log_stream = stream_by_name.get(input_logfile, logging.NOTSET)
        log_file = None
    else:
        log_stream = None
        log_file = input_logfile

    logging.basicConfig(level=input_loglevel,
                        format='%(relativeCreated)-8d %(module)-12s:%(lineno)-5s %(levelname)-8s (%(name)s) %(message)s',
                        datefmt=None,
                        filename=log_file,
                        stream=log_stream,
                        filemode='w')
    logging.getLogger().name = "MainThread"
    logging.log(logging.VERBOSE, "Processing started at %s" %(datetime.now()))
    return input_loglevel
Ejemplo n.º 21
0
    def __init__(self, name, logfile, loglevel='INFO'):
        """Create a named logger writing to *logfile*.

        name    -> Logger name
        logfile -> name of file we want to log to
        loglevel-> textual level; unrecognised values fall back to DEBUG
        """
        # Roll the previous log over if the file already exists.
        needs_rollover = os.path.isfile(logfile)

        self.logger = logging.getLogger(name)
        formatter = logging.Formatter(
            '[%(asctime)s %(process)d  -1 %(name)s %(levelname)s] {- -}  %(message)s',
            '%b %d %H:%M:%S')
        handler = logging.handlers.RotatingFileHandler(logfile, backupCount=5)
        handler.setFormatter(formatter)

        if needs_rollover:
            handler.doRollover()

        self.logger.addHandler(handler)
        logging.addLevelName(29, "NOTICE")

        # Map syslog-style names onto logging levels; default to DEBUG.
        name_to_level = {'DEBUG'     : logging.DEBUG,
                         'INFO'      : logging.INFO,
                         'NOTICE'    : 29,
                         'WARNING'   : logging.WARNING,
                         'CRIT'      : logging.CRITICAL,
                         'ERR'       : logging.ERROR,
                         'ALERT'     : 51,
                         'EMERG'     : 52}
        self.logger.setLevel(name_to_level.get(loglevel.upper(), logging.DEBUG))
Ejemplo n.º 22
0
    def instantiate( cls, streamType = "SCREEN", logLevel = "INFO" ):
        """Set up cls.logger (the root logger): registers VERBOSE (5),
        adds verbose() helpers, and attaches a screen or file handler
        based on app.config['STREAMTYPE'].  Python 2 only (uses the
        `except Exception, e` syntax and `logging._levelNames`).

        On any failure cls.logger is set to None.
        """
        try:
            logging.VERBOSE = 5
            logging.addLevelName(logging.VERBOSE, "VERBOSE")
            # Convenience wrappers so logger.verbose()/logging.verbose() work.
            logging.Logger.verbose = lambda inst, msg, *args, **kwargs: inst.log(logging.VERBOSE, msg, *args, **kwargs)
            logging.verbose = lambda msg, *args, **kwargs: logging.log(logging.VERBOSE, msg, *args, **kwargs)

            cls.logger = logging.getLogger()

            if logLevel not in logging._levelNames:
                raise Exception( 'Invalid file level' )

            cls.logger.setLevel( logging._levelNames[logLevel] )

            # NOTE(review): the streamType argument is immediately
            # overridden by the app config value.
            streamType = app.config['STREAMTYPE']

            if streamType == "SCREEN":
                stream = logging.StreamHandler()
            else:
                stream = logging.FileHandler( app.config['LOGFILE'] )

            formatter = logging.Formatter( '[%(levelname)-7s - %(asctime)s] %(message)s' )
            stream.setFormatter( formatter )
            cls.logger.addHandler( stream )
        except Exception, e:
            print( 'Unable to get/set log configurations. Error: %s'%( e ) )
            cls.logger = None
Ejemplo n.º 23
0
def init_logging(args):
    """Configure logging based on args['debug'] (falsy, 1 or 2).

    Colourises the stock level names and installs print_d/print_i/print_w
    shortcuts into the builtins namespace.
    """
    import logging

    # '%%' survives the first %-substitution and becomes logging's '%'.
    base = "*** %%(levelname)s%s: %%(message)s"

    debug = args["debug"]
    if not debug:
        format_ = base % ""
        level = logging.INFO
    elif debug == 1:
        format_ = base % " (%(filename)s:%(lineno)d)"
        level = logging.DEBUG
    elif debug == 2:
        format_ = base % " (%(asctime)s, %(filename)s:%(lineno)d)"
        level = logging.DEBUG
    else:
        raise SystemExit("Maximum debug level is 2")

    logging.basicConfig(format=format_, level=level,
            datefmt="%a %b %d %H:%M:%S %Y")

    # Wrap the stock level names in ANSI colour escapes.
    for lvl, colour in ((logging.INFO, "32"), (logging.DEBUG, "34"),
                        (logging.WARNING, "31"), (logging.CRITICAL, "36")):
        logging.addLevelName(lvl, "\033[1;%sm%s\033[1;m" %
                             (colour, logging.getLevelName(lvl)))

    # Expose shorthand print_* helpers everywhere.
    __builtins__.__dict__["print_d"] = logging.debug
    __builtins__.__dict__["print_i"] = logging.info
    __builtins__.__dict__["print_w"] = logging.warning
Ejemplo n.º 24
0
def setup_logger():
    """Configure the root logger: console output, an optional rotating
    file log, and an optional in-memory buffer used for the e-mail report.

    Reads the module-level ``config`` mapping; registers the custom
    OUTPUT (15) and OUTERR (25) levels and sets the root level to OUTPUT.
    """
    log_format = logging.Formatter(
        "%(asctime)s [%(levelname)-6.6s] %(message)s")
    root_logger = logging.getLogger()
    logging.OUTPUT = 15
    logging.addLevelName(logging.OUTPUT, "OUTPUT")
    logging.OUTERR = 25
    logging.addLevelName(logging.OUTERR, "OUTERR")
    root_logger.setLevel(logging.OUTPUT)
    console_logger = logging.StreamHandler(sys.stdout)
    console_logger.setFormatter(log_format)
    root_logger.addHandler(console_logger)

    if config["logging"]["file"]:
        # BUG FIX: this used min(maxsize, 0), which clamps every positive
        # maxsize to 0 ("never rotate") and passes negatives through.
        # max(..., 0) honours the configured size and treats negative
        # values as 0.
        max_log_size = max(config["logging"]["maxsize"], 0) * 1024
        file_logger = logging.handlers.RotatingFileHandler(
            config["logging"]["file"],
            maxBytes=max_log_size,
            backupCount=9)
        file_logger.setFormatter(log_format)
        root_logger.addHandler(file_logger)

    if config["email"]["sendon"]:
        global email_log
        email_log = StringIO()
        email_logger = logging.StreamHandler(email_log)
        email_logger.setFormatter(log_format)
        if config["email"]["short"]:
            # Don't send program stdout in email
            email_logger.setLevel(logging.INFO)
        root_logger.addHandler(email_logger)
Ejemplo n.º 25
0
def _make_log_level(name, level, exceptions, logging):
    """Register a new log level plus convenience methods for it.

    Adds ``logging.<name>`` (the numeric value), a ``Logger.<name.lower()>``
    method, and a module-level ``logging.<name.lower()>`` function.  When
    *exceptions* is true the helpers attach exc_info by default.
    """
    logging.addLevelName(level, name)
    setattr(logging, name, level)

    func_name = name.lower()

    def logger_method(self, message, *args, **kws):
        if exceptions:
            # Include exceptions by default (caller may still override).
            kws.setdefault('exc_info', True)
        self.log(level, message, *args, **kws)

    logger_method.__name__ = func_name
    setattr(logging.Logger, func_name, logger_method)

    def module_func(message, *args, **kws):
        if exceptions:
            # Include exceptions by default (caller may still override).
            kws.setdefault('exc_info', True)
        logging.log(level, message, *args, **kws)

    module_func.__name__ = func_name
    setattr(logging, func_name, module_func)
Ejemplo n.º 26
0
    def __init__(self, level=config.LOGGER_LEVEL):
        """ Init object.

        @param level: initial logger level
        @type level: str
        """
        super(Logger, self).__init__()

        # Register custom levels from the LEVELS map and suppress
        # logging's internal error reports (raiseExceptions = 0).
        logging.TRACE = LEVELS['trace']
        logging.EXCEPTION = LEVELS['exception']
        logging.raiseExceptions = 0
        logging.addLevelName(logging.TRACE, "TRACE")
        logging.addLevelName(logging.EXCEPTION, "EXCEPTION")

        # Logger (non-propagating, named after the application)
        self._logger = logging.getLogger(config.APP_NAME)
        self._logger.propagate = False

        # Handlers: coloured stdout stream handler is always attached.
        self._stdoutStreamHandler = logging.StreamHandler()
        streamFormatter = SpaceColorFormatter(config.LOGGER_STREAM_FORMAT)
        self._stdoutStreamHandler.setFormatter(streamFormatter)
        self._logger.addHandler(self._stdoutStreamHandler)

        # Optional rotating file log; 'w' mode truncates the previous log.
        if config.LOGGER_BACKUP_COUNT:
            loggerFilename = os.path.join(config.LOGGER_DIR, "%s.log" % config.APP_NAME)
            fileHandler = logging.handlers.RotatingFileHandler(loggerFilename, 'w',
                                                               config.LOGGER_MAX_BYTES,
                                                               config.LOGGER_BACKUP_COUNT)
            fileFormatter = SpaceFormatter(config.LOGGER_FILE_FORMAT)
            fileHandler.setFormatter(fileFormatter)
            self._logger.addHandler(fileHandler)

        self.setLevel(level)
Ejemplo n.º 27
0
    def __init__(self, args):
        """Wire Yum's logging into the NBYum classes and pre-configure the base.

        args: parsed CLI namespace (debug, config, force_cache, func, ...).
        """
        self.args = args

        # -- Hijack the Yum logging ------------------------------------------
        logging.setLoggerClass(NBYumLogger)
        logging.addLevelName(PROGRESS_LEVEL, "progress")
        logging.addLevelName(RECAP_LEVEL, "recap")

        self.base = NBYumBase()

        # -- Deal with the preconfig stuff -----------------------------------
        if not args.debug:
            self.base.preconf.debuglevel = 0
        else:
            self.base.preconf.debuglevel = 6

        if args.config:
            self.base.preconf.fn = args.config

        self.base.prerepoconf.progressbar = NBYumTextMeter()

        # This sets up a bunch of stuff
        # (merely accessing base.conf triggers Yum's lazy configuration —
        # presumably intentional; confirm against the Yum API)
        self.base.conf

        # last_updated only makes sense against the existing cache.
        if self.args.func == "last_updated":
            args.force_cache = True

        if args.force_cache:
            if self.args.func == "rebuild_cache":
                self.base.logger.warning("Ignoring --force-cache argument, as"
                                         " we are rebuilding the cache")

            else:
                self.base.conf.cache = 1
Ejemplo n.º 28
0
def main():
    """Start the SSTP server: parse args, configure logging and the IP
    pool, then listen with or without SSL (Python 2 / Twisted).
    """
    args = _getArgs()
    logging.basicConfig(level=args.log_level,
            format='%(asctime)s %(levelname)-s: %(message)s')
    logging.addLevelName(5, 'VERBOSE')

    # Pool of client IPs; the local address is reserved up front.
    ippool = IPPool(args.remote)
    ippool.register(args.local)

    if args.no_ssl:
        logging.info('Running without SSL.')
        factory = SSTPProtocolFactory(pppd=args.pppd, pppdConfigFile=args.pppd_config,
                local=args.local, remotePool=ippool, certHash=None)
        reactor.listenTCP(args.listen_port, factory)
    else:
        cert = _load_cert(args.pem_cert)
        # Binary certificate fingerprints (Py2-only str.decode('hex')).
        sha1 = cert.digest('sha1').replace(':', '').decode('hex')
        sha256 = cert.digest('sha256').replace(':', '').decode('hex')
        cert_options = cert.options()

        # Optionally restrict the TLS cipher suites.
        if args.ciphers:
            cert_options.getContext().set_cipher_list(args.ciphers)

        factory = SSTPProtocolFactory(pppd=args.pppd, pppdConfigFile=args.pppd_config,
                local=args.local, remotePool=ippool, certHash=[sha1, sha256])
        reactor.listenSSL(args.listen_port, factory,
                cert_options, interface=args.listen)


    logging.info('Listening on %s:%s...' % (args.listen, args.listen_port))
    reactor.run()
Ejemplo n.º 29
0
    def __init__(self, defaultStreamHandler, defaultFileHandler):
        """ Init object.

        defaultStreamHandler: if true, attach a coloured stdout handler.
        defaultFileHandler: if true, attach a rotating file log in TMP_DIR.
        """
        QtCore.QObject.__init__(self)
        # Custom levels straddling the stock ones: TRACE below DEBUG,
        # EXCEPTION above ERROR; suppress logging's own error reports.
        logging.TRACE = logging.DEBUG - 5
        logging.EXCEPTION = logging.ERROR + 5
        logging.raiseExceptions = 0
        logging.addLevelName(logging.TRACE, "TRACE")
        logging.addLevelName(logging.EXCEPTION, "EXCEPTION")

        # Formatters
        #defaultFormatter = DefaultFormatter(config.LOGGER_FORMAT)
        spaceFormatter = SpaceFormatter(config.LOGGER_FORMAT)
        #colorFormatter = ColorFormatter(config.LOGGER_FORMAT)
        spaceColorFormatter = SpaceColorFormatter(config.LOGGER_FORMAT)

        # Logger (everything from TRACE up is handled)
        self.__logger = logging.getLogger('papywizard')
        self.__logger.setLevel(logging.TRACE)

        # Handlers
        if defaultStreamHandler:
            stdoutStreamHandler = logging.StreamHandler()
            #stdoutStreamHandler.setFormatter(colorFormatter)
            stdoutStreamHandler.setFormatter(spaceColorFormatter)
            self.__logger.addHandler(stdoutStreamHandler)
        if defaultFileHandler:
            # 'w' mode truncates the previous log file on startup.
            loggerFilename = os.path.join(config.TMP_DIR, config.LOGGER_FILENAME)
            fileHandler = logging.handlers.RotatingFileHandler(loggerFilename, 'w',
                                                               config.LOGGER_MAX_BYTES,
                                                               config.LOGGER_BACKUP_COUNT)
            fileHandler.setFormatter(spaceFormatter)
            self.__logger.addHandler(fileHandler)
Ejemplo n.º 30
0
def get_logger(module_name, log_folder_path=None):
    """Build a logger writing DEBUG+ records to LOG_FILE.log and SUCCESS+ to stdout.

    module_name just to distinguish where the logs come from.

    :param module_name: logger name (appears in the file log format).
    :param log_folder_path: directory for the log file.  Defaults to the
        current working directory *at call time* -- the old default
        ``os.getcwd()`` was evaluated once at import time, freezing whatever
        directory the process started in.
    :return: a configured ``logging.Logger`` with a ``success`` method.
    """
    if log_folder_path is None:
        log_folder_path = os.getcwd()
    log_file_path = os.path.join(log_folder_path, "LOG_FILE.log")

    # adding a new logging level
    # SUCCESS sits just below INFO (ALL = 0, DEBUG = 10, INFO = 20, ...).
    logging.SUCCESS = 19
    logging.addLevelName(logging.SUCCESS, 'SUCCESS')
    logger = logging.getLogger(module_name)
    # Use the public Logger.log API: the previous lambda called the private
    # _log() directly, bypassing the isEnabledFor() check.
    logger.success = lambda msg, *args: logger.log(logging.SUCCESS, msg, *args)

    # create formatters
    console_log_formatter = logging.Formatter('[%(levelname)s] - %(message)s')
    file_log_formatter = logging.Formatter('%(asctime)s - %(name)s - [%(levelname)s] - %(message)s')

    # create file handler
    file_handler = logging.FileHandler(log_file_path)
    file_handler.setFormatter(file_log_formatter)
    file_handler.setLevel(logging.DEBUG)

    # create console log handler
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setFormatter(console_log_formatter)
    stream_handler.setLevel(logging.SUCCESS)

    logger.setLevel(logging.SUCCESS)
    # Avoid stacking duplicate handlers (and duplicate output) when this
    # function is called more than once for the same module name.
    if not logger.handlers:
        logger.addHandler(file_handler)
        logger.addHandler(stream_handler)
    return logger
Ejemplo n.º 31
0
  def register_bootstrap_options(cls, register):
    """Register bootstrap options.

    "Bootstrap options" are a small set of options whose values are useful when registering other
    options. Therefore we must bootstrap them early, before other options are registered, let
    alone parsed.

    Bootstrap option values can be interpolated into the config file, and can be referenced
    programatically in registration code, e.g., as register.bootstrap.pants_workdir.

    Note that regular code can also access these options as normal global-scope options. Their
    status as "bootstrap options" is only pertinent during option registration.

    :param register: callable used to declare each option; the registration order below is
        deliberate, since it controls the order options appear in help output.
    """
    buildroot = get_buildroot()

    # Although logging supports the WARN level, its not documented and could conceivably be yanked.
    # Since pants has supported 'warn' since inception, leave the 'warn' choice as-is but explicitly
    # setup a 'WARN' logging level name that maps to 'WARNING'.
    logging.addLevelName(logging.WARNING, 'WARN')
    register('-l', '--level', choices=['debug', 'info', 'warn'], default='info', recursive=True,
             help='Set the logging level.')
    register('-q', '--quiet', action='store_true', recursive=True,
             help='Squelches most console output.')
    # Not really needed in bootstrap options, but putting it here means it displays right
    # after -l and -q in help output, which is conveniently contextual.
    register('--colors', action='store_true', default=True, recursive=True,
             help='Set whether log messages are displayed in color.')

    # NB: Right now this option is a placeholder that is unused within pants itself except when
    # specified on the command line to print the OSS pants version.  Both the IntelliJ Pants plugin
    # and the pantsbuild/setup bootstrap script grep for pants_version though so this option
    # registration serves in part as documentation of the dependency.
    # TODO(John Sirois): Move pantsbuild.pants bootstrapping into pants itself and have it use this
    # version option directly.
    register('-v', '-V', '--pants-version',
             nargs='?',  # Allows using the flag with no args on the CLI to print version as well
                         # as setting the version in pants.ini
             default=pants_version(),  # Displays the current version correctly in `./pants -h`.
             const=pants_version(),  # Displays the current version via `./pants -V`.
             help="Prints pants' version number and exits.")

    # Plugin resolution and loading options.
    register('--plugins', advanced=True, type=list_option, help='Load these plugins.')
    register('--plugin-cache-dir', advanced=True,
             default=os.path.join(get_pants_cachedir(), 'plugins'),
             help='Cache resolved plugin requirements here.')

    register('--backend-packages', advanced=True, type=list_option,
             help='Load backends from these packages that are already on the path.')

    # Well-known pants directory locations, all rooted under the buildroot by default.
    register('--pants-bootstrapdir', advanced=True, metavar='<dir>', default=get_pants_cachedir(),
             help='Use this dir for global cache.')
    register('--pants-configdir', advanced=True, metavar='<dir>', default=get_pants_configdir(),
             help='Use this dir for global config files.')
    register('--pants-workdir', advanced=True, metavar='<dir>',
             default=os.path.join(buildroot, '.pants.d'),
             help='Write intermediate output files to this dir.')
    register('--pants-supportdir', advanced=True, metavar='<dir>',
             default=os.path.join(buildroot, 'build-support'),
             help='Use support files from this dir.')
    register('--pants-distdir', advanced=True, metavar='<dir>',
             default=os.path.join(buildroot, 'dist'),
             help='Write end-product artifacts to this dir.')
    # Config layering: pants.ini, then overrides, then pantsrc files.
    register('--config-override', advanced=True, action='append', metavar='<path>',
             help='A second config file, to override pants.ini.')
    register('--pantsrc', advanced=True, action='store_true', default=True,
             help='Use pantsrc files.')
    register('--pantsrc-files', advanced=True, action='append', metavar='<path>',
             default=['/etc/pantsrc', '~/.pants.rc'],
             help='Override config with values from these files. '
                  'Later files override earlier ones.')
    register('--pythonpath', advanced=True, action='append',
             help='Add these directories to PYTHONPATH to search for plugins.')
    register('--target-spec-file', action='append', dest='target_spec_files',
             help='Read additional specs from this file, one per line')

    # These logging options are registered in the bootstrap phase so that plugins can log during
    # registration and not so that their values can be interpolated in configs.
    register('-d', '--logdir', advanced=True, metavar='<dir>',
             help='Write logs to files under this directory.')
Ejemplo n.º 32
0
import IPy

import vrnetlab

def handle_SIGCHLD(signal, frame):
    """Reap any exited child processes so they do not linger as zombies."""
    try:
        os.waitpid(-1, os.WNOHANG)
    except ChildProcessError:
        # No children left to reap (SIGCHLD can arrive after they are all
        # collected); the bare waitpid(-1, ...) raised in that case.
        pass

def handle_SIGTERM(signal, frame):
    """Terminate the process cleanly with exit status 0."""
    raise SystemExit(0)

# SIGINT (Ctrl-C) deliberately shares the SIGTERM handler so both exit
# cleanly; SIGCHLD reaps finished child processes.
signal.signal(signal.SIGINT, handle_SIGTERM)
signal.signal(signal.SIGTERM, handle_SIGTERM)
signal.signal(signal.SIGCHLD, handle_SIGCHLD)

# Custom TRACE level just below DEBUG (10) for very verbose output.
TRACE_LEVEL_NUM = 9
logging.addLevelName(TRACE_LEVEL_NUM, "TRACE")
def trace(self, message, *args, **kws):
    """Log *message* at TRACE level (the logger takes '*args' as 'args')."""
    if not self.isEnabledFor(TRACE_LEVEL_NUM):
        return
    self._log(TRACE_LEVEL_NUM, message, args, **kws)
logging.Logger.trace = trace



class VMX_vcp(vrnetlab.VM):
    def __init__(self, username, password, image, install_mode=False):
        super(VMX_vcp, self).__init__(username, password, disk_image=image, ram=4096)
        self.install_mode = install_mode
        self.num_nics = 0
        self.qemu_args.extend(["-drive", "if=ide,file=/vmx/vmxhdd.img"])
        self.smbios = ["type=0,vendor=Juniper",
Ejemplo n.º 33
0
# COCOTB_REDUCED_LOG_FMT (default "1") selects the shortened log prefix;
# any non-integer value also falls back to the reduced format.
try:
    _suppress = int(os.environ.get("COCOTB_REDUCED_LOG_FMT", "1"))
except ValueError:
    _suppress = 1

# Column alignment
_LEVEL_CHARS = len("CRITICAL")  # noqa
_RECORD_CHARS = 35  # noqa
_FILENAME_CHARS = 20  # noqa
_LINENO_CHARS = 4  # noqa
_FUNCNAME_CHARS = 31  # noqa

# Custom log level: TRACE sits below DEBUG (10).
logging.TRACE = 5
logging.addLevelName(5, "TRACE")

# Default log level if not overwritten by the user.
_COCOTB_LOG_LEVEL_DEFAULT = "INFO"


def default_config():
    """ Apply the default cocotb log formatting to the root logger.

    This hooks up the logger to write to stdout, using either
    :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending
    on whether colored output is requested. It also adds a
    :class:`SimTimeContextFilter` filter so that
    :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.

    The logging level for cocotb logs is set based on the
Ejemplo n.º 34
0
import sys
import gc
import re
import uuid
import json
import tempfile
import psutil
import warnings
import subprocess
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from multiprocessing import cpu_count
from time import strftime
from glob import glob

# Custom levels: IMPORTANT between INFO (20) and WARNING (30),
# VERBOSE between DEBUG (10) and INFO (20).
logging.addLevelName(25, 'IMPORTANT')
logging.addLevelName(15, 'VERBOSE')
logger = logging.getLogger('cli')


def _warn_redirect(message, category, filename, lineno, file=None, line=None):
    """warnings.showwarning-compatible hook that routes warnings to the CLI logger."""
    logger.warning('Captured warning (%s): %s', category, message)


def check_deps(workflow):
    """Return sorted (interface class name, command) pairs for nodes whose
    command-line executable cannot be found on PATH."""
    from nipype.utils.filemanip import which
    missing = []
    for node in workflow._get_all_nodes():
        iface = node.interface
        if hasattr(iface, '_cmd') and which(iface._cmd.split()[0]) is None:
            missing.append((iface.__class__.__name__, iface._cmd))
    return sorted(missing)
Ejemplo n.º 35
0
# Decide where the log file lives: beside the frozen executable's drive root
# when running as a bundled exe, otherwise named after the entry-point
# script.  NOTE(review): `is_frozen_exe`, `logroot` and `version` are
# defined earlier in this module; this block assumes they exist.
if is_frozen_exe:
    if logroot is None:
        # Default to the executable's drive (e.g. "C:") with a trailing sep.
        logroot = os.path.splitdrive(sys.executable)[0]
        if logroot[-1] != os.sep:
            logroot += os.sep
    logname = os.path.split(sys.executable)[1]
else:
    logname = os.path.split(os.path.abspath(sys.argv[0]))[1]
logname = os.path.splitext(logname)[0] + '.log'
if logroot != '' and not os.path.exists(logroot):
    os.makedirs(logroot)
logpath = os.path.join(logroot, logname)

# because I'm generous: STDERR outranks CRITICAL so it is never filtered.
STDERR = logging.CRITICAL + 10
logging.addLevelName(STDERR, 'STDERR')

# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
# set a format which is simpler for console use
#formatter = logging.Formatter(u'%(name)-12s: %(levelname)-8s %(message)s')
formatter = logging.Formatter(u'%(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)

# File-style format embedding the app version and a timestamp.  The u''
# literals and the `unicode` builtin indicate this module targets Python 2.
bt_log_fmt = logging.Formatter(u'[' + unicode(version) +
                               u' %(asctime)s] %(levelname)-8s: %(message)s',
                               datefmt=u'%Y-%m-%d %H:%M:%S')
Ejemplo n.º 36
0
import logging
import os
import re
import string
import sys
import time
from queue import PriorityQueue
from random import randint

import pymysql
import pymysql.cursors
import requests

# Register a custom level between INFO (20) and WARNING (30).
logging.addLevelName(level=25, levelName='MyINFO')
MyINFO = 25

logging.basicConfig(
    level=MyINFO,
    format='%(asctime)s [line:%(lineno)d] %(levelname)s: %(message)s',
    datefmt='%H:%M:%S')

# Target-site constants: pages are addressed as
# BASE_URL + PARAMs + <CODE_LENGTH chars from ALPHABETS> + SUFFIX.
CODE_LENGTH = 7
BASE_URL = "http://zzzpan.com/"
PARAMs = "?/file/view-"
SUFFIX = ".html"
ALPHABETS = string.ascii_uppercase[:] + string.digits
# Shared crawler state -- presumably codes already seen plus progress
# counters; verify against the crawl loop elsewhere in this module.
VISITED = set()
cnt = 0
url_cnt = 0
FAIL = 0
BANNED = 0
Ejemplo n.º 37
0
import logging

import six


class TraceLogger:
    """Abstract base for trace loggers; subclasses must override log()."""

    def __init__(self, name):
        self.name = name

    def log(self, **kwargs):
        # Fixed: the original signature omitted `self`, so any instance call
        # failed with "log() takes 0 positional arguments" instead of
        # reaching this intended abstract-method error.
        raise TypeError("Abstract Method")


# Add custom "TRACE" log level for ludicrous verbosity.
LOGLV_TRACE = 5
logging.addLevelName(LOGLV_TRACE, "TRACE")


def trace(self, message, *args, **kws):
    """Log *message* at TRACE level if enabled (logger wants args as a tuple)."""
    if self.isEnabledFor(LOGLV_TRACE):
        self._log(LOGLV_TRACE, message, args, **kws)


def get_logger(name=None):
    """Return a stdlib logger augmented with a bound ``trace`` method.

    Binding via the function's descriptor protocol works identically on
    Python 2 and 3, so the third-party ``six.create_bound_method`` helper
    is unnecessary here.
    """
    logger = logging.getLogger(name)
    logger.trace = trace.__get__(logger)
    return logger
Ejemplo n.º 38
0
Archivo: utils.py Proyecto: vacary/woo
def find_executable(ex):
    'Thin wrapper for find_executable, using either shutil.which or distutils.spawn.find_executable.'
    # Import the attribute itself: on Python 2, `import shutil` succeeds but
    # `shutil.which` raises AttributeError (not ImportError), which defeated
    # the original fallback to distutils.
    try:
        from shutil import which
    except ImportError:
        from distutils.spawn import find_executable as which
    return which(ex)


# https://stackoverflow.com/a/55276759/761090
# Register a TRACE level below DEBUG.  Building Logger.trace from
# partialmethod(Logger.log, ...) rather than a wrapper function keeps the
# call chain inside the logging module itself -- presumably so caller
# introspection (%(funcName)s etc.) still reports the real call site;
# confirm before replacing with a plain def.
import logging
from functools import partial, partialmethod
logging.TRACE = logging.DEBUG - 5
logging.addLevelName(logging.TRACE, 'TRACE')
logging.Logger.trace = partialmethod(logging.Logger.log, logging.TRACE)
logging.trace = partial(logging.log, logging.TRACE)


def makeLog(name):
    import logging
    try:
        import coloredlogs
        coloredlogs.install(
            level='INFO',
            fmt=
            '%(asctime)s %(name)-8s %(filename)s:%(lineno)d [%(levelname)s] %(message)s',
            datefmt='%H:%M:%S')
        return logging.getLogger(name)
    except ImportError:
Ejemplo n.º 39
0
# markdown debug is also considered useless
logging.getLogger('MARKDOWN').setLevel(logging.INFO)
# asyncio runs in debug mode but we do not need INFO/DEBUG
logging.getLogger('asyncio').setLevel(logging.WARN)

LOGFILE = '/var/log/middlewared.log'
logging.TRACE = 6


def trace(self, message, *args, **kws):
    """Emit *message* at the custom TRACE level when the logger allows it."""
    if not self.isEnabledFor(logging.TRACE):
        return
    self._log(logging.TRACE, message, args, **kws)


# Register the level name and expose trace() on every Logger instance.
logging.addLevelName(logging.TRACE, "TRACE")
logging.Logger.trace = trace


class CrashReporting(object):
    """
    Pseudo-Class for remote crash reporting
    """
    def __init__(self, transport='threaded'):
        if transport == 'threaded':
            transport = ThreadedHTTPTransport
        else:
            raise ValueError(f'Unknown transport: {transport}')

        if sw_version_is_stable():
            self.sentinel_file_path = '/tmp/.crashreporting_disabled'
Ejemplo n.º 40
0
avg_new = 0
health = ""

#====================
# logging
#====================

logging.VERBOSE = 15


def log_verbose(self, message, *args, **kws):
    if self.isEnabledFor(logging.VERBOSE):
        self.log(logging.VERBOSE, message, *args, **kws)


logging.addLevelName(logging.VERBOSE, "VERBOSE")
logging.Logger.verbose = log_verbose

formatter = logging.Formatter(
    fmt='%(asctime)-15s.%(msecs)03d %(levelname)s: %(message)s',
    datefmt="%Y-%m-%d %H:%M:%S")

handler = logging.StreamHandler()
handler.setFormatter(formatter)

logger = logging.getLogger("bc-ceph-reweight-by-utilization")

logger.addHandler(handler)

#====================
Ejemplo n.º 41
0
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
               'format it like this'),
]

CONF = cfg.CONF
# Make every option group known to oslo.config before any parsing happens.
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)

# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
#                module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')

try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7

    # Minimal no-op backport for Python <= 2.6: discard every record and
    # skip lock creation entirely.
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            # Nothing is ever written, so no lock is needed.
            self.lock = None
        'INFOV': 'cyan,bold',
        'WARNING': 'yellow',
        'ERROR': 'red,bold',
        'CRITICAL': 'red,bg_white',
    },
    secondary_log_colors={},
    style='%')
# NOTE(review): `ch` and `formatter` are created earlier in this module.
ch.setFormatter(formatter)

log = logging.getLogger('videocap')
log.setLevel(logging.DEBUG)
log.handlers = []  # No duplicated handlers
log.propagate = False  # workaround for duplicated logs in ipython
log.addHandler(ch)

# INFOV: an emphasised INFO variant, one notch above INFO.
logging.addLevelName(logging.INFO + 1, 'INFOV')


def _infov(self, msg, *args, **kwargs):
    """Log *msg* at the INFOV (INFO + 1) level."""
    self.log(logging.INFO + 1, msg, *args, **kwargs)


# Expose infov() on every Logger instance.
logging.Logger.infov = _infov

# Image Processing Routine
# ========================

import numpy as np
import scipy.misc

# NOTE(review): scipy.misc.imresize was deprecated in SciPy 1.0 and removed
# in 1.3 -- this alias only works on older SciPy releases; verify pinning.
imresize = scipy.misc.imresize
from rubikscubennnsolver.RubiksCube333 import solved_333
from rubikscubennnsolver.RubiksCube444 import solved_444
from rubikscubennnsolver.RubiksCube555 import solved_555
from rubikscubennnsolver.RubiksCube666 import solved_666
from rubikscubennnsolver.RubiksCube777 import solved_777
from rubikscubennnsolver.RubiksCubeNNNEven import solved_888, solved_101010, solved_121212, solved_141414
from rubikscubennnsolver.RubiksCubeNNNOdd import solved_999, solved_111111, solved_131313, solved_151515, solved_171717
import json
import logging

# Root logging: timestamp, source file and level on every record.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(filename)12s %(levelname)8s: %(message)s')
log = logging.getLogger(__name__)

# Color the errors and warnings in red
# (wraps the level *names* in ANSI escapes, so every formatter renders them red)
logging.addLevelName(logging.ERROR, "\033[91m   %s\033[0m" % logging.getLevelName(logging.ERROR))
logging.addLevelName(logging.WARNING, "\033[91m %s\033[0m" % logging.getLevelName(logging.WARNING))

test_cases = {
    "2x2x2" : [],
    "3x3x3" : [],
    "4x4x4" : [],
    "5x5x5" : [],
    "6x6x6" : [],
    "7x7x7" : [],
    "8x8x8" : [],
    "9x9x9" : [],
    "10x10x10" : [],
    "11x11x11" : [],
    "12x12x12" : [],
    "13x13x13" : [],
Ejemplo n.º 44
0
# Copyright (c) 2018, UFACTORY, Inc.
# All rights reserved.
#
# Author: Vinman <*****@*****.**> <*****@*****.**>

import logging
import functools
import sys
import os

# Per-user SDK log directory: ~/.UFACTORY/log/xarm/sdk
log_path = os.path.join(os.path.expanduser('~'), '.UFACTORY', 'log', 'xarm', 'sdk')
# exist_ok avoids the check-then-create race: with the old
# `if not exists: makedirs` two processes importing the SDK concurrently
# could both pass the test and one makedirs() would then fail.
os.makedirs(log_path, exist_ok=True)

# Custom VERBOSE level below DEBUG (10).
logging.VERBOSE = 5
logging.addLevelName(logging.VERBOSE, 'VERBOSE')


class Logger(logging.Logger):
    # Shared SDK log format: level, timestamp, file:line, message.
    logger_fmt = '{}[%(levelname)s][%(asctime)s][%(filename)s:%(lineno)d] - - %(message)s'
    logger_date_fmt = '%Y-%m-%d %H:%M:%S'
    # Console output is tagged with an [SDK] prefix.
    stream_handler_fmt = logger_fmt.format('[SDK]')
    stream_handler_date_fmt = logger_date_fmt
    # Single stdout handler at the custom VERBOSE (5) level, created once
    # at class-definition time and shared by all users of this class.
    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.VERBOSE)
    stream_handler.setFormatter(logging.Formatter(stream_handler_fmt, stream_handler_date_fmt))

    # Default logger instance shared across the SDK.
    logger = logging.Logger(__name__)
    logger.setLevel(logging.VERBOSE)
    logger.addHandler(stream_handler)
Ejemplo n.º 45
0
import json
import logging
from optparse import OptionParser
import os
import re
import zipfile

import cp_connectors as cp

# Root logging: timestamped records for the org migrator CLI.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)-15s %(levelname)-7s %(name)-16s %(message)s")
log = logging.getLogger('org_migrator')

# Custom TRACE level below DEBUG (10).
LOGLVL_TRACE = 5
logging.addLevelName(LOGLVL_TRACE, 'TRACE')


def get_cursor_columns(db, cursor):
    """Return the list of column names for *cursor*.

    Some drivers expose the names directly via ``column_names``; DB-API
    style drivers describe each column via ``description`` entries with a
    ``name`` attribute.  *db* is unused but kept for call-site symmetry.
    """
    if hasattr(cursor, 'column_names'):
        return cursor.column_names
    if hasattr(cursor, 'description'):
        return [column.name for column in cursor.description]
    raise Exception('Cannot determine column names for cursor')


def get_cursor_column_types(db, cursor):
    if hasattr(cursor, 'column_names'):
        return [db.get_type_as_string(col[1]) for col in cursor.description]
    elif hasattr(cursor, 'description'):
Ejemplo n.º 46
0
                    prefix = re.search(r"\s+", message).group(0)
                    message = message[len(prefix):]
                else:
                    prefix = ""

                message = "%s%s" % (prefix, ''.join(
                    (self.csi, ';'.join(params), 'm', message, self.reset)))

        return message

    def format(self, record):
        """Format the record with the standard handler, then colorize it."""
        plain = logging.StreamHandler.format(self, record)
        return self.colorize(plain, record)


# Re-register level 15 under the name "INFO": records logged at 15 render
# as INFO, and the name->level lookup for "INFO" now resolves to 15.
logging.addLevelName(15, "INFO")
logger = logging.getLogger('nhentai')
LOGGER_HANDLER = ColorizingStreamHandler(sys.stdout)
FORMATTER = logging.Formatter("\r[%(asctime)s] [%(levelname)s] %(message)s",
                              "%H:%M:%S")
LOGGER_HANDLER.setFormatter(FORMATTER)
# getLevelName("INFO") resolves to 15 after the addLevelName call above.
LOGGER_HANDLER.level_map[logging.getLevelName("INFO")] = (None, "cyan", False)
logger.addHandler(LOGGER_HANDLER)
logger.setLevel(logging.DEBUG)

if __name__ == '__main__':
    # Smoke-test each level when run directly.
    logger.log(15, 'nhentai')
    logger.info('info')
    logger.warn('warn')  # NOTE(review): Logger.warn is deprecated; prefer warning()
    logger.debug('debug')
    logger.error('error')
Ejemplo n.º 47
0
Archivo: misc.py Proyecto: ywong3/cpppo
#
# logging.normal	-- regular program output
# logging.detail	-- detail in addition to normal output
#
#     Augment logging with some new levels, between INFO and WARNING, used for normal/detail output.
#
#     Unfortunationly, logging uses a fragile method to find the logging function's name in the call
# stack; it looks for the first function whose co_filename is *not* the logger source file.  So, we
# need to change our functions to appear as if they originated from logging._srcfile.
#
#      .WARNING 	       == 30
# Two new levels slot between INFO (20) and WARNING (30).
logging.NORMAL = logging.INFO + 5
logging.DETAIL = logging.INFO + 3

logging.addLevelName(logging.NORMAL, 'NORMAL')
logging.addLevelName(logging.DETAIL, 'DETAIL')


def __normal(self, msg, *args, **kwargs):
    """Emit *msg* at the NORMAL level when the logger allows it."""
    if not self.isEnabledFor(logging.NORMAL):
        return
    self._log(logging.NORMAL, msg, args, **kwargs)


def __detail(self, msg, *args, **kwargs):
    """Emit *msg* at the DETAIL level when the logger allows it."""
    if not self.isEnabledFor(logging.DETAIL):
        return
    self._log(logging.DETAIL, msg, args, **kwargs)


# Disguise the helpers as originating from logging's own source file so the
# module's fragile caller lookup (described in the comment block above)
# skips past them to the real call site.
change_function(__normal, co_filename=logging._srcfile)
change_function(__detail, co_filename=logging._srcfile)
Ejemplo n.º 48
0
import os
import json
import threading
from subprocess import Popen, PIPE
from math import *
from time import sleep, time
import collections
import random
import operator
import tempfile
import re
import logging
import shutil

# USERLOG: level between DEBUG (10) and INFO (20) for user-facing messages.
USERLOG = logging.DEBUG + 5
logging.addLevelName(USERLOG, 'USER')
# Shared formatter: timestamp, logger name, thread id, level, message.
gridsearch_formatter = logging.Formatter(
    '%(asctime)s - %(name)s {%(thread)d} %(levelname)s] %(message)s')

gridsearch_stream_handler = logging.StreamHandler()
gridsearch_stream_handler.setLevel(USERLOG)
gridsearch_stream_handler.setFormatter(gridsearch_formatter)


def OrderedDict(x=None):
    """Return an OrderedDict; when *x* is given, insert its items in key order."""
    if x is None:
        return collections.OrderedDict()
    return collections.OrderedDict(sorted(x.items(), key=operator.itemgetter(0)))

Ejemplo n.º 49
0
def create_app(app_name=None):
    # Configuration settings
    import config
    if not app_name:
        app_name = config.APP_NAME

    # Only enable password related functionality in server mode.
    if config.SERVER_MODE is True:
        # Some times we need to access these config params where application
        # context is not available (we can't use current_app.config in those
        # cases even with current_app.app_context())
        # So update these params in config itself.
        # And also these updated config values will picked up by application
        # since we are updating config before the application instance is
        # created.

        config.SECURITY_RECOVERABLE = True
        config.SECURITY_CHANGEABLE = True
        # Now we'll open change password page in alertify dialog
        # we don't want it to redirect to main page after password
        # change operation so we will open the same password change page again.
        config.SECURITY_POST_CHANGE_VIEW = 'browser.change_password'
    """Create the Flask application, startup logging and dynamically load
    additional modules (blueprints) that are found in this directory."""
    app = PgAdmin(__name__, static_url_path='/static')
    # Removes unwanted whitespace from render_template function
    app.jinja_env.trim_blocks = True
    app.config.from_object(config)
    app.config.update(dict(PROPAGATE_EXCEPTIONS=True))

    ##########################################################################
    # Setup logging and log the application startup
    ##########################################################################

    # Add SQL level logging, and set the base logging level
    logging.addLevelName(25, 'SQL')
    app.logger.setLevel(logging.DEBUG)
    app.logger.handlers = []

    # We also need to update the handler on the webserver in order to see
    # request. Setting the level prevents werkzeug from setting up it's own
    # stream handler thus ensuring all the logging goes through the pgAdmin
    # logger.
    logger = logging.getLogger('werkzeug')
    logger.setLevel(logging.INFO)

    # Set SQLITE_PATH to TEST_SQLITE_PATH while running test cases
    if ('PGADMIN_TESTING_MODE' in os.environ
            and os.environ['PGADMIN_TESTING_MODE'] == '1'):
        config.SQLITE_PATH = config.TEST_SQLITE_PATH

    # Ensure the various working directories exist
    from pgadmin.setup import create_app_data_directory, db_upgrade
    create_app_data_directory(config)

    # File logging
    fh = logging.FileHandler(config.LOG_FILE, encoding='utf-8')
    fh.setLevel(config.FILE_LOG_LEVEL)
    fh.setFormatter(logging.Formatter(config.FILE_LOG_FORMAT))
    app.logger.addHandler(fh)
    logger.addHandler(fh)

    # Console logging
    ch = logging.StreamHandler()
    ch.setLevel(config.CONSOLE_LOG_LEVEL)
    ch.setFormatter(logging.Formatter(config.CONSOLE_LOG_FORMAT))
    app.logger.addHandler(ch)
    logger.addHandler(ch)

    # Log the startup
    app.logger.info('########################################################')
    app.logger.info('Starting %s v%s...', config.APP_NAME, config.APP_VERSION)
    app.logger.info('########################################################')
    app.logger.debug("Python syspath: %s", sys.path)

    ##########################################################################
    # Setup i18n
    ##########################################################################

    # Initialise i18n
    babel = Babel(app)

    app.logger.debug('Available translations: %s' % babel.list_translations())

    @babel.localeselector
    def get_locale():
        """Get the language for the user."""
        language = 'en'
        if config.SERVER_MODE is False:
            # Get the user language preference from the miscellaneous module
            if current_user.is_authenticated:
                user_id = current_user.id
            else:
                user = user_datastore.get_user(config.DESKTOP_USER)
                if user is not None:
                    user_id = user.id
            user_language = Preferences.raw_value('miscellaneous',
                                                  'user_language', None,
                                                  user_id)
            if user_language is not None:
                language = user_language
        else:
            # If language is available in get request then return the same
            # otherwise check the session or cookie
            data = request.form
            if 'language' in data:
                language = data['language'] or language
                setattr(session, 'PGADMIN_LANGUAGE', language)
            elif hasattr(session, 'PGADMIN_LANGUAGE'):
                language = getattr(session, 'PGADMIN_LANGUAGE', language)
            elif hasattr(request.cookies, 'PGADMIN_LANGUAGE'):
                language = getattr(request.cookies, 'PGADMIN_LANGUAGE',
                                   language)

        return language

    ##########################################################################
    # Setup authentication
    ##########################################################################

    app.config['SQLALCHEMY_DATABASE_URI'] = u'sqlite:///{0}?timeout={1}' \
        .format(config.SQLITE_PATH.replace(u'\\', u'/'),
                getattr(config, 'SQLITE_TIMEOUT', 500)
                )

    # Create database connection object and mailer
    db.init_app(app)

    ##########################################################################
    # Upgrade the schema (if required)
    ##########################################################################
    with app.app_context():
        # Run migration for the first time i.e. create database
        from config import SQLITE_PATH
        if not os.path.exists(SQLITE_PATH):
            db_upgrade(app)
        else:
            version = Version.query.filter_by(name='ConfigDB').first()
            schema_version = version.value

            # Run migration if current schema version is greater than the
            # schema version stored in version table
            if CURRENT_SCHEMA_VERSION >= schema_version:
                db_upgrade(app)

            # Update schema version to the latest
            if CURRENT_SCHEMA_VERSION > schema_version:
                version = Version.query.filter_by(name='ConfigDB').first()
                version.value = CURRENT_SCHEMA_VERSION
                db.session.commit()

    Mail(app)

    import pgadmin.utils.paths as paths
    paths.init_app(app)

    # Setup Flask-Security
    user_datastore = SQLAlchemyUserDatastore(db, User, Role)
    security = Security(None, user_datastore)

    ##########################################################################
    # Setup security
    ##########################################################################
    with app.app_context():
        config.CSRF_SESSION_KEY = Keys.query.filter_by(
            name='CSRF_SESSION_KEY').first().value
        config.SECRET_KEY = Keys.query.filter_by(
            name='SECRET_KEY').first().value
        config.SECURITY_PASSWORD_SALT = Keys.query.filter_by(
            name='SECURITY_PASSWORD_SALT').first().value

    # Update the app.config with proper security keyes for signing CSRF data,
    # signing cookies, and the SALT for hashing the passwords.
    app.config.update(
        dict({
            'CSRF_SESSION_KEY': config.CSRF_SESSION_KEY,
            'SECRET_KEY': config.SECRET_KEY,
            'SECURITY_PASSWORD_SALT': config.SECURITY_PASSWORD_SALT
        }))

    security.init_app(app, user_datastore)

    # register custom unauthorised handler.
    app.login_manager.unauthorized_handler(pga_unauthorised)

    app.session_interface = create_session_interface(app)

    # Make the Session more secure against XSS & CSRF when running in web mode
    if config.SERVER_MODE:
        paranoid = Paranoid(app)
        paranoid.redirect_view = 'browser.index'

    ##########################################################################
    # Load all available server drivers
    ##########################################################################
    driver.init_app(app)

    ##########################################################################
    # Register language to the preferences after login
    ##########################################################################
    @user_logged_in.connect_via(app)
    def register_language(sender, user):
        # After logged in, set the language in the preferences if we get from
        # the login page
        data = request.form
        if 'language' in data:
            language = data['language']

            # Set the user language preference
            misc_preference = Preferences.module('miscellaneous')
            user_languages = misc_preference.preference('user_language')

            if user_languages and language:
                language = user_languages.set(language)

    ##########################################################################
    # Register any local servers we can discover
    ##########################################################################
    @user_logged_in.connect_via(app)
    def on_user_logged_in(sender, user):
        """On login, auto-register any locally installed PostgreSQL/EDB
        servers (found via the Windows registry, or /etc/postgres-reg.ini
        on other platforms) in the user's first server group."""
        # Keep hold of the user ID
        user_id = user.id

        # Get the first server group for the user, defaulting to group 1.
        servergroup_id = 1
        servergroups = ServerGroup.query.filter_by(
            user_id=user_id).order_by("id")

        if servergroups.count() > 0:
            servergroup = servergroups.first()
            servergroup_id = servergroup.id

        def add_server(user_id, servergroup_id, name, superuser, port,
                       discovery_id, comment):
            """Add a discovered server to the config database (no-op when a
            server with the same discovery ID already exists for the user)."""
            # BUG FIX: this query and the Server() constructor previously
            # read the closure variables svr_discovery_id/svr_comment rather
            # than the discovery_id/comment parameters they were passed.
            servers = Server.query.filter_by(
                user_id=user_id, discovery_id=discovery_id).order_by("id")

            if servers.count() > 0:
                return

            svr = Server(user_id=user_id,
                         servergroup_id=servergroup_id,
                         name=name,
                         host='localhost',
                         port=port,
                         maintenance_db='postgres',
                         username=superuser,
                         ssl_mode='prefer',
                         comment=comment,
                         discovery_id=discovery_id)

            db.session.add(svr)
            db.session.commit()

        # Figure out what servers are present
        if winreg is not None:
            arch_keys = set()
            proc_arch = os.environ['PROCESSOR_ARCHITECTURE'].lower()

            try:
                proc_arch64 = os.environ['PROCESSOR_ARCHITEW6432'].lower()
            except Exception:
                proc_arch64 = None

            if proc_arch == 'x86' and not proc_arch64:
                arch_keys.add(0)
            elif proc_arch == 'x86' or proc_arch == 'amd64':
                arch_keys.add(winreg.KEY_WOW64_32KEY)
                arch_keys.add(winreg.KEY_WOW64_64KEY)

            for arch_key in arch_keys:
                for server_type in ('PostgreSQL', 'EnterpriseDB'):
                    try:
                        # BUG FIX: "\S" was an unescaped backslash; use \\.
                        root_key = winreg.OpenKey(
                            winreg.HKEY_LOCAL_MACHINE,
                            "SOFTWARE\\" + server_type + "\\Services", 0,
                            winreg.KEY_READ | arch_key)
                        for i in xrange(0, winreg.QueryInfoKey(root_key)[0]):
                            inst_id = winreg.EnumKey(root_key, i)
                            inst_key = winreg.OpenKey(root_key, inst_id)

                            svr_name = winreg.QueryValueEx(
                                inst_key, 'Display Name')[0]
                            svr_superuser = winreg.QueryValueEx(
                                inst_key, 'Database Superuser')[0]
                            svr_port = winreg.QueryValueEx(inst_key, 'Port')[0]
                            svr_discovery_id = inst_id
                            # Reuse svr_name instead of querying the
                            # 'Display Name' value a second time.
                            svr_comment = gettext(
                                "Auto-detected %s installation with the data "
                                "directory at %s" %
                                (svr_name,
                                 winreg.QueryValueEx(inst_key,
                                                     'Data Directory')[0]))

                            add_server(user_id, servergroup_id, svr_name,
                                       svr_superuser, svr_port,
                                       svr_discovery_id, svr_comment)

                            inst_key.Close()
                    except Exception:
                        # Best effort: skip registry keys we cannot read.
                        pass
        else:
            # We use the postgres-reg.ini file on non-Windows platforms.
            try:
                from configparser import ConfigParser
            except ImportError:
                from ConfigParser import ConfigParser  # Python 2

            registry = ConfigParser()

            # BUG FIX: this block previously sat outside the else branch, so
            # on Windows it raised (and silently swallowed) a NameError on
            # the undefined 'registry'.
            try:
                registry.read('/etc/postgres-reg.ini')
                sections = registry.sections()

                # Loop the sections, and get the data from any that are PG
                # or PPAS.
                for section in sections:
                    if (section.startswith('PostgreSQL/')
                            or section.startswith('EnterpriseDB/')):
                        svr_name = registry.get(section, 'Description')
                        svr_superuser = registry.get(section, 'Superuser')
                        svr_port = registry.getint(section, 'Port')
                        svr_discovery_id = section
                        description = registry.get(section, 'Description')
                        data_directory = registry.get(section, 'DataDirectory')
                        if hasattr(str, 'decode'):
                            # Python 2: registry values are byte strings.
                            description = description.decode('utf-8')
                            data_directory = data_directory.decode('utf-8')
                        svr_comment = gettext(u"Auto-detected %s installation "
                                              u"with the data directory at %s" %
                                              (description, data_directory))
                        add_server(user_id, servergroup_id, svr_name,
                                   svr_superuser, svr_port, svr_discovery_id,
                                   svr_comment)

            except Exception:
                # Best effort: a missing/unreadable registry file is fine.
                pass

    @user_logged_in.connect_via(app)
    @user_logged_out.connect_via(app)
    def force_session_write(app, user):
        # Mark the session dirty on login/logout so it is written back to
        # the session store even if no other session key changed.
        session.force_write = True

    ##########################################################################
    # Load plugin modules
    ##########################################################################
    for module in app.find_submodules('pgadmin'):
        # Each discovered submodule is a Flask blueprint; mount it on the app.
        app.logger.info('Registering blueprint module: %s' % module)
        app.register_blueprint(module)

    ##########################################################################
    # Handle the desktop login
    ##########################################################################

    @app.before_request
    def before_request():
        """Login the default user if running in desktop mode"""

        # Server mode handles its own authentication; nothing to do here.
        if config.SERVER_MODE:
            return

        # When a desktop auth key is configured, require it via the query
        # string or the PGADMIN_KEY cookie (help-file requests are exempt).
        if app.PGADMIN_KEY != '':
            key_via_args = request.args.get('key') == app.PGADMIN_KEY
            key_via_cookie = \
                request.cookies.get('PGADMIN_KEY') == app.PGADMIN_KEY
            if (not key_via_args and not key_via_cookie
                    and request.endpoint != 'help.static'):
                abort(401)

        if not current_user.is_authenticated:
            user = user_datastore.get_user(config.DESKTOP_USER)
            # Throw an error if we failed to find the desktop user, to give
            # the sysadmin a hint. We'll continue to try to login anyway as
            # that'll throw a nice 500 error for us.
            if user is None:
                app.logger.error(
                    'The desktop user %s was not found in the configuration '
                    'database.' % config.DESKTOP_USER)
                abort(401)
            login_user(user)

    @app.after_request
    def after_request(response):
        # Persist the desktop auth key in a cookie so subsequent requests
        # don't have to repeat ?key=... in the URL.
        key = request.args.get('key')
        if key is not None:
            response.set_cookie('PGADMIN_KEY', value=key)

        return response

    ##########################################################################
    # Minify output
    ##########################################################################
    # HTMLMIN doesn't work with Python 2.6.
    if not config.DEBUG and sys.version_info >= (2, 7):
        from flask_htmlmin import HTMLMIN
        HTMLMIN(app)  # minify HTML responses in non-debug deployments

    @app.context_processor
    def inject_blueprint():
        """Expose the current app and blueprint to every rendered template."""
        return dict(current_app=current_app,
                    current_blueprint=current_blueprint)

    ##########################################################################
    # All done!
    ##########################################################################

    return app
Ejemplo n.º 50
0
import copy
import binascii
import subprocess
import os, getopt
import logging
import sys
import six
import errno
import netaddr
import netifaces

from functools import partial

logger = logging.getLogger('check Ip')
logger.setLevel(logging.DEBUG)
# Prefix WARNING/ERROR level names with ANSI color codes (bold red text /
# red background); the formatter below appends the reset sequence.
logging.addLevelName(logging.WARNING,
                     "\033[1;31m%s" % logging.getLevelName(logging.WARNING))
logging.addLevelName(logging.ERROR,
                     "\033[1;41m%s" % logging.getLevelName(logging.ERROR))
# Console handler on stdout at DEBUG and above.
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s: %(message)s' + "\033[1;0m",
    datefmt='%d/%m/%Y %T')
ch.setFormatter(formatter)
logger.addHandler(ch)


def do_help(program):
    print "\nUsage: " + program + " --network=192.168.1.0/24 --network=193.168.1.0/24 --ipAddr=192.168.1.1 --ipAddr=193.168.1.1"
    print """ Mandatory arguments 
  -n, --network=CIDR	networks to check the IPs against
Ejemplo n.º 51
0
def config(mode='standard', file_name=None, file_lvl=15, stomp=False):
    """
    Controls Dragons logging configuration.

    Parameters
    ----------
    mode : <str>
          logging mode: 'debug', 'standard', 'quiet'

    file_lvl : <int>
          file logging level

    file_name : <str>
          filename of the logger

    stomp: <bool>
          Controls append to logfiles found with same name

    Returns
    -------
    <void>

    """
    lmodes = ('debug', 'standard', 'quiet')
    fm = 'w' if stomp else 'a'
    mode = mode.lower()
    if mode not in lmodes:
        raise NameError("Unknown mode")

    rootlog = logging.getLogger('')
    rootlog.handlers = []  # every call on config clears the handlers list.

    # Add the new levels
    logging.addLevelName(ll['STATUS'], 'STATUS')
    logging.addLevelName(ll['STDINFO'], 'STDINFO')
    logging.addLevelName(ll['FULLINFO'], 'FULLINFO')

    customize_log(rootlog)

    # Per-mode settings. The original duplicated the basicConfig() and
    # console-handler blocks across 'standard' and 'debug'; factor them out.
    # console_lvl is None in 'quiet' mode (file logging only).
    if mode == 'debug':
        logfmt = DBGFMT
        console_lvl = 10
        file_lvl = 10  # debug forces the file level down to DEBUG
    else:
        logfmt = STDFMT
        console_lvl = 21 if mode == 'standard' else None

    # Define rootlog handler(s) through basicConfig() according to mode
    logging.basicConfig(level=file_lvl,
                        format=logfmt,
                        datefmt='%Y-%m-%d %H:%M:%S',
                        filename=file_name,
                        filemode=fm)

    if console_lvl is not None:
        # add console handler for rootlog through addHandler()
        console = logging.StreamHandler()
        console.setFormatter(logging.Formatter('%(message)s'))
        console.setLevel(console_lvl)
        rootlog.addHandler(console)
    return
Ejemplo n.º 52
0
:func:`set_log_levels` methods.
"""

import logging
import os
import time
from types import MethodType

from pymor.core.defaults import defaults
from pymor.tools import mpi

# Custom log levels slotted between INFO (20) and WARNING (30).
BLOCK = logging.INFO + 5
BLOCK_TIME = BLOCK + 1
INFO2 = logging.INFO + 1
INFO3 = logging.INFO + 2
logging.addLevelName(BLOCK, 'BLOCK')
logging.addLevelName(BLOCK_TIME, 'BLOCK_TIME')
logging.addLevelName(INFO2, 'INFO2')
logging.addLevelName(INFO3, 'INFO3')

# ANSI color indices (added to 30 for foreground, 40 for background).
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)

# The background is set with 40 plus the number of the color, and the foreground with 30
# These are the sequences needed to get colored output
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
COLORS = {
    'WARNING':  YELLOW,
    'INFO2':    YELLOW,
    'INFO3':    RED,
Ejemplo n.º 53
0
def get_config(is_local):
    """Build the runtime configuration dict from the JSON config file and
    the command line (command-line flags override file settings).

    :param is_local: True for the local client (-b/-l options allowed),
                     False for the server (--workers allowed).
    :returns: the validated config dict; exits the process on bad input.
    """
    if is_local:
        shortopts = 's:b:p:k:l:m:c:t:v'
        longopts = ['fast-open']
    else:
        shortopts = 's:p:k:m:c:t:v'
        longopts = ['fast-open', 'workers:']
    try:
        config_path = find_config()
        # First pass over argv: only look for -c to pick the config file.
        optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
        for key, value in optlist:
            if key == '-c':
                config_path = value

        if config_path:
            logging.info('loading config from %s' % config_path)
            with open(config_path, 'rb') as f:
                try:
                    config = json.load(f, object_hook=_decode_dict)
                except ValueError as e:
                    logging.error('found an error in config.json: %s',
                                  e.message)
                    sys.exit(1)
        else:
            config = {}

        # Second pass: apply command-line overrides on top of the file.
        optlist, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
        v_count = 0
        for key, value in optlist:
            if key == '-p':
                config['server_port'] = int(value)
            elif key == '-k':
                config['password'] = value
            elif key == '-l':
                config['local_port'] = int(value)
            elif key == '-s':
                config['server'] = value
            elif key == '-m':
                config['method'] = value
            elif key == '-b':
                config['local_address'] = value
            elif key == '-v':
                v_count += 1
                # '-vv' turns on more verbose mode
                config['verbose'] = v_count
            elif key == '-t':
                config['timeout'] = int(value)
            elif key == '--fast-open':
                config['fast_open'] = True
            elif key == '--workers':
                config['workers'] = value
    except getopt.GetoptError as e:
        print >> sys.stderr, e
        if is_local:
            print_local_help()
        else:
            print_server_help()
        sys.exit(2)

    # BUG FIX: use .get() — a config file without a 'password' key used to
    # raise KeyError here, before the defaults below were applied.
    if not config.get('password') and not config_path:
        sys.exit('config not specified, please read '
                 'https://github.com/clowwindy/shadowsocks')

    # Fill in defaults for anything neither file nor flags provided.
    config['password'] = config.get('password', None)
    config['method'] = config.get('method', None)
    config['port_password'] = config.get('port_password', None)
    config['timeout'] = int(config.get('timeout', 300))
    config['fast_open'] = config.get('fast_open', False)
    config['workers'] = config.get('workers', 1)
    config['verbose'] = config.get('verbose', False)
    config['local_address'] = config.get('local_address', '127.0.0.1')

    # Reset root handlers and map -v count to a log level (-vv = VERBOSE).
    logging.getLogger('').handlers = []
    logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE')
    if config['verbose'] == 2:
        level = VERBOSE_LEVEL
    elif config['verbose']:
        level = logging.DEBUG
    else:
        level = logging.INFO
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S',
                        filemode='a+')

    check_config(config)

    return config
Ejemplo n.º 54
0
# pythonized by techtonik // gmail.com


execfile("config.inc.py")

from os import makedirs, walk
from os.path import isdir, join
from string import Template
import shutil
import logging
import subprocess
import re
import operator

# Root logger: timestamped lines at DEBUG and above.
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)-6s %(message)s")
# BUG FIX: addLevelName takes (level, levelName) — the arguments were
# swapped, which registered a bogus level keyed by the string "WARN"
# instead of renaming level 30.
logging.addLevelName(30, "WARN")

#: just a shortcut
log = logging.info
warn = logging.warn



def make_chm_lang(lang):
  """@param lang : either 'rus' or 'eng'"""
  if lang not in ['rus', 'eng']: raise Exception("Invalid parameter")
  lang_code = lang[0:2]

  log("------------------------------------")
  log("preparing %s " % lang_code)
Ejemplo n.º 55
0
            msg = '%s %s %s' % (separator * int(ceil(num)), msg,
                                separator * int(floor(num)))
            self.info(msg)

    def separator(self, separator=''):
        """
        Log a separator with severity 'INFO'.
        """
        if not self.isEnabledFor(logging.INFO):
            return
        # Repeat the separator to fill the head width, then clamp it.
        width = OutputHandler.HeadLength
        line = (separator * width)[:width]
        self.info(line)


# Pad the built-in level names to equal width so log columns line up.
logging.addLevelName(logging.CRITICAL, 'CRITICAL')
logging.addLevelName(logging.ERROR, 'ERROR   ')
logging.addLevelName(logging.INFO, 'INFO    ')
logging.addLevelName(logging.WARNING, 'WARNING ')
logging.addLevelName(logging.DEBUG, 'DEBUG   ')

# Make logging.getLogger() hand out OutputHandler instances.
logging.setLoggerClass(OutputHandler)

# Module-private registry of loggers — presumably populated by getLogger()
# below; verify against its (truncated) body.
__output = dict()


def getLogger(name=APPNAME):
    """
    Returns an instance of the logger for the given name.
    :type name: str
    :rtype: logging.Logger
Ejemplo n.º 56
0
    def init_logging(self,
                     console_logging=False,
                     file_logging=False,
                     debug_logging=False,
                     database_logging=False):
        """
        Initialize logging

        :param console_logging: True if logging to console
        :param file_logging: True if logging to file
        :param debug_logging: True if debug logging is enabled
        :param database_logging: True if logging database access
        """
        self.log_file = self.log_file or ek(os.path.join, sickbeard.LOG_DIR,
                                            'sickchill.log')

        # Publish the resolved log path at module level as well.
        global log_file
        log_file = self.log_file

        self.debug_logging = debug_logging
        self.console_logging = console_logging
        self.file_logging = file_logging
        self.database_logging = database_logging

        logging.addLevelName(DB, 'DB')  # register the custom DB level
        logging.getLogger().addHandler(NullHandler())  # nullify root logger

        # Re-root every managed logger onto our own custom root logger.
        for managed in self.loggers:
            if managed is not self.logger:
                managed.root = self.logger
                managed.parent = self.logger

        # Minimum level by enablement: DB beats DEBUG beats INFO.
        if self.database_logging:
            min_level = DB
        elif self.debug_logging:
            min_level = DEBUG
        else:
            min_level = INFO

        for managed in self.loggers:
            managed.setLevel(min_level)

        logging.getLogger("tornado.general").setLevel('ERROR')

        def attach(handler):
            # Hook a fully configured handler onto every managed logger.
            for managed in self.loggers:
                managed.addHandler(handler)

        # console log handler
        if self.console_logging:
            console = logging.StreamHandler()
            console.setFormatter(
                CensoredFormatter('%(asctime)s %(levelname)s::%(message)s',
                                  '%H:%M:%S'))
            console.setLevel(min_level)
            attach(console)

        # rotating log file handler
        if self.file_logging:
            rotating = logging.handlers.RotatingFileHandler(
                self.log_file,
                maxBytes=int(sickbeard.LOG_SIZE * 1048576),
                backupCount=sickbeard.LOG_NR,
                encoding='utf-8')
            rotating.setFormatter(
                CensoredFormatter('%(asctime)s %(levelname)-8s %(message)s',
                                  dateTimeFormat))
            rotating.setLevel(min_level)
            attach(rotating)
Ejemplo n.º 57
0
"""
import logging
import sys
import time
from contextlib import contextmanager
from functools import partial
from importlib import import_module
from subprocess import check_output
from types import SimpleNamespace

import pyfiglet

from .constants import (CLASS_SEARCH_PATH, CUR_EXP_SCRIPT, HUTCH_COLORS,
                        SUCCESS_LEVEL)

# BUG FIX: logging.addLevelName takes (level, levelName); the arguments
# were swapped, which registered a bogus level keyed by the string
# 'SUCCESS' instead of naming SUCCESS_LEVEL.
logging.addLevelName(SUCCESS_LEVEL, 'SUCCESS')
logger = logging.getLogger(__name__)
# Convenience: logger.success(msg) logs at the custom SUCCESS level.
logger.success = partial(logger.log, SUCCESS_LEVEL)


@contextmanager
def safe_load(name, cls=None):
    """
    Context manager to safely run a block of code.

    This will abort running code and resume the rest of the program if
    something fails. This can be used to wrap user code with unknown behavior.
    This will log standard messages to indicate success or failure.

    Parameters
    ----------
Ejemplo n.º 58
0
                 **kwargs):
        super(_action_increase_level, self).__init__(option_strings,
                                                     dest,
                                                     nargs=0,
                                                     required=required,
                                                     **kwargs)

    #
    # Python levels are 50, 40, 30, 20, 10 ... (debug) 9 8 7 6 5 ... :)
    def __call__(self, parser, namespace, values, option_string=None):
        """argparse action hook: each -v occurrence raises verbosity a step."""
        # Idiom fix: compare to None with 'is' (PEP 8); '== None' worked but
        # is non-idiomatic and can be fooled by a custom __eq__.
        if namespace.level is None:
            namespace.level = logging.ERROR
        namespace.level = _logging_verbosity_inc(namespace.level)


# Single-letter level names keep verbose log prefixes compact.
logging.addLevelName(50, "C")
logging.addLevelName(40, "E")
logging.addLevelName(30, "W")
logging.addLevelName(20, "I")
logging.addLevelName(10, "D")

# Initialize command line argument parser
arg_parser = argparse.ArgumentParser(
    description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)

arg_parser.set_defaults(level=logging.ERROR)
arg_parser.add_argument("-v",
                        "--verbose",
                        dest="level",
                        action=_action_increase_level,
                        nargs=0,
Ejemplo n.º 59
0
# Revision 11/13/2018

import sys
from getopt import getopt, GetoptError
from os.path import isfile
import os
import socket
import struct
import logging
from json import loads

logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',
                    level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')

# BUG FIX: os.environ['TERM'] raises KeyError when TERM is unset (cron,
# systemd, Windows); use .get() so we simply fall back to plain labels.
if os.environ.get('TERM') == "xterm-256color":
    # Colorize the bracketed level names with ANSI escapes.
    logging.addLevelName(logging.ERROR, '[\033[31mERROR\033[0m]')
    logging.addLevelName(logging.WARNING, '[\033[33mWARNING\033[0m]')
    logging.addLevelName(logging.INFO, '[\033[32mINFO\033[0m]')
    logging.addLevelName(logging.DEBUG, '[\033[34mDEBUG\033[0m]')
else:
    logging.addLevelName(logging.ERROR, '[ERROR]')
    logging.addLevelName(logging.WARNING, '[WARNING]')
    logging.addLevelName(logging.INFO, '[INFO]')
    logging.addLevelName(logging.DEBUG, '[DEBUG]')

_ossec_path = '/var/ossec'
_verbose = True
_force = False


def _get_agents():
Ejemplo n.º 60
0
    def _setup_loggers(self, args=None, config=None):
        """Configure the eventgen logger hierarchy.

        :param args: optional namespace; ``args.log_path`` overrides the
                     default log directory and ``args.modinput_mode``
                     suppresses the console handler on the main logger.
        :param config: optional dict applied via
                       ``logging.config.dictConfig`` instead of the
                       hand-built handler setup below.
        """
        # Resolve the log directory and the individual log file paths.
        log_path = getattr(args, "log_path", os.path.join(file_path, 'logs'))
        eventgen_main_logger_path = os.path.join(log_path, 'eventgen-main.log')
        eventgen_controller_logger_path = os.path.join(
            log_path, 'eventgen-controller.log')
        eventgen_metrics_logger_path = os.path.join(log_path,
                                                    'eventgen-metrics.log')
        eventgen_error_logger_path = os.path.join(log_path,
                                                  'eventgen-errors.log')
        eventgen_server_logger_path = os.path.join(log_path,
                                                   'eventgen-server.log')
        if not config:
            log_format = '%(asctime)s %(name)-15s %(levelname)-8s %(processName)-10s %(message)s'
            date_format = '%Y-%m-%d %H:%M:%S'

            # Set up formatters: human-readable for console/main/error logs,
            # JSON for metrics and server logs.
            detailed_formatter = logging.Formatter(log_format,
                                                   datefmt=date_format)
            json_formatter = JSONFormatter(log_format, datefmt=date_format)

            # Set up handlers
            console_handler = logging.StreamHandler()
            console_handler.setFormatter(detailed_formatter)
            console_handler.setLevel(logging.DEBUG)

            file_handler = logging.handlers.RotatingFileHandler(
                eventgen_main_logger_path, maxBytes=2500000, backupCount=20)
            file_handler.setFormatter(detailed_formatter)
            file_handler.setLevel(logging.DEBUG)

            eventgen_controller_file_handler = logging.handlers.RotatingFileHandler(
                eventgen_controller_logger_path,
                maxBytes=2500000,
                backupCount=20)
            eventgen_controller_file_handler.setFormatter(detailed_formatter)
            eventgen_controller_file_handler.setLevel(logging.DEBUG)

            error_file_handler = logging.handlers.RotatingFileHandler(
                eventgen_error_logger_path, maxBytes=2500000, backupCount=20)
            error_file_handler.setFormatter(detailed_formatter)
            error_file_handler.setLevel(logging.ERROR)

            metrics_file_handler = logging.handlers.RotatingFileHandler(
                eventgen_metrics_logger_path, maxBytes=2500000, backupCount=20)
            metrics_file_handler.setFormatter(json_formatter)
            metrics_file_handler.setLevel(logging.INFO)

            server_file_handler = logging.handlers.RotatingFileHandler(
                eventgen_server_logger_path, maxBytes=2500000, backupCount=10)
            server_file_handler.setFormatter(json_formatter)
            server_file_handler.setLevel(logging.INFO)

            # Configure eventgen logger
            # NOTE(review): reads self.args.verbosity here and below rather
            # than the args parameter — confirm that is intended.
            logger = logging.getLogger('eventgen')
            logger.setLevel(self.args.verbosity or logging.ERROR)
            logger.propagate = False
            logger.handlers = []
            if args and not args.modinput_mode:
                logger.addHandler(console_handler)
            logger.addHandler(file_handler)
            logger.addHandler(error_file_handler)

            # Configure eventgen listener
            logger = logging.getLogger('eventgen_controller')
            logger.setLevel(self.args.verbosity or logging.ERROR)
            logger.propagate = False
            logger.handlers = []
            logger.addHandler(eventgen_controller_file_handler)
            logger.addHandler(error_file_handler)
            logger.addHandler(console_handler)

            # Configure eventgen metrics logger
            logger = logging.getLogger('eventgen_metrics')
            logger.setLevel(logging.INFO)
            logger.propagate = False
            logger.handlers = []
            logger.addHandler(metrics_file_handler)

            # Configure eventgen server logger
            logger = logging.getLogger('eventgen_server')
            logger.setLevel(logging.INFO)
            logger.propagate = False
            logger.handlers = []
            logger.addHandler(server_file_handler)
            logger.addHandler(console_handler)
        else:
            # A dict config takes precedence over the hand-built handlers.
            self.logger_config = config
            logging.config.dictConfig(self.logger_config)
        # We need to have debugv from the older versions of eventgen.
        DEBUG_LEVELV_NUM = 9
        logging.addLevelName(DEBUG_LEVELV_NUM, "DEBUGV")

        def debugv(self, message, *args, **kws):
            # Yes, logger takes its '*args' as 'args'.
            if self.isEnabledFor(DEBUG_LEVELV_NUM):
                self._log(DEBUG_LEVELV_NUM, message, args, **kws)

        logging.Logger.debugv = debugv
        self.logger = logging.getLogger('eventgen')
        self.loggingQueue = None