Example #1
def setup():
    cfg = parse_config()

    logging.getLogger().setLevel(getattr(logging, cfg.get('log', 'loglevel').upper()))
    logfile = cfg.get('log', 'logfile')
    if logfile != '':
        handler = WatchedFileHandler(logfile)
        handler.addFilter(RequestIdFilter())
        formatter = logging.Formatter(
            '%(asctime)s [%(process)d] %(levelname)-5s '
            '%(request_id)s %(name)s %(message)s'
        )
        handler.setFormatter(formatter)
        logging.getLogger().addHandler(handler)
    else:
        logging.basicConfig()

    if cfg.has_option("metrics", "sentry_dsn"):
        # Only import sentry if enabled
        import sentry_sdk
        from sentry_sdk.integrations.flask import FlaskIntegration
        sentry_sdk.init(
            dsn=cfg.get("metrics", "sentry_dsn"),
            integrations=[FlaskIntegration()],
        )

    if cfg.has_option("metrics", "prometheus_port"):
        prometheus_client.start_http_server(
            port=cfg.getint("metrics", "prometheus_port"),
            addr=cfg.get("metrics", "prometheus_addr"),
        )

    ctx = SygnalContext()
    ctx.database = sygnal.db.Db(cfg.get('db', 'dbfile'))

    for key, val in cfg.items('apps'):
        parts = key.rsplit('.', 1)
        if len(parts) < 2:
            continue
        if parts[1] == 'type':
            try:
                pushkins[parts[0]] = make_pushkin(val, parts[0])
            except Exception:
                logger.exception("Failed to load module for kind %s", val)
                raise

    if len(pushkins) == 0:
        logger.error("No app IDs are configured. Edit sygnal.conf to define some.")
        sys.exit(1)

    for p in pushkins:
        pushkins[p].cfg = cfg
        pushkins[p].setup(ctx)

    logger.info("Configured with app IDs: %r", pushkins.keys())

    logger.error("Setup completed")
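The RequestIdFilter attached above is not part of this excerpt; its only job is to guarantee that every record carries a request_id attribute, so the %(request_id)s placeholder in the formatter never fails. A minimal sketch, assuming the id is kept in a thread-local (the storage mechanism and the "-" fallback are assumptions, not sygnal's actual implementation):

import logging
import threading

_request_context = threading.local()

class RequestIdFilter(logging.Filter):
    def filter(self, record):
        # Annotate, never drop: fall back to "-" when no request is active.
        record.request_id = getattr(_request_context, "request_id", "-")
        return True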
Example #2
def setup():
    cfg = parse_config()

    logging.getLogger().setLevel(getattr(logging, cfg.get('log', 'loglevel').upper()))
    logfile = cfg.get('log', 'logfile')
    if logfile != '':
        handler = WatchedFileHandler(logfile)
        handler.addFilter(RequestIdFilter())
        formatter = logging.Formatter(
            '%(asctime)s [%(process)d] %(levelname)-5s '
            '%(request_id)s %(name)s %(message)s'
        )
        handler.setFormatter(formatter)
        logging.getLogger().addHandler(handler)
    else:
        logging.basicConfig()

    ctx = SygnalContext()
    ctx.database = sygnal.db.Db(cfg.get('db', 'dbfile'))

    for key, val in cfg.items('apps'):
        parts = key.rsplit('.', 1)
        if len(parts) < 2:
            continue
        if parts[1] == 'type':
            try:
                pushkins[parts[0]] = make_pushkin(val, parts[0])
            except Exception:
                logger.exception("Failed to load module for kind %s", val)
                raise

    if len(pushkins) == 0:
        logger.error("No app IDs are configured. Edit sygnal.conf to define some.")
        sys.exit(1)

    for p in pushkins:
        pushkins[p].cfg = cfg
        pushkins[p].setup(ctx)

    logger.info("Configured with app IDs: %r", pushkins.keys())

    logger.error("Setup completed")
Example #3
    def generate_logger(clazz_name):
        logger = logging.getLogger(clazz_name)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s'
        )

        handler = logging.StreamHandler()
        handler.setLevel(INFO)
        handler.setFormatter(formatter)
        logger.addHandler(handler)

        file_handler = WatchedFileHandler('log/app.log')
        file_handler.setLevel(INFO)
        file_handler.setFormatter(formatter)
        file_handler.addFilter(LoggingFilter(ERROR))
        logger.addHandler(file_handler)

        logger.setLevel(INFO)
        logger.propagate = False

        return logger
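LoggingFilter is defined elsewhere in that project and its semantics are not visible here; the sketch below assumes it simply passes records at or above the level it was constructed with, which is a guess. The usage call is likewise hypothetical, and note that WatchedFileHandler('log/app.log') requires the log/ directory to already exist.

import logging

class LoggingFilter(logging.Filter):
    def __init__(self, level):
        super().__init__()
        self.level = level

    def filter(self, record):
        # Assumed behaviour: only records at or above the configured level pass.
        return record.levelno >= self.level

# Hypothetical usage of the factory above:
logger = generate_logger("payments.PaymentService")
logger.error("payment failed")   # reaches log/app.log under the assumed ERROR filter
logger.info("service started")   # stdout only under that assumption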
Example #4
File: log.py Project: mennis/oTTo
    def __init__(
        self,
        level=logging.DEBUG,
        name=None,
        logdir="./",
        stdout=True,
        multiFile=False,
        post=False,
        ws="www-qa.coraid.com",
    ):
        self.logdir = logdir
        self.ws = ws
        self.instance = os.environ.get("instance") or ""
        self.level = level
        logging.addLevelName(COMMENT, "COMMENT")

        # Root Logger
        self.logger = logging.getLogger("otto" + self.instance)
        self.logger.addHandler(logging.NullHandler())

        """
        Root Logger Threshold is WARNING by default.
        We will set the threshold as low as possible
        """
        self.logger.setLevel(DEBUG)

        """
        The STDOUT handler will use the logger default threshold for printing.
        If the level is set to INFO the STDOUT should only display INFO messages and greater
        """
        if stdout:
            StdOutHandler = logging.StreamHandler(sys.stdout)
            StdOutHandler._name = "STDOUT"
            StdOutHandler.setLevel(level)
            StdOutHandler.setFormatter(Dispatcher())
            self.logger.addHandler(StdOutHandler)

        if name is None:
            frame = inspect.stack()[1]
            name = inspect.getfile(frame[0]).split("/")[-1].split(".py")[0]

        logFileBase = self.logdir + name + "-" + time.strftime("%Y%m%d_%H%M")

        """
        The Full log will contain every level of output and will be created
        in any configuration for use when posting the log to the web server.
        """
        fullLogFile = logFileBase + "_FULL.log"
        self.fullLogFile = fullLogFile
        FullLogFileHandler = WatchedFileHandler(fullLogFile)
        FullLogFileHandler.setLevel(level)
        FullLogFileHandler._name = "LogFile-FULL"
        FullLogFileHandler.setFormatter(Dispatcher())
        self.logger.addHandler(FullLogFileHandler)

        """
        In the case of multiFile = True:
        Create a FileHandler for each level and attach the appropriate level name to the file suffix.
        Then set a filter on each handler so each file receives only records of its own level.
        """
        if multiFile:
            # Set up filename variables
            debugLogFile = logFileBase + "_DEBUG.log"
            commentLogFile = logFileBase + "_COMMENT.log"
            infoLogFile = logFileBase + "_INFO.log"
            warningLogFile = logFileBase + "_WARNING.log"
            errorLogFile = logFileBase + "_ERROR.log"

            # Create FileHandler objects
            DebugFileHandler = WatchedFileHandler(debugLogFile)
            DebugFileHandler._name = "LogFile-DEBUG"
            CommentFileHandler = WatchedFileHandler(commentLogFile)
            CommentFileHandler._name = "LogFile-COMMENT"
            InfoFileHandler = WatchedFileHandler(infoLogFile)
            InfoFileHandler._name = "LogFile-INFO"
            WarningFileHandler = WatchedFileHandler(warningLogFile)
            WarningFileHandler._name = "LogFile-WARNING"
            ErrorFileHandler = WatchedFileHandler(errorLogFile)
            ErrorFileHandler._name = "LogFile-ERROR"

            # Add filters at corresponding levels
            DebugFileHandler.addFilter(LogFilter(DEBUG))
            CommentFileHandler.addFilter(LogFilter(COMMENT))
            InfoFileHandler.addFilter(LogFilter(INFO))
            WarningFileHandler.addFilter(LogFilter(WARNING))
            ErrorFileHandler.addFilter(LogFilter(ERROR))

            # Add format Dispatcher
            DebugFileHandler.setFormatter(Dispatcher())
            CommentFileHandler.setFormatter(Dispatcher())
            InfoFileHandler.setFormatter(Dispatcher())
            WarningFileHandler.setFormatter(Dispatcher())
            ErrorFileHandler.setFormatter(Dispatcher())

            # Add handlers to root logger
            self.logger.addHandler(DebugFileHandler)
            self.logger.addHandler(CommentFileHandler)
            self.logger.addHandler(InfoFileHandler)
            self.logger.addHandler(WarningFileHandler)
            self.logger.addHandler(ErrorFileHandler)
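Neither LogFilter nor the custom COMMENT level is shown in this excerpt. Going by the in-code comment ("each file receives only records of its own level"), a minimal sketch could look like the following; the numeric value picked for COMMENT is purely illustrative:

import logging

# Assumed value: somewhere between INFO (20) and WARNING (30).
COMMENT = 25

class LogFilter(logging.Filter):
    def __init__(self, level):
        super().__init__()
        self.level = level

    def filter(self, record):
        # Exact-level match, so each per-level file only gets its own records.
        return record.levelno == self.level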
Example #5
    def logging_init(cls,
                     log_level="INFO",
                     force_reset=False,
                     log_callback=None,
                     log_to_file=None,
                     log_to_syslog=True,
                     log_to_syslog_facility=SysLogHandler.LOG_LOCAL0,
                     log_to_console=True,
                     log_to_file_mode="watched_file",
                     context_filter=None):
        """
        Initialize logging sub system with default settings (console, pre-formatted output)
        :param log_to_console: if True, log to console
        :type log_to_console: bool
        :param log_level: The log level to set. Any value in "DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"
        :type log_level: str
        :param force_reset: If true, logging system is reset.
        :type force_reset: bool
        :param log_to_file: If specified, log to file
        :type log_to_file: str,None
        :param log_to_syslog: If specified, log to syslog
        :type log_to_syslog: bool
        :param log_to_syslog_facility: Syslog facility.
        :type log_to_syslog_facility: int
        :param log_to_file_mode: str "watched_file" for WatchedFileHandler, "time_file" for TimedRotatingFileHandler (or time_file_seconds for unittest)
        :type log_to_file_mode: str
        :param log_callback: Callback for unittest
        :param context_filter: Context filter. If None, pysolbase.ContextFilter.ContextFilter is used. If used instance has an attr "filter", it is added to all handlers and "%(kfilter)s" will be populated by all thread context key/values, using filter method call. Refer to our ContextFilter default implementation for details.
        :type context_filter: None,object
        :return: Nothing.
        """

        if cls._logging_initialized and not force_reset:
            return

        with cls._logging_lock:
            if cls._logging_initialized and not force_reset:
                return

            # Default
            logging.basicConfig(level=log_level)

            # Filter
            if context_filter:
                c_filter = context_filter
            else:
                c_filter = ContextFilter()

            # Format begin
            s_f = "%(asctime)s | %(levelname)s | %(module)s@%(funcName)s@%(lineno)d | %(message)s "

            # Browse
            if hasattr(c_filter, "filter"):
                # Push generic field
                # We expect it to be formatted like our pysolbase.ContextFilter.ContextFilter#filter method.
                s_f += "|%(kfilter)s"

            # Format end
            s_f += "| %(thread)d:%(threadName)s | %(process)d:%(processName)s"

            # Formatter
            f = logging.Formatter(s_f)

            # Console handler
            c = None
            if log_to_console:
                # sys.stdout may be overridden (e.g. by unittests), so we use sys.__stdout__
                c = logging.StreamHandler(sys.__stdout__)
                c.setLevel(logging.getLevelName(log_level))
                c.setFormatter(f)

            # File handler to /tmp
            cf = None
            if log_to_file:
                if log_to_file_mode == "watched_file":
                    cf = WatchedFileHandler(log_to_file, encoding="utf-8")
                    cf.setLevel(logging.getLevelName(log_level))
                    cf.setFormatter(f)
                elif log_to_file_mode == "time_file":
                    cf = TimedRotatingFileHandler(log_to_file,
                                                  encoding="utf-8",
                                                  utc=True,
                                                  when="D",
                                                  interval=1,
                                                  backupCount=7)
                    cf.setLevel(logging.getLevelName(log_level))
                    cf.setFormatter(f)
                elif log_to_file_mode == "time_file_seconds":
                    # For unittest only
                    cf = TimedRotatingFileHandler(log_to_file,
                                                  encoding="utf-8",
                                                  utc=True,
                                                  when="S",
                                                  interval=1,
                                                  backupCount=7)
                    cf.setLevel(logging.getLevelName(log_level))
                    cf.setFormatter(f)
                else:
                    logger.warning("Invalid log_to_file_mode=%s",
                                   log_to_file_mode)

            # Syslog handler
            syslog = None
            if log_to_syslog:
                try:
                    from pysolbase.SysLogger import SysLogger

                    syslog = SysLogger(log_callback=log_callback,
                                       facility=log_to_syslog_facility)
                    syslog.setLevel(logging.getLevelName(log_level))
                    syslog.setFormatter(f)
                except Exception as e:
                    # This will fail on windows (no attr AF_UNIX)
                    logger.debug("Unable to import SysLogger, e=%s",
                                 SolBase.extostr(e))
                    syslog = False

            # Initialize
            root = logging.getLogger()
            root.setLevel(logging.getLevelName(log_level))
            root.handlers = []
            if log_to_console:
                c.addFilter(c_filter)
                root.addHandler(c)
            if log_to_file and cf:
                cf.addFilter(c_filter)
                root.addHandler(cf)
            if log_to_syslog and syslog:
                syslog.addFilter(c_filter)
                root.addHandler(syslog)

            # Done
            cls._logging_initialized = True
            if force_reset:
                lifecyclelogger.info(
                    "Logging : initialized from memory, log_level=%s, force_reset=%s",
                    log_level, force_reset)
            else:
                lifecyclelogger.debug(
                    "Logging : initialized from memory, log_level=%s, force_reset=%s",
                    log_level, force_reset)
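A hypothetical call of this classmethod, assuming it is exposed on pysolbase's SolBase class (the body above already references SolBase.extostr, but the exact import path is an assumption):

from pysolbase.SolBase import SolBase   # import path assumed
import logging

SolBase.logging_init(
    log_level="DEBUG",
    log_to_file="/tmp/app.log",        # routed through WatchedFileHandler ("watched_file" mode)
    log_to_file_mode="watched_file",
    log_to_syslog=False,               # e.g. on Windows, where the SysLogger import fails
    force_reset=True,                  # re-initialize even if logging was already set up
)

logging.getLogger(__name__).info("logging initialized with context fields")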
Example #6
    def __init__(self, level=logging.DEBUG, name=None, logdir='./', stdout=True, multiFile=False, post=False,
                 ws='www-qa.coraid.com'):
        self.logdir = logdir
        self.ws = ws
        self.instance = os.environ.get('instance') or ''
        self.level = level
        logging.addLevelName(COMMENT, "COMMENT")

        # Root Logger
        self.logger = logging.getLogger('otto' + self.instance)
        self.logger.addHandler(logging.NullHandler())

        """
        Root Logger Threshold is WARNING by default.
        We will set the threshold as low as possible
        """
        self.logger.setLevel(DEBUG)

        """
        The STDOUT handler will use the logger's default threshold for printing.
        If the level is set to INFO, STDOUT will only display messages at INFO and above.
        """
        if stdout:
            StdOutHandler = logging.StreamHandler(sys.stdout)
            StdOutHandler._name = "STDOUT"
            StdOutHandler.setLevel(level)
            StdOutHandler.setFormatter(Dispatcher())
            self.logger.addHandler(StdOutHandler)

        if name is None:
            frame = inspect.stack()[1]
            name = inspect.getfile(frame[0]).split('/')[-1].split(".py")[0]

        logFileBase = self.logdir + name + "-" + time.strftime('%Y%m%d_%H%M')

        """
        The Full log will contain every level of output and will be created
        in any configuration for use when posting the log to the web server.
        """
        fullLogFile = logFileBase + "_FULL.log"
        self.fullLogFile = fullLogFile
        FullLogFileHandler = WatchedFileHandler(fullLogFile)
        FullLogFileHandler.setLevel(level)
        FullLogFileHandler._name = "LogFile-FULL"
        FullLogFileHandler.setFormatter(Dispatcher())
        self.logger.addHandler(FullLogFileHandler)

        """
        In the case of multiFile = True:
        Create a FileHandler for each level and attach the appropriate level name to the file suffix.
        Then set a filter on each handler so each file receives only records of its own level.
        """
        if multiFile:
            # Set up filename variables
            debugLogFile = logFileBase + "_DEBUG.log"
            commentLogFile = logFileBase + "_COMMENT.log"
            infoLogFile = logFileBase + "_INFO.log"
            warningLogFile = logFileBase + "_WARNING.log"
            errorLogFile = logFileBase + "_ERROR.log"

            # Create FileHandler objects
            DebugFileHandler = WatchedFileHandler(debugLogFile)
            DebugFileHandler._name = "LogFile-DEBUG"
            CommentFileHandler = WatchedFileHandler(commentLogFile)
            CommentFileHandler._name = "LogFile-COMMENT"
            InfoFileHandler = WatchedFileHandler(infoLogFile)
            InfoFileHandler._name = "LogFile-INFO"
            WarningFileHandler = WatchedFileHandler(warningLogFile)
            WarningFileHandler._name = "LogFile-WARNING"
            ErrorFileHandler = WatchedFileHandler(errorLogFile)
            ErrorFileHandler._name = "LogFile-ERROR"

            # Add filters at corresponding levels
            DebugFileHandler.addFilter(LogFilter(DEBUG))
            CommentFileHandler.addFilter(LogFilter(COMMENT))
            InfoFileHandler.addFilter(LogFilter(INFO))
            WarningFileHandler.addFilter(LogFilter(WARNING))
            ErrorFileHandler.addFilter(LogFilter(ERROR))

            # Add format Dispatcher
            DebugFileHandler.setFormatter(Dispatcher())
            CommentFileHandler.setFormatter(Dispatcher())
            InfoFileHandler.setFormatter(Dispatcher())
            WarningFileHandler.setFormatter(Dispatcher())
            ErrorFileHandler.setFormatter(Dispatcher())

            # Add handlers to root logger
            self.logger.addHandler(DebugFileHandler)
            self.logger.addHandler(CommentFileHandler)
            self.logger.addHandler(InfoFileHandler)
            self.logger.addHandler(WarningFileHandler)
            self.logger.addHandler(ErrorFileHandler)
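Dispatcher comes from the oTTo project and is only ever used here as a formatter, so all we know is that it is logging.Formatter-compatible. A speculative sketch of a formatter that could fill that role, switching the format string per record level (the format strings themselves are invented):

import logging

class Dispatcher(logging.Formatter):
    _formats = {
        logging.DEBUG: "%(asctime)s DBG %(name)s %(message)s",
        logging.ERROR: "%(asctime)s ERR %(name)s %(pathname)s:%(lineno)d %(message)s",
    }
    _default = "%(asctime)s %(levelname)s %(name)s %(message)s"

    def format(self, record):
        # Swap the active format string before delegating to the base class.
        self._style._fmt = self._formats.get(record.levelno, self._default)
        return super().format(record)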
Example #7
    def setup_logging(self):
        root_logger = logging.getLogger()

        if self.args.debug:
            root_logger.setLevel(logging.DEBUG)
        else:
            root_logger.setLevel(logging.INFO)

        if not self.args.quiet:
            h = logging.StreamHandler()
            if self.args.journald:
                h.setFormatter(JournaldFormatter())
            else:
                h.setFormatter(
                    logging.Formatter(
                        "%(asctime)-15s %(levelname)s %(message)s"))
            if self.args.debug:
                h.setLevel(logging.DEBUG)
            elif self.args.verbose:
                h.setLevel(logging.INFO)
            else:
                h.setLevel(logging.WARN)
            h.addFilter(DefaultLogFilter(self.args.accesslog))
            root_logger.addHandler(h)

        if self.args.accesslog:
            from logging.handlers import WatchedFileHandler
            h = WatchedFileHandler(self.args.accesslog)
            h.setFormatter(logging.Formatter("%(message)s"))
            h.setLevel(logging.INFO)

            class Filter:
                def filter(self, record):
                    if not getattr(record, "access_log", False):
                        return False
                    return record.levelno < logging.WARN

            h.addFilter(Filter())
            root_logger.addHandler(h)

        if self.args.perflog:
            from logging.handlers import WatchedFileHandler
            h = WatchedFileHandler(self.args.perflog)

            class PerfFormatter(logging.Formatter):
                def format(self, record):
                    import json
                    info = getattr(record, "perf", None)
                    return json.dumps(info, sort_keys=True)

            h.setFormatter(PerfFormatter())
            h.setLevel(logging.INFO)

            class Filter:
                def filter(self, record):
                    return getattr(record, "perf", None) is not None

            h.addFilter(Filter())
            root_logger.addHandler(h)

        if self.args.errorlog:
            from logging.handlers import WatchedFileHandler
            h = WatchedFileHandler(self.args.errorlog)
            h.setFormatter(
                logging.Formatter("%(asctime)-15s %(levelname)s %(message)s"))
            h.setLevel(logging.WARN)
            root_logger.addHandler(h)

        if self.args.syslog:
            from logging.handlers import SysLogHandler
            h = SysLogHandler()
            h.setFormatter(
                logging.Formatter("%(asctime)-15s %(levelname)s %(message)s"))
            root_logger.addHandler(h)
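DefaultLogFilter and JournaldFormatter are project classes that are not shown here. Judging by the access-log filter defined later in the method (which routes access_log-flagged records to their own file), one plausible sketch of DefaultLogFilter is a filter that keeps those records off the console whenever a separate access log is configured; this is an inference, not the project's actual code:

import logging

class DefaultLogFilter(logging.Filter):
    def __init__(self, accesslog_path):
        super().__init__()
        self.accesslog_path = accesslog_path

    def filter(self, record):
        # Assumed behaviour: suppress access-log records on the console
        # when they already have a dedicated file handler.
        if self.accesslog_path and getattr(record, "access_log", False):
            return False
        return True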
Example #8
log.addHandler(handler_out)


class FilterErr(logging.Filter):
    def filter(self, rec):
        return is_error(rec.levelno)

handler_err = logging.StreamHandler(sys.stderr)
handler_err.setFormatter(formatter)
if LOG_LEVEL:
    handler_err.setLevel(LOG_LEVEL)
handler_err.addFilter(FilterErr())
log.addHandler(handler_err)

if LOG_LEVEL and LOG_DIR:
    try:
        os.makedirs(LOG_DIR)
    except OSError:
        if not os.path.isdir(LOG_DIR):
            raise
    filename = os.path.basename(sys.argv[0])
    if filename.endswith('.py'):
        filename = filename[:-3]
    filename += '.log'
    logfile = os.path.join(LOG_DIR, filename)
    file_handler_err = WatchedFileHandler(logfile)
    file_handler_err.setFormatter(json_formatter)
    file_handler_err.setLevel(LOG_LEVEL)
    file_handler_err.addFilter(FilterErr())
    log.addHandler(file_handler_err)
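This last fragment starts mid-module: log, handler_out, formatter, json_formatter, is_error, LOG_LEVEL and LOG_DIR are all defined earlier in the original file. The reconstruction below is an assumption-laden sketch of that missing preamble, just enough to make the fragment runnable; the real project's definitions may differ.

import json
import logging
import os
import sys
from logging.handlers import WatchedFileHandler

LOG_LEVEL = os.environ.get("LOG_LEVEL", "INFO")   # assumed to come from the environment
LOG_DIR = os.environ.get("LOG_DIR", "")

log = logging.getLogger()
if LOG_LEVEL:
    log.setLevel(LOG_LEVEL)

formatter = logging.Formatter("%(asctime)s %(levelname)s %(name)s %(message)s")

class JsonFormatter(logging.Formatter):
    # Stand-in for the original json_formatter: one JSON object per line.
    def format(self, record):
        return json.dumps({
            "time": self.formatTime(record),
            "level": record.levelname,
            "name": record.name,
            "message": record.getMessage(),
        })

json_formatter = JsonFormatter()

def is_error(levelno):
    # Assumed cutoff: WARNING and above go to the error handlers.
    return levelno >= logging.WARNING

class FilterOut(logging.Filter):
    # Complement of FilterErr above: stdout only gets non-error records.
    def filter(self, rec):
        return not is_error(rec.levelno)

handler_out = logging.StreamHandler(sys.stdout)
handler_out.setFormatter(formatter)
if LOG_LEVEL:
    handler_out.setLevel(LOG_LEVEL)
handler_out.addFilter(FilterOut())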