示例#1
0
File: loggers.py  Project: sgnls/flent
def setup_console():
    """Install console logging handlers on the root logger (idempotent).

    Populates three module globals:
      - err_handler:   WARNING+ records to stderr, "LEVEL: message" format
      - out_handler:   INFO records to stdout, bare message format
      - cache_handler: records retained for later retrieval
    """
    global err_handler, out_handler, cache_handler

    # Already configured — repeated calls are no-ops.
    if err_handler is not None:
        return

    root = logging.getLogger()

    # stderr: warnings and above, prefixed with the level name.
    err_handler = StreamHandler(sys.stderr)
    err_handler.setLevel(logging.WARNING)
    err_fmt = LogFormatter(fmt="%(levelname)s: %(message)s",
                           output_markers=("", ""))
    err_fmt.format_exceptions = False
    err_handler.setFormatter(err_fmt)
    root.addHandler(err_handler)

    # stdout: capped at INFO by MaxFilter so warnings do not appear twice.
    out_handler = StreamHandler(sys.stdout)
    out_handler.setLevel(logging.INFO)
    out_handler.setFormatter(LogFormatter(fmt="%(message)s"))
    out_handler.addFilter(MaxFilter(logging.INFO))
    add_common_filters(out_handler)
    root.addHandler(out_handler)

    # Keep a copy of every record so it can be replayed later.
    cache_handler = CachingHandler()
    root.addHandler(cache_handler)

    root.setLevel(logging.INFO)

    # Route warnings.warn() through logging, demoted to DEBUG severity.
    logging.captureWarnings(True)
    logging.getLogger("py.warnings").addFilter(LevelDemoteFilter(DEBUG))
示例#2
0
def run():
    """Emit 500 warnings through a handler whose filter rejects everything.

    The DisableFilter attached to the handler drops every record, so the
    in-memory stream stays empty.
    """
    out = StringIO()
    handler = StreamHandler(out)
    handler.addFilter(DisableFilter())
    log.addHandler(handler)
    # BUG FIX: `xrange` was removed in Python 3 (NameError); use range().
    for _ in range(500):
        log.warning('this is not handled')
示例#3
0
def cli_logger_factory(name, out, err):
    """
    Setup basic logging and return logger instance

    :return:

        logger instance with following setup:

            - DEBUG and INFO will be printed to `out`.
            - WARN/WARNING, ERROR, and CRITICAL will be printed to `err`.

    :rtype: logging.Logger
    """
    logger = getLogger(name)
    logger.setLevel(DEBUG)

    # Route low-severity records to `out` and high-severity ones to `err`.
    routing = (
        ([DEBUG, INFO], out),
        ([WARNING, ERROR, CRITICAL], err),
    )
    for levels, stream in routing:
        handler = StreamHandler(stream)
        handler.addFilter(AllowedLevelsFilter(levels))
        logger.addHandler(handler)

    return logger
示例#4
0
File: loggers.py  Project: dchangtw/flent
def setup_console():
    """Install console logging handlers on the root logger (idempotent).

    Populates three module globals: err_handler (stderr, WARNING+),
    out_handler (stdout, INFO only) and cache_handler (record cache).
    """
    global err_handler, out_handler, cache_handler

    # A non-None err_handler means setup already ran; bail out.
    if err_handler is not None:
        return

    root = logging.getLogger()

    # Warnings and above go to stderr with a "LEVEL: message" prefix.
    err_handler = StreamHandler(sys.stderr)
    err_handler.setLevel(logging.WARNING)
    err_fmt = LogFormatter(fmt="%(levelname)s: %(message)s",
                           output_markers=("", ""))
    err_fmt.format_exceptions = False
    err_handler.setFormatter(err_fmt)
    root.addHandler(err_handler)

    # stdout shows plain messages; MaxFilter caps it at INFO so higher
    # severities only appear on stderr.
    out_handler = StreamHandler(sys.stdout)
    out_handler.setLevel(logging.INFO)
    out_handler.setFormatter(LogFormatter(fmt="%(message)s"))
    out_handler.addFilter(MaxFilter(logging.INFO))
    add_common_filters(out_handler)
    root.addHandler(out_handler)

    # Cache records for later replay.
    cache_handler = CachingHandler()
    root.addHandler(cache_handler)

    root.setLevel(logging.INFO)

    # Capture warnings.warn() into logging, demoted to DEBUG.
    logging.captureWarnings(True)
    logging.getLogger("py.warnings").addFilter(LevelDemoteFilter(DEBUG))
示例#5
0
def setup_logging(log_level=INFO, log_file=None, use_colors=None):
    # type: (int, str, bool) -> None
    """Configure root-logger console handlers, plus an optional file handler.

    log_level  -- threshold for the root logger
    log_file   -- optional path; when set, a DEBUG-level FileHandler is added
    use_colors -- True/False forces color on/off; None auto-detects the
                  'color' extra feature
    """
    global _logging_initialized

    # Run exactly once and never clobber an externally configured root logger.
    if _logging_initialized:
        return

    logger = getLogger()
    if logger.handlers:
        return

    addLevelName(NOTICE, 'NOTICE')

    from .this_package_metadata import package_metadata

    colors_feature = package_metadata.extra_features['color']

    if use_colors and not colors_feature.is_installed:
        colors_feature.raise_not_installed('Unable to use colors with logging')

    # BUG FIX: previously `if use_colors is None and ... : True else: False`
    # reset an explicit use_colors=True back to False.  Auto-detect only when
    # the caller passed None; honour an explicit True/False as given.
    if use_colors is None:
        use_colors = package_metadata.extra_features.is_installed('color')

    default_formatter, notice_formatter = _create_color_formatters(
    ) if use_colors else _create_no_color_formatters()

    # INFO renders bare messages; NOTICE has a dedicated formatter.
    level_formatter = LevelFormatter(default_formatter=default_formatter)
    level_formatter.add_formatter_for_level(INFO, Formatter('%(message)s'))
    level_formatter.add_formatter_for_level(NOTICE, notice_formatter)
    console_formatter = level_formatter

    from sys import stdout, stderr
    # stdout handler filtered by LevelFilter(NOTICE) — presumably keeps
    # high-severity records off stdout (they go to stderr); confirm
    # LevelFilter semantics against its definition.
    stdout_handler = StreamHandler(stdout)
    stdout_handler.setLevel(DEBUG)
    stdout_handler.addFilter(LevelFilter(NOTICE))
    stdout_handler.setFormatter(console_formatter)

    stderr_handler = StreamHandler(stderr)
    stderr_handler.setLevel(WARNING)
    stderr_handler.setFormatter(console_formatter)

    logger.setLevel(log_level)
    logger.addHandler(stdout_handler)
    logger.addHandler(stderr_handler)

    _logging_initialized = True

    if not log_file:
        return

    # Optional file sink: verbose format, captures everything from DEBUG up.
    file_formatter = Formatter(
        '%(name)-20s - %(levelname)-10s - %(asctime)-30s:  %(message)s')
    log_file_handler = FileHandler(log_file)
    log_file_handler.setLevel(DEBUG)
    log_file_handler.setFormatter(file_formatter)
    logger.addHandler(log_file_handler)
示例#6
0
File: logger.py  Project: Haner27/pyrpc
    def add_stream_handler(self, level=None, formatter=None, filters=None):
        """Attach a StreamHandler to this logger and return self for chaining.

        Falls back to the logger's own level/formatter when the arguments
        are falsy; `filters` is an optional iterable of filter objects.
        """
        handler = StreamHandler()
        handler.setLevel(level or self.level)
        handler.setFormatter(formatter or self.formatter)

        for flt in filters or ():
            handler.addFilter(flt)

        self.addHandler(handler)
        return self
示例#7
0
 def update(self, raw):
     """Recompile raw SCSS, persist the entity, and return the captured
     compiler log as HTML."""
     self.raw = raw
     buffer = StringIO()
     capture = StreamHandler(buffer)
     capture.addFilter(ExceptionFilter())
     capture.setFormatter(Formatter('<span class="level">%(levelname)s</span>: <span class="message">%(message)s</span><br />'))
     # Temporarily attach to the scss logger so compile messages are captured.
     scss.log.addHandler(capture)
     self.compressed = scss.Scss().compile(self.raw)
     scss.log.removeHandler(capture)
     capture.flush()
     self.put()
     return buffer.getvalue()
示例#8
0
def log(name: str, version: str, **options):
    """
    log configuration setup, logging all goes to standard out by default
    :param str name: endpoint name
    :param str version: endpoint version
    :param options: TODO other options for other chat notifications (sms,
    :return: the root logger, configured with a DEBUG stdout handler whose
        filter stamps app_host/app_name/app_vers onto every record
    """
    # DO NOT 'log' anything here, logging is being set up so infinite loops happen
    print(f'entered log({locals()})')
    log_format = '{asctime} [{app_host} | {app_name} | {app_vers} | {process}] [{levelname}] (unknown):{lineno} - {message}'

    try:
        host = socket.gethostname()
    except OSError:  # FIX: narrowed from bare `except:` — only socket/OS errors expected
        host = "?"

    def filter_factory():
        # Returns the filter *class*; instantiated once below as LogFilter().
        class LoggingFilter(logging.Filter):
            def filter(self, record):
                # Stamp deployment metadata used by log_format above.  The
                # assignments cannot realistically fail, but keep the
                # defensive fallbacks; FIX: narrowed from bare `except:`,
                # which also swallowed KeyboardInterrupt/SystemExit.
                try:
                    record.app_host = host
                except Exception:
                    record.app_host = '?host?'

                try:
                    record.app_name = name
                except Exception:
                    record.app_name = '?name?'

                try:
                    record.app_vers = version
                except Exception:
                    record.app_vers = '?vers?'

                return True

        return LoggingFilter

    formatter = logging.Formatter(log_format, style="{")
    LogFilter = filter_factory()
    new_logger = logging.root
    new_logger.setLevel(logging.DEBUG)

    # standard out
    log_handler_s = StreamHandler(sys.stdout)
    log_handler_s.setLevel(logging.DEBUG)
    log_handler_s.setFormatter(formatter)
    log_handler_s.addFilter(LogFilter())
    new_logger.addHandler(log_handler_s)

    return new_logger
示例#9
0
def log_init():
    """Route INFO-and-below to stdout and ERROR-and-above to stderr."""
    stdout_handler = StreamHandler(stdout)
    stdout_handler.setLevel(INFO)
    # Keep WARNING and above off stdout; stderr owns high severities.
    stdout_handler.addFilter(lambda record: record.levelno <= INFO)

    stderr_handler = StreamHandler(stderr)
    stderr_handler.setLevel(ERROR)

    basicConfig(level=INFO,
                format="%(message)s",
                handlers=[stdout_handler, stderr_handler])
示例#10
0
File: cli.py  Project: Starbat/tocc
    def configure_root_logger(self):
        """Configure and return the root logger: low-severity records on
        stdout (bounded by MaxLevelFilter — confirm its exact cutoff
        semantics), WARNING and above on stderr."""
        root = getLogger('')
        root.setLevel(INFO)

        to_stdout = StreamHandler(sys.stdout)
        to_stdout.setLevel(INFO)
        to_stdout.addFilter(MaxLevelFilter(WARNING))
        root.addHandler(to_stdout)

        to_stderr = StreamHandler(sys.stderr)
        to_stderr.setLevel(WARNING)
        root.addHandler(to_stderr)
        return root
def add_console_handler(logger):
    """Attach a filtered stdout handler and a WARNING+ stderr handler."""
    stdout_handler = StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.INFO)
    stdout_handler.setFormatter(SIMPLE_FORMATTER)
    # LogFilter(WARNING) — presumably keeps WARNING+ off stdout so they
    # appear only on stderr; confirm against LogFilter's definition.
    stdout_handler.addFilter(LogFilter(logging.WARNING))
    logger.addHandler(stdout_handler)

    stderr_handler = StreamHandler(sys.stderr)
    # Never drop below WARNING on stderr, even if MIN_LOG_LEVEL is lower.
    stderr_handler.setLevel(max(MIN_LOG_LEVEL, logging.WARNING))
    stderr_handler.setFormatter(SIMPLE_FORMATTER)
    logger.addHandler(stderr_handler)
def add_console_handler(logger):
    """Attach a filtered stdout handler and a WARNING+ stderr handler.

    (Duplicate of the definition above — at module level the later `def`
    shadows the earlier one.)
    """
    stdout_handler = StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.INFO)
    stdout_handler.setFormatter(SIMPLE_FORMATTER)
    # LogFilter(WARNING) — presumably caps stdout below WARNING; confirm.
    stdout_handler.addFilter(LogFilter(logging.WARNING))
    logger.addHandler(stdout_handler)

    stderr_handler = StreamHandler(sys.stderr)
    # stderr threshold never drops below WARNING.
    stderr_handler.setLevel(max(MIN_LOG_LEVEL, logging.WARNING))
    stderr_handler.setFormatter(SIMPLE_FORMATTER)
    logger.addHandler(stderr_handler)
def init_logger():
    """
    Initializes the logger settings.
    """
    # NOTE: despite the original local name, this is a console handler.
    console_handler = StreamHandler()
    console_handler.setFormatter(logging.Formatter(CALENDAR_LOG_FMT))
    # Stamp request-context fields onto every record before formatting.
    console_handler.addFilter(
        attendance_management_bot.contextlog.RequestContextFilter())

    calendar_log = logging.getLogger("attendance_management_bot")
    calendar_log.setLevel(CALENDAR_LOG_LEVEL)
    calendar_log.addHandler(console_handler)

    # Tornado's own loggers share the same handler.
    logging.getLogger("tornado.application").addHandler(console_handler)
    logging.getLogger("tornado.general").addHandler(console_handler)
示例#14
0
 def update(self, raw):
     """Recompile raw SCSS, persist the entity, and return the captured
     compiler log as HTML."""
     self.raw = raw
     buffer = StringIO()
     capture = StreamHandler(buffer)
     capture.addFilter(ExceptionFilter())
     capture.setFormatter(
         Formatter(
             '<span class="level">%(levelname)s</span>: <span class="message">%(message)s</span><br />'
         ))
     # Attach to the scss logger only for the duration of the compile.
     scss.log.addHandler(capture)
     self.compressed = scss.Scss().compile(self.raw)
     scss.log.removeHandler(capture)
     capture.flush()
     self.put()
     return buffer.getvalue()
示例#15
0
def init_logger():
    """
    Initializes the root logger settings.
    """
    # This is a console handler despite the original local name.
    console_handler = StreamHandler()
    console_handler.setFormatter(logging.Formatter(CALENDAR_LOG_FMT))
    # Stamp request-context fields onto every record.
    console_handler.addFilter(faq_bot.contextlog.RequestContextFilter())

    root_logger = logging.getLogger()
    root_logger.setLevel(CALENDAR_LOG_LEVEL)
    root_logger.addHandler(console_handler)

    # Tornado's loggers share the same handler.
    logging.getLogger("tornado.application").addHandler(console_handler)
    logging.getLogger("tornado.general").addHandler(console_handler)
示例#16
0
def init_logger():
    """
    init logger setting
    """
    # Console handler with the calendar log format and request context.
    console_handler = StreamHandler()
    console_handler.setFormatter(logging.Formatter(CALENDAR_LOG_FMT))
    console_handler.addFilter(calendar_bot.contextlog.RequestContextFilter())

    calendar_log = logging.getLogger("calendar_bot")
    calendar_log.setLevel(CALENDAR_LOG_LEVEL)
    calendar_log.addHandler(console_handler)

    # add app/gen ERROR log
    logging.getLogger("tornado.application").addHandler(console_handler)
    logging.getLogger("tornado.general").addHandler(console_handler)
示例#17
0
File: __init__.py  Project: dowski/aspen
    def configure_logging(self, filename, filter, format, level):
        """Used for configuring logging from the command line or aspen.conf.

        filename -- log file path (None means log to stdout); relative paths
            are resolved against self.paths.root
        filter   -- logger-name prefix to restrict records to (None = all)
        format   -- logging.Formatter format string
        level    -- threshold applied to the handler and the root logger
        """

        # Handler
        # =======
        # sys.stdout or rotated file

        if filename is None:
            handler = StreamHandler(sys.stdout)
        else:
            # @@: Handle absolute paths on Windows
            #  http://sluggo.scrapping.cc/python/unipath/Unipath-current/unipath/abstractpath.py
            #  http://docs.python.org/library/os.path.html#os.path.splitunc
            if not filename.startswith("/"):
                filename = join(self.paths.root, filename)
                filename = realpath(filename)
            logdir = dirname(filename)
            if not isdir(logdir):
                # BUG FIX: `0755` is Python 2 octal syntax and a SyntaxError
                # on Python 3; the explicit 0o755 literal is equivalent.
                os.makedirs(logdir, 0o755)
            handler = TimedRotatingFileHandler(filename=filename, when="midnight", backupCount=7)

        # Filter
        # ======

        if filter is not None:
            filter = logging.Filter(filter)
            handler.addFilter(filter)

        # Format
        # ======

        formatter = logging.Formatter(fmt=format)
        handler.setFormatter(formatter)

        # Level
        # =====

        handler.setLevel(level)

        # Installation
        # ============

        root_logger = logging.getLogger()
        root_logger.addHandler(handler)
        root_logger.setLevel(level)  # bah
示例#18
0
def get_logger() -> Logger:
    """Return the project logger with an INFO stdout handler and a
    DEBUG-only handler carrying file/line information."""
    logger = getLogger("{{cookiecutter.project_slug}}")

    # Production runs at INFO; every other environment gets full DEBUG.
    is_prod = settings.python_env.startswith("prod")
    logger.setLevel(INFO if is_prod else DEBUG)

    std_handler = StreamHandler()
    std_handler.setLevel(INFO)
    std_handler.setFormatter(
        Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
    )

    # Emits only DEBUG records, formatted with path and line number.
    debug_handler = StreamHandler()
    debug_handler.addFilter(lambda record: record.levelno == DEBUG)
    debug_handler.setFormatter(
        Formatter(
            "%(asctime)s %(name)s %(levelname)s %(pathname)s:%(lineno)d %(message)s"
        )
    )

    # Replace (not append to) any previously attached handlers.
    logger.handlers = [std_handler, debug_handler]

    return logger
示例#19
0
def main(args):
    """Authenticate against Health Planet, download the data and save it."""
    logger = getLogger(__name__)  # kept for parity; not used below
    stream = StreamHandler()
    # Only records from the HealthPlanetDataTool logger tree pass through.
    stream.addFilter(Filter('HealthPlanetDataTool'))
    basicConfig(handlers=[stream])
    tool_logger = getLogger('HealthPlanetDataTool')
    tool_logger.setLevel(DEBUG if args.verbose else INFO)

    passwd_client = Password(prompt='Please enter your password: ')
    passwd = passwd_client.launch()

    client = HealthPlanetExport(client_id=args.client_id,
                                client_secret=args.client_secret,
                                login_id=args.login_id,
                                login_pass=passwd)
    client.get_auth()
    client.get_token()
    client.get_data(args.from_date, args.to_date)
    client.save(args.out_file)
示例#20
0
    def set_level(self, level: int):
        """Set the logger level and rebuild the stream/file handlers for it.

        Returns True when `level` matches one of self._choices and the
        handlers were installed; False when the level is unknown (in which
        case only setLevel() has taken effect).
        """
        self.setLevel(level)
        for chosen_level in self._choices:
            if level == chosen_level[0]:
                # reset handlers
                self.remove_handlers()
                stream_handler = StreamHandler(sys.stdout)
                stream_handler.setLevel(level)

                log_dir = f"logs/{self.room_name}/"
                if not os.path.exists(log_dir):
                    # NOTE(review): os.mkdir creates a single level — assumes
                    # "logs/" already exists; confirm or switch to makedirs.
                    os.mkdir(log_dir)

                # FIX: `file_name` used to be computed and then ignored while
                # the same f-string was rebuilt inline for the FileHandler.
                file_name = f"logs/{self.room_name}/{chosen_level[1]}.log"
                file_handler = FileHandler(filename=file_name)
                file_handler.setLevel(level)
                file_handler.setFormatter(file_formatter)

                # secondary log file that contains only messages.
                if self.level == self.CHAT:
                    self.add_chat_handler()
                    stream_handler.addFilter(ChatFilter())
                    stream_handler.setFormatter(terminal_formatter)

                else:
                    stream_handler.addFilter(DebugFilter())
                    stream_handler.setFormatter(terminal_formatter)

                    if self.chat_handler_enabled:
                        self.add_chat_handler()

                self.addHandler(file_handler)

                # log to the terminal.

                self.addHandler(stream_handler)
                return True
        # level was not set
        return False
示例#21
0
def init_logging():
    """Attach JSON stdout/stderr handlers and apply per-logger level config."""
    formatter = jsonlogger.JsonFormatter(format_str)

    # stdout carries only DEBUG/INFO; higher severities go to stderr.
    stdout_handler = StreamHandler(sys.stdout)
    stdout_handler.setFormatter(formatter)
    stdout_handler.setLevel(DEBUG)
    stdout_handler.addFilter(lambda record: record.levelno in {DEBUG, INFO})

    stderr_handler = StreamHandler(sys.stderr)
    stderr_handler.setFormatter(formatter)
    stderr_handler.setLevel(WARNING)

    app_logger.addHandler(stdout_handler)
    app_logger.addHandler(stderr_handler)

    # Entries in log_levels look like "some.logger=LEVEL".
    for entry in log_levels:
        if '=' not in entry:
            continue
        parts = entry.strip().split('=')
        if parts[0] == logger_name:
            app_logger.setLevel(parts[1].upper())
        getLogger(parts[0]).setLevel(parts[1].upper())
示例#22
0
def create_logger(name=None):
    """Create a DEBUG-level logger: INFO+ on stdout, DEBUG records on stderr."""
    logger = getLogger(name)
    logger.setLevel(DEBUG)

    # One shared formatter for both handlers.
    formatter = Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # stdout: INFO and above.
    info_handler = StreamHandler(stdout)
    info_handler.setLevel(INFO)
    info_handler.setFormatter(formatter)
    logger.addHandler(info_handler)

    # stderr: DEBUG threshold, restricted by LogLevelFilter(DEBUG) —
    # presumably passes only DEBUG records; confirm against its definition.
    debug_handler = StreamHandler(stderr)
    debug_handler.setLevel(DEBUG)
    debug_handler.setFormatter(formatter)
    debug_handler.addFilter(LogLevelFilter(DEBUG))
    logger.addHandler(debug_handler)

    return logger
示例#23
0
    def __init__(
        self,
        name,
        level: str = "INFO",
        tag: Optional[str] = None,
        file: Optional[Path] = None,
        clear: bool = False,
    ) -> None:
        """
        Initializes an object of type 'Log'.

        Parameters
        ----------
        name : str
            The name of the logger, usually '__name__' is best praxis.
        level : str
            The level to log on (DEBUG, INFO, WARNING, ERROR or CRITICAL).
        tag : str, optional
            Tag of the logging object (e.g. local or remote), by default None.
        file : Path, optional
            Path to the log file, by default None.
        clear : bool, optional
            Clear the provided log file, by default False.
        """
        self.name = name
        self.level = level
        self.tag = tag
        self.file = file

        # Optionally wipe an existing log file before configuring anything.
        if clear:
            self.clear_log_file()

        # Resolve the textual level and (re)initialize the logger instance,
        # dropping any handlers left over from a previous configuration.
        numeric_level = self._level_from_str(level)
        level = level.upper()
        formatter = Formatter(LOG_FORMAT, LOG_DATETIME_FORMAT)
        logger = getLogger(name)
        if logger.hasHandlers():
            logger.debug(self._format("Clear handlers"))
            logger.handlers.clear()
        logger.setLevel(numeric_level)

        # Console output is split: records up to INFO on stdout,
        # WARNING and above on stderr.
        out_handler = StreamHandler(stdout)
        out_handler.setFormatter(formatter)
        out_handler.setLevel(DEBUG)
        out_handler.addFilter(lambda record: record.levelno <= INFO)
        err_handler = StreamHandler(stderr)
        err_handler.setFormatter(formatter)
        err_handler.setLevel(WARNING)
        logger.addHandler(out_handler)
        logger.addHandler(err_handler)

        # Optional rotating file sink capturing everything from DEBUG up.
        if file is not None:
            file_handler = RotatingFileHandler(Path(file),
                                               maxBytes=512,
                                               backupCount=0)
            file_handler.setFormatter(formatter)
            file_handler.setLevel(DEBUG)
            logger.addHandler(file_handler)
            logger.debug(self._format(f"Set file handler to '{file}'"))

        # Complete initialization
        logger.debug(self._format("Set stream handler to 'STDOUT/STDERR'"))
        logger.debug(self._format(f"Set log level to '{level}'"))
        self.logger = logger
        self.logger.debug(self._format("Logger initialized"))
示例#24
0
def get_logger_settings(
    env_name,
    log_dir,
    log_file_name,
    application_log_level='DEBUG',
    logstash_listner_ip=None,
    logstash_listner_port=None,
    logstash_tags=[],  # NOTE(review): mutable default argument — shared across calls; safe only if never mutated, confirm
    cloudwatch_logging_enabled=False,
    aws_access_key_id=None,
    aws_secret_access_key=None,
    aws_region_name=None,
    cloudwatch_log_group=None,
    cloud_watch_log_stream=None,
    sentry_logging_enabled=False,
):
    """Build a Django LOGGING dict-config and wire up queue-listener handlers.

    Returns the dict for logging.config.dictConfig.  'application' records go
    through a QueueHandler; the module-level `queue_listner` (defined outside
    this function — confirm) fans them out to the console, admin-email, file
    and (optional) logstash socket handlers constructed here.  Sentry and
    CloudWatch handlers are appended to the 'application' logger on demand.
    """
    boto3_session = Session(aws_access_key_id=aws_access_key_id,
                            aws_secret_access_key=aws_secret_access_key,
                            region_name=aws_region_name)

    # Formatters
    verbose_formatter = logging.Formatter(
        '[%(timestamp)s] [{env_name}] [%(levelname)s] [%(pathname)s:%(lineno)d] %(message)s'
        .format(env_name=env_name),
        datefmt='%Y-%m-%d %H:%M:%S')

    # Console output only when Django DEBUG is on; TimestampFilter supplies
    # the %(timestamp)s field used by verbose_formatter.
    console_handler = StreamHandler()
    console_handler.addFilter(RequireDebugTrue())
    console_handler.addFilter(TimestampFilter())
    console_handler.setFormatter(verbose_formatter)

    # HTML error mails to the site admins in production (DEBUG off).
    mail_admins_handler = AdminEmailHandler()
    mail_admins_handler.include_html = True
    mail_admins_handler.setLevel(logging.ERROR)
    mail_admins_handler.addFilter(RequireDebugFalse())
    mail_admins_handler.addFilter(TimestampFilter())
    mail_admins_handler.setFormatter(verbose_formatter)

    # Rotating application log file (20 MiB x 7 backups).
    file_handler = RotatingFileHandler(filename=log_dir + '/' + log_file_name,
                                       maxBytes=20 * 1024 * 1024,
                                       backupCount=7)
    file_handler.setLevel(logging.DEBUG)
    file_handler.addFilter(TimestampFilter())
    file_handler.setFormatter(verbose_formatter)

    # Optional logstash socket handler, only when both ip and port are given.
    socket_handler = None
    if logstash_listner_ip is not None and logstash_listner_port is not None:
        socket_handler = SocketLogstashHandler(logstash_listner_ip,
                                               logstash_listner_port)
        socket_handler.setLevel(logging.ERROR)
        socket_handler.addFilter(RequireDebugTrue())
        socket_handler.addFilter(TimestampFilter())
        socket_handler.setFormatter(verbose_formatter)
        socket_handler.tags = logstash_tags

    logging_dict = {
        'version': 1,
        'disable_existing_loggers': False,
        'filters': {
            'require_debug_false': {
                '()': 'django.utils.log.RequireDebugFalse'
            },
            'require_debug_true': {
                '()': 'django.utils.log.RequireDebugTrue'
            },
            'execpath': {
                '()': ExecpathFilter,
            },
            'timestamp': {
                '()': TimestampFilter,
            },
        },
        'formatters': {
            'simple': {
                'format': '[%(asctime)s] %(levelname)s %(message)s',
                'datefmt': '%Y-%m-%d %H:%M:%S'
            },
            'verbose': {
                'format':
                '[%(timestamp)s] [{env_name}] [%(levelname)s] [%(pathname)s:%(lineno)d] %(message)s'
                .format(env_name=env_name),
                'datefmt':
                '%Y-%m-%d %H:%M:%S'
            },
            'default': {
                # To be used with default handler only.
                'format':
                '[%(timestamp)s] [{env_name}] [%(levelname)s] [%(execpath)s:%(execline)d] %(execmsg)s'
                .format(env_name=env_name),
                'datefmt':
                '%Y-%m-%d %H:%M:%S'
            },
        },
        'handlers': {
            'default': {
                'level': 'DEBUG',
                'filters': ['timestamp', 'execpath'],
                'class': 'logging.FileHandler',
                'filename': log_dir + '/' + log_file_name,
                'formatter': 'default'
            },
            'console': {
                'filters': ['require_debug_true', 'timestamp'],
                'class': 'logging.StreamHandler',
                'formatter': 'verbose'
            },
            'mail_admins': {
                'filters': [
                    'require_debug_false',
                ],
                'class': 'django.utils.log.AdminEmailHandler',
                'include_html': True,
                'level': 'ERROR',
            },
            'file_error': {
                'class': 'logging.FileHandler',
                'filters': ['timestamp'],
                'filename': log_dir + '/' + log_file_name,
                'formatter': 'verbose',
            },
            'queue_handler': {
                'class': 'logging.handlers.QueueHandler',
                'filters': ['timestamp'],
                'formatter': 'verbose',
                'queue': log_queue
            },
        },
        'loggers': {
            'django.request': {
                'handlers': [
                    'default',
                    'mail_admins',
                ],
                'level': 'ERROR',
                'propagate': True
            },
            'django.security.DisallowedHost': {
                'level': 'ERROR',
                'handlers': [
                    'file_error',
                    'console',
                    'mail_admins',
                ],
                'propagate': True
            },
            'application': {
                'handlers': ['queue_handler'],
                'level': application_log_level,
                'propagate': True
            },
        },
    }
    # Optional sinks are appended to the 'application' logger below.
    if sentry_logging_enabled:
        logging_dict['handlers']['sentry'] = {
            'level':
            'ERROR',  # To capture more than ERROR, change to WARNING, INFO, etc.
            'class':
            'raven.contrib.django.raven_compat.handlers.SentryHandler',
            'tags': {},
        }
        logging_dict['loggers']['application']['handlers'].append('sentry')

    if cloudwatch_logging_enabled:
        logging_dict['handlers']['watchtower'] = {
            'level': 'DEBUG',
            'class': 'watchtower.CloudWatchLogHandler',
            'boto3_session': boto3_session,
            'log_group': cloudwatch_log_group,
            'stream_name': cloud_watch_log_stream,
            'formatter': 'verbose',
        }
        logging_dict['loggers']['application']['handlers'].append('watchtower')

    # The queue listener drains the QueueHandler into the concrete handlers
    # built at the top of this function.
    queue_listner.handlers = [
        console_handler, mail_admins_handler, file_handler
    ]
    if socket_handler:
        queue_listner.handlers.append(socket_handler)

    return logging_dict
示例#25
0
from email.mime.multipart import MIMEMultipart  # Used for email notification

from pathlib import Path  # Used for specifying a generic path
import logging  # Used in logging
from logging import StreamHandler, Formatter  # Used in logging

#region ~~~~~~~~~~  Global variables  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Looking for a better way to do this ...
normalfont = ('Helvetica', '10', 'normal')
boldfont = ('Helvetica', '10', 'bold')
EOL = '\r'  # Constant end of line character.  This must match with Arduino starter code.
#endregion

#region ~~~~~~~~~~  Code to including error report via logging ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Three console handlers: INFO-range on stdout, WARNING+ on stderr, and a
# separate DEBUG-range handler (also stdout) without a formatter.
stdout = StreamHandler(sys.stdout)
stdout.addFilter(lambda r: logging.INFO <= r.levelno < logging.WARNING)
stdout.setFormatter(Formatter())  # Don't take the default format we set later

stderr = StreamHandler(sys.stderr)
stderr.addFilter(lambda r: r.levelno >= logging.WARNING)
stderr.setFormatter(Formatter())  # Don't take the default format we set later

debug_output = StreamHandler(sys.stdout)
# BUG FIX: the filter compared against logging.debug (the *function*), which
# raises TypeError on every record; logging.DEBUG (the constant) was meant.
debug_output.addFilter(lambda r: logging.DEBUG <= r.levelno < logging.INFO)

# Configure logging
logging.basicConfig(
    format="%(asctime)s -- %(name)s/ -- %(levelname)s :: %(message)s",
    datefmt="%c",
    level=logging.
    DEBUG,  # if debugging level disabled above, effectively sets level to logging.INFO
示例#26
0
drophour = 16

headless = False  #True

debug = 1


# NOTE(review): this class is registered via `h.addFilter(Filter)` below —
# the *class itself* is passed, not an instance, so logging calls
# Filter.filter(record) through the class and the record arrives as `event`
# (there is no `self`).  It drops records from two chatty third-party
# loggers so DEBUG-level output stays readable.
class Filter:
    def filter(event):
        return event.name not in (
            'requests.packages.urllib3.connectionpool',
            'selenium.webdriver.remote.remote_connection')


h = StreamHandler()
h.addFilter(Filter)

# Effective level: the later assignment wins, so logging runs at DEBUG.
ll = WARNING
#ll=INFO
ll = DEBUG

basicConfig(format='{asctime} {threadName:11s}: {message}',
            datefmt="%H:%M:%S",
            style='{',
            level=ll,
            handlers=[h])

lg = getLogger(__name__)

accounts = []
with open('100 Gmail.csv') as f:
示例#27
0
def get_script_logger(debug=True, quiet=False, config=None):
    """Creates a script logger to log to files / streams."""

    # Format/datefmt come from config when available, else the defaults.
    fmt = (config and config['LOG_FILE_FORMAT']
           or '%(levelname)s %(asctime)s %(message)s')
    datefmt = (config and config['LOG_FILE_DATEFMT']
               or '[%Y-%m-%d %H:%M:%S]')

    info_file_handler = None
    error_file_handler = None

    if config:
        # Rotating file sinks under the configured LOG_FOLDER; the info log
        # is capped by LevelSpecificLogFilter(WARNING).
        info_file_handler = RotatingFileHandler(
            path.join(config['LOG_FOLDER'], 'script_info.log'),
            maxBytes=100000,
            backupCount=5)
        info_file_handler.setLevel(logging.INFO)
        info_file_handler.setFormatter(Formatter(fmt, datefmt))
        info_file_handler.addFilter(LevelSpecificLogFilter(logging.WARNING))

        error_file_handler = RotatingFileHandler(
            path.join(config['LOG_FOLDER'], 'script_error.log'),
            maxBytes=100000,
            backupCount=5)
        error_file_handler.setLevel(logging.ERROR)
        error_file_handler.setFormatter(Formatter(fmt, datefmt))

    # Stream sinks; quiet mode points them at os.devnull instead.
    info_stream_handler = StreamHandler(
        open(os.devnull, 'a') if quiet else sys.stdout)
    info_stream_handler.setLevel(logging.INFO)
    info_stream_handler.setFormatter(Formatter(fmt, datefmt))
    info_stream_handler.addFilter(LevelSpecificLogFilter(logging.WARNING))

    error_stream_handler = StreamHandler(
        open(os.devnull, 'a') if quiet else sys.stderr)
    error_stream_handler.setLevel(logging.ERROR)
    error_stream_handler.setFormatter(Formatter(fmt, datefmt))

    script_logger = logging.getLogger('script')
    script_logger.setLevel(logging.INFO)

    if config:
        script_logger.addHandler(info_file_handler)
        script_logger.addHandler(error_file_handler)

    script_logger.addHandler(info_stream_handler)
    script_logger.addHandler(error_stream_handler)

    # Non-debug runs with a config additionally e-mail errors.
    if (not debug) and config:
        script_logger.addHandler(get_mail_handler(config))

    return script_logger
示例#28
0
# Two-line record layout: the prefixed message followed by source info.
fmt = f"{prefixedMessage}\n{sourceInfo}"

utcFormatter = UtcFormatter(fmt=fmt, datefmt=datetimeFormat, style=style)

# ---------------------------- initialize logging -----------------------------

rootLogger = getLogger()

# Unset log level on the root Logger instance (`WARNING` by default).
# - Let individual handlers filter on the basis of level, depending on
#   (a) their responsibility and (b) the currently set cmdline args.
# - Adjust on using class' initialization.
rootLogger.setLevel(LogLevel.NotSet)

# Let `Warning` (numeric value 30) be the threshold between stdout & stderr.
logLevelThresholdOutErr = LogLevel.Warning

stderrHandler = StreamHandler(stderr)
stdoutHandler = StreamHandler(stdout)

stderrHandler.setLevel(logLevelThresholdOutErr)
stdoutHandler.setLevel(LogLevel.NotSet)
# Keep records below the threshold on stdout only (1 = pass, 0 = drop).
stdoutHandler.addFilter(lambda record: 1
                        if record.levelno < logLevelThresholdOutErr else 0)

# Both streams share the UTC formatter defined above.
stderrHandler.setFormatter(utcFormatter)
stdoutHandler.setFormatter(utcFormatter)

rootLogger.addHandler(stderrHandler)
rootLogger.addHandler(stdoutHandler)
示例#29
0
def setup_syslog(verbosity, quiet=False, logdest="syslog"):
    '''
    Configure the root logger to emit either to a syslog daemon or, as a
    fallback, to stdout.

    :param verbosity: integer-convertible verbosity level. 0 selects INFO;
        levels 1-5 select DEBUG with progressively fewer module filters
        attached to the handler; >= 6 selects unfiltered DEBUG.
    :param quiet: when True, force the logger level to ERROR regardless of
        the requested verbosity.
    :param logdest: "console" to log to stdout; any other value attempts the
        syslog endpoint described by the "log store" connection parameters
        returned by get_stores_parms().
    '''
    try:

        _fmsg = ""
        _status = 100

        _my_uuid, _oscp, _mscp, _lscp = get_stores_parms()

        # HACK ALERT - A very crude "syslog facility selector": maps the
        # numeric facility codes 16-23 onto LOG_LOCAL0-LOG_LOCAL7.
        _syslog_selector = {
            "16": SysLogHandler.LOG_LOCAL0,
            "17": SysLogHandler.LOG_LOCAL1,
            "18": SysLogHandler.LOG_LOCAL2,
            "19": SysLogHandler.LOG_LOCAL3,
            "20": SysLogHandler.LOG_LOCAL4,
            "21": SysLogHandler.LOG_LOCAL5,
            "22": SysLogHandler.LOG_LOCAL6,
            "23": SysLogHandler.LOG_LOCAL7,
        }

        _verbosity = int(verbosity)

        logger = getLogger()

        # Remove any handlers left over from a previous configuration.
        while logger.handlers:
            logger.removeHandler(logger.handlers[0])

        # Fall back to the console when explicitly requested or when the
        # syslog endpoint is not fully specified.
        if logdest == "console" or (not _lscp["hostname"]
                                    or not _lscp["port"]):
            hdlr = StreamHandler(stdout)
        else:
            # Facility is currently hard-coded to 21 (LOG_LOCAL5); the clamp
            # below guards any future change against values outside 16-23.
            _facility = 21

            if _facility > 23 or _facility < 16:
                _facility = 23

            hdlr = SysLogHandler(
                address=(_lscp["hostname"], int(_lscp["port"])),
                facility=_syslog_selector[str(_facility)])

        formatter = Formatter("[%(asctime)s] [%(levelname)s] %(message)s")
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)

        if _verbosity:
            # Module-name prefixes whose records are filtered out at each
            # verbosity level; higher verbosity filters out fewer modules.
            # None (no matching level, e.g. a negative verbosity) leaves the
            # logger level untouched, matching the original ladder.
            _filtered_modules = None
            if _verbosity >= 6:
                _filtered_modules = []
            elif _verbosity >= 5:
                # Filter out all function calls from all modules in the
                # "stores" subdirectory.
                _filtered_modules = ["stores", "datastore"]
            elif _verbosity >= 4:
                # Additionally filter out the "auxiliary" subdirectory.
                _filtered_modules = ["auxiliary", "stores", "datastore"]
            elif _verbosity >= 3:
                # Additionally filter out gmetad and "remote", plus the
                # function entry/exit trace messages.
                _filtered_modules = ["gmetad", "auxiliary", "remote",
                                     "stores", "datastore"]
                hdlr.addFilter(MsgFilter("Exit point"))
                hdlr.addFilter(MsgFilter("Entry point"))
            elif _verbosity >= 2:
                # Additionally filter out the "collectors" subdirectory.
                _filtered_modules = ["gmetad", "auxiliary", "collectors",
                                     "remote", "stores", "datastore"]
            elif _verbosity == 1:
                # Most restrictive: also filter out the "clouds" modules.
                _filtered_modules = ["gmetad", "auxiliary", "stores",
                                     "datastore", "collectors", "remote",
                                     "clouds"]

            if _filtered_modules is not None:
                for _module_name in _filtered_modules:
                    hdlr.addFilter(VerbosityFilter(_module_name))
                logger.setLevel(DEBUG)
        else:
            logger.setLevel(INFO)

        if quiet:
            logger.setLevel(ERROR)

        _status = 0

    # NOTE: was "except Exception, e" (Python-2-only syntax); "as" is valid
    # on both Python 2.6+ and Python 3.
    except Exception as e:
        _status = 23
        _fmsg = str(e)
示例#30
0
def get_logger_settings(env_name,
                        log_dir,
                        log_file_name,
                        application_log_level='DEBUG',
                        logstash_listner_ip=None,
                        logstash_listner_port=None,
                        logstash_tags=None):
    """
    Build a dictConfig-style logging configuration and wire up the module's
    queue listener handlers.

    :param env_name: environment label embedded in every log line.
    :param log_dir: directory holding the log file (joined with '/').
    :param log_file_name: basename of the log file.
    :param application_log_level: level string for the 'application' logger.
    :param logstash_listner_ip: optional logstash host; socket handler is
        only created when both ip and port are given.
    :param logstash_listner_port: optional logstash port.
    :param logstash_tags: optional list of tags for the logstash handler.
    :return: dict suitable for ``logging.config.dictConfig``.

    Side effects: replaces ``queue_listner.handlers`` (module-level queue
    listener) with the handlers constructed here.
    """
    # Use a None sentinel instead of a mutable [] default: a literal list
    # default is created once and shared (and potentially mutated) across
    # every call to this function.
    if logstash_tags is None:
        logstash_tags = []

    # Formatters
    verbose_formatter = logging.Formatter(
        '[%(timestamp)s] [{env_name}] [%(levelname)s] [%(pathname)s:%(lineno)d] %(message)s'
        .format(env_name=env_name),
        datefmt='%Y-%m-%d %H:%M:%S')

    # Console output only when debug mode is on (RequireDebugTrue).
    console_handler = StreamHandler()
    console_handler.addFilter(RequireDebugTrue())
    console_handler.addFilter(TimestampFilter())
    console_handler.setFormatter(verbose_formatter)

    # Emails admins on ERROR and above, production (non-debug) only.
    mail_admins_handler = AdminEmailHandler()
    mail_admins_handler.include_html = True
    mail_admins_handler.setLevel(logging.ERROR)
    mail_admins_handler.addFilter(RequireDebugFalse())
    mail_admins_handler.addFilter(TimestampFilter())
    mail_admins_handler.setFormatter(verbose_formatter)

    # Rotating file log: 20 MiB per file, 7 backups.
    file_handler = RotatingFileHandler(filename=log_dir + '/' + log_file_name,
                                       maxBytes=20 * 1024 * 1024,
                                       backupCount=7)
    file_handler.setLevel(logging.DEBUG)
    file_handler.addFilter(TimestampFilter())
    file_handler.setFormatter(verbose_formatter)

    # Optional logstash socket handler; only built when both endpoint
    # parameters are supplied.
    socket_handler = None
    if logstash_listner_ip is not None and logstash_listner_port is not None:
        socket_handler = SocketLogstashHandler(logstash_listner_ip,
                                               logstash_listner_port)
        socket_handler.setLevel(logging.ERROR)
        socket_handler.addFilter(RequireDebugTrue())
        socket_handler.addFilter(TimestampFilter())
        socket_handler.setFormatter(verbose_formatter)
        socket_handler.tags = logstash_tags

    logging_dict = {
        'version': 1,
        'disable_existing_loggers': False,
        'filters': {
            'require_debug_false': {
                '()': 'django.utils.log.RequireDebugFalse'
            },
            'require_debug_true': {
                '()': 'django.utils.log.RequireDebugTrue'
            },
            'execpath': {
                '()': ExecpathFilter,
            },
            'timestamp': {
                '()': TimestampFilter,
            },
        },
        'formatters': {
            'simple': {
                'format': '[%(asctime)s] %(levelname)s %(message)s',
                'datefmt': '%Y-%m-%d %H:%M:%S'
            },
            'verbose': {
                'format':
                '[%(timestamp)s] [{env_name}] [%(levelname)s] [%(pathname)s:%(lineno)d] %(message)s'
                .format(env_name=env_name),
                'datefmt':
                '%Y-%m-%d %H:%M:%S'
            },
            'default': {
                # To be used with default handler only.
                'format':
                '[%(timestamp)s] [{env_name}] [%(levelname)s] [%(execpath)s:%(execline)d] %(execmsg)s'
                .format(env_name=env_name),
                'datefmt':
                '%Y-%m-%d %H:%M:%S'
            },
        },
        'handlers': {
            'default': {
                'level': 'DEBUG',
                'filters': ['timestamp', 'execpath'],
                'class': 'logging.FileHandler',
                'filename': log_dir + '/' + log_file_name,
                'formatter': 'default'
            },
            'console': {
                'filters': ['require_debug_true', 'timestamp'],
                'class': 'logging.StreamHandler',
                'formatter': 'verbose'
            },
            'mail_admins': {
                'filters': [
                    'require_debug_false',
                ],
                'class': 'django.utils.log.AdminEmailHandler',
                'include_html': True,
                'level': 'ERROR',
            },
            'file_error': {
                'class': 'logging.FileHandler',
                'filters': ['timestamp'],
                'filename': log_dir + '/' + log_file_name,
                'formatter': 'verbose',
            },
            'queue_handler': {
                'class': 'logging.handlers.QueueHandler',
                'filters': ['timestamp'],
                'formatter': 'verbose',
                # log_queue is a module-level queue shared with queue_listner.
                'queue': log_queue
            }
        },
        'loggers': {
            'django.request': {
                'handlers': [
                    'default',
                    'mail_admins',
                ],
                'level': 'ERROR',
                'propagate': True
            },
            'django.security.DisallowedHost': {
                'level': 'ERROR',
                'handlers': [
                    'file_error',
                    'console',
                    'mail_admins',
                ],
                'propagate': True
            },
            'application': {
                'handlers': ['queue_handler'],
                'level': application_log_level,
                'propagate': True
            },
        },
    }

    # NOTE(review): "queue_listner" (sic) is a module-level QueueListener
    # defined elsewhere in this file; its spelling cannot be changed here.
    queue_listner.handlers = [
        console_handler, mail_admins_handler, file_handler
    ]
    if socket_handler:
        queue_listner.handlers.append(socket_handler)

    return logging_dict
示例#31
0
class LogConf(object):
    '''
    Application-specific logging configuration.

    This configuration defines sane default filters, formatters, and handlers
    for the root logger, which callers may customize (e.g., according to
    user-defined settings) by setting various properties of this configuration.

    Caveats
    ----------
    **The :meth:`__init__` method may raise exceptions.** Hence, this class
    should be instantiated at application startup by an explicit call to the
    :func:`betse.util.io.log.conf.logconf.init` function *after* establishing
    default exception handling.

    **The :meth:`deinit` method must be called at application shutdown.** Doing
    so closes the logfile handle opened by the :meth:`__init__` method. Failure
    to do so will reliably raise non-fatal warnings (hidden by default, but
    visible while running tests) resembling:

        C:\\projects\\betse\\betse\\util\\io\\log\\conf\\logconf.py:44:
        ResourceWarning: unclosed file <_io.TextIOWrapper
        name='C:\\Users\\appveyor\\AppData\\Roaming\\betse\\betse.log' mode='a'
        encoding='utf-8'>
            _log_conf = LogConf()

    Default Settings
    ----------
    All loggers will implicitly propagate messages to the root logger
    configured by this class, whose output will be:

    * Formatted in a timestamped manner detailing the point of origin (e.g.,
      ``[2016-04-03 22:02:47] betse ERROR (util.py:50): File not found.``).
    * Labelled as the current logger's name, defaulting to `root`. Since this
      is *not* a terribly descriptive name, callers are encouraged to replace
      this by an application-specific name.
    * Printed to standard error if the logging level for this output is either
      ``WARNING``, ``ERROR``, or ``CRITICAL``.
    * Printed to standard output if the logging level for this output is
      ``INFO``. Together with the prior item, this suggests that output with a
      logging level of ``DEBUG`` will *not* be printed by default.
    * Appended to the user-specific file defined by the
      :meth:`app_meta.log_default_filename` property, whose:

      * Level defaults to :data:`logger.ALL`. Hence, *all* messages will be
        logged by default, including low-level debug messages. (This is
        helpful for debugging client-side errors.)
      * Contents will be automatically rotated on exceeding a sensible filesize
        (e.g., 16Kb).

    If the default log levels are undesirable, consider subsequently calling
    this logger's :meth:`Logger.setLevel` method. Since a desired log level is
    typically unavailable until after parsing CLI arguments and/or
    configuration file settings *and* since a logger is required before this
    level becomes available, this class provides a sane interim default.

    Attributes
    ----------
    _filename : str
        Absolute or relative path of the file logged to by the file handler,
        defaulting to :meth:`app_meta.log_default_filename`.
    _logger_root : Logger
        Root logger.
    _logger_root_handler_file : Handler
        Root logger handler appending to the current logfile.
    _logger_root_handler_stderr : Handler
        Root logger handler printing to standard error.
    _logger_root_handler_stdout : Handler
        Root logger handler printing to standard output.
    '''

    # ..................{ INITIALIZERS                      }..................
    def __init__(self):
        '''
        Initialize this logging configuration.

        Specifically, this method associates the root logger with a rotating
        logfile whose handle remains open until the :meth:`deinit` method is
        subsequently called at application shutdown.
        '''

        # Avoid circular import dependencies.
        from betse.util.test import tsttest

        # Initialize the superclass.
        super().__init__()

        # Initialize all non-property attributes to sane defaults. To avoid
        # chicken-and-egg issues, properties should *NOT* be set here.
        self._deinit_vars()

        # Initialize the root logger.
        self._init_logger_root()

        # Initialize root logger handlers *AFTER* the root logger, as the
        # former explicitly add themselves to the latter.
        self._init_logger_root_handler_std()
        self._init_logger_root_handler_file()

        # Redirect all warnings through the logging framework *AFTER*
        # successfully performing the above initialization.
        logging.captureWarnings(True)

        # If the active Python interpreter is running an automated test suite,
        # manually increase logging verbosity as soon as feasible: i.e., here.
        # While deferring this configuration to elsewhere (namely, the
        # "betse_test.fixture.initter" submodule) is also feasible, doing so
        # would horrifyingly squelch all early-time debug messages.
        if tsttest.is_testing():
            self.is_verbose = True

    def _init_logger_root(self) -> None:
        '''
        Initialize the root logger.

        For safety, this function removes all previously initialized handlers
        from this logger.
        '''

        # Avoid circular import dependencies.
        from betse.util.app.meta import appmetaone

        # Root logger.
        self._logger_root = logging.getLogger()

        # For uniqueness, change the name of the root logger to that of our
        # top-level package "betse" from its ambiguous default "root".
        self._logger_root.name = appmetaone.get_app_meta().package_name

        # Instruct this logger to entertain all log requests, ensuring these
        # requests will be delegated to the handlers defined below. By default,
        # this logger ignores all log requests with level less than "WARNING",
        # preventing handlers from receiving these requests.
        self._logger_root.setLevel(LogLevel.ALL)

        # Safely remove all existing handlers from the root logger *BEFORE*
        # adding new handlers to this logger.
        #
        # The root logger should have no handlers under conventional use cases.
        # The root logger only has handlers for functional tests *NOT*
        # parallelized by "xdist" and hence running in the same Python process.
        self._deinit_logger_root_handlers()

    def _init_logger_root_handler_std(self) -> None:
        '''
        Initialize root logger handlers redirecting log messages to the
        standard stdout and stderr file handles.
        '''

        # Avoid circular import dependencies.
        from betse.util.io.log.logfilter import (LogFilterThirdPartyDebug,
                                                 LogFilterMoreThanInfo)
        from betse.util.io.log.conf.logconfformat import LogFormatterWrap
        from betse.util.os.command import cmds

        # Initialize the stdout handler to:
        #
        # * Log only informational messages by default.
        # * Unconditionally ignore all warning and error messages, which the
        #   stderr handler already logs.
        #
        # Sadly, the "StreamHandler" constructor does *NOT* accept the
        # customary "level" attribute accepted by its superclass constructor.
        self._logger_root_handler_stdout = StreamHandler(sys.stdout)
        self._logger_root_handler_stdout.setLevel(LogLevel.INFO)
        self._logger_root_handler_stdout.addFilter(LogFilterMoreThanInfo())

        # Initialize the stderr handler to:
        #
        # * Log only warning and error messages by default.
        # * Unconditionally ignore all informational and debug messages, which
        #   the stdout handler already logs.
        self._logger_root_handler_stderr = StreamHandler(sys.stderr)
        self._logger_root_handler_stderr.setLevel(LogLevel.WARNING)

        # Avoid printing third-party debug messages to the terminal.
        self._logger_root_handler_stdout.addFilter(LogFilterThirdPartyDebug())
        self._logger_root_handler_stderr.addFilter(LogFilterThirdPartyDebug())

        #FIXME: Consider colourizing this format string.

        # Format standard output and error in the conventional way. For a list
        # of all available log record attributes, see:
        #
        #     https://docs.python.org/3/library/logging.html#logrecord-attributes
        #
        # Note that "{{" and "}}" substrings in format() strings escape literal
        # "{" and "}" characters, respectively.
        stream_format = '[{}] {{message}}'.format(cmds.get_current_basename())

        # Formatter for this format.
        stream_formatter = LogFormatterWrap(fmt=stream_format, style='{')

        # Assign these formatters to these handlers.
        self._logger_root_handler_stdout.setFormatter(stream_formatter)
        self._logger_root_handler_stderr.setFormatter(stream_formatter)

        # Register these handlers with the root logger.
        self._logger_root.addHandler(self._logger_root_handler_stdout)
        self._logger_root.addHandler(self._logger_root_handler_stderr)

    def _init_logger_root_handler_file(self) -> None:
        '''
        Initialize the root logger handler appending log messages to the
        currently open logfile file handle.

        This method is designed to be called multiple times, permitting the
        filename associated with this handler to be modified at runtime.
        '''

        # Avoid circular import dependencies.
        from betse.util.io.log.logfilter import LogFilterThirdPartyDebug
        from betse.util.io.log.conf.logconfformat import LogFormatterWrap
        from betse.util.io.log.conf.logconfhandle import (
            LogHandlerFileRotateSafe)
        from betse.util.path import pathnames
        from betse.util.os.command import cmds
        from betse.util.type.numeric import ints

        # Absolute or relative path of the directory containing this file.
        file_dirname = pathnames.get_dirname(self._filename)

        # Minimum level of messages to be logged to disk, defaulting to "INFO".
        file_level = LogLevel.INFO

        # If this handler has already been created...
        if self._logger_root_handler_file is not None:
            # Preserve the previously set minimum level of messages to log.
            file_level = self._logger_root_handler_file.level

            # If the root logger has also already been created, remove this
            # handler from this root logger.
            if self._logger_root is not None:
                self._logger_root.removeHandler(self._logger_root_handler_file)

        # If the dirname of the directory containing this file is non-empty,
        # create this directory if needed. Note this dirname is empty when this
        # filename is a pure basename (e.g., when the "--log-file=my.log"
        # option is passed).
        #
        # For safety, this directory is created with standard low-level Python
        # functionality rather than our custom higher-level
        # dirs.make_parent_unless_dir() function. The latter logs this
        # creation. Since the root logger is *NOT* fully configured yet,
        # calling that function here would induce subtle errors or exceptions.
        if file_dirname:
            os.makedirs(file_dirname, exist_ok=True)

        # Root logger file handler, preconfigured as documented above.
        self._logger_root_handler_file = LogHandlerFileRotateSafe(
            filename=self._filename,

            # Append rather than overwrite this file.
            mode='a',

            # Defer opening this file in a just-in-time manner (i.e., until the
            # first call to this handler's emit() method is called to write the
            # first log via this handler). Why? Because (in no particular
            # order):
            #
            # * If the end user requests that *NO* messages be logged to disk
            #   (e.g., by passing the "--log-level=none" option), this file
            #   should *NEVER* be opened and hence created. The simplest means
            #   of doing so is simply to indefinitely defer opening this file.
            # * Doing so slightly reduces the likelihood (but does *NOT*
            #   eliminate the possibility) of race conditions between multiple
            #   BETSE processes attempting to concurrently rotate the same
            #   logfile.
            delay=True,

            # Encode this file's contents as UTF-8.
            encoding='utf-8',

            # Maximum filesize in bytes at which to rotate this file,
            # equivalent to 1MiB.
            maxBytes=ints.MiB,

            # Maximum number of rotated logfiles to maintain.
            backupCount=8,
        )

        # Initialize this handler's level to the previously established level.
        self._logger_root_handler_file.setLevel(file_level)

        # Prevent third-party debug messages from being logged to disk.
        self._logger_root_handler_file.addFilter(LogFilterThirdPartyDebug())

        # Linux-style logfile format.
        #
        # Note that the "processName" attribute appears to *ALWAYS* expand to
        # "MainProcess", which is not terribly descriptive. Hence, the name of
        # the current process is manually embedded in this format.
        file_format = ('[{{asctime}}] '
                       '{} {{levelname}} '
                       '({{module}}.py:{{funcName}}():{{lineno}}) '
                       '<PID {{process}}>:\n'
                       '    {{message}}'.format(cmds.get_current_basename()))

        # Format this file according to this format.
        file_formatter = LogFormatterWrap(fmt=file_format, style='{')
        self._logger_root_handler_file.setFormatter(file_formatter)

        # Register this handler with the root logger.
        self._logger_root.addHandler(self._logger_root_handler_file)

    # ..................{ DEINITIALIZERS                    }..................
    def deinit(self) -> None:
        '''
        Deinitialize this logging configuration.

        See Also
        ----------
        :func:`_deinit_logger_root_handlers`
        :func:`_deinit_vars`
            Further details.
        '''

        # Deinitialize all root logger handlers.
        self._deinit_logger_root_handlers()

        # Deinitialize all instance variables.
        self._deinit_vars()

    def _deinit_logger_root_handlers(self) -> None:
        '''
        Deinitialize all root logger handlers.

        Specifically, this method iterates over all handlers previously added
        to the root logger and, for each such handler (in order):

        #. Closes all open file handles associated with that handler, including
           the logfile handle opened by the :meth:`__init__` method.
        #. Removes that handler from the root logger.
        '''

        # For each handler previously added to the root logger...
        #
        # For safety, a shallow copy of the list of handlers to be removed
        # rather than the actual list being modified here is iterated over.
        for root_handler in tuple(self._logger_root.handlers):
            # Close all open file handles associated with this handler.
            root_handler.close()

            # Remove this handler from the root logger.
            self._logger_root.removeHandler(root_handler)

    def _deinit_vars(self) -> None:
        '''
        Deinitialize all instance variables underlying this logging
        configuration to sane defaults.

        To circumvent chicken-and-egg issues, this method intentionally avoids
        deinitializing (i.e., setting) settable properties.
        '''

        # Avoid circular import dependencies.
        from betse.util.app.meta import appmetaone

        # Revert all non-property attributes to sane defaults.
        self._filename = appmetaone.get_app_meta().log_default_filename
        self._logger_root = None
        self._logger_root_handler_file = None
        self._logger_root_handler_stderr = None
        self._logger_root_handler_stdout = None

    # ..................{ PROPERTIES ~ logger               }..................
    # Read-only properties prohibiting write access to external callers.

    @property
    def logger_root(self) -> RootLogger:
        '''
        **Root logger** (i.e., transitive parent of all other loggers).
        '''

        return self._logger_root

    # ..................{ PROPERTIES ~ handler              }..................
    @property
    def handler_file(self) -> Handler:
        '''
        Root logger handler appending to the current logfile if file logging is
        enabled *or* ``None`` otherwise.
        '''

        return self._logger_root_handler_file

    @property
    def handler_stderr(self) -> Handler:
        '''
        Root logger handler printing to standard error.
        '''

        return self._logger_root_handler_stderr

    @property
    def handler_stdout(self) -> Handler:
        '''
        Root logger handler printing to standard output.
        '''

        return self._logger_root_handler_stdout

    # ..................{ PROPERTIES ~ level                }..................
    @property
    def file_level(self) -> LogLevel:
        '''
        Minimum level of messages to log to the file handler.
        '''

        return self._logger_root_handler_file.level

    @file_level.setter
    @type_check
    def file_level(self, file_level: LogLevel) -> None:
        '''
        Set the minimum level of messages to log to the file handler.
        '''

        self._logger_root_handler_file.setLevel(file_level)

    # ..................{ PROPERTIES ~ level : verbose      }..................
    @property
    def is_verbose(self) -> bool:
        '''
        ``True`` only if *all* messages are to be unconditionally logged to the
        stdout handler (and hence printed to stdout).

        Equivalently, this method returns ``True`` only if the logging level
        for the stdout handler is :attr:`LogLevel.ALL`.

        Note that this logging level is publicly retrievable by accessing the
        :attr:`handler_stdout.level` property.
        '''

        return self._logger_root_handler_stdout.level == LogLevel.ALL

    @is_verbose.setter
    @type_check
    def is_verbose(self, is_verbose: bool) -> None:
        '''
        Set the verbosity of the stdout handler.

        This method sets this handler's logging level to:

        * If the passed boolean is ``True``, :attr:`LogLevel.ALL` .
        * If the passed boolean is ``False``, :attr:`LogLevel.INFO`.
        '''

        # Avoid circular import dependencies.
        from betse.util.io.log import logs

        # Convert the passed boolean to a logging level for the stdout handler.
        self._logger_root_handler_stdout.setLevel(
            LogLevel.ALL if is_verbose else LogLevel.INFO)

        # If increasing stdout verbosity, log this fact *AFTER* doing so.
        #
        # Note that reversing this order of statements would silently squelch
        # this message -- which would quite defeat the purpose.
        if is_verbose:
            logs.log_debug('Standard output verbosity enabled.')

    # ..................{ PROPERTIES ~ path                 }..................
    @property
    def filename(self) -> str:
        '''
        Absolute or relative path of the file logged to by the file handler.
        '''

        return self._filename

    @filename.setter
    @type_check
    def filename(self, filename: str) -> None:
        '''
        Set the absolute or relative path of the file logged to by the file
        handler.

        Due to flaws in the upstream :mod:`logging` API, this method
        necessarily destroys and recreates the current file handler.
        '''

        # If the passed filename is the same as the current filename, avoid
        # unnecessarily destroying and recreating the file handler. This is
        # technically a negligible optimization, but every little bit helps.
        if self._filename == filename:
            return

        # Classify this filename *BEFORE* recreating the file handler, which
        # accesses this variable.
        self._filename = filename

        # Destroy and recreate the file handler.
        self._init_logger_root_handler_file()
示例#32
0
def setup_syslog(verbosity, quiet=False, logdest="syslog"):
    '''
    Configure the root logger to emit either to the console or to a syslog
    daemon, installing per-module verbosity filters on the handler.

    :param verbosity: numeric verbosity level. 0 selects INFO; 1 through 6+
        select DEBUG with progressively fewer module filters installed
        (higher verbosity logs more modules).
    :param quiet: when True, force the root logger to ERROR regardless of
        the verbosity level.
    :param logdest: "console" forces a stdout StreamHandler; any other value
        attempts a SysLogHandler against the log store's hostname/port
        (falling back to stdout when either is missing).
    '''
    try:
        # NOTE(review): _status/_fmsg are assigned but never returned or
        # logged within this function; kept for fidelity with the original
        # error-reporting protocol.
        _fmsg = ""
        _status = 100

        _my_uuid, _oscp, _mscp, _lscp = get_stores_parms()

        # Syslog facility number -> SysLogHandler facility constant.
        _syslog_selector = {
            "16": SysLogHandler.LOG_LOCAL0,
            "17": SysLogHandler.LOG_LOCAL1,
            "18": SysLogHandler.LOG_LOCAL2,
            "19": SysLogHandler.LOG_LOCAL3,
            "20": SysLogHandler.LOG_LOCAL4,
            "21": SysLogHandler.LOG_LOCAL5,
            "22": SysLogHandler.LOG_LOCAL6,
            "23": SysLogHandler.LOG_LOCAL7,
        }

        _verbosity = int(verbosity)

        logger = getLogger()

        # Drop any previously installed handlers so repeated calls do not
        # duplicate output.
        while logger.handlers:
            logger.removeHandler(logger.handlers[0])

        if logdest == "console" or (not _lscp["hostname"] or not _lscp["port"]):
            hdlr = StreamHandler(stdout)
        else:
            # NOTE(review): the facility is hard-coded to 21 (LOG_LOCAL5);
            # the clamp below only matters if this ever becomes configurable.
            _facility = 21
            if _facility > 23 or _facility < 16:
                _facility = 23

            hdlr = SysLogHandler(
                address=(_lscp["hostname"], int(_lscp["port"])),
                facility=_syslog_selector[str(_facility)])

        hdlr.setFormatter(Formatter("[%(asctime)s] [%(levelname)s] %(message)s"))
        logger.addHandler(hdlr)

        # Module-name filters installed at each verbosity level; this table
        # replaces the original copy-pasted if/elif pyramid. Higher verbosity
        # installs fewer filters (i.e., logs more subsystems).
        _filters_by_level = {
            6: (),
            5: ("stores", "datastore"),
            4: ("auxiliary", "stores", "datastore"),
            3: ("gmetad", "auxiliary", "remote", "stores", "datastore"),
            2: ("gmetad", "auxiliary", "collectors", "remote",
                "stores", "datastore"),
            1: ("gmetad", "auxiliary", "stores", "datastore",
                "collectors", "remote", "clouds"),
        }

        if _verbosity >= 1:
            for _module_name in _filters_by_level[min(_verbosity, 6)]:
                hdlr.addFilter(VerbosityFilter(_module_name))
            if _verbosity == 3:
                # Level 3 additionally suppresses entry/exit trace messages.
                hdlr.addFilter(MsgFilter("Exit point"))
                hdlr.addFilter(MsgFilter("Entry point"))
            logger.setLevel(DEBUG)
        elif _verbosity == 0:
            logger.setLevel(INFO)
        # Negative verbosity historically fell through every branch and left
        # the root logger level untouched; preserve that quirk.

        if quiet:
            logger.setLevel(ERROR)

        _status = 0

    except Exception as e:
        _status = 23
        _fmsg = str(e)
Example #33
0
            record.url = ''
            record.remote_addr = ''
            record.username = ''
            record.method = ''
            record.data = ''
        return super().format(record)


# Single formatter shared by both stream handlers: level tag, the request
# context fields (which PCFFormatter.format() above blanks to '' when they
# are absent), source location, then the message text.
formatter = PCFFormatter(
    '[%(levelname)s] ' +
    '%(remote_addr)s%(username)s%(method)s%(url)s%(data)s' +
    '[%(pathname)s:%(lineno)d] %(message)s')

# stdout handler: carries DEBUG..INFO only; the lambda filter rejects
# records above INFO so warnings/errors are not duplicated on stdout.
sysout_handler = StreamHandler(stdout)
sysout_handler.setFormatter(formatter)
sysout_handler.addFilter(lambda record: record.levelno <= INFO)
sysout_handler.setLevel(DEBUG)

# stderr handler: StreamHandler() with no argument defaults to sys.stderr;
# carries WARNING and above.
syserr_handler = StreamHandler()
syserr_handler.setFormatter(formatter)
syserr_handler.setLevel(WARNING)

pcf_logger = getLogger(__name__)
pcf_logger.addHandler(sysout_handler)
pcf_logger.addHandler(syserr_handler)
pcf_logger.setLevel(DEBUG)

# Adopt gunicorn's configured level (never below DEBUG) and route gunicorn's
# own error logger through the same handlers.
# NOTE(review): this setLevel overrides the one three lines up; presumably
# intentional, but confirm the deployment relies on gunicorn's level.
gunicorn_logger = getLogger('gunicorn.error')
pcf_logger.setLevel(max(gunicorn_logger.level, DEBUG))
gunicorn_logger.handlers = pcf_logger.handlers
Example #34
0
            if not found:
                return 0
            for f in self.disabled_funcs:
                if record.funcName.endswith(f):
                    return 0
        return 1


# Modules whose records are admitted, and function names that are rejected.
# (Assumes the semantics of the custom Filter whose filter() method appears
# above: records from non-enabled modules or disabled funcs return 0.)
enabled_mods = ["__main__", "console", "insulatedshell", "printhooks", "matplotlib_figure_Figure"]
disabled_funcs = ["router._send", "router._recv", "__init__"]
filt = Filter(enabled_modules=enabled_mods, disabled_funcs=disabled_funcs)
formatter = logging.Formatter(
    fmt="%(name)s (PID %(process)d ): %(levelname)s %(filename)s:%(lineno)d:%(funcName)s: %(message)s"
)
# sys.__stderr__ (the original stderr) is used deliberately so logging keeps
# working even if sys.stderr has been redirected or hooked elsewhere.
handler = StreamHandler(stream=sys.__stderr__)
handler.addFilter(filt)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)

# Install on the root logger so every module's records flow through the
# filtered stderr handler.
root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(logging.DEBUG)

# logging.basicConfig( stream = sys.__stderr__, format = "%(name)s (PID %(process)d ): %(levelname)s %(module)s.%(funcName)s: %(message)s" )
logger = logging.getLogger(__name__)


def msg(*args):
    ret = ""
    for a in args:
        try:
Example #35
0
        Filter.__init__(self, patterns)
        self.patterns = patterns

    def filter(self, record):
        '''
        Admit *record* when no patterns are configured, or when the record's
        logger name matches at least one configured pattern.
        '''

        # An empty pattern list means "log everything"; otherwise any()
        # short-circuits on the first matching pattern, exactly as the
        # original explicit loop did.
        return (not self.patterns
                or any(match(record.name, p) for p in self.patterns))


# Root logger: WARN+ records go to stdout, restricted to the logger-name
# patterns supplied by config.log_categories.
root = getLogger()
handler = StreamHandler(sys.stdout)
# NOTE(review): `filter` shadows the builtin of the same name; harmless at
# module scope here, but worth renaming if this module grows.
filter = PatternFilter(*config.log_categories)
handler.addFilter(filter)
handler.setFormatter(Formatter("%(asctime)s %(levelname)s %(message)s"))
root.addHandler(handler)
root.setLevel(WARN)

# Dedicated logger for test-run output.
log = getLogger("proton.test")

# Test outcome markers.
# NOTE(review): "******" looks like a redaction/scrape artifact -- the value
# symmetric with SKIP/FAIL would be "pass"; confirm against upstream source.
PASS = "******"
SKIP = "skip"
FAIL = "fail"


class Runner:
    '''
    Records the outcome of a test run: the raised exception, if any, and the
    name of the phase in which it was raised.
    '''

    def __init__(self):
        # Nothing captured until a phase actually fails.
        self.exception_phase_name = None
        self.exception = None
def get_script_logger(debug=True, quiet=False, config=None):
    """Create and return the 'script' logger wired to files and streams.

    Handlers installed, in order:

    * INFO records (filtered below WARNING, presumably, by
      ``LevelSpecificLogFilter`` -- TODO confirm against its definition) to
      a rotating ``script_info.log`` when *config* is given, and to stdout.
    * ERROR+ records to a rotating ``script_error.log`` when *config* is
      given, and to stderr.
    * A mail handler when *config* is given and *debug* is False.

    :param debug: when False (and *config* is given) attach the mail handler.
    :param quiet: route stream output to ``os.devnull`` instead of
        stdout/stderr.
    :param config: optional mapping supplying ``LOG_FILE_FORMAT``,
        ``LOG_FILE_DATEFMT`` and ``LOG_FOLDER``; ``None`` disables the file
        and mail handlers.
    :return: the configured ``logging.Logger`` named ``'script'``.

    NOTE(review): repeated calls accumulate duplicate handlers on the shared
    'script' logger -- treat this as call-once setup.
    """

    # The and/or idiom (rather than a conditional expression) is kept on
    # purpose: it also falls back to the default when the config *value*
    # itself is falsy, matching the original behavior.
    log_file_format = (config
        and config['LOG_FILE_FORMAT']
        or '%(levelname)s %(asctime)s %(message)s')

    log_file_datefmt = (config
        and config['LOG_FILE_DATEFMT']
        or '[%Y-%m-%d %H:%M:%S]')

    def _formatter():
        # One Formatter instance per handler, matching the original layout.
        return Formatter(log_file_format, log_file_datefmt)

    # Open devnull once and share it between both stream handlers, instead of
    # leaking one file handle per handler as the original
    # ``quiet and open(...) or ...`` expressions did.
    devnull = open(os.devnull, 'a') if quiet else None

    script_logger = logging.getLogger('script')
    script_logger.setLevel(logging.INFO)

    if config:
        # Rotating info log: INFO records, capped below WARNING by the filter.
        info_file_handler = RotatingFileHandler(
            path.join(config['LOG_FOLDER'], 'script_info.log'),
            maxBytes=100000, backupCount=5)
        info_file_handler.setLevel(logging.INFO)
        info_file_handler.setFormatter(_formatter())
        info_file_handler.addFilter(
            LevelSpecificLogFilter(logging.WARNING))
        script_logger.addHandler(info_file_handler)

        # Rotating error log: ERROR and above.
        error_file_handler = RotatingFileHandler(
            path.join(config['LOG_FOLDER'], 'script_error.log'),
            maxBytes=100000, backupCount=5)
        error_file_handler.setLevel(logging.ERROR)
        error_file_handler.setFormatter(_formatter())
        script_logger.addHandler(error_file_handler)

    # Stream twin of the info file handler (stdout, or devnull when quiet).
    info_stream_handler = StreamHandler(devnull if quiet else sys.stdout)
    info_stream_handler.setLevel(logging.INFO)
    info_stream_handler.setFormatter(_formatter())
    info_stream_handler.addFilter(
        LevelSpecificLogFilter(logging.WARNING))
    script_logger.addHandler(info_stream_handler)

    # Stream twin of the error file handler (stderr, or devnull when quiet).
    error_stream_handler = StreamHandler(devnull if quiet else sys.stderr)
    error_stream_handler.setLevel(logging.ERROR)
    error_stream_handler.setFormatter(_formatter())
    script_logger.addHandler(error_stream_handler)

    # Production (non-debug) deployments also get failures mailed out.
    if (not debug) and config:
        script_logger.addHandler(get_mail_handler(config))

    return script_logger