Example #1
    def __call__(self, logger, name, eventDict):
        _stuff, _why, eventDict = _extractStuffAndWhy(eventDict)
        if name == "err":
            eventDict["event"] = _why
            if isinstance(_stuff, Failure):
                eventDict["exception"] = _stuff.getTraceback(detail="verbose")
                _stuff.cleanFailure()
        else:
            eventDict["event"] = _why
        return ((ReprWrapper(
            GenericJSONRenderer.__call__(self, logger, name, eventDict)
        ),), {"_structlog": True})
Example #2
    def __call__(self, logger, name, eventDict):
        _stuff, _why, eventDict = _extractStuffAndWhy(eventDict)
        if name == 'err':
            eventDict['event'] = _why
            if isinstance(_stuff, Failure):
                eventDict['exception'] = _stuff.getTraceback(detail='verbose')
                _stuff.cleanFailure()
        else:
            eventDict['event'] = _why
        return ((GenericJSONRenderer.__call__(self, logger, name, eventDict),),
                {'_structlog': True})
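
Both examples override __call__ on structlog's Twisted-flavoured JSONRenderer so that "err" events carry the Failure traceback inside the JSON document. As a minimal sketch of how such a renderer is typically wired up (assuming Twisted is installed and structlog's structlog.twisted module is used):

import structlog
from structlog.twisted import JSONRenderer, LoggerFactory

structlog.configure(
    processors=[JSONRenderer()],
    logger_factory=LoggerFactory(),
)
log = structlog.get_logger()
log.msg("request_handled", path="/health")  # emitted via twisted.python.log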
Example #3
    def configure_logging(self):
        if self.app.testing:
            structlog.reset_defaults()

        disabled = [
            "docker.utils.config",
            "docker.auth",
            "docker.api.build",
            "docker.api.swarm",
            "docker.api.image",
            "werkzeug",
            "requests",
            "urllib3",
        ]

        for logger in disabled:
            log = logging.getLogger(logger)
            log.setLevel(logging.ERROR)
            log.disabled = True
        self.app.logger.disabled = True

        logging.basicConfig(
            level=self.log_level, stream=sys.stdout, format="%(message)s"
        )

        chain = [
            filter_by_level,
            add_log_level,
            add_logger_name,
            TimeStamper(fmt="iso"),
            StackInfoRenderer(),
            format_exc_info,
            JSONRenderer(indent=1, sort_keys=True),
        ]

        logger = logging.getLogger(__name__)

        if self.testing:
            chain = []
            logger = structlog.ReturnLogger()

        log = structlog.wrap_logger(
            logger,
            processors=chain,
            context_class=dict,
            wrapper_class=structlog.stdlib.BoundLogger,
            # cache_logger_on_first_use=True,
        )
        self.logger = log
        self.app.logger = self.logger
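
The testing branch above swaps in an empty processor chain and structlog.ReturnLogger, which hands the processed event back to the caller instead of writing it anywhere, keeping test output silent. A minimal sketch of that behaviour:

import structlog

quiet = structlog.wrap_logger(structlog.ReturnLogger(), processors=[])
quiet.msg("nothing_is_written", detail=42)  # returned, not printed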
Example #4
def setup_logging(args):
    global log

    logging.basicConfig(stream=sys.stdout, format="%(message)s")

    def add_timestamp(_, __, event_dict):
        event_dict["timestamp"] = datetime.datetime.utcnow()
        return event_dict

    log = structlog.wrap_logger(logging.getLogger(__name__),
                                processors=[
                                    add_timestamp,
                                    JSONRenderer(indent=1, sort_keys=True),
                                ])
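
The hand-rolled add_timestamp above stores a raw datetime object, which JSONRenderer has to serialize via its repr fallback. A self-contained variant that renders an ISO string instead (using structlog.PrintLogger here rather than the example's stdlib logger):

import datetime
import structlog
from structlog.processors import JSONRenderer

def add_timestamp(_, __, event_dict):
    event_dict["timestamp"] = datetime.datetime.utcnow().isoformat()
    return event_dict

log = structlog.wrap_logger(
    structlog.PrintLogger(),
    processors=[add_timestamp, JSONRenderer(indent=1, sort_keys=True)],
)
log.msg("ping")  # {"event": "ping", "timestamp": "..."}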
Example #5
def logger_initial_config(service_name=None,
                          log_level=None,
                          logger_format=None,
                          logger_date_format=None):

    if not logger_date_format:
        logger_date_format = os.getenv("LOGGING_DATE_FORMAT",
                                       "%Y-%m-%dT%H:%M%s")
    if not log_level:
        log_level = os.getenv("LOGGING_LEVEL")
    if not logger_format:
        logger_format = "%(message)s"
    try:
        indent = int(os.getenv("JSON_INDENT_LOGGING"))
    except (TypeError, ValueError):
        indent = None

    def add_service(logger, method_name, event_dict):  # pylint: disable=unused-argument
        """
        Add the service name to the event dict.
        """
        event_dict["service"] = service_name
        return event_dict

    def add_severity_level(logger, method_name, event_dict):
        """
        Add the log level to the event dict.
        """
        if method_name == "warn":
            # The stdlib has an alias
            method_name = "warning"

        event_dict["severity"] = method_name
        return event_dict

    logging.basicConfig(stream=sys.stdout,
                        level=log_level,
                        format=logger_format)
    configure(processors=[
        add_severity_level,
        add_log_level,
        filter_by_level,
        add_service,
        format_exc_info,
        TimeStamper(fmt=logger_date_format, utc=True, key="created_at"),
        JSONRenderer(indent=indent),
    ])
Example #6
def logger_initial_config(log_level='INFO',
                          logger_format="%(message)s",
                          logger_date_format="%Y-%m-%dT%H:%M:%S"):
    def add_service(logger, method_name, event_dict):  # pylint: disable=unused-argument
        """
        Add the service name to the event dict.
        This adds `service: 'ras-frontstage'` to all log lines.
        """
        event_dict['service'] = 'ras-frontstage'
        return event_dict

    logging.basicConfig(stream=sys.stdout,
                        level=log_level,
                        format=logger_format)
    auth_log = logging.getLogger(__name__)
    auth_log.addHandler(logging.NullHandler())
    auth_log.propagate = False

    def add_severity_level(logger, method_name, event_dict):
        """
        Add the log level to the event dict.
        """
        if method_name == "warn":
            # The stdlib has an alias
            method_name = "warning"

        event_dict["severity"] = method_name
        return event_dict

    def parse_exception(_, __, event_dict):
        exception = event_dict.get('exception')
        if exception:
            event_dict['exception'] = exception.replace("\"", "'").split("\n")
        return event_dict

    # set up the structlog renderer
    renderer_processor = JSONRenderer(indent=None)

    processors = [
        add_severity_level,
        add_log_level,
        filter_by_level,
        add_service,
        format_exc_info,
        TimeStamper(fmt=logger_date_format, utc=True, key='created_at'),
        parse_exception,
        renderer_processor,
    ]
    configure(context_class=wrap_dict(dict),
              logger_factory=LoggerFactory(),
              processors=processors,
              cache_logger_on_first_use=True)
Example #7
def logger_initial_config(service_name=None,
                          log_level=None,
                          logger_format=None,
                          logger_date_format=None):
    # pylint: skip-file
    if not logger_date_format:
        logger_date_format = os.getenv('LOGGING_DATE_FORMAT',
                                       "%Y-%m-%dT%H:%M:%S")
    if not log_level:
        log_level = os.getenv('LOGGING_LEVEL')
    if not logger_format:
        logger_format = "%(message)s"
    try:
        indent = int(os.getenv('JSON_INDENT_LOGGING'))
    except (TypeError, ValueError):
        indent = None

    def add_service(logger, method_name, event_dict):  # pylint: disable=unused-argument
        """
        Add the service name to the event dict.
        """
        event_dict['service'] = service_name
        return event_dict

    def zipkin_ids(logger, method_name, event_dict):
        event_dict['trace'] = ''
        event_dict['span'] = ''
        event_dict['parent'] = ''
        if not flask.has_app_context():
            return event_dict
        if '_zipkin_span' not in g:
            return event_dict
        event_dict['span'] = g._zipkin_span.zipkin_attrs.span_id
        event_dict['trace'] = g._zipkin_span.zipkin_attrs.trace_id
        event_dict['parent'] = g._zipkin_span.zipkin_attrs.parent_span_id
        return event_dict

    logging.basicConfig(stream=sys.stdout,
                        level=log_level,
                        format=logger_format)
    configure(processors=[
        zipkin_ids, add_log_level, filter_by_level, add_service,
        format_exc_info,
        TimeStamper(fmt=logger_date_format, utc=True, key="created_at"),
        JSONRenderer(indent=indent)
    ])
Example #8
    def __init__(self, level=logging.INFO, **kwargs):
        structlog.configure(processors=[
            structlog.processors.StackInfoRenderer(),
            structlog.dev.set_exc_info,
            structlog.processors.format_exc_info,
            structlog.dev.ConsoleRenderer(),
        ])
        self._logger = wrap_logger(
            structlog.get_logger(),
            processors=[TimeStamper(fmt="iso"),
                        JSONRenderer(sort_keys=True)],
        )
        self.level = level
        self.bind(**kwargs)
        self.events = []
        self.persist = False
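
Note that the processors passed to wrap_logger replace the globally configured chain for that particular logger, so the ConsoleRenderer configured above never runs for self._logger; only the TimeStamper/JSONRenderer pair does. A quick standalone illustration:

import structlog
from structlog.processors import JSONRenderer, TimeStamper

structlog.configure(processors=[structlog.dev.ConsoleRenderer()])
log = structlog.wrap_logger(
    structlog.PrintLogger(),
    processors=[TimeStamper(fmt="iso"), JSONRenderer(sort_keys=True)],
)
log.msg("hello")  # rendered as JSON, not by ConsoleRenderer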
Example #9
def logger_initial_config(log_level=None):
    """Configures the logger"""
    service_name = "ras-rm-auth-service"
    logger_date_format = os.getenv("LOGGING_DATE_FORMAT", "%Y-%m-%dT%H:%M%s")
    logger_format = "%(message)s"

    if not log_level:
        log_level = os.getenv("SMS_LOG_LEVEL", "INFO")

    try:
        indent = int(os.getenv("JSON_INDENT_LOGGING"))
    except (TypeError, ValueError):
        indent = None

    def add_service(logger, method_name, event_dict):  # pylint: disable=unused-argument
        """
        Add the service name to the event dict.
        """
        event_dict["service"] = service_name
        return event_dict

    def add_severity_level(logger, method_name, event_dict):  # pylint: disable=unused-argument
        """
        Add the log level to the event dict.
        """
        if method_name == "warn":
            # The stdlib has an alias
            method_name = "warning"

        event_dict["severity"] = method_name
        return event_dict

    logging.basicConfig(stream=sys.stdout,
                        level=log_level,
                        format=logger_format)
    configure(processors=[
        add_severity_level,
        add_log_level,
        filter_by_level,
        add_service,
        TimeStamper(fmt=logger_date_format, utc=True, key="created_at"),
        JSONRenderer(indent=indent),
    ])

    oauth_log = logging.getLogger("requests_oauthlib")
    oauth_log.addHandler(logging.NullHandler())
    oauth_log.propagate = False
Example #10
def configure_logging():
    log_level = logging.INFO
    debug = os.getenv("FLASK_ENV") == "development"
    if debug:
        log_level = logging.DEBUG

    log_handler = logging.StreamHandler(sys.stdout)
    log_handler.setLevel(log_level)
    log_handler.addFilter(lambda record: record.levelno <= logging.WARNING)

    error_log_handler = logging.StreamHandler(sys.stderr)
    error_log_handler.setLevel(logging.ERROR)

    logging.basicConfig(level=log_level,
                        format="%(message)s",
                        handlers=[error_log_handler, log_handler])

    # Set werkzeug logging level
    werkzeug_logger = logging.getLogger("werkzeug")
    werkzeug_logger.setLevel(level=log_level)

    def parse_exception(_, __, event_dict):
        if debug:
            return event_dict
        exception = event_dict.get("exception")
        if exception:
            event_dict["exception"] = exception.replace('"', "'").split("\n")
        return event_dict

    # set up the structlog renderer
    renderer_processor = (ConsoleRenderer() if debug
                          else JSONRenderer(serializer=json_dumps))
    processors = [
        add_log_level,
        TimeStamper(key="created", fmt="iso"),
        add_service,
        format_exc_info,
        parse_exception,
        renderer_processor,
    ]

    configure(
        context_class=wrap_dict(dict),
        logger_factory=LoggerFactory(),
        processors=processors,
        cache_logger_on_first_use=True,
    )
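
The lambda filter on the stdout handler drops every record above WARNING, so errors appear on stderr only and are never duplicated across both streams. The same split in isolation:

import logging
import sys

out_handler = logging.StreamHandler(sys.stdout)
out_handler.addFilter(lambda record: record.levelno <= logging.WARNING)

err_handler = logging.StreamHandler(sys.stderr)
err_handler.setLevel(logging.ERROR)

logging.basicConfig(level=logging.INFO, format="%(message)s",
                    handlers=[err_handler, out_handler])
logging.info("stdout only")
logging.error("stderr only")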
Example #11
def logger_initial_config(
    service_name=None, log_level=None, logger_format=None, logger_date_format=None
):

    if not logger_date_format:
        logger_date_format = os.getenv("LOGGING_DATE_FORMAT", "%Y-%m-%dT%H:%M%s")
    if not log_level:
        log_level = os.getenv("SMS_LOG_LEVEL", "DEBUG")
    if not logger_format:
        logger_format = "%(message)s"
    if not service_name:
        service_name = os.getenv("NAME", "census-rm-case-service")
    try:
        indent = int(os.getenv("JSON_INDENT_LOGGING"))
    except (TypeError, ValueError):
        indent = None

    def add_service(_1, _2, event_dict):
        """
        Add the service name to the event dict.
        """
        event_dict["service"] = service_name
        return event_dict

    def add_thread(_1, _2, event_dict):
        """Add the thread name to the event dict."""
        event_dict["data"] = {
            "thread_name": threading.current_thread().name
        }
        return event_dict

    logging.basicConfig(stream=sys.stdout, level=log_level, format=logger_format)

    configure(
        processors=[
            add_log_level,
            filter_by_level,
            add_service,
            add_thread,
            TimeStamper(fmt=logger_date_format, utc=True, key="created_at"),
            JSONRenderer(indent=indent),
        ]
    )
Example #12
    def __init__(self, output=None, source=None, namespace=None, service=None):
        log = wrap_logger(PrintLogger(output),
                          processors=[
                              add_timestamp,
                              order_fields,
                              JSONRenderer(),
                              render_wrapp_log,
                          ])

        if not namespace:
            try:
                # inspect.currentframe() takes no arguments; use
                # sys._getframe(1) to reach the caller's frame instead.
                namespace = sys._getframe(1).f_globals['__name__']
            except Exception:
                namespace = 'unknown'

        service = service or os.environ.get('SERVICE_NAME')
        self._log = log.bind(namespace=namespace, service=service)
Example #13
def basic_setup():
    structlog.configure(
        processors=[
            filter_by_level,  # for performance reasons
            JSONRenderer(),
        ],
        context_class=dict,
        logger_factory=LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )

    global _logger
    h = lg.StreamHandler()
    h.setFormatter(
        StructFormatter('%(asctime)s [%(process)d]: %(message)s',
                        datefmt='%d %H:%M:%S'))

    _logger = Logger()
    _logger.add_handler(h)
Example #14
def setup(sentry: str, debug: bool = False) -> None:
    processors = [
        filter_by_level,
        add_log_level,
        add_logger_name,
        PositionalArgumentsFormatter(),
        StackInfoRenderer(),
        format_exc_info,
        UnicodeDecoder(),
    ]

    configure(
        logger_factory=LoggerFactory(),
        wrapper_class=BoundLogger,
        cache_logger_on_first_use=True,
    )

    if debug:
        styles = ConsoleRenderer.get_default_level_styles()
        styles["debug"] = DIM
        processors += [
            TimeStamper(fmt="%Y-%m-%d %H:%M:%S"),
            ConsoleRenderer(level_styles=styles),
        ]
    else:
        handler = StreamHandler()
        formatter = CustomJsonFormatter("%(levelname)s %(name)s %(message)s")
        handler.setFormatter(formatter)
        for module in ("tornado", "tortoise", "aiomysql"):
            getLogger(module).addHandler(handler)

        sentry_logging = LoggingIntegration(level=INFO, event_level=ERROR)
        init(sentry, integrations=[sentry_logging])
        processors.append(JSONRenderer())

    handler = StreamHandler()
    configure(processors=processors)
    log = get_logger("api")
    log.addHandler(handler)
    log.propagate = False
    log.setLevel(DEBUG if debug else INFO)
Example #15
def configure_logging():
    # set up some sane logging, as opposed to what flask does by default
    log_format = "%(message)s"
    levels = {
        'CRITICAL': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
    }
    handler = logging.StreamHandler()
    logging.basicConfig(level=levels[EQ_LOG_LEVEL],
                        format=log_format,
                        handlers=[handler])

    # turn boto logging to critical as it logs far too much and it's only used
    # for cloudwatch logging
    logging.getLogger("botocore").setLevel(logging.ERROR)
    if EQ_CLOUDWATCH_LOGGING:
        _setup_cloud_watch_logging()

    # Set werkzeug logging level
    werkzeug_logger = logging.getLogger('werkzeug')
    werkzeug_logger.setLevel(level=levels[EQ_WERKZEUG_LOG_LEVEL])

    # setup file logging
    rotating_log_file = RotatingFileHandler(filename="eq.log",
                                            maxBytes=1048576,
                                            backupCount=10)
    logging.getLogger().addHandler(rotating_log_file)
    renderer_processor = (ConsoleRenderer() if EQ_DEVELOPER_LOGGING
                          else JSONRenderer())
    processors = [
        add_log_level,
        TimeStamper(key='created', fmt='iso'),
        add_service,
        format_exc_info,
        renderer_processor,
    ]
    configure(context_class=wrap_dict(dict),
              logger_factory=LoggerFactory(),
              processors=processors,
              cache_logger_on_first_use=True)
Example #16
def logging_setup(log_level='INFO'):
    """
    Set up standard structlog logger
    Args:
        log_level: string defining the logging level, e.g. 'INFO' or 'WARNING'

    Returns:
        logger: instantiated logger
    """

    # Logging setup; the log level is imported from config.json.

    logging.basicConfig(stream=sys.stdout,
                        format="%(message)s",
                        level=log_level)
    logger = wrap_logger(logging.getLogger(__name__),
                         processors=[
                             filter_by_level, add_timestamp,
                             JSONRenderer(indent=1, sort_keys=True)
                         ])

    return logger
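
Typical usage of the helper (the event name and keyword are illustrative):

logger = logging_setup(log_level='DEBUG')
logger.info('service_started', version='1.2.3')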
Example #17
def configure_logging():
    # set up some sane logging, as opposed to what flask does by default
    log_format = "%(message)s"
    levels = {
        'CRITICAL': logging.CRITICAL,
        'ERROR': logging.ERROR,
        'WARNING': logging.WARNING,
        'INFO': logging.INFO,
        'DEBUG': logging.DEBUG,
    }
    handler = logging.StreamHandler()
    logging.basicConfig(level=levels[EQ_LOG_LEVEL],
                        format=log_format,
                        handlers=[handler])

    # Set werkzeug logging level
    werkzeug_logger = logging.getLogger('werkzeug')
    werkzeug_logger.setLevel(level=levels[EQ_WERKZEUG_LOG_LEVEL])

    def parse_exception(_, __, event_dict):
        if EQ_DEVELOPER_LOGGING:
            return event_dict
        exception = event_dict.get('exception')
        if exception:
            event_dict['exception'] = exception.replace("\"", "'").split("\n")
        return event_dict

    # set up the structlog renderer
    renderer_processor = (ConsoleRenderer() if EQ_DEVELOPER_LOGGING
                          else JSONRenderer())
    processors = [
        add_log_level,
        TimeStamper(key='created', fmt='iso'),
        add_service,
        format_exc_info,
        parse_exception,
        renderer_processor,
    ]
    configure(context_class=wrap_dict(dict),
              logger_factory=LoggerFactory(),
              processors=processors,
              cache_logger_on_first_use=True)
Example #18
    def test_other_handlers_get_original_record(self, configure_for_pf,
                                                capsys):
        """
        Logging handlers that come after the handler with ProcessorFormatter
        should receive original, unmodified record.
        """
        configure_logging(None)

        handler1 = logging.StreamHandler()
        handler1.setFormatter(ProcessorFormatter(JSONRenderer()))
        handler2 = type("", (), {})()
        handler2.handle = call_recorder(lambda record: None)
        handler2.level = logging.INFO
        logger = logging.getLogger()
        logger.addHandler(handler1)
        logger.addHandler(handler2)

        logger.info("meh")

        assert 1 == len(handler2.handle.calls)
        handler2_record = handler2.handle.calls[0].args[0]
        assert "meh" == handler2_record.msg
Example #19
def _configure_logger(level='INFO', indent=None):
    logging.basicConfig(stream=sys.stdout, level=level, format='%(message)s')

    try:
        indent = int(os.getenv('LOGGING_JSON_INDENT') or indent)
    except (TypeError, ValueError):
        indent = None

    def add_service(_, __, event_dict):
        """
        Add the service name to the event dict.
        """
        event_dict['service'] = os.getenv('NAME', 'sdc-responses-dashboard')
        return event_dict

    renderer_processor = JSONRenderer(indent=indent)
    processors = [
        add_log_level,
        filter_by_level,
        add_service,
        format_exc_info,
        add_logger_name,
        TimeStamper(fmt='%Y-%m-%dT%H:%M:%S', utc=True, key='created_at'),
        renderer_processor,
    ]
    structlog.configure(context_class=wrap_dict(dict),
                        logger_factory=LoggerFactory(),
                        processors=processors,
                        cache_logger_on_first_use=True)
Example #20
def logger_initial_config():
    def add_service(_1, _2, event_dict):
        """
        Add the service name to the event dict.
        """
        event_dict["service"] = Config.NAME
        return event_dict

    def add_log_severity(_1, method_name, event_dict):
        """
        Add the logging level to the event dict as 'severity'
        """
        if method_name == "warn":
            # The stdlib has an alias, we always want 'warning' in full
            method_name = "warning"

        if method_name == "exception":
            # exception level is not as universal, use 'error' instead
            method_name = "error"

        event_dict["severity"] = method_name
        return event_dict

    logging.basicConfig(stream=sys.stdout, level=Config.LOG_LEVEL, format="%(message)s")

    configure(
        processors=[
            add_log_level,
            add_log_severity,
            filter_by_level,
            add_service,
            TimeStamper(fmt=Config.LOG_DATE_FORMAT, utc=True, key="created_at"),
            JSONRenderer(),
        ]
    )

    logging.getLogger('pika').setLevel(Config.LOG_LEVEL_PIKA)
    logging.getLogger('paramiko').setLevel(Config.LOG_LEVEL_PARAMIKO)
Example #21
def logger_initial_config():
    logger_date_format = os.getenv('LOGGING_DATE_FORMAT', "%Y-%m-%dT%H:%M:%S")
    logger_format = "%(message)s"
    log_level = os.getenv('LOGGING_LEVEL', 'DEBUG')
    service_name = 'ras-secure-message'
    try:
        indent = int(os.getenv('JSON_INDENT_LOGGING'))
    except (TypeError, ValueError):
        indent = None

    def add_service(_1, _2, event_dict):
        """
        Add the service name to the event dict.
        """
        event_dict['service'] = service_name
        return event_dict

    def add_severity_level(logger, method_name, event_dict):  # pylint: disable=unused-argument
        """
        Add the log level to the event dict.
        """
        if method_name == "warn":
            # The stdlib has an alias
            method_name = "warning"

        event_dict["severity"] = method_name
        return event_dict

    logging.basicConfig(stream=sys.stdout,
                        level=log_level,
                        format=logger_format)

    configure(processors=[
        add_severity_level, add_log_level, filter_by_level, add_service,
        TimeStamper(fmt=logger_date_format, utc=True, key="created_at"),
        JSONRenderer(indent=indent)
    ])
Example #22
    def test_formatter_unsets_exc_info(self, configure_for_pf, capsys, keep):
        """
        Stack traces doesn't get printed outside of the json document when
        keep_exc_info are set to False but preserved if set to True.
        """
        configure_logging(None)
        logger = logging.getLogger()

        def format_exc_info_fake(logger, name, event_dict):
            event_dict = collections.OrderedDict(event_dict)
            del event_dict["exc_info"]
            event_dict["exception"] = "Exception!"
            return event_dict

        formatter = ProcessorFormatter(
            processor=JSONRenderer(),
            keep_stack_info=keep,
            keep_exc_info=keep,
            foreign_pre_chain=[format_exc_info_fake],
        )
        logger.handlers[0].setFormatter(formatter)

        try:
            raise RuntimeError("oh noo")
        except Exception:
            logging.getLogger().exception("seen worse")

        out, err = capsys.readouterr()

        assert "" == out

        if keep is False:
            assert (
                '{"event": "seen worse", "exception": "Exception!"}\n'
            ) == err
        else:
            assert "Traceback (most recent call last):" in err
Example #23
    async def test_integration(self, capsys):
        """
        Configure and log an actual entry.
        """

        configure(
            processors=[add_log_level, JSONRenderer()],
            logger_factory=PrintLogger,
            wrapper_class=AsyncBoundLogger,
            cache_logger_on_first_use=True,
        )

        logger = get_logger()

        await logger.bind(foo="bar").info("baz", x="42")

        assert {
            "foo": "bar",
            "x": "42",
            "event": "baz",
            "level": "info",
        } == json.loads(capsys.readouterr().out)

        reset_defaults()
Example #24
def logger_initial_config(service_name=None,
                          log_level=None,
                          logger_format=None,
                          logger_date_format=None):

    if not logger_date_format:
        logger_date_format = os.getenv('LOGGING_DATE_FORMAT',
                                       "%Y-%m-%dT%H:%M:%S")
    if not log_level:
        log_level = os.getenv('LOGGING_LEVEL')
    if not logger_format:
        logger_format = "%(message)s"
    if not service_name:
        service_name = os.getenv('NAME')
    try:
        indent = int(os.getenv('JSON_INDENT_LOGGING'))
    except (TypeError, ValueError):
        indent = None

    def add_service(logger, method_name, event_dict):  # pylint: disable=unused-argument
        """
        Add the service name to the event dict.
        """
        event_dict['service'] = service_name
        return event_dict

    logging.basicConfig(stream=sys.stdout,
                        level=log_level,
                        format=logger_format)
    configure(processors=[
        add_log_level, filter_by_level, add_service,
        TimeStamper(fmt=logger_date_format, utc=True, key="created_at"),
        JSONRenderer(indent=indent)
    ])
Example #25
    def test_formatter_unsets_stack_info(self, configure_for_pf, capsys, keep):
        """
        Stack traces doesn't get printed outside of the json document when
        keep_stack_info are set to False but preserved if set to True.
        """
        configure_logging(None)
        logger = logging.getLogger()

        formatter = ProcessorFormatter(
            processor=JSONRenderer(),
            keep_stack_info=keep,
            keep_exc_info=keep,
            foreign_pre_chain=[],
        )
        logger.handlers[0].setFormatter(formatter)

        logging.getLogger().warn("have a stack trace", stack_info=True)

        out, err = capsys.readouterr()
        assert "" == out
        if keep is False:
            assert 1 == err.count("Stack (most recent call last):")
        else:
            assert 2 == err.count("Stack (most recent call last):")
Example #26
def get_logger(name="blackopt", logdir=LOGDIR, **initial_values):
    path = os.path.join(get_rootdir(), logdir, name)

    os.makedirs(os.path.dirname(path), exist_ok=True)

    def rotating_file_handler(log_path):
        return RotatingFileHandler(
            log_path,
            mode="a",
            maxBytes=BACKUP_COUNT * MAX_FILE_SIZE,
            backupCount=BACKUP_COUNT,
        )

    file_hdlr = rotating_file_handler(path)
    if os.path.exists(path):
        try:
            if os.stat(path).st_size != 0:
                file_hdlr.doRollover()
        except FileNotFoundError:
            # Rollover failed; overwrite the old log rather than
            # terminating Moonfish.
            file_hdlr = rotating_file_handler(path)

    logger = logging.getLogger(name)
    logger.addHandler(file_hdlr)
    logger.setLevel(logging.INFO)

    log = wrap_logger(
        logger,
        processors=[
            # filter_by_level,
            TimeStamper(fmt="iso"),
            JSONRenderer(sort_keys=True),
        ],
        **initial_values)

    return log
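
Usage might look like this (the name and bound values are hypothetical):

log = get_logger("experiment", run_id=7)
log.info("step_finished", loss=0.25)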
Example #27
import logging
import traceback

from structlog import wrap_logger
from structlog.processors import JSONRenderer

import luigi
from luigi.local_target import LocalFileSystem

from wagl.acquisition import acquisitions
from wagl.singlefile_workflow import DataStandardisation
from tesp.package import package, PATTERN2, ARD
from tesp.constants import ProductPackage

from eugl.fmask import fmask

ERROR_LOGGER = wrap_logger(logging.getLogger('errors'),
                           processors=[JSONRenderer(indent=1, sort_keys=True)])
STATUS_LOGGER = wrap_logger(
    logging.getLogger('status'),
    processors=[JSONRenderer(indent=1, sort_keys=True)])
INTERFACE_LOGGER = logging.getLogger('luigi-interface')


@luigi.Task.event_handler(luigi.Event.FAILURE)
def on_failure(task, exception):
    """Capture any Task Failure here."""
    ERROR_LOGGER.error(task=task.get_task_family(),
                       params=task.to_str_params(),
                       level1=getattr(task, 'level1', ''),
                       exception=exception.__str__(),
                       traceback=traceback.format_exc().splitlines())
Example #28
def add_fields(logger, level, event_dict):
    """Add timestamp, level, session and request fields to the event dict."""
    event_dict['timestamp'] = datetime.now(pytz.utc).isoformat()
    event_dict['level'] = level

    if session:
        event_dict['session_id'] = session.get('session_id')

    if request:
        try:
            event_dict['ip_address'] = request.headers[
                'X-Forwarded-For'].split(',')[0].strip()
        except Exception:
            event_dict['ip_address'] = 'unknown'

    return event_dict


# Add a handler to write log messages to a file
if app.config.get('LOG_FILE'):
    file_handler = RotatingFileHandler(filename=app.config['LOG_FILENAME'],
                                       maxBytes=app.config['LOG_MAXBYTES'],
                                       backupCount=app.config['LOG_BACKUPS'],
                                       mode='a',
                                       encoding='utf-8')
    file_handler.setLevel(logging.DEBUG)
    app.logger.addHandler(file_handler)

# Wrap the application logger with structlog to format the output
logger = wrap_logger(app.logger,
                     processors=[add_fields,
                                 JSONRenderer(indent=None)])
Example #29
import structlog
from structlog.processors import (format_exc_info, JSONRenderer,
                                  KeyValueRenderer)
from tzlocal import get_localzone

from . import DEBUG
from .middleware import CORS, TrailingSlash
from .storage import Store

try:
    from logging.config import dictConfig
except ImportError:
    from logutils.dictconfig import dictConfig

if DEBUG:
    processors = (format_exc_info, KeyValueRenderer())
else:
    processors = (format_exc_info, JSONRenderer())

logger = structlog.get_logger()

default_conf = {
    'search_index':
    '/srv/graphite/index',
    'finders': [
        'graphite_api.finders.whisper.WhisperFinder',
    ],
    'functions': [
        'graphite_api.functions.SeriesFunctions',
        'graphite_api.functions.PieFunctions',
    ],
    'whisper': {
        'directories': [
Example #30
    def handleLogRecord(self, record):
        logger = wrap_logger(
            logging.getLogger(__name__),
            processors=[add_timestamp,
                        JSONRenderer(sort_keys=True)])
        logger.info(record)
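
Wrapping the logger anew on every record is avoidable overhead; hoisting the wrapped logger out of the handler does the same job once, as a sketch:

_wrapped = wrap_logger(logging.getLogger(__name__),
                       processors=[add_timestamp,
                                   JSONRenderer(sort_keys=True)])

def handleLogRecord(self, record):
    _wrapped.info(record)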
Example #31
    def __init__(self, message, case_number):
        # Logging setup for JSON output only
        self.message = message
        self.timestamp = int(time.time())
        self.datetime = datetime.utcnow().isoformat()
        self.desc = "AWS_IR Action"

        configure(processors=[JSONRenderer(indent=1, sort_keys=True)],
                  context_class=structlog.threadlocal.wrap_dict(dict),
                  logger_factory=structlog.stdlib.LoggerFactory())

        self.log = get_logger('aws_ir.json')
        event = ReturnLogger().msg('message',
                                   message=self.message,
                                   timestamp=self.timestamp,
                                   datetime=self.datetime,
                                   desc=self.desc)

        def generate_log_filename(case_number):
            filename = ("/tmp/{case_number}-aws_ir.log").format(
                case_number=case_number)
            return filename

        def log_file_exists():
            return os.path.isfile(self.logfile)

        self.logfile = generate_log_filename(case_number)

        def file_len(fname):
            i = 0
            with open(fname) as f:
                for i, _ in enumerate(f):
                    pass
            return i + 1

        def log_file_contains_events(logfile):
            return file_len(logfile) >= 2

        def stub_ts_file(logfile):
            # Start a fresh log file with an opening bracket.
            with open(logfile, "w") as f:
                f.write("[ \n")

        def write_log_event(event):
            logfile = self.logfile
            if log_file_exists() and log_file_contains_events(logfile):
                # Drop the closing bracket, append the event, re-close.
                with open(logfile) as f:
                    lines = f.readlines()
                with open(logfile, 'w') as w:
                    w.writelines(lines[:-1])
                    w.write("\t" + str(event) + ",")
                    w.write("\n")
                    w.write("]")
            else:
                stub_ts_file(logfile)
                with open(logfile, 'a') as w:
                    w.write("\t" + str(event) + ",")
                    w.write("\n")
                    w.write("]")

        write_log_event(event[1])
Example #32
import datetime
import logging
import os
import pathlib
import sys

import structlog
from tqdm import tqdm

from audioutils.parallel import do_parallel_with_pbar
from audioutils.io import get_and_make_artist_and_album_dirs
from audioutils.conversion import convert_album

logfile_name = 'manage_player_log_{}.txt'.format(datetime.datetime.now())
logfile_path = os.path.join(pathlib.Path.home(), '.audioutils', 'logs',
                            logfile_name)
logging.basicConfig(filename=logfile_path, level=logging.DEBUG)

from structlog.stdlib import LoggerFactory
from structlog.processors import (JSONRenderer, TimeStamper)

structlog.configure(logger_factory=LoggerFactory(),
                    processors=[TimeStamper(), JSONRenderer()])

LOGGER = structlog.get_logger()


def convert_and_save_album(source, target, target_format, bitrate,
                           copy_non_sound, num_processes):

    print('Inside convert_and_save_album', file=sys.stderr)
    (artist, album) = source.split('/')[-2:]
    (artist_dir,
     album_dir) = get_and_make_artist_and_album_dirs(artist, album, target)
    print('Calling convert_album', file=sys.stderr)
    convert_album(album_dir, target_format, source, bitrate, copy_non_sound,
                  num_processes)