Example #1
def init_app(app):

    # Output logs on stderr
    fmt = '{record.channel}: {record.message}'
    stderr = logbook.StderrHandler(format_string=fmt)
    stderr.push_application()

    def logbook_factory(*args, **kwargs):
        # Logger given to structlog
        level = logbook.DEBUG if app.debug else logbook.INFO
        return logbook.Logger(level=level, *args, **kwargs)

    # Set up structlog on top of logbook
    processors = [
        # structlog.stdlib.filter_by_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.StackInfoRenderer(),
        # structlog.processors.format_exc_info,
    ]

    # send to mozdef before formatting into a string
    mozdef = app.config.get('MOZDEF_TARGET', None)
    if mozdef:
        processors.append(mozdef)

    processors.append(UnstructuredRenderer())

    structlog.configure(
        context_class=structlog.threadlocal.wrap_dict(dict),
        processors=processors,
        logger_factory=logbook_factory,
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
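
A note on the example above: UnstructuredRenderer is project-specific and its body is not shown here. A minimal sketch of what such a final processor could look like (an assumption, not the project's actual code):

import structlog

class UnstructuredRenderer(structlog.processors.KeyValueRenderer):
    # Final processor: collapse the event dict into a plain message string
    # so the logbook backend can treat it as an ordinary log record.
    def __call__(self, logger, name, event_dict):
        event = event_dict.pop('event', '')
        if event_dict:
            rendered = super().__call__(logger, name, event_dict)
            return '%s [%s]' % (event, rendered)
        return event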
Example #2
def setup_logging(config):
    """Setup structured logging, and emit `request.summary` event on each
    request, as recommanded by Mozilla Services standard:

    * https://mana.mozilla.org/wiki/display/CLOUDSERVICES/Logging+Standard
    * http://12factor.net/logs
    """
    settings = config.get_settings()

    renderer_klass = config.maybe_dotted(settings['cliquet.logging_renderer'])
    renderer = renderer_klass(settings)

    structlog.configure(
        # Share the logger context by thread.
        context_class=structlog.threadlocal.wrap_dict(dict),
        # Integrate with Pyramid logging facilities.
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        # Setup logger output format.
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.processors.format_exc_info,
            renderer,
        ])

    def on_new_request(event):
        request = event.request
        # Save the time the request was received by the server.
        event.request._received_at = utils.msec_time()

        # New logger context, with info for the request summary logger.
        logger.new(agent=request.headers.get('User-Agent'),
                   path=event.request.path,
                   method=request.method,
                   querystring=dict(request.GET),
                   uid=request.authenticated_userid,
                   lang=request.headers.get('Accept-Language'),
                   auth_type=getattr(request, 'auth_type', None),
                   errno=None)

    config.add_subscriber(on_new_request, NewRequest)

    def on_new_response(event):
        response = event.response
        request = event.request

        # Compute the request processing time in msec (1 if the start time is unknown)
        current = utils.msec_time()
        duration = current - getattr(request, '_received_at', current - 1)
        isotimestamp = datetime.fromtimestamp(current/1000).isoformat()

        # Bind info for the request summary logger.
        logger.bind(time=isotimestamp,
                    code=response.status_code,
                    t=duration)

        # Output the application request summary.
        logger.info('request.summary')

    config.add_subscriber(on_new_response, NewResponse)
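
Outside Pyramid, the same new()/bind()/info() sequence can be exercised directly. A minimal, self-contained sketch (PrintLoggerFactory stands in for the stdlib wiring; the field names mirror the handlers above):

import structlog

structlog.configure(
    context_class=structlog.threadlocal.wrap_dict(dict),
    logger_factory=structlog.PrintLoggerFactory(),
    processors=[structlog.processors.KeyValueRenderer(key_order=['event'])],
)
logger = structlog.get_logger()
logger.new(path='/v1/records', method='GET')  # reset the context at request start
logger.bind(code=200, t=12)                   # bind response info later
logger.info('request.summary')                # event='request.summary' path='/v1/records' ...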
Example #3
def run():
    logging.basicConfig(format=settings.LOGGING_FORMAT,
                        datefmt="%Y-%m-%dT%H:%M:%S",
                        level=settings.LOGGING_LEVEL)

    logging.getLogger('sdc.rabbit').setLevel(logging.INFO)

    # These structlog settings allow bound fields to persist between classes
    structlog.configure(logger_factory=LoggerFactory(), context_class=wrap_dict(dict))
    logger = structlog.getLogger()

    logger.info('Starting SDX Downstream', version=__version__)

    message_processor = MessageProcessor()

    quarantine_publisher = QueuePublisher(
        urls=settings.RABBIT_URLS,
        queue=settings.RABBIT_QUARANTINE_QUEUE
    )

    message_consumer = MessageConsumer(
        durable_queue=True,
        exchange=settings.RABBIT_EXCHANGE,
        exchange_type='topic',
        rabbit_queue=settings.RABBIT_QUEUE,
        rabbit_urls=settings.RABBIT_URLS,
        quarantine_publisher=quarantine_publisher,
        process=message_processor.process
    )

    try:
        message_consumer.run()
    except KeyboardInterrupt:
        message_consumer.stop()
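
The comment about bound fields persisting between classes refers to structlog's thread-local context: with wrap_dict, fields bound on one logger are visible to every logger created afterwards in the same thread. A small illustration, independent of the SDX code:

import logging
import structlog
from structlog.stdlib import LoggerFactory
from structlog.threadlocal import wrap_dict

logging.basicConfig(format='%(message)s', level=logging.INFO)
structlog.configure(logger_factory=LoggerFactory(), context_class=wrap_dict(dict))

structlog.get_logger().bind(tx_id='abc-123')
# A logger created later, e.g. in another class, still carries tx_id:
structlog.get_logger().info('processing message')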
Example #4
def configure(config_string='', log_json=False):
    # configure structlog
    processors = [
        log_listeners,  # before level filtering
        structlog.stdlib.filter_by_level,
        structlog.processors.StackInfoRenderer()
    ]
    if log_json:
        processors.append(JSONRenderer(sort_keys=True))
    else:
        processors.extend([
            structlog.processors.ExceptionPrettyPrinter(file=None),
            KeyValueRenderer(sort_keys=True, key_order=None)
        ])
    structlog.configure(
        processors=processors,
        context_class=dict,
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=BoundLoggerTrace,
        # later calls to configure() don't have any effect on already-cached loggers
        cache_logger_on_first_use=True,
    )
    # configure standard logging
    if log_json:
        format = JSON_FORMAT
    else:
        format = PRINT_FORMAT
    setup_stdlib_logging(DEFAULT_LOGLEVEL, format)
    if config_string:
        configure_loglevels(config_string)
Example #5
File: runner.py Project: iffy/ppo
def run():
    args = ap.parse_args()

    if args.ls:
        print('\n'.join(parser.listPluginNames()))
        sys.exit(0)

    if args.verbose:
        structlog.configure(logger_factory=structlog.PrintLoggerFactory(sys.stderr))

    # NOTE: this assumes Python 2; on Python 3 it would need sys.stdin.buffer.read()
    infile = BytesIO(sys.stdin.read())
    try:
        parsed = parse(infile, exclude=args.exclude)
    except NoWillingParsers:
        if args.strict:
            raise
        else:
            infile.seek(0)
            sys.stdout.write(infile.read())
            sys.exit(0)

    if args.format == 'yaml':
        print(yaml.safe_dump(parsed, default_flow_style=False))
    elif args.format == 'json':
        print(json.dumps(parsed))
    elif args.format == 'grep':
        from ppo.output import giganticGrep
        giganticGrep(parsed, sys.stdout)
Example #6
def configure_logging(config):
    structlog.configure(processors=processors,
                        logger_factory=structlog.stdlib.LoggerFactory(),
                        wrapper_class=structlog.stdlib.BoundLogger,
                        cache_logger_on_first_use=True)
    config.setdefault('logging', {})
    config['logging'].setdefault('version', 1)
    config['logging'].setdefault('handlers', {})
    config['logging'].setdefault('formatters', {})
    config['logging'].setdefault('loggers', {})
    config['logging']['handlers'].setdefault('raw', {
        'level': 'DEBUG',
        'class': 'logging.StreamHandler',
        'formatter': 'raw',
    })
    config['logging']['loggers'].setdefault('root', {
        'handlers': ['raw'],
        'level': 'DEBUG',
        'propagate': False,
    })
    config['logging']['loggers'].setdefault('graphite_api', {
        'handlers': ['raw'],
        'level': 'DEBUG',
    })
    config['logging']['formatters']['raw'] = {'()': StructlogFormatter}
    dictConfig(config['logging'])
    if 'path' in config:
        logger.info("loading configuration", path=config['path'])
    else:
        logger.info("loading default configuration")
Example #7
def update_logging(instance_id, vcore_id):
    """
    Add the instance and vcore ids to the structured logger
    :param instance_id: The assigned instance id
    :param vcore_id:  The assigned vcore id
    :return: structured logger
    """
    def add_exc_info_flag_for_exception(_, name, event_dict):
        if name == 'exception':
            event_dict['exc_info'] = True
        return event_dict

    def add_instance_id(_, __, event_dict):
        event_dict['instance_id'] = instance_id
        return event_dict

    def add_vcore_id(_, __, event_dict):
        event_dict['vcore_id'] = vcore_id
        return event_dict

    processors = [
        add_exc_info_flag_for_exception,
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        add_instance_id,
        add_vcore_id,
        FluentRenderer(),
    ]
    structlog.configure(processors=processors)

    # Mark first line of log
    log = structlog.get_logger()
    log.info("updated-logger")
    return log
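
FluentRenderer is project-specific; to see what the custom processors inject without it, one can swap in a ReturnLogger, which hands back exactly what reaches the backend (a testing sketch, not part of the source):

import structlog

def add_instance_id(_, __, event_dict):  # same shape as the processors above
    event_dict['instance_id'] = 'core-1'
    return event_dict

structlog.configure(processors=[add_instance_id],
                    logger_factory=structlog.ReturnLoggerFactory())
assert structlog.get_logger().info('hello') == \
    ((), {'event': 'hello', 'instance_id': 'core-1'})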
Example #8
def global_setup(config):
    """Perform global cofiguration. In a given process, this should only
    ever be called with a single configuration instance. Doing otherwise
    will result in a runtime exception.
    """
    global _global_config
    if _global_config is None:
        _global_config = config
        # this breaks with unicode :(
        connection.setup([str(v) for v in config.CASSANDRA_CLUSTER],
                         consistency=config.CASSANDRA_CONSISTENCY)

        processors = [
            _capture_stack_trace,
            _format_event,
        ]

        if config.PRETTY_LOGGING:
            processors.append(structlog.processors.ExceptionPrettyPrinter())
            processors.append(structlog.processors.KeyValueRenderer())
        else:
            processors.append(structlog.processors.JSONRenderer())

        structlog.configure(
            processors=processors
        )
    elif _global_config != config:
        raise Exception('global_setup called twice with different '
                        'configurations')
Example #9
def configure_logging(app):
    if app.config.get('JSON_STRUCTURED_LOGGING'):
        processors = [
            structlog.stdlib.filter_by_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            mozdef_format,
            structlog.processors.JSONRenderer()
        ]
    else:
        processors = [
            structlog.stdlib.filter_by_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            UnstructuredRenderer()
        ]

    if app.config.get('JSON_STRUCTURED_LOGGING') and stdout_log:
        # structlog has combined all of the interesting data into the
        # (JSON-formatted) message, so only log that
        stdout_log.setFormatter(logging.Formatter('%(message)s'))

    structlog.configure(
        context_class=structlog.threadlocal.wrap_dict(dict),
        processors=processors,
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
Example #10
def get_tiger():
    """
    Sets up logging and returns a new tasktiger instance.
    """
    structlog.configure(
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
    )
    logging.basicConfig(format='%(message)s')
    conn = redis.Redis(db=TEST_DB)
    tiger = TaskTiger(connection=conn, config={
        # We need this 0 here so we don't pick up scheduled tasks when
        # doing a single worker run.
        'SELECT_TIMEOUT': 0,

        'LOCK_RETRY': DELAY * 2.,

        'DEFAULT_RETRY_METHOD': fixed(DELAY, 2),

        'BATCH_QUEUES': {
            'batch': 3,
        }
    })
    tiger.log.setLevel(logging.CRITICAL)
    return tiger
Example #11
    def test_structlog_processor(self):
        try:
            # Use ReturnLogger for testing
            structlog.configure(
                processors=[tasktiger_processor],
                context_class=dict,
                logger_factory=structlog.ReturnLoggerFactory(),
                wrapper_class=structlog.stdlib.BoundLogger,
                cache_logger_on_first_use=True,
            )

            # Run a simple task. Logging output is verified in
            # the task.
            self.tiger.delay(logging_task)
            queues = self._ensure_queues(queued={"default": 1})
            task = queues["queued"]["default"][0]
            assert task["func"] == "tests.test_logging:logging_task"
            Worker(self.tiger).run(once=True)
            self._ensure_queues(queued={"default": 0})
            assert not self.conn.exists("t:task:%s" % task["id"])
        finally:
            structlog.configure(
                processors=[
                    structlog.stdlib.add_log_level,
                    structlog.stdlib.filter_by_level,
                    structlog.processors.TimeStamper(fmt="iso", utc=True),
                    structlog.processors.StackInfoRenderer(),
                    structlog.processors.format_exc_info,
                    structlog.processors.JSONRenderer(),
                ],
                context_class=dict,
                logger_factory=structlog.ReturnLoggerFactory(),
                wrapper_class=structlog.stdlib.BoundLogger,
                cache_logger_on_first_use=True,
            )
Example #12
def configure_logging(logging_levels, plain=False):

    _remove_all_existing_log_handlers()

    renderer = (
        PlainRenderer() if plain else
        structlog.processors.JSONRenderer())

    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_logger_name,
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.TimeStamper(fmt='iso'),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            renderer
        ],
        context_class=dict,
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )

    handler = logging.StreamHandler(sys.stdout)
    root_logger = logging.getLogger()
    root_logger.addHandler(handler)

    for logger, level in logging_levels.items():

        if logger.lower() == 'root':
            logger = ''

        logging.getLogger(logger).setLevel(level.upper())
Example #13
def run():
    structlog.configure(
        processors=[
            structlog.processors.StackInfoRenderer(),
            structlog.twisted.JSONRenderer()
        ],
        context_class=dict,
        logger_factory=structlog.twisted.LoggerFactory(),
        wrapper_class=structlog.twisted.BoundLogger,
        cache_logger_on_first_use=True,
    )
    # grab all of the events that are dispatched to the stdlib logger
    # (New Relic uses this).
    handler = logging.StreamHandler(sys.stdout)
    root_logger = logging.getLogger()
    root_logger.addHandler(handler)

    # start the twisted logger
    twLog.startLogging(sys.stdout)
    # api is the WSGI resource returned by Falcon.
    api = falcon.API()
    api.add_route('/quote', QuoteResource())

    app = newrelic.agent.WSGIApplicationWrapper(api)
    resource = WSGIResource(reactor, reactor.getThreadPool(), app)
    site = Site(resource)

    reactor.listenTCP(port=8713, factory=site)
    reactor.run()
Example #14
def init_logger(level):
    logging.basicConfig()
    formatter = logging.Formatter('%(message)s')
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)

    root_logger = logging.getLogger()
    root_logger.removeHandler(root_logger.handlers[0])
    root_logger.setLevel(level)
    root_logger.addHandler(handler)

    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.TimeStamper(fmt='iso'),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            structlog.processors.KeyValueRenderer()
        ],
        context_class=dict,
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
Example #15
def get_tiger():
    """
    Sets up logging and returns a new tasktiger instance.
    """
    structlog.configure(
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
    )
    logging.basicConfig(format='%(message)s')
    conn = redis.Redis(db=TEST_DB, decode_responses=True)
    tiger = TaskTiger(connection=conn, config={
        # We need this 0 here so we don't pick up scheduled tasks when
        # doing a single worker run.
        'SELECT_TIMEOUT': 0,

        'ACTIVE_TASK_UPDATE_TIMEOUT': 2 * DELAY,

        'REQUEUE_EXPIRED_TASKS_INTERVAL': DELAY,

        'LOCK_RETRY': DELAY * 2.,

        'DEFAULT_RETRY_METHOD': fixed(DELAY, 2),

        'BATCH_QUEUES': {
            'batch': 3,
        },

        'SINGLE_WORKER_QUEUES': ['swq'],
    })
    tiger.log.setLevel(logging.CRITICAL)
    return tiger
Example #16
def init_logger(project_name,
                channel=None,
                level=logbook.INFO,
                handler=None,
                PAPERTRAIL_HOST=None,
                PAPERTRAIL_PORT=None,
                SENTRY_DSN=None,
                MOZDEF=None,
                flask_app=None,
                timestamp=False,
                ):

    if not channel:
        channel = os.environ.get('APP_CHANNEL')

    if channel and channel not in CHANNELS:
        raise Exception('Initializing logging with channel `{}`; it should be one of: {}'.format(channel, ', '.join(CHANNELS)))

    # By default output logs on stderr
    if handler is None:
        fmt = '{record.channel}: {record.message}'
        handler = logbook.StderrHandler(level=level, format_string=fmt)

    handler.push_application()

    # Log to papertrail
    if channel and PAPERTRAIL_HOST and PAPERTRAIL_PORT:
        setup_papertrail(project_name, channel, PAPERTRAIL_HOST, PAPERTRAIL_PORT)

    # Log to sentry
    if channel and SENTRY_DSN:
        setup_sentry(project_name, channel, SENTRY_DSN, flask_app)

    def logbook_factory(*args, **kwargs):
        # Logger given to structlog
        logbook.compat.redirect_logging()
        return logbook.Logger(level=level, *args, **kwargs)

    # Set up structlog on top of logbook
    processors = [
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
    ]
    if timestamp is True:
        processors.append(structlog.processors.TimeStamper(fmt='%Y-%m-%d %H:%M:%S'))

    # send to mozdef before formatting into a string
    if channel and MOZDEF:
        processors.append(setup_mozdef(project_name, channel, MOZDEF))

    processors.append(UnstructuredRenderer())

    structlog.configure(
        context_class=structlog.threadlocal.wrap_dict(dict),
        processors=processors,
        logger_factory=logbook_factory,
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
Example #17
def configure_logging(log_format, utc, endpoint):
    processors = [
        TimeStamper(
            key='@timestamp',
            utc=utc,
        ),
    ]
    if endpoint.startswith('file://'):
        path = endpoint[7:]
        if path == '/dev/stdout':
            stream = sys.stdout
        elif path == '/dev/stderr':
            stream = sys.stderr
        else:
            stream = open(path, 'w')
        logger_factory = structlog.PrintLoggerFactory(file=stream)
        if log_format == 'kv':
            processors.append(structlog.processors.KeyValueRenderer(
                sort_keys=True,
                key_order=['@timestamp', 'event'],
            ))
        else:
            processors.append(structlog.processors.JSONRenderer(
                sort_keys=True,
            ))
    elif endpoint.startswith('fluent://'):
        utc = True
        logger_factory = FluentLoggerFactory.from_url(endpoint)
    else:
        raise ValueError('Invalid logging endpoint "%s".' % endpoint)
    structlog.configure(
        processors=processors,
        logger_factory=logger_factory,
    )
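
A usage sketch for the function above (the endpoint string is illustrative):

configure_logging(log_format='kv', utc=True, endpoint='file:///dev/stderr')
structlog.get_logger().info('started')  # @timestamp=... event='started'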
Example #18
def get_logger(level=logging.DEBUG, name=None, stream=DEFAULT_STREAM):
    """Configure and return a logger with structlog and stdlib."""
    wrap_dict_class = structlog.threadlocal.wrap_dict(dict)
    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_log_level,
            structlog.stdlib.add_logger_name,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.TimeStamper(fmt='iso'),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            structlog.processors.JSONRenderer(sort_keys=True)
        ],
        context_class=wrap_dict_class,
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True)
    log = structlog.get_logger(name)
    if not _has_streamhandler(logging.getLogger(name),
                              level=level, stream=stream):
        streamhandler = logging.StreamHandler(stream)
        streamhandler.setLevel(level)
        streamhandler.setFormatter(logging.Formatter(fmt=LOG_FORMAT))
        log.addHandler(streamhandler)
    log.setLevel(level)
    return log
Example #19
    def test_format_event_basic(self):
        processors = [teeth_overlord.service._format_event,
                      _return_event_processor]
        structlog.configure(processors=processors)
        log = structlog.wrap_logger(structlog.ReturnLogger())
        logged_msg = log.msg("hello {word}", word='world')
        self.assertEqual(logged_msg, "hello world")
Example #20
def configure_structlog():
    """
    Make structlog comply with all of our options.
    """
    from django.conf import settings
    import logging
    import structlog
    from sentry import options
    from sentry.logging import LoggingFormat
    WrappedDictClass = structlog.threadlocal.wrap_dict(dict)
    kwargs = {
        'context_class': WrappedDictClass,
        'wrapper_class': structlog.stdlib.BoundLogger,
        'cache_logger_on_first_use': True,
        'processors': [
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.format_exc_info,
            structlog.processors.StackInfoRenderer(),
        ]
    }

    fmt_from_env = os.environ.get('SENTRY_LOG_FORMAT')
    if fmt_from_env:
        settings.SENTRY_OPTIONS['system.logging-format'] = fmt_from_env.lower()

    fmt = options.get('system.logging-format')

    if fmt == LoggingFormat.HUMAN:
        from sentry.logging.handlers import HumanRenderer
        kwargs['processors'].extend([
            structlog.processors.ExceptionPrettyPrinter(),
            HumanRenderer(),
        ])
    elif fmt == LoggingFormat.MACHINE:
        from sentry.logging.handlers import JSONRenderer
        kwargs['processors'].append(JSONRenderer())

    structlog.configure(**kwargs)

    lvl = os.environ.get('SENTRY_LOG_LEVEL')

    if lvl and lvl not in logging._levelNames:
        raise AttributeError('%s is not a valid logging level.' % lvl)

    settings.LOGGING['root'].update({
        'level': lvl or settings.LOGGING['default_level']
    })

    if lvl:
        for logger in settings.LOGGING['overridable']:
            try:
                settings.LOGGING['loggers'][logger].update({
                    'level': lvl
                })
            except KeyError:
                raise KeyError('%s is not a defined logger.' % logger)

    logging.config.dictConfig(settings.LOGGING)
Example #21
def init_logging(log_dir, name):
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'plain': {
                '()': ProcessorFormatter,
                'processor': structlog.dev.ConsoleRenderer(colors=False),
            },
            'colored': {
                '()': ProcessorFormatter,
                'processor': structlog.dev.ConsoleRenderer(),
            },
        },
        'handlers': {
            'default': {
                'level': 'DEBUG',
                'class': 'logging.StreamHandler',
                'formatter': 'colored',
            },
            'file': {
                'level': 'DEBUG',
                'class': 'logging.handlers.WatchedFileHandler',
                'filename': os.path.join(log_dir, '{}.log'.format(name)),
                'formatter': 'plain',
            },
        },
        'loggers': {
            '': {
                'handlers': ['default', 'file'],
                'level': 'DEBUG',
                'propagate': True,
            },
            'asyncio': {
                'propagate': False,
            },
            'telnetlib3': {
                'propagate': False,
            },
        }
    })
    structlog.configure(
        processors=[
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.TimeStamper(fmt='%Y-%m-%d %H:%M:%S'),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            event_dict_to_message,
        ],
        context_class=dict,
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
    sys.excepthook = uncaught_exception
Example #22
    def test_no_format_keys(self):
        """Check that we get an exception if we don't provide enough keys to
        format a log message that requires formatting
        """
        processors = [teeth_overlord.service._format_event,
                      _return_event_processor]
        structlog.configure(processors=processors)
        log = structlog.wrap_logger(structlog.ReturnLogger())
        self.assertRaises(KeyError, log.msg, "hello {word}")
Example #23
    def setUp(self):
        logging.basicConfig(format=settings.LOGGING_FORMAT,
                            datefmt="%Y-%m-%dT%H:%M:%S",
                            level=settings.LOGGING_LEVEL)

        logging.getLogger('sdc.rabbit').setLevel(logging.INFO)

        structlog.configure(logger_factory=LoggerFactory(), context_class=wrap_dict(dict))
        self.message_processor = MessageProcessor()
Example #24
def raise_on_logger_msg():
  def proc(logger, method_name, event_dict):
    if method_name in ('warning', 'error'):
      if 'exc_info' in event_dict:
        raise event_dict['exc_info']
      if not event_dict['event'].startswith(('rate limited', 'no-result')):
        raise RuntimeError(event_dict['event'])
    return event_dict['event']

  structlog.configure([proc])
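
Once installed, the helper turns warning/error log calls into test failures; a sketch of the assumed behavior:

raise_on_logger_msg()
log = structlog.get_logger()
log.info('all fine')      # passes through the processor and is printed
log.warning('bad thing')  # raises RuntimeError('bad thing')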
Example #25
def includeme(config):
    structlog.configure(
        processors=[
            structlog.processors.KeyValueRenderer(
                key_order=['event', 'request_id'],
            ),
        ],
        context_class=structlog.threadlocal.wrap_dict(dict),
        logger_factory=structlog.stdlib.LoggerFactory(),
    )
Example #26
    def setUp(self):
        logging.basicConfig(format=settings.LOGGING_FORMAT,
                            datefmt="%Y-%m-%dT%H:%M:%S",
                            level=settings.LOGGING_LEVEL)

        logging.getLogger('sdc.rabbit').setLevel(logging.INFO)

        structlog.configure(logger_factory=LoggerFactory(), context_class=wrap_dict(dict))
        survey = json.loads(cora_survey)
        self.processor = CoraProcessor(survey, ftpconn)
        self.processor.ftp.unzip_and_deliver = MagicMock(return_value=True)
Example #27
def setup_logging(log_config, instance_id, verbosity_adjust=0, fluentd=None):
    """
    Set up logging such that:
    - The primary logging entry method is structlog
      (see http://structlog.readthedocs.io/en/stable/index.html)
    - By default, the logging backend is Python standard lib logger
    - Alternatively, fluentd can be configured with to be the backend,
      providing direct bridge to a fluent logging agent.
    """

    def add_exc_info_flag_for_exception(_, name, event_dict):
        if name == 'exception':
            event_dict['exc_info'] = True
        return event_dict

    def add_instance_id(_, __, event_dict):
        event_dict['instance_id'] = instance_id
        return event_dict

    # if fluentd is specified, we need to override the config data with
    # its host and port info
    if fluentd is not None:
        fluentd_host = fluentd.split(':')[0].strip()
        fluentd_port = int(fluentd.split(':')[1].strip())

        handlers = log_config.get('handlers', None)
        if isinstance(handlers, dict):
            for _, defs in handlers.iteritems():
                if isinstance(defs, dict):
                    if defs.get('class', '').endswith('FluentHandler'):
                        defs['host'] = fluentd_host
                        defs['port'] = fluentd_port

    # Configure standard logging
    logging.config.dictConfig(log_config)
    logging.root.level -= 10 * verbosity_adjust

    processors = [
        add_exc_info_flag_for_exception,
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        add_instance_id,
        FluentRenderer(),
    ]
    structlog.configure(logger_factory=structlog.stdlib.LoggerFactory(),
                        context_class=PlainRenderedOrderedDict,
                        wrapper_class=BoundLogger,
                        processors=processors)

    # Mark first line of log
    log = structlog.get_logger()
    log.info("first-line")
    return log
Example #28
def configure():
    structlog.configure(
        processors=[
            structlog.processors.StackInfoRenderer(),
            structlog.processors.TimeStamper(fmt='iso'),
            structlog.twisted.JSONRenderer(),
        ],
        context_class=dict,
        logger_factory=structlog.twisted.LoggerFactory(),
        wrapper_class=structlog.twisted.BoundLogger,
        cache_logger_on_first_use=True,
    )
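
A hedged sketch of driving this configuration under Twisted (startLogging wiring assumed, as in the earlier Twisted example; the port number is illustrative):

import sys
import structlog
from twisted.python import log as twLog

configure()
twLog.startLogging(sys.stdout)
structlog.get_logger().msg('server-started', listen_port=8713)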
Example #29
def configure():
    """Configure Balanced logging system

    """
    structlog.configure(
        processors=[
            format_exc_info,
            LogProcessor(),
            JSONRenderer(),
        ],
        logger_factory=LoggerFactory(),
    )
Example #30
def includeme(config):
    # Configure the standard library logging
    logging.config.dictConfig({
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "structlog": {
                "()": "warehouse.logging.StructlogFormatter",
            },
        },
        "handlers": {
            "primary": {
                "class": "logging.StreamHandler",
                "stream": "ext://sys.stdout",
                "formatter": "structlog",
            },
            "sentry": {
                "class": "raven.handlers.logging.SentryHandler",
                "level": "ERROR",
                "dsn": config.registry.settings.get("sentry.dsn"),
                "release": config.registry.settings.get("warehouse.commit"),
                "transport": config.registry.settings.get("sentry.transport"),
            },
        },
        "root": {
            "level": config.registry.settings.get("logging.level", "INFO"),
            "handlers": ["primary", "sentry"],
        },
    })

    # Configure structlog
    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_logger_name,
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            RENDERER,
        ],
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )

    # Give every request a unique identifier
    config.add_request_method(_create_id, name="id", reify=True)

    # Add a log method to every request.
    config.add_request_method(_create_logger, name="log", reify=True)
Example #31
from buildscripts.evergreen_burn_in_tests import GenerateConfig, DEFAULT_PROJECT, CONFIG_FILE, \
    EvergreenFileChangeDetector
from buildscripts.resmokelib.suitesconfig import get_named_suites_with_root_level_key
from buildscripts.task_generation.evg_config_builder import EvgConfigBuilder
from buildscripts.task_generation.gen_config import GenerationConfiguration
from buildscripts.task_generation.generated_config import GeneratedConfiguration
from buildscripts.task_generation.multiversion_util import MultiversionUtilService
from buildscripts.task_generation.resmoke_proxy import ResmokeProxyConfig
from buildscripts.task_generation.suite_split import SuiteSplitConfig, SuiteSplitParameters
from buildscripts.task_generation.suite_split_strategies import SplitStrategy, greedy_division, \
    FallbackStrategy, round_robin_fallback
from buildscripts.task_generation.task_types.gentask_options import GenTaskOptions
from buildscripts.task_generation.task_types.multiversion_tasks import MultiversionGenTaskParams
from buildscripts.util.cmdutils import enable_logging

structlog.configure(logger_factory=LoggerFactory())
LOGGER = structlog.getLogger(__name__)

MULTIVERSION_CONFIG_KEY = gen_multiversion.MULTIVERSION_CONFIG_KEY
MULTIVERSION_PASSTHROUGH_TAG = gen_multiversion.PASSTHROUGH_TAG
BURN_IN_MULTIVERSION_TASK = gen_multiversion.BURN_IN_TASK
DEFAULT_CONFIG_DIR = "generated_resmoke_config"
DEFAULT_TEST_SUITE_DIR = os.path.join("buildscripts", "resmokeconfig",
                                      "suites")


def filter_list(item: str, input_list: List[str]) -> bool:
    """
    Filter to determine if the given item is in the given list.

    :param item: Item to search for.
Example #32
    def __init__(self, connection=None, config=None, setup_structlog=False):
        """
        Initializes TaskTiger with the given Redis connection and config
        options. Optionally sets up structlog.
        """

        self.config = {
            # String that is used to prefix all Redis keys
            'REDIS_PREFIX': 't',

            # Name of the Python (structlog) logger
            'LOGGER_NAME': 'tasktiger',

            # Where to queue tasks that don't have an explicit queue
            'DEFAULT_QUEUE': 'default',

            # After how many seconds time out on listening on the activity
            # channel and check for scheduled or expired items.
            'SELECT_TIMEOUT': 1,

            # If this is True, all tasks except future tasks (when=a future
            # time) will be executed locally by blocking until the task
            # returns. This is useful for testing purposes.
            'ALWAYS_EAGER': False,

            # If retry is True but no retry_method is specified for a given
            # task, use the following default method.
            'DEFAULT_RETRY_METHOD': fixed(60, 3),

            # After how many seconds a task that can't acquire a lock is
            # retried.
            'LOCK_RETRY': 1,

            # How many items to move at most from the scheduled queue to the
            # active queue.
            'SCHEDULED_TASK_BATCH_SIZE': 1000,

            # After how many seconds a long-running task is killed. This can be
            # overridden by the task or at queue time.
            'DEFAULT_HARD_TIMEOUT': 300,

            # The timer specifies how often the worker updates the task's
            # timestamp in the active queue. Tasks exceeding the timeout value
            # are requeued. Note that no delay is necessary before the retry
            # since this condition happens when the worker crashes, and not
            # when there is an exception in the task itself.
            'ACTIVE_TASK_UPDATE_TIMER': 10,
            'ACTIVE_TASK_UPDATE_TIMEOUT': 60,
            'ACTIVE_TASK_EXPIRED_BATCH_SIZE': 10,

            # Set up queues that will be processed in batch, i.e. multiple jobs
            # are taken out of the queue at the same time and passed as a list
            # to the worker method. Takes a dict where the key represents the
            # queue name and the value represents the batch size. Note that the
            # task needs to be declared as batch=True. Also note that any
            # subqueues will be automatically treated as batch queues, and the
            # batch value of the most specific subqueue name takes precedence.
            'BATCH_QUEUES': {},

            # How often to print stats.
            'STATS_INTERVAL': 60,
        }
        if config:
            self.config.update(config)

        self.connection = connection or redis.Redis()
        self.scripts = RedisScripts(self.connection)

        if setup_structlog:
            structlog.configure(
                processors=[
                    structlog.stdlib.add_log_level,
                    structlog.stdlib.filter_by_level,
                    structlog.processors.TimeStamper(fmt='iso', utc=True),
                    structlog.processors.StackInfoRenderer(),
                    structlog.processors.format_exc_info,
                    structlog.processors.JSONRenderer()
                ],
                context_class=dict,
                logger_factory=structlog.stdlib.LoggerFactory(),
                wrapper_class=structlog.stdlib.BoundLogger,
                cache_logger_on_first_use=True,
            )

        self.log = structlog.get_logger(self.config['LOGGER_NAME']).bind()

        if setup_structlog:
            self.log.setLevel(logging.DEBUG)
            logging.basicConfig(format='%(message)s')
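
A minimal usage sketch for this constructor (the connection details and queue name are assumptions):

import redis

tiger = TaskTiger(connection=redis.Redis(), config={'DEFAULT_QUEUE': 'jobs'},
                  setup_structlog=True)
tiger.log.info('tasktiger initialized')  # rendered as JSON by the chain above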
Example #33
def configure_structlog():
    """
    Make structlog comply with all of our options.
    """
    from django.conf import settings
    import logging.config
    import structlog
    from sentry import options
    from sentry.logging import LoggingFormat

    WrappedDictClass = structlog.threadlocal.wrap_dict(dict)
    kwargs = {
        "context_class": WrappedDictClass,
        "wrapper_class": structlog.stdlib.BoundLogger,
        "cache_logger_on_first_use": True,
        "processors": [
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.format_exc_info,
            structlog.processors.StackInfoRenderer(),
            structlog.processors.UnicodeDecoder(),
        ],
    }

    fmt_from_env = os.environ.get("SENTRY_LOG_FORMAT")
    if fmt_from_env:
        settings.SENTRY_OPTIONS["system.logging-format"] = fmt_from_env.lower()

    fmt = options.get("system.logging-format")

    if fmt == LoggingFormat.HUMAN:
        from sentry.logging.handlers import HumanRenderer

        kwargs["processors"].extend(
            [structlog.processors.ExceptionPrettyPrinter(),
             HumanRenderer()])
    elif fmt == LoggingFormat.MACHINE:
        from sentry.logging.handlers import JSONRenderer

        kwargs["processors"].append(JSONRenderer())

    structlog.configure(**kwargs)

    lvl = os.environ.get("SENTRY_LOG_LEVEL")

    if lvl:
        levelNames = logging._levelNames if not six.PY3 else logging._nameToLevel
        if lvl not in levelNames:
            raise AttributeError("%s is not a valid logging level." % lvl)

    settings.LOGGING["root"].update(
        {"level": lvl or settings.LOGGING["default_level"]})

    if lvl:
        for logger in settings.LOGGING["overridable"]:
            try:
                settings.LOGGING["loggers"][logger].update({"level": lvl})
            except KeyError:
                raise KeyError("%s is not a defined logger." % logger)

    logging.config.dictConfig(settings.LOGGING)
Example #34
import os

from configurations import Configuration, values

from structlog import configure
from structlog.stdlib import LoggerFactory

from . import mixins

configure(logger_factory=LoggerFactory())


def project_path(*path):
    this_dir = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(this_dir, '..', *path)


class Base(mixins.DjangoLoggingMixin, Configuration):
    DEBUG = False
    TEMPLATE_DEBUG = DEBUG

    ADMINS = (('Alerts', '*****@*****.**'),)  # trailing comma keeps this a tuple of 2-tuples
    MANAGERS = ADMINS

    AUTH_USER_MODEL = 'users.User'

    CACHES = values.CacheURLValue('locmem:///')

    DATABASE_DICT = values.DatabaseURLValue()

    @property
Example #35
def configure_logging(
    logger_level_config: Dict[str, str] = None,
    colorize: bool = True,
    log_json: bool = False,
    log_file: str = None,
    disable_debug_logfile: bool = False,
):
    structlog.reset_defaults()
    if logger_level_config is None:
        logger_level_config = {'': DEFAULT_LOG_LEVEL}
    processors = [
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
    ]

    formatter = 'colorized' if colorize and not log_file else 'plain'
    if log_json:
        formatter = 'json'

    redact = redactor({
        re.compile(r'\b(access_?token=)([a-z0-9_-]+)', re.I):
        r'\1<redacted>',
    })
    _wrap_tracebackexception_format(redact)

    log_handler = _get_log_handler(
        formatter,
        log_file,
    )
    if disable_debug_logfile:
        combined_log_handlers = log_handler
    else:
        debug_log_file_handler = _get_log_file_handler()
        combined_log_handlers = {**log_handler, **debug_log_file_handler}

    logging.config.dictConfig(
        {
            'version': 1,
            'disable_existing_loggers': False,
            'filters': {
                'log_level_filter': {
                    '()': RaidenFilter,
                    'log_level_config': logger_level_config,
                },
                'log_level_debug_filter': {
                    '()': RaidenFilter,
                    'log_level_config': {
                        '': DEFAULT_LOG_LEVEL,
                        'raiden': 'DEBUG'
                    },
                },
            },
            'formatters': {
                'plain': {
                    '()': structlog.stdlib.ProcessorFormatter,
                    'processor': _chain(structlog.dev.ConsoleRenderer(colors=False), redact),
                    'foreign_pre_chain': processors,
                },
                'json': {
                    '()': structlog.stdlib.ProcessorFormatter,
                    'processor': _chain(structlog.processors.JSONRenderer(), redact),
                    'foreign_pre_chain': processors,
                },
                'colorized': {
                    '()': structlog.stdlib.ProcessorFormatter,
                    'processor': _chain(structlog.dev.ConsoleRenderer(colors=True), redact),
                    'foreign_pre_chain': processors,
                },
                'debug': {
                    '()': structlog.stdlib.ProcessorFormatter,
                    'processor': _chain(structlog.dev.ConsoleRenderer(colors=False), redact),
                    'foreign_pre_chain': processors,
                },
            },
            'handlers': combined_log_handlers,
            'loggers': {
                '': {
                    'handlers': list(combined_log_handlers.keys()),
                    'propagate': True,
                },
            },
        }, )
    structlog.configure(
        processors=processors + [
            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
        ],
        wrapper_class=structlog.stdlib.BoundLogger,
        logger_factory=structlog.stdlib.LoggerFactory(),
        cache_logger_on_first_use=True,
    )

    # set logging level of the root logger to DEBUG, to be able to intercept
    # all messages, which are then filtered by the `RaidenFilter`
    structlog.get_logger('').setLevel(
        logger_level_config.get('', DEFAULT_LOG_LEVEL))
    structlog.get_logger('raiden').setLevel('DEBUG')

    # rollover RotatingFileHandler on startup, to split logs also per-session
    root = logging.getLogger()
    for handler in root.handlers:
        if isinstance(handler, logging.handlers.RotatingFileHandler):
            handler.flush()
            if os.stat(handler.baseFilename).st_size > 0:
                handler.doRollover()

    # fix logging of py-evm (it uses a custom Trace logger from logging library)
    # if py-evm is not used this import will raise, hence the try/except block
    # for some reason it didn't work to put this into conftest.py
    try:
        from eth.tools.logging import setup_trace_logging
        setup_trace_logging()
    except ImportError:
        pass
Example #36
import math

import structlog

structlog.configure(logger_factory=structlog.PrintLoggerFactory())
logger = structlog.get_logger(processors=[structlog.processors.JSONRenderer()])


def add_equivalent_travel_cost(travel_options, value_one_hour,
                               value_ten_hours):
    """
    Calculate the dollar equivalent of the travel time, add that to the monetary travel cost, and store the results
    in a new column called `equivalent_travel_cost`
    :param travel_options: pandas.DataFrame: each row is a travel option
    :param value_one_hour: int: value in dollars of 1 hour of time
    :param value_ten_hours: int: value in dollars of 10 hours of time
    :return: pandas.DataFrame: travel_options with the new column `equivalent_travel_cost`
    """
    # Formula used:  Y = a(X^b), where X is the travel time in hours, and Y is the dollar equivalent value
    a = value_one_hour
    b = math.log(value_ten_hours / value_one_hour) / math.log(10)
    logger.info("Calculating dollar equivalent of travel time",
                formula="Y = a(X^b)",
                a=a,
                b=b)

    def calculate_equivalent_travel_cost(row):
        """ Calculate the equivalent travel cost for a single travel option """
        travel_time_hours = row.travel_time_seconds / 3600
        time_value = a * (travel_time_hours**b)
        return row.total_cost + time_value
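
A quick numeric check of the formula with hypothetical inputs:

import math
a = 10                                # value of one hour: $10
b = math.log(60 / 10) / math.log(10)  # value of ten hours: $60 -> b ~= 0.778
print(a * 4 ** b)                     # a 4-hour trip is worth ~$29.4 of time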
Example #37
def configure_logging(
    logger_level_config: Dict[str, str] = None,
    colorize: bool = True,
    log_json: bool = False,
    log_file: str = None,
    disable_debug_logfile: bool = False,
    debug_log_file_name: str = None,
    _first_party_packages: FrozenSet[str] = _FIRST_PARTY_PACKAGES,
    cache_logger_on_first_use: bool = True,
):
    structlog.reset_defaults()

    logger_level_config = logger_level_config or dict()
    logger_level_config.setdefault('filelock', 'ERROR')
    logger_level_config.setdefault('', DEFAULT_LOG_LEVEL)

    processors = [
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        add_greenlet_name,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S.%f"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
    ]

    if log_json:
        formatter = 'json'
    elif colorize and not log_file:
        formatter = 'colorized'
    else:
        formatter = 'plain'

    redact = redactor({
        re.compile(r'\b(access_?token=)([a-z0-9_-]+)', re.I):
        r'\1<redacted>',
    })
    _wrap_tracebackexception_format(redact)

    handlers = dict()
    if log_file:
        handlers['file'] = {
            'class': 'logging.handlers.WatchedFileHandler',
            'filename': log_file,
            'level': 'DEBUG',
            'formatter': formatter,
            'filters': ['user_filter'],
        }
    else:
        handlers['default'] = {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': formatter,
            'filters': ['user_filter'],
        }

    if not disable_debug_logfile:
        if debug_log_file_name is None:
            time = datetime.datetime.utcnow().isoformat()
            debug_log_file_name = f'raiden-debug_{time}.log'
        handlers['debug-info'] = {
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': debug_log_file_name,
            'level': 'DEBUG',
            'formatter': 'debug',
            'maxBytes': MAX_LOG_FILE_SIZE,
            'backupCount': LOG_BACKUP_COUNT,
            'filters': ['raiden_debug_file_filter'],
        }

    logging.config.dictConfig(
        {
            'version': 1,
            'disable_existing_loggers': False,
            'filters': {
                'user_filter': {
                    '()': RaidenFilter,
                    'log_level_config': logger_level_config,
                },
                'raiden_debug_file_filter': {
                    '()': RaidenFilter,
                    'log_level_config': {
                        '': DEFAULT_LOG_LEVEL,
                        'raiden': 'DEBUG',
                    },
                },
            },
            'formatters': {
                'plain': {
                    '()': structlog.stdlib.ProcessorFormatter,
                    'processor': _chain(structlog.dev.ConsoleRenderer(colors=False), redact),
                    'foreign_pre_chain': processors,
                },
                'json': {
                    '()': structlog.stdlib.ProcessorFormatter,
                    'processor': _chain(structlog.processors.JSONRenderer(), redact),
                    'foreign_pre_chain': processors,
                },
                'colorized': {
                    '()': structlog.stdlib.ProcessorFormatter,
                    'processor': _chain(structlog.dev.ConsoleRenderer(colors=True), redact),
                    'foreign_pre_chain': processors,
                },
                'debug': {
                    '()': structlog.stdlib.ProcessorFormatter,
                    'processor': _chain(structlog.processors.JSONRenderer(), redact),
                    'foreign_pre_chain': processors,
                },
            },
            'handlers': handlers,
            'loggers': {
                '': {
                    'handlers': handlers.keys(),
                    'propagate': True,
                },
            },
        }, )
    structlog.configure(
        processors=processors + [
            structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
        ],
        wrapper_class=structlog.stdlib.BoundLogger,
        logger_factory=structlog.stdlib.LoggerFactory(),
        cache_logger_on_first_use=cache_logger_on_first_use,
    )

    # set logging level of the root logger to DEBUG, to be able to intercept
    # all messages, which are then filtered by the `RaidenFilter`
    structlog.get_logger('').setLevel(
        logger_level_config.get('', DEFAULT_LOG_LEVEL))
    for package in _first_party_packages:
        structlog.get_logger(package).setLevel('DEBUG')

    # rollover RotatingFileHandler on startup, to split logs also per-session
    root = logging.getLogger()
    for handler in root.handlers:
        if isinstance(handler, logging.handlers.RotatingFileHandler):
            handler.flush()
            if os.stat(handler.baseFilename).st_size > 0:
                handler.doRollover()

    # fix logging of py-evm (it uses a custom Trace logger from logging library)
    # if py-evm is not used this import will raise, hence the try/except block
    # for some reason it didn't work to put this into conftest.py
    try:
        from eth.tools.logging import setup_trace_logging
        setup_trace_logging()
    except ImportError:
        pass
Example #38
import logging
import sys
import structlog
from structlog import get_logger

logger = get_logger()


def hello():
    logger.info("hello", name="foo", age=20)
    return "hello"


def broken():
    try:
        return 1 / 0
    except Exception as e:
        logger.error(e, exc_info=True)


if __name__ == "__main__":
    logging.basicConfig(
        format="%(message)s %(asctime)s %(levelname)s)",
        stream=sys.stdout,
        level=logging.INFO,
    )
    structlog.configure(logger_factory=structlog.stdlib.LoggerFactory())
    print(hello())
    print(broken())
Example #39
# Boundaries
BOUNDARIES_SHAPEFILES_DIR = "shapefiles"

# API
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r"^/(graphql|api/v1).*$"
CORS_ALLOW_METHODS = ["GET", "POST", "OPTIONS"]
CORS_ALLOW_HEADERS = default_headers + ("x-api-key", )

GRAPHENE = {"SCHEMA": "graphapi.schema.schema", "MIDDLEWARE": []}

# structlog config
structlog.configure(
    processors=[
        structlog.stdlib.filter_by_level,
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        structlog.processors.UnicodeDecoder(),
        structlog.processors.JSONRenderer(),
    ],
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)
Example #40
""" Experimenting with using structlog with hope of supporting it in Lightbus
"""
import logging
import sys

import structlog


def event_dict_ordering(logger, method_name, event_dict):
    ordered = {"event": event_dict.pop("event")}
    ordered.update(**event_dict)
    return ordered


structlog.configure(processors=[
    event_dict_ordering,
    structlog.stdlib.add_log_level,
    structlog.stdlib.add_logger_name,
    structlog.processors.TimeStamper(fmt="iso"),
    structlog.dev.ConsoleRenderer() if sys.stdout.isatty()
    else structlog.processors.JSONRenderer(),
])

if __name__ == "__main__":
    log = structlog.wrap_logger(logging.getLogger("test"))
    log.warning("hello from std", foo=1)

    log.info("Loaded plugins", plugins={...}, context={"service_name": "..."})
Example #41
    chain = [
        LogEntryProcessor.add_app_info,
        LogEntryProcessor.add_logger_name,
        structlog.stdlib.add_log_level,
        LogEntryProcessor.add_timestamp,
        LogEntryProcessor.censor_password,
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        LogEntryProcessor.cleanup_keynames,
        structlog.processors.JSONRenderer(),
    ]

structlog.configure(
    processors=chain,
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)


# structlog.configure(
#     context_class=structlog.threadlocal.wrap_dict(dict),
#     logger_factory=structlog.stdlib.LoggerFactory(),
#     wrapper_class=structlog.stdlib.BoundLogger,
#     processors=[
#         structlog.stdlib.filter_by_level,
#         structlog.stdlib.add_logger_name,
#         structlog.stdlib.add_log_level,
#         structlog.stdlib.add_log_level_number,
#         structlog.stdlib.PositionalArgumentsFormatter(),
Example #42
    structlog.stdlib.PositionalArgumentsFormatter(),
    structlog.processors.TimeStamper(fmt="iso"),
    structlog.processors.StackInfoRenderer(),
    structlog.processors.format_exc_info,
    structlog.processors.UnicodeDecoder(),
]

structlog_post_processors = [
    SentryJsonProcessor(level=logging.ERROR, tag_keys="__all__"),
    structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
]

structlog.configure(
    processors=[structlog.stdlib.filter_by_level] + shared_processors +
    structlog_post_processors,
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)

# Logging config
# ==============
formatter = structlog.stdlib.ProcessorFormatter(
    processor=structlog.processors.JSONRenderer(),
    foreign_pre_chain=shared_processors)
handler = logging.StreamHandler()
handler.setFormatter(formatter)

root_logger = logging.getLogger()
root_logger.addHandler(handler)
root_logger.setLevel(logging.INFO)
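
# Illustrative usage (assumed to live in the same module): structlog events
# and foreign stdlib records now flow through the same JSON formatter; the
# foreign_pre_chain applies the shared processors to records that did not
# originate from structlog.
structlog.get_logger("app").info("structlog_event", user_id=42)
logging.getLogger("third_party").info("plain stdlib record")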
Example #43
    def set_default_append(self):
        DEBUG = self.get("DEBUG")
        self.set_option("DEBUG_EMAIL", DEBUG)
        for template in self.get("TEMPLATES"):
            template["OPTIONS"]["debug"] = DEBUG
        # TEMPLATES[0]["OPTIONS"]["debug"] = DEBUG

        # use structlog for logging
        import structlog

        MIDDLEWARE = self.get("MIDDLEWARE")

        MIDDLEWARE += [
            "django_structlog.middlewares.RequestMiddleware",
        ]

        # set these explicitly, not with DEBUG
        DJANGO_LOG_LEVEL = self.set_option("DJANGO_LOG_LEVEL", "INFO")
        FULLCTL_LOG_LEVEL = self.set_option("FULLCTL_LOG_LEVEL", "DEBUG")

        structlog.configure(
            processors=[
                structlog.stdlib.filter_by_level,
                structlog.processors.TimeStamper(fmt="iso"),
                structlog.stdlib.add_logger_name,
                structlog.stdlib.add_log_level,
                structlog.stdlib.PositionalArgumentsFormatter(),
                structlog.processors.StackInfoRenderer(),
                structlog.processors.format_exc_info,
                structlog.processors.UnicodeDecoder(),
                structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
            ],
            context_class=structlog.threadlocal.wrap_dict(dict),
            logger_factory=structlog.stdlib.LoggerFactory(),
            wrapper_class=structlog.stdlib.BoundLogger,
            cache_logger_on_first_use=True,
        )

        # LOGGING: define extra formatters and handlers for convenience
        LOGGING = {
            "version": 1,
            "disable_existing_loggers": False,
            "formatters": {
                "json": {
                    "()": structlog.stdlib.ProcessorFormatter,
                    "processor": structlog.processors.JSONRenderer(),
                },
                "color_console": {
                    "()": structlog.stdlib.ProcessorFormatter,
                    "processor": structlog.dev.ConsoleRenderer(),
                },
                "key_value": {
                    "()": structlog.stdlib.ProcessorFormatter,
                    "processor": structlog.processors.KeyValueRenderer(
                        key_order=["timestamp", "level", "event", "logger"]
                    ),
                },
            },
            "handlers": {
                "console": {
                    "class": "logging.StreamHandler",
                    "formatter": "color_console",
                    "stream": sys.stdout,
                },
                "console_json": {
                    "class": "logging.StreamHandler",
                    "formatter": "json",
                    "stream": sys.stdout,
                },
                "mail_admins": {
                    "class": "django.utils.log.AdminEmailHandler",
                    "level": "ERROR",
                    # plain text by default - HTML is nicer
                    "include_html": True,
                },
            },
            "loggers": {
                "django": {
                    "handlers": ["console_json"],
                    "level": DJANGO_LOG_LEVEL,
                },
                "django_structlog": {
                    "handlers": ["console_json"],
                    "level": FULLCTL_LOG_LEVEL,
                },
            },
        }
        self.set_option("LOGGING", LOGGING)
Example #44
    stdlib.ProcessorFormatter.wrap_for_formatter,
]

prod_processors = [
    stdlib.filter_by_level,
    stdlib.add_logger_name,
    stdlib.add_log_level,
    add_environment,
    processors.format_exc_info,
    processors.UnicodeDecoder(),
    processors.TimeStamper(fmt="ISO", utc=True, key="@timestamp"),
    SentryJsonProcessor(level=logging.ERROR, tag_keys=["environment"]),
    stdlib.ProcessorFormatter.wrap_for_formatter,
]

if ENVIRONMENT in ("dev", "test"):
    processors_list = dev_processors
else:
    processors_list = prod_processors
    sentry_sdk.init(
        dsn=str(SENTRY_DSN),
        environment=ENVIRONMENT,
    )

configure(
    processors=processors_list,
    logger_factory=stdlib.LoggerFactory(),
    wrapper_class=stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)
Example #45
def configure_logging(
    logger_level_config: Dict[str, str] = None,
    colorize: bool = True,
    log_json: bool = False,
    log_file: str = None,
    disable_debug_logfile: bool = False,
    debug_log_file_name: str = None,
    cache_logger_on_first_use: bool = True,
    _first_party_packages: FrozenSet[str] = _FIRST_PARTY_PACKAGES,
    _debug_log_file_additional_level_filters: Dict[str, str] = None,
) -> None:
    structlog.reset_defaults()

    logger_level_config = logger_level_config or dict()
    logger_level_config.setdefault("filelock", "ERROR")
    logger_level_config.setdefault("", DEFAULT_LOG_LEVEL)

    processors = [
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        add_greenlet_name,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S.%f"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
    ]

    if log_json:
        formatter = "json"
    elif colorize and not log_file:
        formatter = "colorized"
    else:
        formatter = "plain"

    redact = redactor(LOG_BLACKLIST)

    handlers: Dict[str, Any] = dict()
    if log_file:
        handlers["file"] = {
            "class": "logging.handlers.WatchedFileHandler",
            "filename": log_file,
            "level": "DEBUG",
            "formatter": formatter,
            "filters": ["user_filter"],
        }
    else:
        handlers["default"] = {
            "class": "logging.StreamHandler",
            "level": "DEBUG",
            "formatter": formatter,
            "filters": ["user_filter"],
        }

    if not disable_debug_logfile:
        if debug_log_file_name is None:
            time = datetime.datetime.utcnow().isoformat()
            debug_log_file_name = f"raiden-debug_{time}.log"
        handlers["debug-info"] = {
            "class": "logging.handlers.RotatingFileHandler",
            "filename": debug_log_file_name,
            "level": "DEBUG",
            "formatter": "debug",
            "maxBytes": MAX_LOG_FILE_SIZE,
            "backupCount": LOG_BACKUP_COUNT,
            "filters": ["raiden_debug_file_filter"],
        }

    logging.config.dictConfig({
        "version": 1,
        "disable_existing_loggers": False,
        "filters": {
            "user_filter": {
                "()": RaidenFilter,
                "log_level_config": logger_level_config
            },
            "raiden_debug_file_filter": {
                "()": RaidenFilter,
                "log_level_config": {
                    "": DEFAULT_LOG_LEVEL,
                    "raiden": "DEBUG",
                    **(_debug_log_file_additional_level_filters or {}),
                },
            },
        },
        "formatters": {
            "plain": {
                "()":
                structlog.stdlib.ProcessorFormatter,
                "processor":
                _chain(structlog.dev.ConsoleRenderer(colors=False), redact),
                "foreign_pre_chain":
                processors,
            },
            "json": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": _chain(structlog.processors.JSONRenderer(),
                                    redact),
                "foreign_pre_chain": processors,
            },
            "colorized": {
                "()":
                structlog.stdlib.ProcessorFormatter,
                "processor":
                _chain(structlog.dev.ConsoleRenderer(colors=True), redact),
                "foreign_pre_chain":
                processors,
            },
            "debug": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": _chain(structlog.processors.JSONRenderer(),
                                    redact),
                "foreign_pre_chain": processors,
            },
        },
        "handlers": handlers,
        "loggers": {
            "": {
                "handlers": handlers.keys(),
                "propagate": True
            }
        },
    })
    structlog.configure(
        processors=processors +
        [structlog.stdlib.ProcessorFormatter.wrap_for_formatter],
        wrapper_class=structlog.stdlib.BoundLogger,
        logger_factory=structlog.stdlib.LoggerFactory(),
        cache_logger_on_first_use=cache_logger_on_first_use,
    )

    # set the logging level of the root logger to DEBUG, to be able to
    # intercept all messages, which are then filtered by the `RaidenFilter`
    structlog.get_logger("").setLevel(
        logger_level_config.get("", DEFAULT_LOG_LEVEL))
    for package in _first_party_packages:
        structlog.get_logger(package).setLevel("DEBUG")

    # roll over the RotatingFileHandler on startup, so logs are also split per session
    root = logging.getLogger()
    for handler in root.handlers:
        if isinstance(handler, logging.handlers.RotatingFileHandler):
            handler.flush()
            if os.stat(handler.baseFilename).st_size > 0:
                handler.doRollover()

    # fix logging of py-evm (it uses a custom Trace logger from the logging
    # library); if py-evm is not used this raises ImportError, hence the
    # try/except block. For some reason it didn't work to put this into
    # conftest.py.
    try:
        from eth.tools.logging import setup_trace_logging

        setup_trace_logging()
    except ImportError:
        pass
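
# Hypothetical invocation (argument values are examples, not from the source):
#
#   configure_logging(
#       logger_level_config={"": "INFO", "raiden": "DEBUG"},
#       log_json=True,
#       log_file="raiden.log",
#   )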
Example #46
    global IS_CONFIGURED, PROFILES, DOCKER_URL

    IS_CONFIGURED = True
    if isinstance(profiles, dict):
        profiles_map = {
            name: Profile(name, **profile_kwargs)
            for name, profile_kwargs in profiles.items()
        }
    else:
        profiles_map = {profile.name: profile for profile in profiles or []}
    PROFILES.update(profiles_map)
    DOCKER_URL = docker_url


if not structlog.is_configured():  # public API instead of poking the private _CONFIG
    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_logger_name,
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.TimeStamper(fmt='iso'),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            structlog.processors.KeyValueRenderer(key_order=['event']),
        ],
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )


def default_logging(
    verbosity: int,
    log_format: LogFormat = LogFormat.TEXT,
    external_logs: Optional[Iterable[str]] = None,
    loggers_to_configure: Optional[Iterable[str]] = None,
) -> None:
    """
    Configure structlog based on the given parameters.

    Logging will be done to stdout.

    :param verbosity: Amount of verbosity to use.
    :param log_format: Format the logs should be written in.
    :param external_logs: External modules that should have logging turned down
        unless verbosity is set to the highest level.
    :param loggers_to_configure: Names of loggers to configure with the same configuration.
    """
    level = Verbosity(verbosity).level()

    if log_format == LogFormat.TEXT:
        logging.basicConfig(level=level,
                            stream=sys.stdout,
                            format=TEXT_LOG_FORMAT)
        structlog.configure(
            logger_factory=structlog.stdlib.LoggerFactory(),
            wrapper_class=structlog.stdlib.BoundLogger,
            cache_logger_on_first_use=True,
            processors=[
                structlog.stdlib.filter_by_level,
                structlog.stdlib.PositionalArgumentsFormatter(),
                structlog.processors.StackInfoRenderer(),
                structlog.processors.format_exc_info,
                structlog.processors.UnicodeDecoder(),
                structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
            ],
        )
    elif log_format == LogFormat.JSON:
        # Set up JSON logging.
        logger_config = {"handlers": ["json"], "level": level}
        loggers = build_loggers_dictionary(loggers_to_configure, logger_config)
        logging.config.dictConfig({
            "version": 1,
            "formatters": {
                "json": {
                    "format": "%(message)s $(lineno)d $(filename)s",
                    "class": "pythonjsonlogger.jsonlogger.JsonFormatter",
                }
            },
            "handlers": {
                "json": {
                    "class": "logging.StreamHandler",
                    "formatter": "json"
                }
            },
            "loggers": loggers,
        })

        structlog.configure(
            context_class=structlog.threadlocal.wrap_dict(dict),
            logger_factory=structlog.stdlib.LoggerFactory(),
            wrapper_class=structlog.stdlib.BoundLogger,
            cache_logger_on_first_use=True,
            processors=[
                structlog.stdlib.filter_by_level,
                structlog.stdlib.add_logger_name,
                structlog.stdlib.add_log_level,
                structlog.stdlib.PositionalArgumentsFormatter(),
                structlog.processors.StackInfoRenderer(),
                structlog.processors.format_exc_info,
                structlog.processors.UnicodeDecoder(),
                structlog.stdlib.render_to_log_kwargs,
            ],
        )

    # Unless the user specifies higher verbosity than we have levels, turn down the log level
    # for external libraries.
    if external_logs and verbosity < Verbosity.MAX:
        # Turn down logging for modules outside this project.
        for logger in external_logs:
            logging.getLogger(logger).setLevel(logging.WARNING)
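
# Hypothetical call site (argument values are examples following the
# signature above):
#
#   default_logging(
#       verbosity=1,
#       log_format=LogFormat.JSON,
#       external_logs=["urllib3", "botocore"],
#   )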
Example #48
    def init(self, connection=None, config=None, setup_structlog=False):
        """Provide Redis connection and config when lazy initialization is used."""

        if self.config is not None:
            raise RuntimeError('TaskTiger was already initialized')

        self.config = {
            # String that is used to prefix all Redis keys
            'REDIS_PREFIX': 't',

            # Name of the Python (structlog) logger
            'LOGGER_NAME': 'tasktiger',

            # Where to queue tasks that don't have an explicit queue
            'DEFAULT_QUEUE': 'default',

            # After how many seconds to time out listening on the activity
            # channel and check for scheduled or expired items. The batch
            # timeout delays by the specified number of seconds after the
            # first message to wait for additional messages, which is useful
            # for very active systems.
            # Appropriate values: 0 <= SELECT_BATCH_TIMEOUT <= SELECT_TIMEOUT
            'SELECT_TIMEOUT': 1,
            'SELECT_BATCH_TIMEOUT': 0,

            # If this is True, all tasks except future tasks (when=a future
            # time) will be executed locally by blocking until the task
            # returns. This is useful for testing purposes.
            'ALWAYS_EAGER': False,

            # If retry is True but no retry_method is specified for a given
            # task, use the following default method.
            'DEFAULT_RETRY_METHOD': fixed(60, 3),

            # After how many seconds a task that can't acquire a lock is
            # retried.
            'LOCK_RETRY': 1,

            # How many items to move at most from the scheduled queue to the
            # active queue.
            'SCHEDULED_TASK_BATCH_SIZE': 1000,

            # After how many seconds a long-running task is killed. This can be
            # overridden by the task or at queue time.
            'DEFAULT_HARD_TIMEOUT': 300,

            # The timer specifies how often the worker updates the task's
            # timestamp in the active queue (in seconds). Tasks exceeding the
            # timeout value are requeued periodically. This may happen when a
            # worker crashes or is killed.
            'ACTIVE_TASK_UPDATE_TIMER': 10,
            'ACTIVE_TASK_UPDATE_TIMEOUT': 60,

            # How often we requeue expired tasks (in seconds), and how many
            # expired tasks we requeue at a time. The interval also determines
            # the lock timeout, i.e. it should be large enough to have enough
            # time to requeue a batch of tasks.
            'REQUEUE_EXPIRED_TASKS_INTERVAL': 30,
            'REQUEUE_EXPIRED_TASKS_BATCH_SIZE': 10,

            # Set up queues that will be processed in batch, i.e. multiple jobs
            # are taken out of the queue at the same time and passed as a list
            # to the worker method. Takes a dict where the key represents the
            # queue name and the value represents the batch size. Note that the
            # task needs to be declared as batch=True. Also note that any
            # subqueues will be automatically treated as batch queues, and the
            # batch value of the most specific subqueue name takes precedence.
            'BATCH_QUEUES': {},

            # How often to print stats.
            'STATS_INTERVAL': 60,

            # Single worker queues can reduce redis activity in some use cases
            # by locking at the queue level instead of just at the task or task
            # group level. These queues will only allow a single worker to
            # access the queue at a time.  This can be useful in environments
            # with large queues and many worker processes that need aggressive
            # locking techniques.
            'SINGLE_WORKER_QUEUES': [],

            # The following settings are only considered if no explicit queues
            # are passed in the command line (or to the queues argument in the
            # run_worker() method).

            # If non-empty, a worker only processes the given queues.
            'ONLY_QUEUES': [],

            # If non-empty, a worker excludes the given queues from processing.
            'EXCLUDE_QUEUES': [],

            # List of context manager instances that will be called in each
            # forked child process. Useful to do things like close file handles
            # or reinitialize crypto libraries.
            'CHILD_CONTEXT_MANAGERS': [],

            # Store traceback in execution history for failed tasks. This can
            # increase Redis storage requirements and therefore can be disabled
            # if that is a concern.
            'STORE_TRACEBACKS': True,
        }
        if config:
            self.config.update(config)

        if setup_structlog:
            structlog.configure(
                processors=[
                    structlog.stdlib.add_log_level,
                    structlog.stdlib.filter_by_level,
                    structlog.processors.TimeStamper(fmt='iso', utc=True),
                    structlog.processors.StackInfoRenderer(),
                    structlog.processors.format_exc_info,
                    structlog.processors.JSONRenderer()
                ],
                context_class=dict,
                logger_factory=structlog.stdlib.LoggerFactory(),
                wrapper_class=structlog.stdlib.BoundLogger,
                cache_logger_on_first_use=True,
            )

        self.log = structlog.get_logger(
            self.config['LOGGER_NAME'],
        ).bind()

        if setup_structlog:
            self.log.setLevel(logging.DEBUG)
            logging.basicConfig(format='%(message)s')

        self.connection = connection or redis.Redis(decode_responses=True)
        self.scripts = RedisScripts(self.connection)
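
        # Sketch of the lazy-initialization flow this method supports
        # (connection and config values are illustrative):
        #
        #   tiger = TaskTiger(lazy_init=True)
        #   tiger.init(
        #       connection=redis.Redis(decode_responses=True),
        #       config={'DEFAULT_QUEUE': 'emails'},
        #       setup_structlog=True,
        #   )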
Example #49
        time.sleep(5)

    # Save cell infosets
    for slot in config.slots:
        backend.put(inserted_cells[slot])


if __name__ == '__main__':
    # Restrict log messages to the selected level and above
    structlog.configure(
        processors=[
            structlog.threadlocal.merge_threadlocal,
            structlog.contextvars.merge_contextvars,
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M.%S"),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            structlog.dev.ConsoleRenderer()
        ],
        wrapper_class=structlog.make_filtering_bound_logger(logging.INFO),
        logger_factory=structlog.PrintLoggerFactory(file=sys.stderr)
    )
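
    # Because merge_contextvars is in the processor chain, context bound here
    # is attached to every subsequent event (the key name is illustrative):
    structlog.contextvars.bind_contextvars(charger_id="MC-01")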

    load_plugins()

    parser = argparse.ArgumentParser(description='Megacell charger')
    parser.add_argument('--loglevel', choices=LOG_LEVEL_NAMES, default='INFO', help='Change log level')
    _config_group(parser)

    # These will be needed only for workflows
    add_plugin_args(parser)
Example #50
        if env is not None:
            event_kw["env"] = env

        return super(BoundLogger, self)._proxy_to_logger(
            method_name, event, *event_args, **event_kw)


structlog.configure(
    processors=[
        structlog.stdlib.filter_by_level,
        structlog.processors.TimeStamper(fmt="iso", utc=True),
        structlog.processors.StackInfoRenderer(),
        _record_level,
        _safe_exc_info_renderer,
        _safe_encoding_renderer,
        _record_module,
        structlog.processors.JSONRenderer(),
    ],
    context_class=wrap_dict(dict),
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=BoundLogger,
    cache_logger_on_first_use=True,
)
get_logger = structlog.get_logger

# Convenience map to let users set level with a string
LOG_LEVELS = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
Example #51
        "default": {
            "level": "DEBUG",
            "class": "logging.StreamHandler",
            "formatter": "colored",
        },
    },
    "loggers": {
        "": {
            "handlers": ["default"],
            "level": "DEBUG",
            "propagate": True,
        },
    }
})

structlog.configure(
    processors=[
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        timestamper,
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
    ],
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)
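
# With wrap_for_formatter as the final processor, events are routed to the
# stdlib handler configured above and rendered by its "colored" formatter
# (illustrative call):
structlog.get_logger(__name__).info("logging_configured")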
Example #52
       A returncode and a string containing the output of visualmetrics.py
    """
    cmd = [
        "/usr/bin/python",
        str(visualmetrics_path), "--video",
        str(job.video_path)
    ]
    cmd.extend(options)
    return run_command(log, cmd)


if __name__ == "__main__":
    structlog.configure(
        processors=[
            structlog.processors.TimeStamper(fmt="iso"),
            structlog.processors.format_exc_info,
            structlog.dev.ConsoleRenderer(colors=False),
        ],
        cache_logger_on_first_use=True,
    )

    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)

    parser.add_argument(
        "visual_metrics_options",
        type=str,
        metavar="VISUAL-METRICS-OPTIONS",
        help="Options to pass to visualmetrics.py",
        nargs="*",
    )
Example #53
    reports = [
        v1.reports[codeword].handler_class(config=config, backend=backend)
        for codeword in config.reports
    ]

    for infoset in selected_cells(config=config, backend=backend):
        for report in reports:
            report.process_cell(infoset=infoset)

    for report in reports:
        report.report()


if __name__ == "__main__":
    structlog.configure(
        wrapper_class=structlog.make_filtering_bound_logger(logging.INFO),
        logger_factory=structlog.PrintLoggerFactory(file=sys.stderr))
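
    # With make_filtering_bound_logger(logging.INFO), calls below INFO are
    # cheap no-ops (illustrative):
    structlog.get_logger().debug("this is skipped entirely")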

    load_plugins()

    parser = argparse.ArgumentParser(description='Report on cells')
    parser.add_argument('--loglevel',
                        choices=LOG_LEVEL_NAMES,
                        default='INFO',
                        help='Change log level')
    add_plugin_args(parser)
    add_backend_selection_args(parser)
    add_cell_selection_args(parser)

    # Then add arguments dependent on the loaded plugins
    parser.add_argument('-R',
Example #54
from django import forms
from django.forms import ModelForm
from django.core.exceptions import ValidationError
from django.contrib import messages
from django.db.models import Exists
from django.db.models import Q

from .models import Portfolio, Project, User
import logging
logging.basicConfig()
import structlog
from structlog import get_logger
from structlog.stdlib import LoggerFactory

structlog.configure(
    logger_factory=LoggerFactory(),
    processors=[structlog.processors.JSONRenderer()],
)
logger = get_logger()
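
# Illustrative: with JSONRenderer configured above, each logger call emits one
# JSON object per event, e.g.
#   logger.info("project_saved", project_id=1)
#   -> {"event": "project_saved", "project_id": 1}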


class EditProjectForm(ModelForm):
    # Extra form fields must be declared on the form class itself; fields
    # defined inside Meta are silently ignored by Django.
    project_title = forms.CharField(label='Project Title', max_length=200)
    project_version = forms.CharField(label='Project Version', max_length=200)

    class Meta:
        model = Project
        fields = ['root_task']


class AddProjectForm(ModelForm):
    class Meta:
        model = Project
Example #55
            "elasticsearch": {"level": "WARNING"},
            "falcon_cors": {"level": "WARNING", "formatter": "json"},
            "urllib3": {"level": "CRITICAL"},
            "timy": {"level": "ERROR"},
        },
    }
)

configure(
    context_class=threadlocal.wrap_dict(dict),
    logger_factory=stdlib.LoggerFactory(),
    wrapper_class=stdlib.BoundLogger,
    processors=[
        stdlib.filter_by_level,
        stdlib.add_logger_name,
        stdlib.add_log_level,
        stdlib.PositionalArgumentsFormatter(),
        processors.TimeStamper(fmt="iso"),
        processors.StackInfoRenderer(),
        processors.format_exc_info,
        processors.UnicodeDecoder(),
        stdlib.render_to_log_kwargs,
    ],
)
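
# render_to_log_kwargs (above) forwards the event dict to the stdlib logger:
# the "event" value becomes the log message and the remaining keys are passed
# via ``extra``, so stdlib formatters see them as record attributes.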


# import logging
# import logging.config
# import structlog


# def setup_logging():
Example #56
def stdlib_log_config(level="DEBUG", log_format=DEFAULT_FORMAT, log_type="text"):
    if level not in LOG_LEVELS:
        raise ValueError("Invalid log level: {}".format(level))

    if log_type not in LOG_TYPES:
        raise ValueError("Invalid log type: {}".format(log_type))

    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            # structlog.processors.UnicodeEncoder(),
            structlog.stdlib.render_to_log_kwargs,
        ],
        context_class=dict,
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )

    if log_type == "text":
        formatter = {
            "format": log_format,
        }
    else:
        formatter = {
            "()": "pythonjsonlogger.jsonlogger.JsonFormatter",
            "format": log_format,
        }

    conf = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {"default": formatter},
        "handlers": {
            "console": {
                "level": level,
                "class": "logging.StreamHandler",
                "formatter": "default",
            }
        },
        "loggers": {
            "PIL": {"handlers": ["console"], "level": "WARN", "propagate": False},
            "chardet": {"handlers": ["console"], "level": "INFO", "propagate": False},
            "django": {"handlers": ["console"], "level": "INFO", "propagate": False},
            "matplotlib": {"handlers": ["console"], "level": "INFO", "propagate": False},
            "newrelic": {"handlers": ["console"], "level": "WARN", "propagate": False},
            "requests": {"handlers": ["console"], "level": "WARN", "propagate": False},
            "selenium": {"handlers": ["console"], "level": "WARN", "propagate": False},
            "urllib3": {"handlers": ["console"], "level": "WARN", "propagate": False},
        },
        "root": {"handlers": ["console"], "level": level},
    }

    return conf
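
# Typical wiring (a sketch, not part of the original module):
#
#   import logging.config
#   logging.config.dictConfig(stdlib_log_config(level="INFO", log_type="json"))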
Example #57
            'level': 'DEBUG',
        },
        'transactions': {
            'handlers': ['console', 'sentry'],
            'level': 'DEBUG',
        },
        'accounts': {
            'handlers': ['console', 'sentry'],
            'level': 'DEBUG',
        },
    }
}

structlog.configure(logger_factory=structlog.stdlib.LoggerFactory(),
                    processors=[
                        structlog.processors.UnicodeEncoder(),
                        KeyValueRenderer(),
                    ])

REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS':
        'rest_framework.pagination.LimitOffsetPagination',
    'PAGE_SIZE': 20,
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
    ),
}

SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
Example #58
def datlog(
    *,
    level=logging.DEBUG,
    capture_warnings=True,
    redirect_print=False,
    tty=None,
    user_config=None,
    json_renderer=None,
    hook_thread=True,
    hook_process=True,
    use_processors=DEFAULT_PROCESSORS,
    use_foreign_processors=DEFAULT_FOREIGN_PROCESSORS
):
    """Setup struct logging.

    :param tty: if `False` the log will appear in json format
    :param level: the root logger level
    :param redirect_print: hijacks stdout/err
    :param capture_warnings: capture warnings
    :param user_config: merge user config with default log config
    :param json_renderer: a custom json renderer
    """
    if isinstance(level, str):
        level = logging.getLevelName(level.upper())

    if json_renderer is None:
        json_renderer = JSONRenderer(
            serializer=lambda obj, **kwargs: json.dumps(
                stringify_dict_keys(obj), **kwargs
            )
        )
    if tty is None:
        tty = sys.stdout.isatty()
    console_renderer = structlog.dev.ConsoleRenderer()
    renderer = console_renderer if tty else json_renderer
    pre_chain = [PROCESSORS[x] for x in use_foreign_processors]

    config = {
        "version": 1,
        "disable_existing_loggers": False,
        "formatters": {
            "structured": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": renderer,
                "foreign_pre_chain": pre_chain,
            },
            "structured_console": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": console_renderer,
                "foreign_pre_chain": pre_chain,
            },
            "structured_json": {
                "()": structlog.stdlib.ProcessorFormatter,
                "processor": json_renderer,
                "foreign_pre_chain": pre_chain,
            },
        },
        "handlers": {
            "default": {"class": "logging.StreamHandler", "formatter": "structured"}
        },
        "loggers": {"": {"handlers": ["default"], "level": level, "propagate": True}},
    }

    if user_config:
        merge_dict(config, user_config)

    logging.config.dictConfig(config)

    logging.captureWarnings(capture_warnings)

    processors = [PROCESSORS[x] for x in use_processors]
    processors.append(structlog.stdlib.ProcessorFormatter.wrap_for_formatter)

    structlog.configure(
        processors=processors,
        context_class=structlog.threadlocal.wrap_dict(dict),
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )

    if redirect_print:
        # redirect stdio print
        print_log = structlog.get_logger("print")
        sys.stderr = StdioToLog(print_log)
        sys.stdout = StdioToLog(print_log)

    # log uncaught exceptions
    sys.excepthook = uncaught_exception
    if hook_thread:
        install_thread_excepthook()
    if hook_process:
        install_process_excepthook()
    logger = structlog.get_logger()
    return logger
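
# Hypothetical call (argument names follow the signature above):
#
#   log = datlog(level="INFO", tty=False, redirect_print=True)
#   log.info("service_started", version="1.2.3")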
Example #59
from server.api.api_v1.api import api_router
from server.api.error_handling import ProblemDetailException
from server.db import db
from server.db.database import DBSessionMiddleware
from server.exception_handlers.generic_exception_handlers import form_error_handler, problem_detail_handler
from server.forms import FormException
from server.settings import app_settings
from server.version import GIT_COMMIT_HASH

structlog.configure(
    processors=[
        structlog.processors.add_log_level,
        structlog.processors.StackInfoRenderer(),
        structlog.dev.set_exc_info,
        structlog.processors.format_exc_info,
        structlog.processors.TimeStamper(),
        structlog.dev.ConsoleRenderer(),
    ],
    wrapper_class=structlog.make_filtering_bound_logger(logging.NOTSET),
    context_class=dict,
    logger_factory=structlog.PrintLoggerFactory(),
    cache_logger_on_first_use=False,
)

logger = structlog.get_logger(__name__)
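
# Illustrative: events logged via this logger are rendered by the
# ConsoleRenderer configured above, with timestamp, level, and exc-info
# attached.
logger.info("starting_api", git_commit=GIT_COMMIT_HASH)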

app = FastAPI(
    title="Boilerplate",
    description="The boilerplate is a project that can be copied and adapted.",
    openapi_url="/api/openapi.json",
    docs_url="/api/docs",
    redoc_url="/api/redoc",
Example #60
                       '"message":"",'
                       '"message_json": %(message)s}')
        }
    },
    'handlers': {
        'json': {
            'class': 'logging.StreamHandler',
            'formatter': 'json'
        }
    },
    'loggers': {
        'app': {
            'handlers': ['json'],
            'propagate': False
        }
    }

})

configure(
    context_class=threadlocal.wrap_dict(dict),
    logger_factory=stdlib.LoggerFactory(),
    wrapper_class=stdlib.BoundLogger,
    processors=[
        stdlib.filter_by_level,
        stdlib.PositionalArgumentsFormatter(),
        processors.StackInfoRenderer(),
        processors.format_exc_info,
        processors.JSONRenderer(),
    ],
)