def get_logger(name, log_level):
    """Get JSON logger.

    Args:
        name: The logger name passed to ``logging.getLogger``.
        log_level: The log level; ``"DEBUG"`` additionally pretty-prints the
            JSON output with an indent of 1.

    Returns:
        A structlog-wrapped logger whose events pass through level filtering,
        logger-name and timestamp annotation, then JSON rendering.
    """
    logging.basicConfig(level=log_level, stream=sys.stdout, format="%(message)s")
    # The two previous processor lists differed only in the JSON indent, so
    # compute the indent once instead of duplicating the whole pipeline.
    # (indent=None is json.dumps' default, i.e. compact output.)
    indent = 1 if log_level == "DEBUG" else None  # pragma: no cover
    processors = [
        filter_by_level,
        add_logger_name,
        _add_timestamp,
        JSONRenderer(indent=indent, sort_keys=True),
    ]
    return wrap_logger(
        logging.getLogger(name),
        processors=processors,
    )
def logger_initial_config(service_name=None, log_level=None, logger_format=None, logger_date_format=None):
    """Configure stdlib logging and structlog to emit JSON log lines.

    Args:
        service_name: Name stamped on every event; defaults to $NAME or 'clear_down'.
        log_level: Root log level; defaults to $SMS_LOG_LEVEL or 'INFO'.
        logger_format: stdlib logging format string; defaults to the bare message.
        logger_date_format: strftime format for the 'created_at' timestamp;
            defaults to $LOGGING_DATE_FORMAT or an ISO-8601-style format.
    """
    if not logger_date_format:
        # Bug fix: the old fallback "%Y-%m-%dT%H:%M%s" used the non-portable
        # "%s" directive (epoch seconds) instead of ":%S" for the seconds field.
        logger_date_format = os.getenv('LOGGING_DATE_FORMAT', "%Y-%m-%dT%H:%M:%S")
    if not log_level:
        log_level = os.getenv('SMS_LOG_LEVEL', 'INFO')
    if not logger_format:
        logger_format = "%(message)s"
    if not service_name:
        service_name = os.getenv('NAME', 'clear_down')
    # Unset env var -> int(None) raises TypeError; a non-numeric value raises
    # ValueError. Either way fall back to compact (no-indent) JSON.
    try:
        indent = int(os.getenv('JSON_INDENT_LOGGING'))
    except (TypeError, ValueError):
        indent = None

    def add_service(logger, method_name, event_dict):
        """ Add the service name to the event dict. """
        event_dict['service'] = service_name
        return event_dict

    logging.basicConfig(level=log_level, format=logger_format)
    configure(processors=[
        add_log_level,
        filter_by_level,
        add_service,
        TimeStamper(fmt=logger_date_format, utc=True, key="created_at"),
        JSONRenderer(indent=indent)
    ])
def configure_logging():
    """Replace Flask's default logging with structured JSON logging."""
    stream_handler = logging.StreamHandler()
    logging.basicConfig(level=logging.INFO,
                        format="%(message)s",
                        handlers=[stream_handler])

    def parse_exception(_, __, event_dict):
        # Split a rendered traceback into lines and swap double quotes for
        # single quotes so it embeds cleanly inside the JSON document.
        trace = event_dict.get('exception')
        if trace:
            event_dict['exception'] = trace.replace('"', "'").split("\n")
        return event_dict

    configure(
        context_class=wrap_dict(dict),
        logger_factory=LoggerFactory(),
        processors=[
            add_log_level,
            TimeStamper(key='created', fmt='iso'),
            format_exc_info,
            parse_exception,
            JSONRenderer(),
        ],
        cache_logger_on_first_use=True,
    )
def logger_initial_config(service_name=None,
                          log_level=None,
                          logger_format=None,
                          logger_date_format=None):
    """Configure stdlib logging and structlog to emit JSON log lines on stdout.

    Args:
        service_name: Name stamped on every event via the add_service processor.
        log_level: Root log level; defaults to $LOGGING_LEVEL.
        logger_format: stdlib logging format string; defaults to the bare message.
        logger_date_format: strftime format for the 'created_at' timestamp;
            defaults to $LOGGING_DATE_FORMAT or an ISO-8601-style format.
    """
    if not logger_date_format:
        # Bug fix: the old fallback "%Y-%m-%dT%H:%M%s" used the non-portable
        # "%s" directive (epoch seconds) instead of ":%S" for the seconds field.
        logger_date_format = os.getenv('LOGGING_DATE_FORMAT', "%Y-%m-%dT%H:%M:%S")
    if not log_level:
        log_level = os.getenv('LOGGING_LEVEL')
    if not logger_format:
        logger_format = "%(message)s"
    # Unset env var -> int(None) raises TypeError; a non-numeric value raises
    # ValueError. Either way fall back to compact (no-indent) JSON.
    try:
        indent = int(os.getenv('JSON_INDENT_LOGGING'))
    except (TypeError, ValueError):
        indent = None

    def add_service(logger, method_name, event_dict):  # pylint: disable=unused-argument
        """ Add the service name to the event dict. """
        event_dict['service'] = service_name
        return event_dict

    logging.basicConfig(stream=sys.stdout, level=log_level, format=logger_format)
    configure(processors=[add_log_level,
                          filter_by_level,
                          add_service,
                          format_exc_info,
                          TimeStamper(fmt=logger_date_format, utc=True, key="created_at"),
                          JSONRenderer(indent=indent)])
def get_logger(name=None, datadog=False):
    """Configure structlog for JSON output and return a logger.

    When ``datadog`` is true, the trace-injection processor is placed at the
    front of the chain so every event carries APM correlation ids.
    """
    logging.basicConfig(
        format="%(message)s",
        stream=sys.stdout,
        level=LOG_LEVEL,
    )

    chain = [
        filter_by_level,
        rename_message_key,
        add_log_level_number,
        increase_level_numbers,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        structlog.processors.UnicodeDecoder(),
        JSONRenderer(),
    ]
    if datadog:
        chain = [tracer_injection] + chain

    structlog.configure(
        processors=chain,
        context_class=dict,
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )
    return structlog.get_logger(name)
def test_formatter_unsets_exc_info(self, configure_for_pf, capsys, keep):
    """
    Stack traces doesn't get printed outside of the json document when
    keep_exc_info are set to False but preserved if set to True.
    """
    configure_logging(None)
    logger = logging.getLogger()

    def format_exc_info_fake(logger, name, event_dict):
        # Stand-in for structlog's format_exc_info: consume exc_info and
        # record a fixed marker string instead of a rendered traceback.
        event_dict = collections.OrderedDict(event_dict)
        del event_dict["exc_info"]
        event_dict["exception"] = "Exception!"
        return event_dict

    formatter = ProcessorFormatter(
        processor=JSONRenderer(),
        keep_stack_info=keep,
        keep_exc_info=keep,
        foreign_pre_chain=[format_exc_info_fake],
    )
    # Swap the formatter on the handler installed by configure_logging().
    logger.handlers[0].setFormatter(formatter)

    try:
        raise RuntimeError("oh noo")
    except Exception:
        logging.getLogger().exception("seen worse")

    out, err = capsys.readouterr()
    assert "" == out

    if keep is False:
        # exc_info was cleared, so only the JSON document reaches stderr.
        assert (
            '{"event": "seen worse", "exception": "Exception!"}\n') == err
    else:
        # exc_info kept: the stdlib appends the real traceback after the JSON.
        assert "Traceback (most recent call last):" in err
def init():
    """Set up stdout JSON logging, quieten noisy libraries, return a logger."""
    logging.basicConfig(stream=sys.stdout, format='%(message)s')
    level = LOG_LEVEL_DEBUG if config.DEBUG else LOG_LEVEL_PROD
    logging.getLogger().setLevel(level)

    configure(
        processors=[
            filter_by_level,
            add_log_level,
            add_app_context,
            split_pos_args,
            TimeStamper(fmt='iso', utc=True),
            StackInfoRenderer(),
            format_exc_info,
            JSONRenderer(sort_keys=True),
        ],
        context_class=wrap_dict(dict),
        logger_factory=LoggerFactory(),
        wrapper_class=BoundLogger,
        cache_logger_on_first_use=True,
    )

    # Quieten chatty third-party libraries.
    noisy = ('requests', 'statsd', 'amqpstorm', 'datadog.dogstatsd')
    for noisy_name in noisy:
        logging.getLogger(noisy_name).setLevel(logging.WARNING)

    return get()
def test_renders_json(self, event_dict):
    """
    Renders a predictable JSON string.
    """
    expected = (
        r'{"a": "<A(\\o/)>", "b": [3, 4], "x": 7, "y": "test", "z": '
        r'[1, 2]}'
    )
    rendered = JSONRenderer(sort_keys=True)(None, None, event_dict)
    assert expected == rendered
def test_other_handlers_get_original_record(
    self, configure_for_pf, capsys
):
    """
    Logging handlers that come after the handler with ProcessorFormatter
    should receive original, unmodified record.
    """
    configure_logging(None)

    json_handler = logging.StreamHandler()
    json_handler.setFormatter(ProcessorFormatter(JSONRenderer()))

    # A minimal duck-typed handler that just records every call.
    recording_handler = type("", (), {})()
    recording_handler.handle = call_recorder(lambda record: None)
    recording_handler.level = logging.INFO

    root = logging.getLogger()
    root.addHandler(json_handler)
    root.addHandler(recording_handler)
    root.info("meh")

    calls = recording_handler.handle.calls
    assert 1 == len(calls)
    assert "meh" == calls[0].args[0].msg
def test_formatter_unsets_stack_info(self, configure_for_pf, capsys, keep):
    """
    Stack traces doesn't get printed outside of the json document when
    keep_stack_info are set to False but preserved if set to True.
    """
    configure_logging(None)
    root = logging.getLogger()

    root.handlers[0].setFormatter(ProcessorFormatter(
        processor=JSONRenderer(),
        keep_stack_info=keep,
        keep_exc_info=keep,
        foreign_pre_chain=[],
    ))

    logging.getLogger().warning("have a stack trace", stack_info=True)

    out, err = capsys.readouterr()
    assert "" == out

    # One occurrence is embedded in the JSON document itself; a second one
    # appears only when the formatter keeps stack_info on the record.
    marker = "Stack (most recent call last):"
    expected = 2 if keep else 1
    assert expected == err.count(marker)
def logger_initial_config(service_name=None,
                          log_level=None,
                          logger_format=None,
                          logger_date_format=None):
    # pylint: skip-file
    """Configure stdlib logging and structlog for ras-secure-message JSON logs.

    Args:
        service_name: Name stamped on every event; defaults to $NAME or
            'ras-secure-message'.
        log_level: Root log level; defaults to $LOG_LEVEL or 'DEBUG'.
        logger_format: stdlib logging format string; defaults to the bare message.
        logger_date_format: strftime format for the 'created_at' timestamp;
            defaults to $LOGGING_DATE_FORMAT or an ISO-8601-style format.
    """
    if not logger_date_format:
        # Bug fix: the old fallback "%Y-%m-%dT%H:%M%s" used the non-portable
        # "%s" directive (epoch seconds) instead of ":%S" for the seconds field.
        logger_date_format = os.getenv('LOGGING_DATE_FORMAT', "%Y-%m-%dT%H:%M:%S")
    if not log_level:
        log_level = os.getenv('LOG_LEVEL', 'DEBUG')
    if not logger_format:
        logger_format = "%(message)s"
    if not service_name:
        service_name = os.getenv('NAME', 'ras-secure-message')
    # Unset env var -> int(None) raises TypeError; a non-numeric value raises
    # ValueError. Either way fall back to compact (no-indent) JSON.
    try:
        indent = int(os.getenv('JSON_INDENT_LOGGING'))
    except (TypeError, ValueError):
        indent = None

    def add_service(_1, _2, event_dict):
        """ Add the service name to the event dict. """
        event_dict['service'] = service_name
        return event_dict

    logging.basicConfig(stream=sys.stdout, level=log_level, format=logger_format)
    configure(processors=[add_log_level,
                          filter_by_level,
                          add_service,
                          TimeStamper(fmt=logger_date_format, utc=True, key="created_at"),
                          JSONRenderer(indent=indent)])
def __init__(self, output=None, service=None, namespace=None):
    """Create a structured logger bound with a service name (and namespace).

    Args:
        output: Stream for the PrintLogger path; only used when USE_STDLIB
            is falsy.
        service: Service name bound to every event; falls back to
            $SERVICE_NAME (empty string if unset).
        namespace: Optional extra context bound to every event when given.
    """
    if USE_STDLIB:
        # NOTE(review): structlog's getLogger()/get_logger() treats keyword
        # arguments as *initial context values*, not configuration — these
        # processors/context_class/wrapper_class kwargs likely end up inside
        # the event dict instead of configuring the logger. Confirm whether
        # structlog.configure() or wrap_logger() was intended here.
        log = structlog.getLogger(
            processors=[
                structlog.stdlib.filter_by_level,
                structlog.stdlib.add_logger_name,
                structlog.stdlib.add_log_level,
                structlog.stdlib.PositionalArgumentsFormatter(),
                structlog.processors.StackInfoRenderer(),
                structlog.processors.format_exc_info,
                structlog.processors.UnicodeDecoder(),
                structlog.stdlib.render_to_log_kwargs,
            ],
            context_class=dict,
            wrapper_class=structlog.stdlib.BoundLogger,
        )
    else:
        # Plain print-based JSON logger: order fields, then render.
        log = wrap_logger(PrintLogger(output), processors=[
            order_fields,
            JSONRenderer(),
        ])
    if service is None:
        self._service = os.getenv('SERVICE_NAME', '')
    else:
        self._service = service
    # Every event emitted through self._logger carries the service name.
    self._logger = log.bind(service=self._service)
    if namespace is not None:
        self._logger = self._logger.bind(namespace=namespace)
def __init__(self, output=None, service=None, hostname=None):
    """Create a metrics logger that writes JSON lines to *output*.

    Defaults: append to /var/log/metrics.log, with service and hostname
    taken from the SERVICE_NAME and HOSTNAME environment variables.
    """
    if not output:
        # Long-lived append handle; closed when the process exits.
        output = open('/var/log/metrics.log', "a")
    self.service = service if service else os.environ.get('SERVICE_NAME')
    self.hostname = hostname if hostname else os.environ.get('HOSTNAME')
    self._log = wrap_logger(
        PrintLogger(output),
        processors=[add_metadata, JSONRenderer()],
    )
def _configure_logger(level='INFO', indent=None):
    """Configure stdlib logging and structlog for JSON output on stdout.

    Args:
        level: Root log level name.
        indent: Fallback JSON indent; $LOGGING_JSON_INDENT overrides it.
    """
    logging.basicConfig(stream=sys.stdout, level=level, format='%(message)s')
    # int() raises TypeError when both the env var and the fallback are None,
    # and ValueError when the value is non-numeric; use compact JSON then.
    try:
        indent = int(os.getenv('LOGGING_JSON_INDENT') or indent)
    except (TypeError, ValueError):
        indent = None

    def add_service(_, __, event_dict):
        """ Add the service name to the event dict. """
        event_dict['service'] = os.getenv('NAME', 'sdc-responses-dashboard')
        return event_dict

    renderer_processor = JSONRenderer(indent=indent)
    processors = [
        add_log_level,
        filter_by_level,
        add_service,
        format_exc_info,
        add_logger_name,
        # Bug fix: ":%S" replaces the non-portable "%s" (epoch seconds) that
        # the old format "%Y-%m-%dT%H:%M%s" emitted for the seconds field.
        TimeStamper(fmt='%Y-%m-%dT%H:%M:%S', utc=True, key='created_at'),
        renderer_processor
    ]
    structlog.configure(context_class=wrap_dict(dict),
                        logger_factory=LoggerFactory(),
                        processors=processors,
                        cache_logger_on_first_use=True)
def test_custom_fallback(self):
    """
    A custom fallback handler can be used.
    """
    # The fallback reverses the repr of any non-serializable value.
    renderer = JSONRenderer(default=lambda x: repr(x)[::-1])
    event = {"date": datetime.date(1980, 3, 25)}
    assert '{"date": ")52 ,3 ,0891(etad.emitetad"}' == renderer(None, None, event)
def logger(name=__name__):
    ''' Configure and return a new logger for hivy modules '''
    processors = [
        add_unique_id,
        add_timestamp,
        JSONRenderer(),
    ]
    return wrap_logger(logbook.Logger(name), processors=processors)
def test_serializer(self):
    """
    A custom serializer is used if specified.
    """
    renderer = JSONRenderer(serializer=lambda obj, **kw: {"a": 42})
    assert {"a": 42} == renderer(None, None, object())
def test_rapidjson(self, event_dict):
    """
    Integration test with python-rapidjson.
    """
    renderer = JSONRenderer(serializer=rapidjson.dumps)
    expected = {
        'a': '<A(\\o/)>',
        'b': [3, 4],
        'x': 7,
        'y': 'test',
        'z': [1, 2],
    }
    assert expected == json.loads(renderer(None, None, event_dict))
def get_wrapped_logger():
    """Return this module's stdlib logger wrapped with JSON processors."""
    # NOTE(review): separators=(', ', ':') puts the space after the *item*
    # separator and none after the key separator — the reverse of the usual
    # (',', ': '). Preserved as-is; confirm it is intentional.
    json_renderer = JSONRenderer(separators=(', ', ':'), sort_keys=True)
    return wrap_logger(
        logging.getLogger(__name__),
        processors=[filter_by_level, add_timestamp, json_renderer],
    )
def __init__(self, level=logging.INFO, **kwargs):
    """Create an event-capturing logger, binding **kwargs as initial context."""
    base = structlog.get_logger()
    self._logger = wrap_logger(
        base,
        processors=[TimeStamper(fmt="iso"), JSONRenderer(sort_keys=True)],
    )
    self.level = level
    self.bind(**kwargs)
    # Captured events and persistence flag start empty/off.
    self.events = []
    self.persist = False
def _set_structured_logger(context, logger_name=None, service_tag=None):
    """
    use this function to get a structured logger and can use this to
    pass into the LogLambda decorator

    context - aws lambda context
    service - name of the service that this lambda belongs to
    logger_name - logger's name
    log_level - one of the levels in logging module
    """
    # Memoized singleton: reuse the configured logger across warm invocations.
    if LogLambdaBase._structured_logger:
        return LogLambdaBase._structured_logger

    stage_tag = get_stage(context)

    if logger_name:
        logger = logging.getLogger(str(logger_name))
    else:
        # No name given: configure the root logger.
        logger = logging.getLogger()

    # Python logger in AWS Lambda has a preset format.
    # To change the format of
    # the logging statement, remove the logging handler
    # and add a new handler with the required format
    # NOTE(review): removing handlers while iterating logger.handlers can
    # skip entries; consider iterating a copy (list(logger.handlers)).
    for handler in logger.handlers:
        logger.removeHandler(handler)

    LogLambdaBase._log_handler = LogLambdaBase._get_handler()
    LogLambdaBase._log_handler.setFormatter(logging.Formatter(FORMAT))
    logger.addHandler(LogLambdaBase._log_handler)
    logger.setLevel(LogLambdaBase._log_level)
    # Prevent double emission through ancestor loggers' handlers.
    logger.propagate = False

    # Processor chain: PII filtering first, then annotations, then compact
    # sorted JSON rendering last.
    wlogger = wrap_logger(
        logger,
        processors=[
            LogLambdaBase._filter_pii_info,
            add_logger_name,
            add_log_level,
            TimeStamper(fmt="iso"),
            StackInfoRenderer(),
            format_exc_info,
            JSONRenderer(separators=(',', ':'), sort_keys=True)])

    inferred_lambda_tag = context.function_name
    if stage_tag is not None:
        # Strip the leading "<stage>_" prefix (first occurrence only) from
        # the function name to recover the bare lambda name.
        inferred_lambda_tag = inferred_lambda_tag.replace('{0}_'.format(stage_tag), '', 1)

    LogLambdaBase._structured_logger = wlogger.bind(
        aws_lambda_name=context.function_name,
        aws_lambda_request_id=context.aws_request_id,
        internal_service_tag=service_tag,
        inferred_stage_tag=stage_tag,
        inferred_lambda_tag=inferred_lambda_tag
    )
    return LogLambdaBase._structured_logger
def logger_initial_config(service_name=None,  # noqa: C901  pylint: disable=too-complex
                          log_level=None,
                          logger_format=None,
                          logger_date_format=None):
    """Configure stdlib logging and structlog for JSON output with zipkin ids.

    Args:
        service_name: Name stamped on every event; defaults to $NAME or
            'ras-frontstage'.
        log_level: Root log level; defaults to $SMS_LOG_LEVEL or 'INFO'.
        logger_format: stdlib logging format string; defaults to the bare message.
        logger_date_format: strftime format for the 'created_at' timestamp;
            defaults to $LOGGING_DATE_FORMAT or an ISO-8601-style format.
    """
    if not logger_date_format:
        # Bug fix: the old fallback "%Y-%m-%dT%H:%M%s" used the non-portable
        # "%s" directive (epoch seconds) instead of ":%S" for the seconds field.
        logger_date_format = os.getenv('LOGGING_DATE_FORMAT', "%Y-%m-%dT%H:%M:%S")
    if not log_level:
        log_level = os.getenv('SMS_LOG_LEVEL', 'INFO')
    if not logger_format:
        logger_format = "%(message)s"
    if not service_name:
        service_name = os.getenv('NAME', 'ras-frontstage')
    # Unset env var -> int(None) raises TypeError; a non-numeric value raises
    # ValueError. Either way fall back to compact (no-indent) JSON.
    try:
        indent = int(os.getenv('JSON_INDENT_LOGGING'))
    except (TypeError, ValueError):
        indent = None

    def add_service(logger, method_name, event_dict):  # pylint: disable=unused-argument
        """ Add the service name to the event dict. """
        event_dict['service'] = service_name
        return event_dict

    logging.basicConfig(stream=sys.stdout, level=log_level, format=logger_format)

    # Silence requests_oauthlib: swallow its records and stop propagation.
    oauth_log = logging.getLogger("requests_oauthlib")
    oauth_log.addHandler(logging.NullHandler())
    oauth_log.propagate = False

    def zipkin_ids(logger, method_name, event_dict):
        """Attach zipkin trace/span/parent ids when an active span exists."""
        event_dict['trace'] = ''
        event_dict['span'] = ''
        event_dict['parent'] = ''
        if not flask.has_app_context():
            return event_dict
        if '_zipkin_span' not in g:
            return event_dict
        event_dict['span'] = g._zipkin_span.zipkin_attrs.span_id
        event_dict['trace'] = g._zipkin_span.zipkin_attrs.trace_id
        event_dict['parent'] = g._zipkin_span.zipkin_attrs.parent_span_id
        return event_dict

    def parse_exception(_, __, event_dict):
        """Split a rendered traceback into lines for clean JSON embedding."""
        exception = event_dict.get('exception')
        if exception:
            event_dict['exception'] = exception.replace("\"", "'").split("\n")
        return event_dict

    # setup file logging
    renderer_processor = JSONRenderer(indent=indent)
    processors = [zipkin_ids, add_log_level, filter_by_level, add_service,
                  format_exc_info,
                  TimeStamper(fmt=logger_date_format, utc=True, key='created_at'),
                  parse_exception, renderer_processor]
    configure(context_class=wrap_dict(dict),
              logger_factory=LoggerFactory(),
              processors=processors,
              cache_logger_on_first_use=True)
def logger(name=__name__, uuid=False, timestamp=False):
    ''' Configure and return a new logger for hivy modules.

    Args:
        name: Logger name.
        uuid: When True, add a unique id to every event.
        timestamp: When True, add a timestamp to every event.
    '''
    processors = []
    if uuid:
        processors.append(add_unique_id)
    # Bug fix: this previously re-tested `uuid`, so `timestamp=True` on its
    # own never enabled the timestamp processor.
    if timestamp:
        processors.append(add_timestamp)
    # Bug fix: the renderer must run last — it serializes the event dict to a
    # JSON string, so any processor appended after it would receive a str and
    # fail. (Matches the ordering used by the non-flag variant of logger().)
    processors.append(JSONRenderer())
    return wrap_logger(
        logbook.Logger(name),
        processors=processors
    )
def logger_initial_config(log_level="INFO",
                          logger_format="%(message)s",
                          logger_date_format="%Y-%m-%dT%H:%M:%S"):
    """Configure stdlib logging and structlog to emit JSON for ras-frontstage.

    Args:
        log_level: Root log level name.
        logger_format: stdlib logging format string.
        logger_date_format: strftime format for the 'created_at' timestamp.
            (Bug fix: the old default "%Y-%m-%dT%H:%M%s" used the non-portable
            "%s" epoch-seconds directive instead of ":%S".)
    """
    def add_service(logger, method_name, event_dict):
        """
        Add the service name to the event dict.
        This adds `service: 'ras-frontstage'` to all log lines.
        """
        event_dict["service"] = "ras-frontstage"
        return event_dict

    logging.basicConfig(stream=sys.stdout, level=log_level, format=logger_format)

    # Swallow this module's stdlib records and stop propagation to the root.
    auth_log = logging.getLogger(__name__)
    auth_log.addHandler(logging.NullHandler())
    auth_log.propagate = False

    def add_severity_level(logger, method_name, event_dict):
        """ Add the log level to the event dict. """
        if method_name == "warn":
            # The stdlib has an alias
            method_name = "warning"
        event_dict["severity"] = method_name
        return event_dict

    def parse_exception(_, __, event_dict):
        """Split a rendered traceback into lines for clean JSON embedding."""
        exception = event_dict.get("exception")
        if exception:
            event_dict["exception"] = exception.replace('"', "'").split("\n")
        return event_dict

    # setup file logging
    renderer_processor = JSONRenderer(indent=None)
    processors = [
        add_severity_level,
        add_log_level,
        filter_by_level,
        add_service,
        format_exc_info,
        TimeStamper(fmt=logger_date_format, utc=True, key="created_at"),
        parse_exception,
        renderer_processor,
    ]
    configure(
        context_class=wrap_dict(dict),
        logger_factory=LoggerFactory(),
        processors=processors,
        cache_logger_on_first_use=True,
    )
def configure():
    """Configure Balanced logging system """
    chain = [
        format_exc_info,
        LogProcessor(),
        JSONRenderer(),
    ]
    structlog.configure(
        processors=chain,
        logger_factory=LoggerFactory(),
    )
def _init_logging(self):
    """Set up file-based JSON logging for the telegram bot."""
    log_dir = self.config.get('ADMIN', 'log_file_location')
    log_file_path = os.path.join(log_dir, 'telegram-bot.log')
    logging.basicConfig(level=logging.INFO,
                        format='%(message)s',
                        filename=log_file_path)
    self.logger = wrap_logger(
        logging.getLogger(__name__),
        processors=[TimeStamper(), format_exc_info, JSONRenderer()],
        script="telegram_bot",
    )
def test_rapidjson(self, event_dict):
    """
    Integration test with python-rapidjson.
    """
    renderer = JSONRenderer(serializer=rapidjson.dumps)
    decoded = json.loads(renderer(None, None, event_dict))
    assert decoded == {
        "a": "<A(\\o/)>",
        "b": [3, 4],
        "x": 7,
        "y": "test",
        "z": [1, 2],
    }
def configure_logging(self):
    """Configure JSON logging for the Flask app; quiet/disable under testing."""
    if self.app.testing:
        # Under test: reset structlog and silence noisy third-party loggers.
        structlog.reset_defaults()
        disabled = [
            "docker.utils.config",
            "docker.auth",
            "docker.api.build",
            "docker.api.swarm",
            "docker.api.image",
            "rq.worker",
            "werkzeug",
            "requests",
            "urllib3",
        ]
        for logger in disabled:
            log = logging.getLogger(logger)
            log.setLevel(logging.ERROR)
            log.disabled = True
        self.app.logger.disabled = True

    logging.basicConfig(level=self.log_level, stream=sys.stdout, format="%(message)s")

    chain = [
        filter_by_level,
        add_log_level,
        add_logger_name,
        TimeStamper(fmt="iso"),
        StackInfoRenderer(),
        format_exc_info,
        JSONRenderer(indent=1, sort_keys=True),
    ]

    logger = logging.getLogger(__name__)

    # NOTE(review): the guard above reads `self.app.testing` but this one
    # reads `self.testing` — confirm both attributes exist and agree, or
    # whether one spelling is a typo.
    if self.testing:
        # Under test, discard events via a no-op logger with no processors.
        chain = []
        logger = structlog.ReturnLogger()

    log = structlog.wrap_logger(
        logger,
        processors=chain,
        context_class=dict,
        wrapper_class=structlog.stdlib.BoundLogger,
        # cache_logger_on_first_use=True,
    )
    self.logger = log
    self.app.logger = self.logger
def setup_logging(args):
    """Initialise the module-level `log` with an indented-JSON structlog wrapper."""
    global log
    logging.basicConfig(stream=sys.stdout, format="%(message)s")

    def add_timestamp(_, __, event_dict):
        # Naive UTC datetime; the JSON renderer handles its serialization.
        event_dict["timestamp"] = datetime.datetime.utcnow()
        return event_dict

    chain = [
        add_timestamp,
        JSONRenderer(indent=1, sort_keys=True),
    ]
    log = structlog.wrap_logger(logging.getLogger(__name__),
                                processors=chain)
def logger_initial_config(service_name=None,
                          log_level=None,
                          logger_format=None,
                          logger_date_format=None):
    """Configure stdlib logging and structlog to emit JSON log lines on stdout.

    Args:
        service_name: Name stamped on every event via the add_service processor.
        log_level: Root log level; defaults to $LOGGING_LEVEL.
        logger_format: stdlib logging format string; defaults to the bare message.
        logger_date_format: strftime format for the 'created_at' timestamp;
            defaults to $LOGGING_DATE_FORMAT or an ISO-8601-style format.
    """
    if not logger_date_format:
        # Bug fix: the old fallback "%Y-%m-%dT%H:%M%s" used the non-portable
        # "%s" directive (epoch seconds) instead of ":%S" for the seconds field.
        logger_date_format = os.getenv("LOGGING_DATE_FORMAT", "%Y-%m-%dT%H:%M:%S")
    if not log_level:
        log_level = os.getenv("LOGGING_LEVEL")
    if not logger_format:
        logger_format = "%(message)s"
    # Unset env var -> int(None) raises TypeError; a non-numeric value raises
    # ValueError. Either way fall back to compact (no-indent) JSON.
    try:
        indent = int(os.getenv("JSON_INDENT_LOGGING"))
    except (TypeError, ValueError):
        indent = None

    def add_service(logger, method_name, event_dict):
        """ Add the service name to the event dict. """
        event_dict["service"] = service_name
        return event_dict

    def add_severity_level(logger, method_name, event_dict):
        """ Add the log level to the event dict. """
        if method_name == "warn":
            # The stdlib has an alias
            method_name = "warning"
        event_dict["severity"] = method_name
        return event_dict

    logging.basicConfig(stream=sys.stdout, level=log_level, format=logger_format)
    configure(processors=[
        add_severity_level,
        add_log_level,
        filter_by_level,
        add_service,
        format_exc_info,
        TimeStamper(fmt=logger_date_format, utc=True, key="created_at"),
        JSONRenderer(indent=indent),
    ])