コード例 #1
0
def test_log_custom_std_log_attribute(stdout, service_name):
    # GIVEN a logger configured with the standard `process` LogRecord attribute
    # https://docs.python.org/3/library/logging.html#logrecord-attributes
    logger = Logger(service=service_name, stream=stdout, process="%(process)d")

    # WHEN a message is logged
    logger.info("foo")

    # THEN the process placeholder should be interpolated,
    # not left behind as a raw format string
    output: dict = json.loads(stdout.getvalue())
    assert "%" not in output["process"]
コード例 #2
0
def test_with_unserializable_value_in_message(stdout, service_name):
    # GIVEN a DEBUG-level logger and a value json cannot serialize
    logger = Logger(service=service_name, level="DEBUG", stream=stdout)

    class Unserializable:
        pass

    # WHEN the unserializable value is logged inside a dict message
    logger.debug({"x": Unserializable()})

    # THEN the value should be rendered via its repr, e.g. "<...Unserializable object at ...>"
    output = json.loads(stdout.getvalue())
    assert output["message"]["x"].startswith("<")
コード例 #3
0
def test_log_dict_xray_is_not_present_when_tracing_is_disabled(
        stdout, monkeypatch, service_name):
    # GIVEN a logger initialized with X-Ray tracing disabled (the default)
    logger = Logger(service=service_name, stream=stdout)

    # WHEN a message is logged
    logger.info("foo")

    # THEN the `xray_trace_id` key should be absent from the record
    output: dict = json.loads(stdout.getvalue())
    assert "xray_trace_id" not in output
コード例 #4
0
def test_setup_service_env_var(monkeypatch, stdout):
    # GIVEN service is defined only via the POWERTOOLS_SERVICE_NAME env var
    expected_service = "payment"
    monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", expected_service)

    # WHEN the logger is set up and used
    logger = Logger(stream=stdout)
    logger.info("Hello")

    # THEN the `service` field should equal the env var value
    output = json.loads(stdout.getvalue())
    assert output["service"] == expected_service
コード例 #5
0
def test_setup_service_env_var(monkeypatch, stdout):
    expected_service = "payment"
    # GIVEN Logger is initialized
    # WHEN service is defined only via the POWERTOOLS_SERVICE_NAME env var
    monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", expected_service)
    logger = Logger(stream=stdout)

    logger.info("Hello")

    # THEN the `service` field should equal the env var value
    output = capture_logging_output(stdout)
    assert output["service"] == expected_service
コード例 #6
0
def test_log_dict_key_seq(stdout, service_name):
    # GIVEN the default logger configuration
    logger = Logger(service=service_name, stream=stdout)

    # WHEN a message is logged
    logger.info("Message")

    # THEN the record must start with level, location, message, timestamp in order
    output: dict = json.loads(stdout.getvalue())
    leading_keys = list(output.keys())[:4]
    assert leading_keys == ["level", "location", "message", "timestamp"]
コード例 #7
0
def test_log_dict_key_custom_seq(stdout, service_name):
    # GIVEN log_record_order is customized to put "message" first
    logger = Logger(service=service_name,
                    stream=stdout,
                    log_record_order=["message"])

    # WHEN a message is logged
    logger.info("Message")

    # THEN "message" should be the very first key in the serialized record
    output: dict = json.loads(stdout.getvalue())
    first_key = next(iter(output))
    assert first_key == "message"
def test_logging_exception_traceback(stdout):
    # GIVEN a DEBUG-level logger
    logger = Logger(level="DEBUG", stream=stdout)

    # WHEN an exception is logged from within an except block
    try:
        raise ValueError("Boom")
    except ValueError:
        logger.exception("A value error occurred")

    # THEN the record is emitted at ERROR level and carries an `exception` field
    output = json.loads(stdout.getvalue())
    check_log_dict(output)
    assert output["level"] == "ERROR"
    assert "exception" in output
コード例 #9
0
def test_setup_sampling_rate_env_var(monkeypatch, stdout, service_name):
    # GIVEN Logger is initialized
    # WHEN sampling rate is set to 100% via POWERTOOLS_LOGGER_SAMPLE_RATE env
    expected_rate = "1"
    monkeypatch.setenv("POWERTOOLS_LOGGER_SAMPLE_RATE", expected_rate)
    logger = Logger(service=service_name, stream=stdout)
    logger.debug("I am being sampled")

    # THEN the emitted record reports the sampling rate,
    # the effective level is DEBUG,
    # and the debug statement actually made it to stdout
    log = capture_logging_output(stdout)
    assert log["sampling_rate"] == expected_rate
    assert log["level"] == "DEBUG"
    assert log["message"] == "I am being sampled"
コード例 #10
0
def test_logger_do_not_log_twice_when_root_logger_is_setup(stdout):
    # GIVEN the root logger already has a handler (as Lambda configures it)
    logging.getLogger().addHandler(logging.StreamHandler(stream=stdout))

    # WHEN a Logger and a child Logger each emit one message
    logger = Logger(stream=stdout)
    child_logger = Logger(child=True, stream=stdout)
    logger.info("hello")
    child_logger.info("hello again")

    # THEN exactly two entries are written, because child records
    # propagated up to the root logger are rejected (no duplicates)
    entries = stdout.getvalue().strip().split("\n")
    assert len(entries) == 2
コード例 #11
0
def test_logging_exception_traceback(stdout):
    # GIVEN a DEBUG logger carrying extra structured keys
    logger = Logger(level="DEBUG",
                    stream=stdout,
                    request_id="request id!",
                    another="value")

    # WHEN logger.exception is called while an error is being handled
    try:
        raise Exception("Boom")
    except Exception:
        logger.exception("This is a test")

    # THEN the serialized record should include the traceback under `exception`
    output = json.loads(stdout.getvalue())
    check_log_dict(output)
    assert "exception" in output
def test_with_unserializable_value_in_message_custom(stdout):
    class Unserializable:
        pass

    # GIVEN a logger with a custom json_default fallback serializer
    logger = Logger(
        level="DEBUG",
        stream=stdout,
        json_default=lambda o: f"<non-serializable: {type(o).__name__}>",
    )

    # WHEN an unserializable value is logged
    logger.debug({"x": Unserializable()})

    # THEN the custom handler renders the value,
    # and `json_default` itself never leaks into the record
    output = json.loads(stdout.getvalue())
    assert output["message"]["x"] == "<non-serializable: Unserializable>"
    assert "json_default" not in output
コード例 #13
0
def test_inject_lambda_context_with_structured_log(lambda_context, stdout, service_name):
    # GIVEN Logger is initialized
    logger = Logger(service=service_name, stream=stdout)

    # WHEN an extra key is appended upfront via structure_logs
    # and the handler is decorated with logger.inject_lambda_context
    logger.structure_logs(append=True, additional_key="test")

    @logger.inject_lambda_context
    def handler(event, context):
        logger.info("Hello")

    handler({}, lambda_context)

    # THEN the Lambda context fields and the appended key are all present
    log = capture_logging_output(stdout)
    for key in (
        "function_name",
        "function_memory_size",
        "function_arn",
        "function_request_id",
        "additional_key",
    ):
        assert key in log
コード例 #14
0
def test_log_custom_formatting(stdout, service_name):
    # GIVEN a logger with custom `location` and `datefmt` formats
    logger = Logger(service=service_name,
                    stream=stdout,
                    location="[%(funcName)s] %(module)s",
                    datefmt="fake-datefmt")

    # WHEN a message is logged
    logger.info("foo")

    # THEN `location` and `timestamp` should follow the custom formats
    output: dict = json.loads(stdout.getvalue())
    assert output["location"] == "[test_log_custom_formatting] test_aws_lambda_logging"
    assert output["timestamp"] == "fake-datefmt"
コード例 #15
0
def test_setup_with_valid_log_levels(stdout, level):
    # GIVEN a logger configured at each supported level with extra keys
    logger = Logger(level=level,
                    stream=stdout,
                    request_id="request id!",
                    another="value")

    # WHEN logging via the method that matches the configured level
    emitters = {
        "INFO": logger.info,
        "ERROR": logger.error,
        "WARNING": logger.warning,
        "DEBUG": logger.debug,
        "CRITICAL": logger.critical,
    }
    emitters[level]("This is a test")

    output = json.loads(stdout.getvalue().strip())

    check_log_dict(output)

    # THEN the record carries the level, message and extra keys,
    # and no `exception` field since nothing was raised
    assert output["level"] == level
    assert output["message"] == "This is a test"
    assert output["request_id"] == "request id!"
    assert "exception" not in output
コード例 #16
0
def test_logger_level_not_set(service_name):
    # GIVEN a Logger initialized without an explicit level
    logger = Logger(service=service_name)

    # THEN the effective level should default to INFO
    assert logging.INFO == logger.level
コード例 #17
0
def test_inject_lambda_context_log_event_request_env_var(
        monkeypatch, lambda_context, stdout):
    # GIVEN POWERTOOLS_LOGGER_LOG_EVENT instructs the logger to log the event
    # WHEN a decorated handler is invoked
    # THEN the incoming Lambda event should appear in the log output
    lambda_event = {"greeting": "hello"}
    monkeypatch.setenv("POWERTOOLS_LOGGER_LOG_EVENT", "true")

    logger = Logger(stream=stdout)

    @logger.inject_lambda_context
    def handler(event, context):
        logger.info("Hello")

    handler(lambda_event, lambda_context)

    # The buffer holds several newline-separated records; parse each one
    records = []
    for line in stdout.getvalue().split("\n"):
        if line:
            records.append(json.loads(line.strip()))

    # Pick out the record whose message contains the logged event
    logged_event = {}
    for record in records:
        if "greeting" in record["message"]:
            logged_event = record["message"]

    assert logged_event == lambda_event
コード例 #18
0
def test_inject_lambda_context_log_no_request_by_default(
        monkeypatch, lambda_context, stdout):
    # GIVEN a handler decorated with logger.inject_lambda_context
    # WHEN no log-event option is enabled
    # THEN the incoming Lambda event should NOT be logged
    lambda_event = {"greeting": "hello"}

    logger = Logger(stream=stdout)

    @logger.inject_lambda_context
    def handler(event, context):
        logger.info("Hello")

    handler(lambda_event, lambda_context)

    # The buffer holds several newline-separated records; parse each one
    records = []
    for line in stdout.getvalue().split("\n"):
        if line:
            records.append(json.loads(line.strip()))

    # Look for any record whose message contains the event payload
    logged_event = {}
    for record in records:
        if "greeting" in record["message"]:
            logged_event = record["message"]

    assert logged_event != lambda_event
コード例 #19
0
def test_logger_level_as_int(service_name):
    # GIVEN a Logger initialized with an integer log level
    logger = Logger(service=service_name, level=logging.INFO)

    # THEN the level should be that same int (20 for INFO)
    assert logging.INFO == logger.level
コード例 #20
0
def test_setup_sampling_rate(monkeypatch, stdout):
    # GIVEN sampling rate is set to 100% via POWERTOOLS_LOGGER_SAMPLE_RATE
    # while LOG_LEVEL asks for INFO
    expected_rate = "1"
    monkeypatch.setenv("POWERTOOLS_LOGGER_SAMPLE_RATE", expected_rate)
    monkeypatch.setenv("LOG_LEVEL", "INFO")

    # WHEN the logger is set up and a debug statement is emitted
    logger = Logger(stream=stdout)
    logger.debug("I am being sampled")

    # THEN sampling wins: the record is at DEBUG and reports the rate
    log = json.loads(stdout.getvalue())
    assert log["sampling_rate"] == expected_rate
    assert log["level"] == "DEBUG"
    assert log["message"] == "I am being sampled"
コード例 #21
0
def test_logger_level_case_insensitive(service_name):
    # GIVEN a Logger initialized with a lowercase level string
    logger = Logger(service=service_name, level="info")

    # THEN it should normalize to the INFO level
    assert logging.INFO == logger.level
def test_log_dict_xray_is_present_when_tracing_is_enabled(stdout, monkeypatch):
    # GIVEN X-Ray tracing is active (_X_AMZN_TRACE_ID is set)
    trace_id = "1-5759e988-bd862e3fe1be46a994272793"
    monkeypatch.setenv(
        name="_X_AMZN_TRACE_ID",
        value=f"Root={trace_id};Parent=53995c3f42cd8ad8;Sampled=1",
    )
    logger = Logger(stream=stdout)

    # WHEN a message is logged
    logger.info("foo")

    # THEN the record should expose the root trace id as `xray_trace_id`
    output: dict = json.loads(stdout.getvalue())
    assert output["xray_trace_id"] == trace_id

    monkeypatch.delenv(name="_X_AMZN_TRACE_ID")
コード例 #23
0
def test_log_dict_key_strip_nones(stdout, service_name):
    # GIVEN a logger where several standard keys are suppressed with None
    # Note: level and service cannot actually be suppressed
    logger = Logger(stream=stdout,
                    level=None,
                    location=None,
                    timestamp=None,
                    sampling_rate=None,
                    service=None)

    # WHEN a message is logged
    logger.info("foo")

    # THEN only `level`, `message` and the fallback `service` remain
    output: dict = json.loads(stdout.getvalue())
    assert sorted(output) == ["level", "message", "service"]
    assert output["service"] == "service_undefined"
コード例 #24
0
def test_logger_log_twice_when_log_filter_isnt_present_and_root_logger_is_setup(monkeypatch, stdout, service_name):
    # GIVEN Lambda configures the root logger with a handler
    root_logger = logging.getLogger()
    root_logger.addHandler(logging.StreamHandler(stream=stdout))

    # WHEN we create a new Logger and child Logger
    # and log deduplication filter for child messages are disabled
    # see #262 for more details on why this is needed for Pytest Live Log feature
    monkeypatch.setenv(constants.LOGGER_LOG_DEDUPLICATION_ENV, "true")
    logger = Logger(service=service_name, stream=stdout)
    child_logger = Logger(service=service_name, child=True, stream=stdout)
    logger.info("PARENT")
    child_logger.info("CHILD")

    # THEN it should contain four log entries: each of the two messages is
    # written once by the Powertools handler and once more via propagation
    # to the root logger, since the deduplication filter is disabled
    logs = list(stdout.getvalue().strip().split("\n"))
    assert len(logs) == 4
コード例 #25
0
def test_logger_level_env_var_as_int(monkeypatch, service_name):
    # GIVEN Logger is initialized
    # WHEN log level is explicitly defined via LOG_LEVEL env as an int-like value
    # THEN Logger should propagate ValueError
    # since env vars can only be strings
    # and '50' is not a known named log level
    # NOTE: env vars must be set as strings — pytest's monkeypatch.setenv
    # rejects non-str values in recent versions, so pass "50" rather than 50
    monkeypatch.setenv("LOG_LEVEL", "50")
    with pytest.raises(ValueError, match="Unknown level: '50'"):
        Logger(service=service_name)
コード例 #26
0
def test_logger_get_correlation_id(lambda_context, stdout, service_name):
    # GIVEN a logger whose correlation id has been set to "foo"
    logger = Logger(service=service_name, stream=stdout)
    logger.set_correlation_id("foo")

    # WHEN get_correlation_id is called
    # THEN it should return the value previously set
    assert logger.get_correlation_id() == "foo"
コード例 #27
0
def test_logger_children_propagate_changes(stdout, service_name):
    # GIVEN a child Logger created before its parent, mimicking a logger
    # imported from another module/file (loggers live in global scope)
    child = Logger(stream=stdout, service=service_name, child=True)
    parent = Logger(stream=stdout, service=service_name)

    # WHEN the child appends an additional structured key
    child.structure_logs(append=True, customer_id="value")

    # THEN the change propagates to the parent too,
    # so both subsequent log statements carry the latest value
    parent.info("Hello parent")
    child.info("Hello child")

    parent_log, child_log = capture_multiple_logging_statements_output(stdout)
    assert "customer_id" in parent_log
    assert "customer_id" in child_log
    assert child.parent.name == service_name
コード例 #28
0
def test_logger_append_duplicated(stdout, service_name):
    # GIVEN a Logger initialized with a request_id field
    logger = Logger(service=service_name, stream=stdout, request_id="value")

    # WHEN the same key is appended again with a different value
    logger.structure_logs(append=True, request_id="new_value")
    logger.info("log")

    # THEN the latest value wins in subsequent log statements
    log = capture_logging_output(stdout)
    assert log["request_id"] == "new_value"
コード例 #29
0
def test_logger_record_caller_location(stdout, service_name):
    # GIVEN Logger is initialized
    logger = Logger(service=service_name, stream=stdout)

    # WHEN a log statement runs
    logger.info("log")

    # THEN `location` should resolve to this calling function's name
    expected_fn_name = inspect.currentframe().f_code.co_name
    log = capture_logging_output(stdout)
    assert expected_fn_name in log["location"]
コード例 #30
0
def test_log_dict_xray_is_updated_when_tracing_id_changes(
        stdout, monkeypatch, service_name):
    # GIVEN a logger initialized while X-Ray tracing is enabled
    first_trace_id = "1-5759e988-bd862e3fe1be46a994272793"
    monkeypatch.setenv(
        name="_X_AMZN_TRACE_ID",
        value=f"Root={first_trace_id};Parent=53995c3f42cd8ad8;Sampled=1",
    )
    logger = Logger(service=service_name, stream=stdout)

    # WHEN a message is logged
    logger.info("foo")

    # and the trace id changes, mimicking a new invocation
    second_trace_id = "1-5759e988-bd862e3fe1be46a949393982437"
    monkeypatch.setenv(
        name="_X_AMZN_TRACE_ID",
        value=f"Root={second_trace_id};Parent=53995c3f42cd8ad8;Sampled=1",
    )
    logger.info("foo bar")

    first_record, second_record = [
        json.loads(line.strip()) for line in stdout.getvalue().split("\n")
        if line
    ]

    # THEN each record should carry the trace id active when it was emitted
    assert first_record["xray_trace_id"] == first_trace_id
    assert second_record["xray_trace_id"] == second_trace_id

    monkeypatch.delenv(name="_X_AMZN_TRACE_ID")