Example #1
def test_log_multiple_metrics(capsys, metrics_same_name, dimensions,
                              namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(namespace=namespace)

    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    # WHEN we utilize log_metrics to serialize
    # and flush multiple metrics with the same name at the end of a function execution
    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        for metric in metrics_same_name:
            my_metrics.add_metric(**metric)

    lambda_handler({}, {})
    output = capture_metrics_output(capsys)
    expected = serialize_metrics(metrics=metrics_same_name,
                                 dimensions=dimensions,
                                 namespace=namespace)

    # THEN we should have no exceptions
    # and a valid EMF object should be flushed correctly
    remove_timestamp(metrics=[output, expected])
    assert expected == output
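These tests depend on fixtures and helpers such as metric, dimension, namespace, metrics, metrics_same_name, capture_metrics_output, serialize_metrics and remove_timestamp that are defined elsewhere (typically in a conftest.py). Below is a minimal sketch of a few of them, inferred from how they are used here; the exact names, values and shapes are assumptions, not the test suite's real definitions.
import json

import pytest


@pytest.fixture
def metric() -> dict:
    # assumed shape: keyword arguments accepted by Metrics.add_metric
    return {"name": "single_metric", "unit": "Count", "value": 1}


@pytest.fixture
def dimension() -> dict:
    # assumed shape: keyword arguments accepted by Metrics.add_dimension
    return {"name": "test_dimension", "value": "test"}


@pytest.fixture
def namespace() -> str:
    # note: some examples below instead treat namespace as a {"name": ...} mapping
    return "test_namespace"


def capture_metrics_output(capsys):
    # each flush prints a single EMF JSON blob to stdout
    return json.loads(capsys.readouterr().out.strip())


def remove_timestamp(metrics):
    # timestamps differ between runs, so drop them before comparing EMF blobs
    for metric_blob in metrics:
        del metric_blob["_aws"]["Timestamp"]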
def test_schema_no_metrics(service, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(service=service, namespace=namespace)

    # THEN it should fail validation and raise SchemaValidationError
    with pytest.raises(SchemaValidationError, match=".*Metrics must contain at least 1 items"):
        my_metrics.serialize_metric_set()
def test_emit_cold_start_metric_only_once(capsys, namespace, dimension,
                                          metric):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)

    # WHEN log_metrics is used with capture_cold_start_metric
    # and handler is called more than once
    @my_metrics.log_metrics(capture_cold_start_metric=True)
    def lambda_handler(evt, context):
        my_metrics.add_metric(**metric)
        my_metrics.add_dimension(**dimension)

    LambdaContext = namedtuple("LambdaContext", "function_name")
    lambda_handler({}, LambdaContext("example_fn"))
    capsys.readouterr().out.strip()  # consume the first invocation's output (contains ColdStart)

    # THEN ColdStart metric and function_name dimension should be logged
    # only once
    lambda_handler({}, LambdaContext("example_fn"))

    output = json.loads(capsys.readouterr().out.strip())

    assert "ColdStart" not in output

    assert "function_name" not in output
def test_log_metrics_with_renamed_service(capsys, metrics, metric):
    # GIVEN Metrics is initialized with service specified
    my_metrics = Metrics(service="test_service", namespace="test_application")
    for metric in metrics:
        my_metrics.add_metric(**metric)

    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        # WHEN we manually call add_dimension to change the value of the service dimension
        my_metrics.add_dimension(name="service", value="another_test_service")
        my_metrics.add_metric(**metric)
        return True

    lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())
    lambda_handler({}, {})
    second_output = json.loads(capsys.readouterr().out.strip())

    remove_timestamp(metrics=[output])  # Timestamp will always be different

    # THEN we should have no exceptions and the service dimension should be set to the value
    # provided in the add_dimension call
    assert output["service"] == "another_test_service"
    assert second_output["service"] == "another_test_service"
def test_log_metrics_with_implicit_dimensions(capsys, metrics):
    # GIVEN Metrics is initialized with service specified
    my_metrics = Metrics(service="test_service", namespace="test_application")
    for metric in metrics:
        my_metrics.add_metric(**metric)

    # WHEN we utilize log_metrics to serialize and don't explicitly add any dimensions
    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        return True

    lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())

    expected_dimensions = [{"name": "service", "value": "test_service"}]
    expected = serialize_metrics(metrics=metrics,
                                 dimensions=expected_dimensions,
                                 namespace={"name": "test_application"})

    remove_timestamp(metrics=[output,
                              expected])  # Timestamp will always be different

    # THEN we should have no exceptions and the service dimension should be set to the
    # service value passed to the Metrics constructor
    assert expected == output
Example #6
def test_metrics_reuse_metric_set(metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(namespace=namespace)
    my_metrics.add_metric(**metric)

    # WHEN Metrics is initialized one more time
    my_metrics_2 = Metrics(namespace=namespace)

    # THEN Both class instances should have the same metric set
    assert my_metrics_2.metric_set == my_metrics.metric_set
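Metrics keeps its metric set in state shared across instances, which is exactly what this test asserts; suites like this typically reset that shared state between tests with an autouse fixture. A hedged sketch of what such a fixture could look like (the fixture name is an assumption):
import pytest

from aws_lambda_powertools import Metrics


@pytest.fixture(autouse=True)
def reset_metric_set():
    # clear the shared metric set, dimensions and metadata before every test
    metrics = Metrics()
    metrics.clear_metrics()
    yield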
def test_serialize_metric_set_metric_definition_multiple_values(
    metrics_same_name, dimension, namespace, service, metadata
):
    expected_metric_definition = {
        "metric_one": [1.0, 5.0],
        "_aws": {
            "Timestamp": 1592237875494,
            "CloudWatchMetrics": [
                {
                    "Namespace": "test_namespace",
                    "Dimensions": [["test_dimension", "service"]],
                    "Metrics": [{"Name": "metric_one", "Unit": "Count"}],
                }
            ],
        },
        "service": "test_service",
        "username": "******",
        "test_dimension": "test",
    }

    # GIVEN Metrics is initialized and multiple metrics are added with the same name
    my_metrics = Metrics(service=service, namespace=namespace)
    for metric in metrics_same_name:
        my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_metadata(**metadata)

    # WHEN metrics are serialized manually
    metric_definition_output = my_metrics.serialize_metric_set()

    # THEN we should emit a valid embedded metric definition object
    assert "Timestamp" in metric_definition_output["_aws"]
    remove_timestamp(metrics=[metric_definition_output, expected_metric_definition])
    assert metric_definition_output == expected_metric_definition
Example #8
def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace,
                           a_hundred_metrics):
    # GIVEN Metrics is initialized and we have over a hundred metrics to add
    my_metrics = Metrics(namespace=namespace)
    my_metrics.add_dimension(**dimension)

    # WHEN we add more than 100 metrics
    for _metric in a_hundred_metrics:
        my_metrics.add_metric(**_metric)

    # THEN it should serialize and flush all metrics at the 100th
    # and clear all metrics and dimensions from memory
    output = capture_metrics_output(capsys)
    spillover_metrics = output["_aws"]["CloudWatchMetrics"][0]["Metrics"]
    assert my_metrics.metric_set == {}
    assert len(spillover_metrics) == 100

    # GIVEN we add the 101st metric
    # WHEN we already had a Metrics class instance
    # with an existing dimension set from the previous batch of 100 metrics
    my_metrics.add_metric(**metric)

    # THEN serializing the 101st metric should
    # create a new EMF object with a single metric in it (the 101st)
    # and contain the same dimension we previously added
    serialized_101th_metric = my_metrics.serialize_metric_set()
    expected_101th_metric = serialize_single_metric(metric=metric,
                                                    dimension=dimension,
                                                    namespace=namespace)
    remove_timestamp(metrics=[serialized_101th_metric, expected_101th_metric])
    assert serialized_101th_metric == expected_101th_metric
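The a_hundred_metrics fixture is not shown in this section; given that the automatic flush triggers at the 100-metric EMF limit, it presumably yields exactly 100 distinct metric definitions. A hypothetical sketch:
import pytest


@pytest.fixture
def a_hundred_metrics():
    # 100 distinct metrics: adding the 100th one triggers the automatic flush
    return [{"name": f"metric_{i}", "unit": "Count", "value": 1} for i in range(100)]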
Example #9
def test_default_dimensions_across_instances(namespace):
    # GIVEN Metrics is initialized and we persist a set of default dimensions
    my_metrics = Metrics(namespace=namespace)
    my_metrics.set_default_dimensions(environment="test",
                                      log_group="/lambda/test")

    # WHEN a new Metrics instance is created
    same_metrics = Metrics()

    # THEN default dimensions should also be present
    assert "environment" in same_metrics.default_dimensions
def test_log_metrics_during_exception(capsys, metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    # WHEN log_metrics is used to serialize metrics
    # but an error has been raised during handler execution
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        raise ValueError("Bubble up")

    with pytest.raises(ValueError):
        lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_single_metric(metric=metric,
                                       dimension=dimension,
                                       namespace=namespace)

    remove_timestamp(metrics=[output,
                              expected])  # Timestamp will always be different
    # THEN we should log metrics and propagate the exception up
    assert expected["_aws"] == output["_aws"]
def test_log_metrics(capsys, metrics, dimensions, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)
    for metric in metrics:
        my_metrics.add_metric(**metric)
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    # WHEN we utilize log_metrics to serialize
    # and flush all metrics at the end of a function execution
    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        return True

    lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_metrics(metrics=metrics,
                                 dimensions=dimensions,
                                 namespace=namespace)

    remove_timestamp(metrics=[output,
                              expected])  # Timestamp will always be different

    # THEN we should have no exceptions
    # and a valid EMF object should've been flushed correctly
    assert expected["_aws"] == output["_aws"]
    for dimension in dimensions:
        assert dimension["name"] in output
def test_log_metrics_clear_metrics_after_invocation(metric, service, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(service=service, namespace=namespace)
    my_metrics.add_metric(**metric)

    # WHEN log_metrics is used to flush metrics from memory
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        pass

    lambda_handler({}, {})

    # THEN metric set should be empty after function has been run
    assert my_metrics.metric_set == {}
Example #13
def test_metrics_large_operation_without_json_serialization_sla(namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(namespace=namespace)

    # WHEN we add and serialize 99 metrics
    with timing() as t:
        add_max_metrics_before_serialization(metrics_instance=my_metrics)
        my_metrics.serialize_metric_set()

    # THEN completion time should be below our validation SLA
    elapsed = t()
    if elapsed > METRICS_VALIDATION_SLA:
        pytest.fail(
            f"Metric validation should be below {METRICS_VALIDATION_SLA}s: {elapsed}"
        )
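The timing context manager and add_max_metrics_before_serialization helper are also defined outside this section. One plausible shape, judging from how they are called: t() returns the elapsed seconds, and the helper fills the metric set up to just below the 100-metric flush threshold. Both definitions below are assumptions:
from contextlib import contextmanager
from time import perf_counter


@contextmanager
def timing():
    # yield a callable that reports the elapsed time in seconds
    start = perf_counter()
    yield lambda: perf_counter() - start


def add_max_metrics_before_serialization(metrics_instance):
    # 99 metrics: one short of the 100-metric limit, so no automatic flush occurs
    for i in range(99):
        metrics_instance.add_metric(name=f"metric_{i}", unit="Count", value=1)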
def test_service_env_var(monkeypatch, capsys, metric, namespace):
    # GIVEN we use POWERTOOLS_SERVICE_NAME
    monkeypatch.setenv("POWERTOOLS_SERVICE_NAME", "test_service")
    my_metrics = Metrics(namespace=namespace["name"])

    # WHEN we create a metric
    # but don't explicitly add a dimension
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        my_metrics.add_metric(**metric)
        return True

    lambda_handler({}, {})

    monkeypatch.delenv("POWERTOOLS_SERVICE_NAME")

    output = json.loads(capsys.readouterr().out.strip())
    expected_dimension = {"name": "service", "value": "test_service"}
    expected = serialize_single_metric(metric=metric,
                                       dimension=expected_dimension,
                                       namespace=namespace)

    remove_timestamp(metrics=[output,
                              expected])  # Timestamp will always be different

    # THEN metrics should be logged using the implicitly created "service" dimension
    assert expected == output
Example #15
def test_metrics_large_operation_and_json_serialization_sla(namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(namespace=namespace)

    # WHEN we add and serialize 99 metrics
    with timing() as t:
        add_max_metrics_before_serialization(metrics_instance=my_metrics)
        metrics = my_metrics.serialize_metric_set()
        print(json.dumps(metrics, separators=(",", ":")))

    # THEN completion time should be below our serialization SLA
    elapsed = t()
    if elapsed > METRICS_SERIALIZATION_SLA:
        pytest.fail(
            f"Metric serialization should be below {METRICS_SERIALIZATION_SLA}s: {elapsed}"
        )
Example #16
def test_log_metrics_capture_cold_start_metric_separately(
        capsys, namespace, service, metric, dimension):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(service=service, namespace=namespace)

    # WHEN log_metrics is used with capture_cold_start_metric
    @my_metrics.log_metrics(capture_cold_start_metric=True)
    def lambda_handler(evt, context):
        my_metrics.add_metric(**metric)
        my_metrics.add_dimension(**dimension)

    LambdaContext = namedtuple("LambdaContext", "function_name")
    lambda_handler({}, LambdaContext("example_fn"))

    cold_start_blob, custom_metrics_blob = capture_metrics_output_multiple_emf_objects(
        capsys)

    # THEN ColdStart metric and function_name dimension should be logged
    # in a separate EMF blob than the application metrics
    assert cold_start_blob["ColdStart"] == [1.0]
    assert cold_start_blob["function_name"] == "example_fn"
    assert cold_start_blob["service"] == service

    # and that application metrics dimensions are not part of ColdStart EMF blob
    assert "test_dimension" not in cold_start_blob

    # THEN application metrics EMF blob should not have
    # ColdStart metric nor function_name dimension
    assert "function_name" not in custom_metrics_blob
    assert "ColdStart" not in custom_metrics_blob

    # and that application metrics are recorded as normal
    assert custom_metrics_blob["service"] == service
    assert custom_metrics_blob["single_metric"] == [float(metric["value"])]
    assert custom_metrics_blob["test_dimension"] == dimension["value"]
Example #17
def test_log_metrics_with_implicit_dimensions_called_twice(
        capsys, metric, namespace, service):
    # GIVEN Metrics is initialized with service specified
    my_metrics = Metrics(service=service, namespace=namespace)

    # WHEN we utilize log_metrics to serialize and don't explicitly add any dimensions,
    # and the lambda function is called more than once
    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        my_metrics.add_metric(**metric)
        return True

    lambda_handler({}, {})
    output = capture_metrics_output(capsys)

    lambda_handler({}, {})
    second_output = capture_metrics_output(capsys)

    # THEN we should have no exceptions and the service dimension should be set to the
    # service value passed to the Metrics constructor
    assert output["service"] == "test_service"
    assert second_output["service"] == "test_service"

    for metric_record in output["_aws"]["CloudWatchMetrics"]:
        assert ["service"] in metric_record["Dimensions"]

    for metric_record in second_output["_aws"]["CloudWatchMetrics"]:
        assert ["service"] in metric_record["Dimensions"]
def test_log_metrics_with_implicit_dimensions(capsys, metric, namespace, service):
    # GIVEN Metrics is initialized with service specified
    my_metrics = Metrics(service=service, namespace=namespace)
    my_metrics.add_metric(**metric)

    # WHEN we utilize log_metrics to serialize and don't explicitly add any dimensions
    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        pass

    lambda_handler({}, {})

    output = capture_metrics_output(capsys)

    # THEN we should have no exceptions and the service dimension should be set to the
    # service value passed to the Metrics constructor
    assert service == output["service"]
def test_log_metrics_with_explicit_namespace(capsys, metric, service, namespace):
    # GIVEN Metrics is initialized with explicit namespace
    my_metrics = Metrics(service=service, namespace=namespace)
    my_metrics.add_metric(**metric)

    # WHEN we utilize log_metrics to serialize
    # and flush all metrics at the end of a function execution
    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        pass

    lambda_handler({}, {})

    output = capture_metrics_output(capsys)

    # THEN we should have no exceptions and the namespace should be set
    # using the namespace value passed to the Metrics constructor
    assert namespace == output["_aws"]["CloudWatchMetrics"][0]["Namespace"]
def test_emit_cold_start_metric(capsys, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)

    # WHEN log_metrics is used with capture_cold_start_metric
    @my_metrics.log_metrics(capture_cold_start_metric=True)
    def lambda_handler(evt, context):
        return True

    LambdaContext = namedtuple("LambdaContext", "function_name")
    lambda_handler({}, LambdaContext("example_fn"))

    output = json.loads(capsys.readouterr().out.strip())

    # THEN ColdStart metric and function_name dimension should be logged
    assert output["ColdStart"] == 1
    assert output["function_name"] == "example_fn"
def test_schema_no_metrics(dimensions, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)

    # WHEN no metrics have been added
    # and only a namespace and dimensions have been set
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    # THEN it should fail validation and raise SchemaValidationError
    with pytest.raises(SchemaValidationError):
        my_metrics.serialize_metric_set()
def test_log_metrics_decorator_call_decorated_function(metric, namespace, service):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(service=service, namespace=namespace)

    # WHEN log_metrics is used to serialize metrics
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        return True

    # THEN log_metrics should invoke the function it decorates
    # and return no error if we have a namespace and dimension
    assert lambda_handler({}, {}) is True
def observability_init_middleware(handler: Callable,
                                  event: Dict[str, Any],
                                  context: LambdaContext,
                                  logger: Logger,
                                  metrics: Metrics,
                                  log_structure: Dict[str, str] = None,
                                  *args,
                                  **kwargs) -> Any:
    """
    An aws_lambda_powertools middleware function setting up standard logging
    and metrics handling for lambda functions.
    """
    if log_structure:
        logger.structure_logs(append=True, **log_structure)
    metrics.add_metadata(key='handler_name', value=handler.__module__)
    try:
        result = handler(event, context)
    except Exception as err:
        metrics.add_metric(name='function_failed', unit='Count', value=1)
        logger.error({
            'msg': 'Function failed',
            'error': str(err),
            'event': event
        })
        raise
    else:
        metrics.add_metric(name='function_succeeded', unit='Count', value=1)
    return result
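The middleware above matches the call signature expected by Powertools' middleware factory (handler, event, context, plus extra keyword arguments), so one way to wire it up is sketched below. This is a hedged illustration only: the observability_init name, the payments service, and the handler are made up, and the decorator ordering assumes log_metrics should flush the counters the middleware adds.
from aws_lambda_powertools import Logger, Metrics
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator

# Register the middleware through the factory so it can receive keyword arguments
observability_init = lambda_handler_decorator(observability_init_middleware)

logger = Logger(service="payments")                           # illustrative service name
metrics = Metrics(namespace="Payments", service="payments")   # illustrative namespace


@metrics.log_metrics  # outermost, so it flushes the success/failure counters added by the middleware
@observability_init(logger=logger, metrics=metrics)
def lambda_handler(event, context):
    return {"statusCode": 200}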
def test_log_metrics_with_explicit_namespace(capsys, metrics, dimensions,
                                             namespace):
    # GIVEN Metrics is initialized with an explicit namespace and service specified
    my_metrics = Metrics(service="test_service", namespace=namespace["name"])
    for metric in metrics:
        my_metrics.add_metric(**metric)
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    # WHEN we utilize log_metrics to serialize
    # and flush all metrics at the end of a function execution
    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        return True

    lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())

    dimensions.append({"name": "service", "value": "test_service"})
    expected = serialize_metrics(metrics=metrics,
                                 dimensions=dimensions,
                                 namespace=namespace)

    remove_timestamp(metrics=[output,
                              expected])  # Timestamp will always be different

    # THEN we should have no exceptions and the namespace should be set to the name
    # provided in the namespace passed to the Metrics constructor
    assert expected == output
def test_log_metrics_raise_on_empty_metrics(capsys, metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(service="test_service", namespace=namespace)

    # WHEN log_metrics is used with raise_on_empty_metrics and no metrics have been added
    @my_metrics.log_metrics(raise_on_empty_metrics=True)
    def lambda_handler(evt, context):
        pass

    # THEN the raised exception should be SchemaValidationError
    # and specifically about the lack of Metrics
    with pytest.raises(SchemaValidationError, match=".*Metrics must contain at least 1 items"):
        lambda_handler({}, {})
def test_log_no_metrics_error_propagation(capsys, metric, dimension,
                                          namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    @my_metrics.log_metrics(raise_on_empty_metrics=True)
    def lambda_handler(evt, context):
        # WHEN log_metrics is used with raise_on_empty_metrics and no metrics have been added
        # and the decorated function also raises an exception
        raise ValueError("Bubble up")

    # THEN the SchemaValidationError raised while flushing the empty metric set should propagate
    with pytest.raises(SchemaValidationError):
        lambda_handler({}, {})
def test_log_metrics_decorator_no_metrics(dimensions, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics(namespace=namespace["name"], service="test_service")

    # WHEN using the log_metrics decorator and no metrics have been added
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        pass

    # THEN it should raise a warning instead of throwing an exception
    with warnings.catch_warnings(record=True) as w:
        lambda_handler({}, {})
        assert len(w) == 1
        assert str(w[-1].message) == "No metrics to publish, skipping"
Example #28
def test_log_persist_default_dimensions(capsys, metrics, dimensions,
                                        namespace):
    # GIVEN Metrics is initialized and we persist a set of default dimensions
    my_metrics = Metrics(namespace=namespace)
    my_metrics.set_default_dimensions(environment="test",
                                      log_group="/lambda/test")

    # WHEN we utilize log_metrics to serialize
    # and flush metrics and clear all metrics and dimensions from memory
    # at the end of a function execution
    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        for metric in metrics:
            my_metrics.add_metric(**metric)

    lambda_handler({}, {})
    first_invocation = capture_metrics_output(capsys)

    lambda_handler({}, {})
    second_invocation = capture_metrics_output(capsys)

    # THEN we should have default dimensions in both outputs
    assert "environment" in first_invocation
    assert "environment" in second_invocation
def test_log_metrics_should_invoke_function(metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    # WHEN log_metrics is used to serialize metrics
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        my_metrics.add_namespace(**namespace)
        my_metrics.add_metric(**metric)
        my_metrics.add_dimension(**dimension)
        return True

    # THEN log_metrics should invoke the function it decorates
    # and return no error if we have a metric, namespace, and a dimension
    lambda_handler({}, {})
def test_log_metrics_clear_metrics_after_invocation(metric, dimension,
                                                    namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    # WHEN log_metrics is used to flush metrics from memory
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        return True

    lambda_handler({}, {})

    # THEN metric set should be empty after function has been run
    assert my_metrics.metric_set == {}