# Imports assumed by the test snippets below; the original module header is not
# part of this excerpt. SchemaValidationError, like Metrics, is exported by
# aws_lambda_powertools.metrics.
import json
from typing import Dict, List

import pytest

from aws_lambda_powertools.metrics import Metrics, SchemaValidationError


def test_metrics_reuse_metric_set(metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()
    my_metrics.add_metric(**metric)

    # WHEN Metrics is initialized one more time
    my_metrics_2 = Metrics()

    # THEN Both class instances should have the same metric set
    assert my_metrics_2.metric_set == my_metrics.metric_set
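
# The metric/metrics/dimension/dimensions/namespace arguments used throughout
# these tests are pytest fixtures whose definitions are not part of this
# excerpt. Hypothetical definitions, with shapes inferred from how the tests
# unpack them via **kwargs (the original fixtures may use different values):
@pytest.fixture
def metric() -> Dict:
    return {"name": "single_metric", "unit": "Count", "value": 1}


@pytest.fixture
def metrics() -> List[Dict]:
    return [{"name": f"metric_{i}", "unit": "Count", "value": 1} for i in range(5)]


@pytest.fixture
def dimension() -> Dict:
    return {"name": "test_dimension", "value": "test"}


@pytest.fixture
def dimensions() -> List[Dict]:
    return [{"name": f"test_dimension_{i}", "value": "test"} for i in range(2)]


@pytest.fixture
def namespace() -> Dict:
    return {"name": "test_namespace"}
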
def test_log_metrics_during_exception(capsys, metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    # WHEN log_metrics is used to serialize metrics
    # but an error has been raised during handler execution
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        raise ValueError("Bubble up")

    with pytest.raises(ValueError):
        lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_single_metric(metric=metric,
                                       dimension=dimension,
                                       namespace=namespace)

    remove_timestamp(metrics=[output,
                              expected])  # Timestamp will always be different
    # THEN we should log metrics and propagate the exception up
    assert expected["_aws"] == output["_aws"]
def test_schema_no_metrics(dimensions, namespace):
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)
    with pytest.raises(SchemaValidationError):
        my_metrics.serialize_metric_set()
def test_metrics_spillover(capsys, metric, dimension, namespace, a_hundred_metrics):
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)
    my_metrics.add_dimension(**dimension)

    for _metric in a_hundred_metrics:
        my_metrics.add_metric(**_metric)

    @my_metrics.log_metrics
    def lambda_handler(evt, handler):
        my_metrics.add_metric(**metric)
        return True

    lambda_handler({}, {})

    output = capsys.readouterr().out.strip()
    spillover_metrics, single_metric = output.split("\n")
    spillover_metrics = json.loads(spillover_metrics)
    single_metric = json.loads(single_metric)

    expected_single_metric = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)
    expected_spillover_metrics = serialize_metrics(
        metrics=a_hundred_metrics, dimensions=[dimension], namespace=namespace
    )

    remove_timestamp(metrics=[spillover_metrics, expected_spillover_metrics, single_metric, expected_single_metric])

    assert single_metric["_aws"] == expected_single_metric["_aws"]
    assert spillover_metrics["_aws"] == expected_spillover_metrics["_aws"]
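
# a_hundred_metrics is another fixture outside this excerpt; a hypothetical
# definition that matches the 100-metric spillover behaviour exercised above:
@pytest.fixture
def a_hundred_metrics() -> List[Dict]:
    return [{"name": f"metric_{i}", "unit": "Count", "value": 1} for i in range(100)]
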
def test_log_metrics(capsys, metrics, dimensions, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)
    for metric in metrics:
        my_metrics.add_metric(**metric)
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    # WHEN we utilize log_metrics to serialize
    # and flush all metrics at the end of a function execution
    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        return True

    lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_metrics(metrics=metrics,
                                 dimensions=dimensions,
                                 namespace=namespace)

    remove_timestamp(metrics=[output,
                              expected])  # Timestamp will always be different

    # THEN we should have no exceptions
    # and a valid EMF object should've been flushed correctly
    assert expected["_aws"] == output["_aws"]
    for dimension in dimensions:
        assert dimension["name"] in output
def serialize_metrics(metrics: List[Dict], dimensions: List[Dict], namespace: Dict) -> Dict:
    """ Helper function to build EMF object from a list of metrics, dimensions """
    my_metrics = Metrics()
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    my_metrics.add_namespace(**namespace)
    for metric in metrics:
        my_metrics.add_metric(**metric)

    # Adding the 100th metric auto-flushes (serializes, prints, and clears the
    # metric set), so serializing here would fail; in that case the spillover
    # tests compare against the flushed output instead.
    if len(metrics) != 100:
        return my_metrics.serialize_metric_set()
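
# A minimal usage example of the helper above (added for illustration; the
# literal values are hypothetical but match the shapes the tests rely on):
def test_serialize_metrics_helper_example():
    expected = serialize_metrics(
        metrics=[{"name": "metric_one", "unit": "Count", "value": 1}],
        dimensions=[{"name": "test_dimension", "value": "test"}],
        namespace={"name": "test_namespace"},
    )
    assert expected["_aws"]["CloudWatchMetrics"][0]["Namespace"] == "test_namespace"
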
def test_multiple_metrics(metrics, dimensions, namespace):
    my_metrics = Metrics()
    for metric in metrics:
        my_metrics.add_metric(**metric)

    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    my_metrics.add_namespace(**namespace)
    output = my_metrics.serialize_metric_set()
    expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)

    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
    assert expected["_aws"] == output["_aws"]
def test_schema_no_metrics(dimensions, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)

    # WHEN no metrics have been added,
    # only a namespace and dimensions
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    # THEN it should fail validation and raise SchemaValidationError
    with pytest.raises(SchemaValidationError):
        my_metrics.serialize_metric_set()
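
# For reference, a minimal CloudWatch EMF payload that passes schema validation
# looks roughly like the dict below (timestamp value is illustrative). With no
# metrics added there is nothing to put under "Metrics", hence the error above.
EXAMPLE_EMF_PAYLOAD = {
    "_aws": {
        "Timestamp": 1587012345123,
        "CloudWatchMetrics": [
            {
                "Namespace": "test_namespace",
                "Dimensions": [["test_dimension"]],
                "Metrics": [{"Name": "single_metric", "Unit": "Count"}],
            }
        ],
    },
    "test_dimension": "test",
    "single_metric": 1,
}
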
def test_log_no_metrics_error_propagation(capsys, metric, dimension, namespace):
    # GIVEN no metrics have been added before the handler runs
    # WHEN the decorated handler raises an error
    # THEN SchemaValidationError, raised while serializing the empty metric set,
    # should be the exception seen by the caller
    my_metrics = Metrics()

    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        raise ValueError("Bubble up")

    with pytest.raises(SchemaValidationError):
        lambda_handler({}, {})
def test_log_no_metrics_error_propagation(capsys, metric, dimension,
                                          namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        # WHEN log_metrics is used despite having no metrics
        # and the decorated function also raises an exception
        raise ValueError("Bubble up")

    # THEN we should first raise SchemaValidationError as the main exception
    with pytest.raises(SchemaValidationError):
        lambda_handler({}, {})
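
# A simplified sketch (NOT the powertools implementation) of a decorator shape
# that produces the behaviour the two tests above rely on: the handler runs
# inside try/finally, and an exception raised while serializing in `finally`
# (SchemaValidationError for an empty set) replaces the handler's ValueError.
def log_metrics_sketch(metrics_instance):
    def decorator(handler):
        def wrapper(event, context):
            try:
                return handler(event, context)
            finally:
                # Serialization raises SchemaValidationError when no metrics
                # were added; an exception raised here supersedes one already
                # in flight.
                print(json.dumps(metrics_instance.serialize_metric_set()))
                metrics_instance.clear_metrics()

        return wrapper

    return decorator
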
def test_log_metrics_should_invoke_function(metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    # WHEN log_metrics is used to serialize metrics
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        my_metrics.add_namespace(**namespace)
        my_metrics.add_metric(**metric)
        my_metrics.add_dimension(**dimension)
        return True

    # THEN log_metrics should invoke the function it decorates
    # and return no error if we have a metric, namespace, and a dimension
    lambda_handler({}, {})
def test_log_metrics_schema_error(metrics, dimensions, namespace):
    # It should error out because the namespace below is passed as a plain dict
    # (not unpacked), so the serialized namespace is invalid and the decorator
    # raises SchemaValidationError while serializing the metric set
    my_metrics = Metrics()

    @my_metrics.log_metrics
    def lambda_handler(evt, handler):
        my_metrics.add_namespace(namespace)  # dict passed positionally, not unpacked: namespace becomes invalid
        for metric in metrics:
            my_metrics.add_metric(**metric)
        for dimension in dimensions:
            my_metrics.add_dimension(**dimension)
        return True

    with pytest.raises(SchemaValidationError):
        lambda_handler({}, {})
def test_log_metrics(capsys, metrics, dimensions, namespace):
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)
    for metric in metrics:
        my_metrics.add_metric(**metric)
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    @my_metrics.log_metrics
    def lambda_handler(evt, handler):
        return True

    lambda_handler({}, {})
    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)

    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
    assert expected["_aws"] == output["_aws"]
def test_log_metrics_clear_metrics_after_invocation(metric, dimension,
                                                    namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    # WHEN log_metrics is used to flush metrics from memory
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        return True

    lambda_handler({}, {})

    # THEN metric set should be empty after function has been run
    assert my_metrics.metric_set == {}
def test_log_metrics_error_propagation(capsys, metric, dimension, namespace):
    # GIVEN Metrics are serialized after handler execution
    # WHEN an error occurs and metrics have been added
    # THEN we should log metrics and propagate exception up
    my_metrics = Metrics()

    my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        raise ValueError("Bubble up")

    with pytest.raises(ValueError):
        lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)

    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
    assert expected["_aws"] == output["_aws"]
Example #16
import json

import requests

from aws_lambda_powertools.logging import logger_inject_lambda_context, logger_setup
from aws_lambda_powertools.metrics import Metrics, MetricUnit, single_metric
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.tracing import Tracer

tracer = Tracer()
logger = logger_setup()
metrics = Metrics()

_cold_start = True

metrics.add_dimension(name="operation", value="example")


@lambda_handler_decorator(trace_execution=True)
def my_middleware(handler, event, context, say_hello=False):
    if say_hello:
        print("========= HELLO PARAM DETECTED =========")
    print("========= Logging event before Handler is called =========")
    print(event)
    ret = handler(event, context)
    print("========= Logging response after Handler is called =========")
    print(ret)
    return ret
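
# The original example is truncated here; in the full example the decorators,
# including metrics.log_metrics, are stacked on a lambda_handler. A
# hypothetical, minimal handler for illustration only (the metric names,
# namespace, and response body are made up):
@metrics.log_metrics
@my_middleware(say_hello=True)
def lambda_handler(event, context):
    # Record a ColdStart metric once per container, using the module-level
    # _cold_start flag defined above.
    global _cold_start
    if _cold_start:
        metrics.add_metric(name="ColdStart", unit=MetricUnit.Count, value=1)
        _cold_start = False

    metrics.add_namespace(name="ExampleApplication")
    metrics.add_metric(name="SuccessfulInvocations", unit=MetricUnit.Count, value=1)
    return {"statusCode": 200, "body": json.dumps({"message": "hello world"})}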


def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace,
                           a_hundred_metrics):
    # GIVEN Metrics is initialized and we have over a hundred metrics to add
    my_metrics = Metrics()
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    # WHEN we add more than 100 metrics
    for _metric in a_hundred_metrics:
        my_metrics.add_metric(**_metric)

    # THEN it should serialize and flush all metrics at the 100th
    # and clear all metrics and dimensions from memory
    output = json.loads(capsys.readouterr().out.strip())
    spillover_metrics = output["_aws"]["CloudWatchMetrics"][0]["Metrics"]
    assert my_metrics.metric_set == {}
    assert len(spillover_metrics) == 100

    # GIVEN we add the 101st metric
    # WHEN we already have a Metrics instance
    # with an existing dimension set from the previous batch of 100 metrics
    my_metrics.add_metric(**metric)

    # THEN serializing the 101st metric should
    # create a new EMF object with a single metric in it (the 101st)
    # and have the same dimension we previously added
    serialized_101th_metric = my_metrics.serialize_metric_set()
    expected_101th_metric = serialize_single_metric(metric=metric,
                                                    dimension=dimension,
                                                    namespace=namespace)
    remove_timestamp(metrics=[serialized_101th_metric, expected_101th_metric])

    assert serialized_101th_metric["_aws"] == expected_101th_metric["_aws"]
@pytest.fixture(autouse=True)  # assumed: the bare `yield` suggests this is meant as a pytest fixture
def reset_metric_set():
    """Ensure each test starts with a clean metric set, since Metrics shares state across instances."""
    metrics = Metrics()
    metrics.clear_metrics()
    yield
import aioboto3  # used by aioboto_task below; missing from this excerpt
import requests

from aws_lambda_powertools.logging import Logger
from aws_lambda_powertools.logging.logger import set_package_logger
from aws_lambda_powertools.metrics import Metrics, MetricUnit, single_metric
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.tracing import Tracer, aiohttp_trace_config

set_package_logger()  # Enable package diagnostics (DEBUG log)

# tracer = Tracer() # patches all available modules
tracer = Tracer(patch_modules=(
    "aioboto3", "boto3",
    "requests"))  # ~90-100ms faster in perf depending on set of libs
logger = Logger()
metrics = Metrics()

_cold_start = True

metrics.add_dimension(name="operation",
                      value="example")  # added at cold start only


async def aioboto_task():
    async with aioboto3.client("sts") as sts:
        account = await sts.get_caller_identity()
        return account


async def aiohttp_task():
    # You have full access to all xray_recorder methods via `tracer.provider`