def test_metrics_spillover(capsys, metric, dimension, namespace, a_hundred_metrics):
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)
    my_metrics.add_dimension(**dimension)

    for _metric in a_hundred_metrics:
        my_metrics.add_metric(**_metric)

    @my_metrics.log_metrics
    def lambda_handler(evt, handler):
        my_metrics.add_metric(**metric)
        return True

    lambda_handler({}, {})

    output = capsys.readouterr().out.strip()
    spillover_metrics, single_metric = output.split("\n")
    spillover_metrics = json.loads(spillover_metrics)
    single_metric = json.loads(single_metric)

    expected_single_metric = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)
    expected_spillover_metrics = serialize_metrics(
        metrics=a_hundred_metrics, dimensions=[dimension], namespace=namespace
    )

    remove_timestamp(metrics=[spillover_metrics, expected_spillover_metrics, single_metric, expected_single_metric])

    assert single_metric["_aws"] == expected_single_metric["_aws"]
    assert spillover_metrics["_aws"] == expected_spillover_metrics["_aws"]
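

# The pytest fixtures used by these tests (metric, dimension, namespace, a_hundred_metrics)
# are defined elsewhere in the test module and are not part of this excerpt. A minimal
# sketch, assuming each fixture returns kwargs accepted by add_metric/add_dimension/
# add_namespace; the names and values below are illustrative, not the originals.
@pytest.fixture
def metric() -> Dict:
    return {"name": "single_metric", "unit": MetricUnit.Count, "value": 1}


@pytest.fixture
def dimension() -> Dict:
    return {"name": "test_dimension", "value": "test"}


@pytest.fixture
def namespace() -> Dict:
    return {"name": "test_namespace"}


@pytest.fixture
def a_hundred_metrics() -> List[Dict]:
    return [{"name": f"metric_{i}", "unit": MetricUnit.Count, "value": 1} for i in range(100)]
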
def test_schema_no_metrics(dimensions, namespace):
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)
    with pytest.raises(SchemaValidationError):
        my_metrics.serialize_metric_set()
def test_metrics_spillover(monkeypatch, capsys, metric, dimension, namespace,
                           a_hundred_metrics):
    # GIVEN Metrics is initialized and we have over a hundred metrics to add
    my_metrics = Metrics()
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    # WHEN we add more than 100 metrics
    for _metric in a_hundred_metrics:
        my_metrics.add_metric(**_metric)

    # THEN it should serialize and flush all metrics at the 100th
    # and clear all metrics and dimensions from memory
    output = json.loads(capsys.readouterr().out.strip())
    spillover_metrics = output["_aws"]["CloudWatchMetrics"][0]["Metrics"]
    assert my_metrics.metric_set == {}
    assert len(spillover_metrics) == 100

    # GIVEN we add the 101st metric
    # WHEN we already have a Metrics class instance
    # with an existing dimension set from the previous batch of 100 metrics
    my_metrics.add_metric(**metric)

    # THEN serializing the 101st metric should
    # create a new EMF object with a single metric in it (the 101st)
    # and have the same dimension we previously added
    serialized_101th_metric = my_metrics.serialize_metric_set()
    expected_101th_metric = serialize_single_metric(metric=metric,
                                                    dimension=dimension,
                                                    namespace=namespace)
    remove_timestamp(metrics=[serialized_101th_metric, expected_101th_metric])

    assert serialized_101th_metric["_aws"] == expected_101th_metric["_aws"]
def test_log_metrics_during_exception(capsys, metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    # WHEN log_metrics is used to serialize metrics
    # but an error has been raised during handler execution
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        raise ValueError("Bubble up")

    with pytest.raises(ValueError):
        lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_single_metric(metric=metric,
                                       dimension=dimension,
                                       namespace=namespace)

    remove_timestamp(metrics=[output,
                              expected])  # Timestamp will always be different
    # THEN we should log metrics and propagate the exception up
    assert expected["_aws"] == output["_aws"]
def test_log_metrics(capsys, metrics, dimensions, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)
    for metric in metrics:
        my_metrics.add_metric(**metric)
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    # WHEN we utilize log_metrics to serialize
    # and flush all metrics at the end of a function execution
    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        return True

    lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_metrics(metrics=metrics,
                                 dimensions=dimensions,
                                 namespace=namespace)

    remove_timestamp(metrics=[output,
                              expected])  # Timestamp will always be different

    # THEN we should have no exceptions
    # and a valid EMF object should've been flushed correctly
    assert expected["_aws"] == output["_aws"]
    for dimension in dimensions:
        assert dimension["name"] in output
def serialize_metrics(metrics: List[Dict], dimensions: List[Dict], namespace: Dict) -> Dict:
    """ Helper function to build EMF object from a list of metrics, dimensions """
    my_metrics = Metrics()
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    my_metrics.add_namespace(**namespace)
    for metric in metrics:
        my_metrics.add_metric(**metric)

    # when exactly 100 metrics are added, Metrics auto-flushes and clears the set
    # (see test_metrics_spillover), so only serialize for smaller batches
    if len(metrics) != 100:
        return my_metrics.serialize_metric_set()
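

# The helper functions serialize_single_metric and remove_timestamp, used throughout
# these tests, are not shown in this excerpt. Minimal sketches, modelled on the
# serialize_metrics helper above and on the EMF "_aws" envelope the tests assert against:
def serialize_single_metric(metric: Dict, dimension: Dict, namespace: Dict) -> Dict:
    """ Helper function to build an EMF object from a single metric, dimension and namespace """
    my_metrics = Metrics()
    my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)
    return my_metrics.serialize_metric_set()


def remove_timestamp(metrics: List):
    """ Helper function to drop the Timestamp key, which always differs between serializations """
    for metric in metrics:
        del metric["_aws"]["Timestamp"]
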
def test_schema_no_metrics(dimensions, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)

    # WHEN no metrics have been added,
    # only a namespace and dimensions
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    # THEN it should fail validation and raise SchemaValidationError
    with pytest.raises(SchemaValidationError):
        my_metrics.serialize_metric_set()
def test_multiple_metrics(metrics, dimensions, namespace):
    my_metrics = Metrics()
    for metric in metrics:
        my_metrics.add_metric(**metric)

    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    my_metrics.add_namespace(**namespace)
    output = my_metrics.serialize_metric_set()
    expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)

    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
    assert expected["_aws"] == output["_aws"]
def test_log_metrics(capsys, metrics, dimensions, namespace):
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)
    for metric in metrics:
        my_metrics.add_metric(**metric)
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    @my_metrics.log_metrics
    def lambda_handler(evt, handler):
        return True

    lambda_handler({}, {})
    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)

    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
    assert expected["_aws"] == output["_aws"]
def test_log_metrics_clear_metrics_after_invocation(metric, dimension,
                                                    namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    # WHEN log_metrics is used to flush metrics from memory
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        return True

    lambda_handler({}, {})

    # THEN metric set should be empty after function has been run
    assert my_metrics.metric_set == {}
def test_log_metrics_error_propagation(capsys, metric, dimension, namespace):
    # GIVEN Metrics are serialized after handler execution
    # WHEN an error occurs and metrics have been added
    # THEN we should log metrics and propagate the exception up
    my_metrics = Metrics()

    my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        raise ValueError("Bubble up")

    with pytest.raises(ValueError):
        lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)

    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
    assert expected["_aws"] == output["_aws"]
Example #12
import json

import requests

from aws_lambda_powertools.logging import logger_inject_lambda_context, logger_setup
from aws_lambda_powertools.metrics import Metrics, MetricUnit, single_metric
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.tracing import Tracer

tracer = Tracer()
logger = logger_setup()
metrics = Metrics()

_cold_start = True

metrics.add_dimension(name="operation", value="example")


@lambda_handler_decorator(trace_execution=True)
def my_middleware(handler, event, context, say_hello=False):
    if say_hello:
        print("========= HELLO PARAM DETECTED =========")
    print("========= Logging event before Handler is called =========")
    print(event)
    ret = handler(event, context)
    print("========= Logging response after Handler is called =========")
    print(ret)
    return ret
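

# A hypothetical handler wired through the middleware factory above; the handler name,
# body, and the say_hello=True argument are illustrative and not part of the original
# example. my_middleware logs the event before the handler runs and the response after.
@my_middleware(say_hello=True)
def example_handler(event, context):
    return {"statusCode": 200, "body": json.dumps({"message": "hello"})}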


@metrics.log_metrics

import aioboto3
import aiohttp

from aws_lambda_powertools.logging import Logger  # Logger/set_package_logger import paths are assumed for this powertools version
from aws_lambda_powertools.logging.logger import set_package_logger
from aws_lambda_powertools.metrics import Metrics, MetricUnit, single_metric
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.tracing import Tracer, aiohttp_trace_config

set_package_logger()  # Enable package diagnostics (DEBUG log)

# tracer = Tracer() # patches all available modules
tracer = Tracer(patch_modules=(
    "aioboto3", "boto3",
    "requests"))  # ~90-100ms faster in perf depending on set of libs
logger = Logger()
metrics = Metrics()

_cold_start = True

metrics.add_dimension(name="operation",
                      value="example")  # added at cold start only


async def aioboto_task():
    async with aioboto3.client("sts") as sts:
        account = await sts.get_caller_identity()
        return account


async def aiohttp_task():
    # You have full access to all xray_recorder methods via `tracer.provider`
    # these include thread-safe methods, all context managers, x-ray configuration etc.
    # see https://github.com/aws/aws-xray-sdk-python/issues/164
    async with tracer.provider.in_subsegment_async("## aiohttp escape hatch"):
        async with aiohttp.ClientSession(
                trace_configs=[aiohttp_trace_config()]) as session: