def test_log_metrics_spillover(capsys, metric, dimension, namespace, a_hundred_metrics):
    # GIVEN Metrics is initialized and we have a hundred metrics to add
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)
    my_metrics.add_dimension(**dimension)

    for _metric in a_hundred_metrics:
        my_metrics.add_metric(**_metric)

    # WHEN log_metrics is used on a handler that adds one more metric
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        my_metrics.add_metric(**metric)
        return True

    lambda_handler({}, {})

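    # THEN the output should contain two EMF objects:
    # the hundred spilled-over metrics and the single metric flushed by log_metrics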
    output = capsys.readouterr().out.strip()
    spillover_metrics, single_metric = output.split("\n")
    spillover_metrics = json.loads(spillover_metrics)
    single_metric = json.loads(single_metric)

    expected_single_metric = serialize_single_metric(metric=metric, dimension=dimension, namespace=namespace)
    expected_spillover_metrics = serialize_metrics(
        metrics=a_hundred_metrics, dimensions=[dimension], namespace=namespace
    )

    remove_timestamp(metrics=[spillover_metrics, expected_spillover_metrics, single_metric, expected_single_metric])

    assert single_metric["_aws"] == expected_single_metric["_aws"]
    assert spillover_metrics["_aws"] == expected_spillover_metrics["_aws"]


def test_log_metrics_during_exception(capsys, metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    # WHEN log_metrics is used to serialize metrics
    # but an error has been raised during handler execution
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        raise ValueError("Bubble up")

    with pytest.raises(ValueError):
        lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_single_metric(metric=metric,
                                       dimension=dimension,
                                       namespace=namespace)

    remove_timestamp(metrics=[output,
                              expected])  # Timestamp will always be different
    # THEN we should log metrics and propagate the exception up
    assert expected["_aws"] == output["_aws"]


def test_metrics_spillover(capsys, metric, dimension, namespace, a_hundred_metrics):
    # GIVEN Metrics is initialized and we have over a hundred metrics to add
    my_metrics = Metrics()
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    # WHEN we add a hundred metrics
    for _metric in a_hundred_metrics:
        my_metrics.add_metric(**_metric)

    # THEN it should serialize and flush all metrics at the 100th
    # and clear all metrics from memory, while keeping the existing dimensions
    output = json.loads(capsys.readouterr().out.strip())
    spillover_metrics = output["_aws"]["CloudWatchMetrics"][0]["Metrics"]
    assert my_metrics.metric_set == {}
    assert len(spillover_metrics) == 100

    # GIVEN we add the 101st metric
    # WHEN we already had a Metrics class instance
    # with an existing dimension set from the previous batch of a hundred metrics
    my_metrics.add_metric(**metric)

    # THEN serializing the 101st metric should
    # create a new EMF object with a single metric in it (the 101st)
    # and contain the same dimension we previously added
    serialized_101st_metric = my_metrics.serialize_metric_set()
    expected_101st_metric = serialize_single_metric(metric=metric,
                                                    dimension=dimension,
                                                    namespace=namespace)
    remove_timestamp(metrics=[serialized_101st_metric, expected_101st_metric])

    assert serialized_101st_metric["_aws"] == expected_101st_metric["_aws"]


def test_log_metrics(capsys, metrics, dimensions, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()
    my_metrics.add_namespace(**namespace)
    for metric in metrics:
        my_metrics.add_metric(**metric)
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    # WHEN we utilize log_metrics to serialize
    # and flush all metrics at the end of a function execution
    @my_metrics.log_metrics
    def lambda_handler(evt, ctx):
        return True

    lambda_handler({}, {})

    output = json.loads(capsys.readouterr().out.strip())
    expected = serialize_metrics(metrics=metrics,
                                 dimensions=dimensions,
                                 namespace=namespace)

    remove_timestamp(metrics=[output,
                              expected])  # Timestamp will always be different

    # THEN we should have no exceptions
    # and a valid EMF object should've been flushed correctly
    assert expected["_aws"] == output["_aws"]
    for dimension in dimensions:
        assert dimension["name"] in output


def test_metrics_reuse_metric_set(metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()
    my_metrics.add_metric(**metric)

    # WHEN Metrics is initialized one more time
    my_metrics_2 = Metrics()

    # THEN Both class instances should have the same metric set
    assert my_metrics_2.metric_set == my_metrics.metric_set


def serialize_metrics(metrics: List[Dict], dimensions: List[Dict], namespace: Dict) -> Dict:
    """Helper function to build an EMF object from a list of metrics and dimensions"""
    my_metrics = Metrics()
    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    my_metrics.add_namespace(**namespace)
    for metric in metrics:
        my_metrics.add_metric(**metric)

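    # Adding the hundredth metric flushes the metric set automatically
    # (see test_metrics_spillover), so there is nothing left to serialize here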
    if len(metrics) != 100:
        return my_metrics.serialize_metric_set()


def test_multiple_metrics(metrics, dimensions, namespace):
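    # GIVEN Metrics is initialized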
    my_metrics = Metrics()
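
    # WHEN we add multiple metrics and dimensions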
    for metric in metrics:
        my_metrics.add_metric(**metric)

    for dimension in dimensions:
        my_metrics.add_dimension(**dimension)

    my_metrics.add_namespace(**namespace)
    output = my_metrics.serialize_metric_set()
    expected = serialize_metrics(metrics=metrics, dimensions=dimensions, namespace=namespace)

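    # THEN the serialized output should match the expected EMF object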
    remove_timestamp(metrics=[output, expected])  # Timestamp will always be different
    assert expected["_aws"] == output["_aws"]


def test_log_metrics_clear_metrics_after_invocation(metric, dimension, namespace):
    # GIVEN Metrics is initialized
    my_metrics = Metrics()

    my_metrics.add_metric(**metric)
    my_metrics.add_dimension(**dimension)
    my_metrics.add_namespace(**namespace)

    # WHEN log_metrics is used to flush metrics from memory
    @my_metrics.log_metrics
    def lambda_handler(evt, context):
        return True

    lambda_handler({}, {})

    # THEN metric set should be empty after function has been run
    assert my_metrics.metric_set == {}