Example #1
def test_set_dimensions_overwrites_all_dimensions():
    # arrange
    context = MetricsContext()
    context.set_default_dimensions({fake.word(): fake.word()})
    context.put_dimensions({fake.word(): fake.word()})

    expected_dimensions = {fake.word(): fake.word()}

    # act
    context.set_dimensions(expected_dimensions)

    # assert
    assert context.dimensions == expected_dimensions
Example #2
def test_can_create_context_with_no_arguments(mock_time):
    # arrange
    # act
    context = MetricsContext()

    # assert
    assert context.namespace == "aws-embedded-metrics"
    assert context.meta == {"Timestamp": mock_time}
    assert context.properties == {}
    assert context.dimensions == []
    assert context.default_dimensions == {}
Example #3
    async def flush(self) -> None:
        # resolve the environment and get the sink
        # MOST of the time this will run synchronously
        # This only runs asynchronously if executing for the
        # first time in a non-lambda environment
        environment = await self.resolve_environment()

        self.__configureContextForEnvironment(environment)
        sink = environment.get_sink()

        # accept and reset the context
        sink.accept(self.context)
        self.context = MetricsContext.empty()
Example #4
    def serialize(context: MetricsContext) -> List[str]:
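        # Serialize the context into one or more EMF JSON events, starting a
        # new event whenever MAX_METRICS_PER_EVENT metrics have been written.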
        dimension_keys = []
        dimensions_properties: Dict[str, str] = {}

        for dimension_set in context.get_dimensions():
            keys = list(dimension_set.keys())
            dimension_keys.append(keys[0:MAX_DIMENSIONS])
            dimensions_properties = {**dimensions_properties, **dimension_set}

        metric_pointers: List[Dict[str, str]] = []

        metric_definitions = {
            "Dimensions": dimension_keys,
            "Metrics": metric_pointers,
            "Namespace": context.namespace,
        }
        cloud_watch_metrics = [metric_definitions]

        event_batches: List[str] = []

        body: Dict[str, Any] = {
            **dimensions_properties,
            **context.properties,
            "_aws": {
                **context.meta, "CloudWatchMetrics": cloud_watch_metrics
            },
        }

        for metric_name, metric in context.metrics.items():

            if len(metric.values) == 1:
                body[metric_name] = metric.values[0]
            else:
                body[metric_name] = metric.values

            metric_pointers.append({"Name": metric_name, "Unit": metric.unit})

            should_serialize: bool = len(
                metric_pointers) == MAX_METRICS_PER_EVENT
            if should_serialize:
                event_batches.append(json.dumps(body))
                metric_pointers = []
                body["_aws"]["CloudWatchMetrics"][0][
                    "Metrics"] = metric_pointers

        if not event_batches or metric_pointers:
            event_batches.append(json.dumps(body))

        return event_batches
Example #5
def test_can_create_context_with_no_arguments(mock_time):
    # reload the configuration module since it is loaded on
    # startup and cached
    reload(config)

    # arrange
    # act
    context = MetricsContext()

    # assert
    assert context.namespace == DEFAULT_NAMESPACE
    assert context.meta == {"Timestamp": mock_time}
    assert context.properties == {}
    assert context.dimensions == []
    assert context.default_dimensions == {}
Example #6
def test_accept_writes_multiple_messages_to_stdout(mock_serializer, capfd):
    # arrange
    expected_messages = [fake.word() for _ in range(10)]
    mock_serializer.serialize.return_value = expected_messages
    sink = StdoutSink(serializer=mock_serializer)
    context = MetricsContext.empty()
    context.meta["Timestamp"] = 1

    # act
    sink.accept(context)

    # assert
    out, err = capfd.readouterr()
    assert len(out.split()) == len(expected_messages)
    assert out.split() == expected_messages
Example #7
def test_accept_writes_to_stdout(capfd):
    # arrange
    sink = LambdaSink()
    context = MetricsContext.empty()
    context.meta["Timestamp"] = 1

    # act
    sink.accept(context)

    # assert
    out, err = capfd.readouterr()
    assert (
        out
        == '{"_aws": {"Timestamp": 1, "CloudWatchMetrics": [{"Dimensions": [], "Metrics": [], "Namespace": "aws-embedded-metrics"}]}}\n'
    )
Example #8
def test_create_copy_with_context_does_not_repeat_dimensions():
    # arrange
    context = MetricsContext()
    expected_dimensions = {fake.word(): fake.word()}

    custom = {fake.word(): fake.word()}
    context.set_default_dimensions(expected_dimensions)
    context.put_dimensions(custom)

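    # act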
    new_context = context.create_copy_with_context()
    new_context.set_default_dimensions(expected_dimensions)
    new_context.put_dimensions(custom)

    # assert
    assert len(new_context.get_dimensions()) == 1
Example #9
def test_more_than_max_number_of_metrics(mock_get_socket_client):
    # arrange
    context = MetricsContext.empty()
    expected_metrics = 401
    expected_send_message_calls = 5
    for index in range(expected_metrics):
        context.put_metric(f"{index}", 1)

    mock_tcp_client = Mock()
    mock_get_socket_client.return_value = mock_tcp_client

    # act
    sink = AgentSink("")
    sink.accept(context)

    # assert
    assert expected_send_message_calls == mock_tcp_client.send_message.call_count
Example #10
def test_get_dimensions_returns_only_custom_dimensions_if_default_dimensions_are_empty(
):
    # arrange
    context = MetricsContext()
    dimension_key = fake.word()
    dimension_value = fake.word()
    expected_dimensions = {dimension_key: dimension_value}

    context.set_default_dimensions({})
    context.put_dimensions(expected_dimensions)

    # act
    actual_dimensions = context.get_dimensions()

    # assert
    assert [expected_dimensions] == actual_dimensions
Example #11
def test_accept_writes_to_stdout(capfd):
    # arrange
    reload(config)

    sink = StdoutSink()
    context = MetricsContext.empty()
    context.meta["Timestamp"] = 1
    context.put_metric("Dummy", 1)

    # act
    sink.accept(context)

    # assert
    out, err = capfd.readouterr()
    assert (
        out
        == '{"_aws": {"Timestamp": 1, "CloudWatchMetrics": [{"Dimensions": [], "Metrics": [{"Name": "Dummy", "Unit": "None"}], '
           '"Namespace": "aws-embedded-metrics"}]}, "Dummy": 1}\n'
    )
Example #12
    def serialize(context: MetricsContext) -> str:
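        # Serialize the context into a single EMF JSON event string.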
        dimension_keys = []
        dimensions_properties: Dict[str, str] = {}

        for dimension_set in context.get_dimensions():
            keys = list(dimension_set.keys())
            dimension_keys.append(keys[0:MAX_DIMENSIONS])
            dimensions_properties = {**dimensions_properties, **dimension_set}

        metric_pointers: List[Dict[str, str]] = []

        metric_definitions = {
            "Dimensions": dimension_keys,
            "Metrics": metric_pointers,
            "Namespace": context.namespace,
        }
        cloud_watch_metrics = [metric_definitions]

        body: Dict[str, Any] = {
            **dimensions_properties,
            **context.properties,
            "_aws": {
                **context.meta, "CloudWatchMetrics": cloud_watch_metrics
            },
        }

        for metric_name, metric in context.metrics.items():

            if len(metric.values) == 1:
                body[metric_name] = metric.values[0]
            else:
                body[metric_name] = metric.values

            metric_pointers.append({"Name": metric_name, "Unit": metric.unit})

        return json.dumps(body)
Example #13
def test_get_dimensions_returns_merged_custom_and_default_dimensions():
    # arrange
    context = MetricsContext()
    custom_dimension_key = fake.word()
    custom_dimension_value = fake.word()

    default_dimension_key = fake.word()
    default_dimension_value = fake.word()

    expected_dimensions = {
        default_dimension_key: default_dimension_value,
        custom_dimension_key: custom_dimension_value,
    }

    context.set_default_dimensions(
        {default_dimension_key: default_dimension_value})
    context.put_dimensions({custom_dimension_key: custom_dimension_value})

    # act
    actual_dimensions = context.get_dimensions()

    # assert
    assert [expected_dimensions] == actual_dimensions
Example #14
def get_context():
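    # Test helper: an empty context with a fixed timestamp so serialized output is deterministic.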
    context = MetricsContext.empty()
    context.meta["Timestamp"] = 0
    return context
Example #15
    def serialize(context: MetricsContext) -> List[str]:
        config = get_config()

        dimension_keys = []
        dimensions_properties: Dict[str, str] = {}

        for dimension_set in context.get_dimensions():
            keys = list(dimension_set.keys())
            dimension_keys.append(keys[0:MAX_DIMENSIONS])
            dimensions_properties = {**dimensions_properties, **dimension_set}

        def create_body() -> Dict[str, Any]:
            body: Dict[str, Any] = {
                **dimensions_properties,
                **context.properties,
            }
            if not config.disable_metric_extraction:
                body["_aws"] = {
                    **context.meta,
                    "CloudWatchMetrics": [
                        {
                            "Dimensions": dimension_keys,
                            "Metrics": [],
                            "Namespace": context.namespace,
                        },
                    ],
                }
            return body

        current_body: Dict[str, Any] = {}
        event_batches: List[str] = []
        num_metrics_in_current_body = 0

        # Track if any given metric has data remaining to be serialized
        remaining_data = True

        # Track batch number to know where to slice metric data
        i = 0

        while remaining_data:
            remaining_data = False
            current_body = create_body()

            for metric_name, metric in context.metrics.items():

                if len(metric.values) == 1:
                    current_body[metric_name] = metric.values[0]
                else:
                    # Slice metric data as each batch cannot contain more than
                    # MAX_DATAPOINTS_PER_METRIC entries for a given metric
                    start_index = i * MAX_DATAPOINTS_PER_METRIC
                    end_index = (i + 1) * MAX_DATAPOINTS_PER_METRIC
                    current_body[metric_name] = metric.values[
                        start_index:end_index]

                    # Make sure to consume remaining values if we sliced before the end
                    # of the metric value list
                    if len(metric.values) > end_index:
                        remaining_data = True

                if not config.disable_metric_extraction:
                    current_body["_aws"]["CloudWatchMetrics"][0][
                        "Metrics"].append({
                            "Name": metric_name,
                            "Unit": metric.unit
                        })
                num_metrics_in_current_body += 1

                if num_metrics_in_current_body == MAX_METRICS_PER_EVENT:
                    event_batches.append(json.dumps(current_body))
                    current_body = create_body()
                    num_metrics_in_current_body = 0

            # advance to the next slice of datapoints
            i += 1
            if not event_batches or num_metrics_in_current_body > 0:
                event_batches.append(json.dumps(current_body))

        return event_batches
Example #16
def create_metrics_logger() -> MetricsLogger:
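    # Create a logger backed by a fresh, empty context and the environment resolver.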
    context = MetricsContext.empty()
    logger = MetricsLogger(resolve_environment, context)
    return logger