    def detect(self) -> "Resource":
        try:
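            # The helper functions (not shown in this snippet) query the EC2
            # instance metadata service: an IMDSv2 token, then the instance
            # identity document and the hostname.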
            token = _get_token()
            identity_dict = json.loads(_get_identity(token))
            hostname = _get_host(token)

            return Resource({
                ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,
                ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_EC2.value,
                ResourceAttributes.CLOUD_ACCOUNT_ID: identity_dict["accountId"],
                ResourceAttributes.CLOUD_REGION: identity_dict["region"],
                ResourceAttributes.CLOUD_AVAILABILITY_ZONE: identity_dict["availabilityZone"],
                ResourceAttributes.HOST_ID: identity_dict["instanceId"],
                ResourceAttributes.HOST_TYPE: identity_dict["instanceType"],
                ResourceAttributes.HOST_NAME: hostname,
            })
        # pylint: disable=broad-except
        except Exception as exception:
            if self.raise_on_error:
                raise exception

            logger.warning("%s failed: %s", self.__class__.__name__, exception)
            return Resource.get_empty()
Example 2
    def detect(self) -> "Resource":
        try:
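            # The helpers (not shown here) read the pod's Kubernetes
            # service-account credential and use it to check whether the
            # process is running on an EKS cluster.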
            cred_value = _get_k8s_cred_value()

            if not _is_eks(cred_value):
                raise RuntimeError(
                    "Could not confirm process is running on EKS."
                )

            cluster_name = _get_cluster_name(cred_value)
            container_id = _get_container_id()

            if not container_id and not cluster_name:
                raise RuntimeError(
                    "Neither cluster name nor container ID found on EKS process."
                )

            return Resource(
                {
                    ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,
                    ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_EKS.value,
                    ResourceAttributes.K8S_CLUSTER_NAME: cluster_name,
                    ResourceAttributes.CONTAINER_ID: container_id,
                }
            )
        # pylint: disable=broad-except
        except Exception as exception:
            if self.raise_on_error:
                raise exception

            logger.warning("%s failed: %s", self.__class__.__name__, exception)
            return Resource.get_empty()
Example 3
    def detect(self) -> "Resource":
        try:
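            # Every attribute comes from environment variables that the AWS
            # Lambda runtime sets for the function.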
            return Resource({
                ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,
                ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_LAMBDA.value,
                ResourceAttributes.CLOUD_REGION: environ["AWS_REGION"],
                ResourceAttributes.FAAS_NAME: environ["AWS_LAMBDA_FUNCTION_NAME"],
                ResourceAttributes.FAAS_VERSION: environ["AWS_LAMBDA_FUNCTION_VERSION"],
                ResourceAttributes.FAAS_INSTANCE: environ["AWS_LAMBDA_LOG_STREAM_NAME"],
                ResourceAttributes.FAAS_MAX_MEMORY: int(environ["AWS_LAMBDA_FUNCTION_MEMORY_SIZE"]),
            })
        # pylint: disable=broad-except
        except Exception as exception:
            if self.raise_on_error:
                raise exception

            logger.warning("%s failed: %s", self.__class__.__name__, exception)
            return Resource.get_empty()
Example 4
    def detect(self) -> "Resource":
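        # Elastic Beanstalk exposes deployment metadata (deployment ID,
        # environment name, version label) through the X-Ray daemon's
        # environment.conf file; its path differs between Windows and
        # other hosts.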
        if os.name == "nt":
            conf_file_path = (
                "C:\\Program Files\\Amazon\\XRay\\environment.conf")
        else:
            conf_file_path = "/var/elasticbeanstalk/xray/environment.conf"

        try:
            with open(conf_file_path, encoding="utf-8") as conf_file:
                parsed_data = json.load(conf_file)

            return Resource({
                ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,
                ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_ELASTIC_BEANSTALK.value,
                ResourceAttributes.SERVICE_NAME: CloudPlatformValues.AWS_ELASTIC_BEANSTALK.value,
                ResourceAttributes.SERVICE_INSTANCE_ID: parsed_data["deployment_id"],
                ResourceAttributes.SERVICE_NAMESPACE: parsed_data["environment_name"],
                ResourceAttributes.SERVICE_VERSION: parsed_data["version_label"],
            })
        # pylint: disable=broad-except
        except Exception as exception:
            if self.raise_on_error:
                raise exception

            logger.warning("%s failed: %s", self.__class__.__name__, exception)
            return Resource.get_empty()
Example 5
    def test_get_value_observer_metric_descriptor(self):
        client = mock.Mock()
        exporter = CloudMonitoringMetricsExporter(project_id=self.project_id,
                                                  client=client)
        exporter.project_name = self.project_name
        record = ExportRecord(
            MockMetric(),
            (),
            ValueObserverAggregator(),
            Resource.get_empty(),
        )
        exporter._get_metric_descriptor(record)
        client.create_metric_descriptor.assert_called_with(
            self.project_name,
            MetricDescriptor(
                **{
                    "name": None,
                    "type": "custom.googleapis.com/OpenTelemetry/name",
                    "display_name": "name",
                    "description": "description",
                    "labels": [],
                    "metric_kind": "GAUGE",
                    "value_type": "INT64",
                }),
        )
Example 6
    def test_extract_resources(self):
        exporter = CloudMonitoringMetricsExporter(project_id=self.project_id)

        self.assertIsNone(
            exporter._get_monitored_resource(Resource.get_empty()))
        resource = Resource(
            attributes={
                "cloud.account.id": 123,
                "host.id": "host",
                "cloud.zone": "US",
                "cloud.provider": "gcp",
                "extra_info": "extra",
                "gcp.resource_type": "gce_instance",
                "not_gcp_resource": "value",
            })
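        # The integer cloud.account.id is expected to be stringified into the
        # monitored resource's project_id label.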
        expected_extract = MonitoredResource(
            type="gce_instance",
            labels={
                "project_id": "123",
                "instance_id": "host",
                "zone": "US"
            },
        )
        self.assertEqual(exporter._get_monitored_resource(resource),
                         expected_extract)

        resource = Resource(
            attributes={
                "cloud.account.id": "123",
                "host.id": "host",
                "extra_info": "extra",
                "not_gcp_resource": "value",
                "gcp.resource_type": "gce_instance",
                "cloud.provider": "gcp",
            })
        # Should throw when passed a malformed GCP resource dict
        self.assertRaises(KeyError, exporter._get_monitored_resource, resource)

        resource = Resource(
            attributes={
                "cloud.account.id": "123",
                "host.id": "host",
                "extra_info": "extra",
                "not_gcp_resource": "value",
                "gcp.resource_type": "unsupported_gcp_resource",
                "cloud.provider": "gcp",
            })
        self.assertIsNone(exporter._get_monitored_resource(resource))

        resource = Resource(
            attributes={
                "cloud.account.id": "123",
                "host.id": "host",
                "extra_info": "extra",
                "not_gcp_resource": "value",
                "cloud.provider": "aws",
            })
        self.assertIsNone(exporter._get_monitored_resource(resource))
Example 7
    def test_unique_identifier(self):
        client = mock.Mock()
        exporter1 = CloudMonitoringMetricsExporter(
            project_id=self.project_id,
            client=client,
            add_unique_identifier=True,
        )
        exporter2 = CloudMonitoringMetricsExporter(
            project_id=self.project_id,
            client=client,
            add_unique_identifier=True,
        )
        exporter1.project_name = self.project_name
        exporter2.project_name = self.project_name
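        # With add_unique_identifier=True each exporter attaches its own value
        # for the UNIQUE_IDENTIFIER_KEY label, so two exporters sharing a
        # descriptor do not collide on the same time series.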

        client.create_metric_descriptor.return_value = MetricDescriptor(
            **{
                "name": None,
                "type": "custom.googleapis.com/OpenTelemetry/name",
                "display_name": "name",
                "description": "description",
                "labels": [
                    LabelDescriptor(key=UNIQUE_IDENTIFIER_KEY, value_type="STRING"),
                ],
                "metric_kind": "CUMULATIVE",
                "value_type": "DOUBLE",
            })

        sum_agg_one = SumAggregator()
        sum_agg_one.update(1)
        metric_record = ExportRecord(MockMetric(), (), sum_agg_one,
                                     Resource.get_empty())
        exporter1.export([metric_record])
        exporter2.export([metric_record])

        (
            first_call,
            second_call,
        ) = client.create_metric_descriptor.call_args_list
        self.assertEqual(first_call[0][1].labels[0].key, UNIQUE_IDENTIFIER_KEY)
        self.assertEqual(second_call[0][1].labels[0].key,
                         UNIQUE_IDENTIFIER_KEY)

        first_call, second_call = client.create_time_series.call_args_list
        self.assertNotEqual(
            first_call[0][1][0].metric.labels[UNIQUE_IDENTIFIER_KEY],
            second_call[0][1][0].metric.labels[UNIQUE_IDENTIFIER_KEY],
        )
Example 8
def _get_cluster_name():
    cred_value = _get_k8s_cred_value()
    if not _is_eks(cred_value):
        return ""  # not running on EKS, so there is no cluster name to report

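    # _get_cluster_info (not shown) is expected to return a JSON string whose
    # "data" / "cluster.name" entry holds the EKS cluster name.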
    cluster_info = json.loads(_get_cluster_info(cred_value))
    cluster_name = ""
    try:
        cluster_name = cluster_info["data"]["cluster.name"]
    except KeyError as exception:
        logger.warning("Cannot get cluster name on EKS: %s", exception)

    return cluster_name
Example 9
    def test_export_value_observer(self):
        client = mock.Mock()

        with mock.patch(
                "opentelemetry.exporter.cloud_monitoring.time_ns",
                lambda: NANOS_PER_SECOND,
        ):
            exporter = CloudMonitoringMetricsExporter(
                project_id=self.project_id, client=client)

        exporter.project_name = self.project_name

        client.create_metric_descriptor.return_value = MetricDescriptor(
            **{
                "name": None,
                "type": "custom.googleapis.com/OpenTelemetry/name",
                "display_name": "name",
                "description": "description",
                "labels": [],
                "metric_kind": "GAUGE",
                "value_type": "INT64",
            })

        aggregator = ValueObserverAggregator()
        aggregator.checkpoint = aggregator._TYPE(1, 2, 3, 4, 5)
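        # The checkpoint's last field holds the most recently observed value;
        # only that value (5) should appear as the gauge point asserted below.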
        aggregator.last_update_timestamp = (WRITE_INTERVAL +
                                            1) * NANOS_PER_SECOND
        exporter.export([
            ExportRecord(
                MockMetric(meter=mock_meter()),
                (),
                aggregator,
                Resource.get_empty(),
            )
        ])

        series = TimeSeries()
        series.metric_kind = MetricDescriptor.MetricKind.GAUGE
        series.metric.type = "custom.googleapis.com/OpenTelemetry/name"
        point = series.points.add()
        point.value.int64_value = 5
        point.interval.end_time.seconds = WRITE_INTERVAL + 1
        point.interval.end_time.nanos = 0
        point.interval.start_time.seconds = WRITE_INTERVAL + 1
        point.interval.start_time.nanos = 0
        client.create_time_series.assert_has_calls(
            [mock.call(self.project_name, [series])])
Example 10
    def detect(self) -> "Resource":
        try:
            if not os.environ.get(
                "ECS_CONTAINER_METADATA_URI"
            ) and not os.environ.get("ECS_CONTAINER_METADATA_URI_V4"):
                raise RuntimeError(
                    "Missing ECS_CONTAINER_METADATA_URI therefore process is not on ECS."
                )

            container_id = ""
            try:
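                # On ECS the container ID is taken from the trailing characters
                # of a /proc/self/cgroup line; if the file is missing, the ID
                # is simply left empty.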
                with open(
                    "/proc/self/cgroup", encoding="utf8"
                ) as container_info_file:
                    for raw_line in container_info_file.readlines():
                        line = raw_line.strip()
                        # Subsequent IDs should be the same; exit once one is found
                        if len(line) > _CONTAINER_ID_LENGTH:
                            container_id = line[-_CONTAINER_ID_LENGTH:]
                            break
            except FileNotFoundError as exception:
                logger.warning(
                    "Failed to get container ID on ECS: %s.", exception
                )

            return Resource(
                {
                    ResourceAttributes.CLOUD_PROVIDER: CloudProviderValues.AWS.value,
                    ResourceAttributes.CLOUD_PLATFORM: CloudPlatformValues.AWS_ECS.value,
                    ResourceAttributes.CONTAINER_NAME: socket.gethostname(),
                    ResourceAttributes.CONTAINER_ID: container_id,
                }
            )
        # pylint: disable=broad-except
        except Exception as exception:
            if self.raise_on_error:
                raise exception

            logger.warning("%s failed: %s", self.__class__.__name__, exception)
            return Resource.get_empty()
Example 11
    def test_extract_empty_resources(self):
        self.assertEqual(_extract_resources(Resource.get_empty()), {})
Example 12
    def test_stateless_times(self):
        client = mock.Mock()
        with mock.patch(
                "opentelemetry.exporter.cloud_monitoring.time_ns",
                lambda: NANOS_PER_SECOND,
        ):
            exporter = CloudMonitoringMetricsExporter(
                project_id=self.project_id,
                client=client,
            )

        client.create_metric_descriptor.return_value = MetricDescriptor(
            **{
                "name": None,
                "type": "custom.googleapis.com/OpenTelemetry/name",
                "display_name": "name",
                "description": "description",
                "labels": [
                    LabelDescriptor(key=UNIQUE_IDENTIFIER_KEY, value_type="STRING"),
                ],
                "metric_kind": "CUMULATIVE",
                "value_type": "DOUBLE",
            })

        agg = SumAggregator()
        agg.checkpoint = 1
        agg.last_update_timestamp = (WRITE_INTERVAL + 1) * NANOS_PER_SECOND

        metric_record = ExportRecord(MockMetric(stateful=False), (), agg,
                                     Resource.get_empty())

        exporter.export([metric_record])

        exports_1 = client.create_time_series.call_args_list[0]

        # verify the first metric started at exporter start time
        self.assertEqual(
            exports_1[0][1][0].points[0].interval.start_time.seconds, 1)
        self.assertEqual(
            exports_1[0][1][0].points[0].interval.start_time.nanos, 0)

        self.assertEqual(
            exports_1[0][1][0].points[0].interval.end_time.seconds,
            WRITE_INTERVAL + 1,
        )

        agg.last_update_timestamp = (WRITE_INTERVAL * 2 + 2) * NANOS_PER_SECOND

        metric_record = ExportRecord(MockMetric(stateful=False), (), agg,
                                     Resource.get_empty())

        exporter.export([metric_record])

        exports_2 = client.create_time_series.call_args_list[1]

        # 1ms ahead of end time of last export
        self.assertEqual(
            exports_2[0][1][0].points[0].interval.start_time.seconds,
            WRITE_INTERVAL + 1,
        )
        self.assertEqual(
            exports_2[0][1][0].points[0].interval.start_time.nanos, 1e6)

        self.assertEqual(
            exports_2[0][1][0].points[0].interval.end_time.seconds,
            WRITE_INTERVAL * 2 + 2,
        )
Example 13
    def test_export_histogram(self):
        client = mock.Mock()

        with mock.patch(
                "opentelemetry.exporter.cloud_monitoring.time_ns",
                lambda: NANOS_PER_SECOND,
        ):
            exporter = CloudMonitoringMetricsExporter(
                project_id=self.project_id, client=client)

        exporter.project_name = self.project_name

        client.create_metric_descriptor.return_value = MetricDescriptor(
            **{
                "name": None,
                "type": "custom.googleapis.com/OpenTelemetry/name",
                "display_name": "name",
                "description": "description",
                "labels": [],
                "metric_kind": "CUMULATIVE",
                "value_type": "DISTRIBUTION",
            })

        aggregator = HistogramAggregator(config={"bounds": [2, 4, 6]})
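        # The checkpoint maps each bucket boundary (plus ">" for the overflow
        # bucket) to its count: 1 + 2 + 4 + 3 = 10 observations in total.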
        aggregator.checkpoint = OrderedDict([(2, 1), (4, 2), (6, 4), (">", 3)])
        aggregator.last_update_timestamp = (WRITE_INTERVAL +
                                            1) * NANOS_PER_SECOND
        exporter.export([
            ExportRecord(
                MockMetric(meter=mock_meter()),
                (),
                aggregator,
                Resource.get_empty(),
            )
        ])

        series = TimeSeries()
        series.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
        series.metric.type = "custom.googleapis.com/OpenTelemetry/name"
        point = {
            "interval": {
                "start_time": {
                    "seconds": 1
                },
                "end_time": {
                    "seconds": 11
                },
            },
            "value": {
                "distribution_value": {
                    "count": 10,
                    "bucket_options": {
                        "explicit_buckets": {
                            "bounds": [2.0, 4.0, 6.0]
                        }
                    },
                    "bucket_counts": [1, 2, 4, 3],
                }
            },
        }
        series.points.add(**point)
        client.create_time_series.assert_has_calls(
            [mock.call(self.project_name, [series])])
Example 14
    def test_export(self):
        client = mock.Mock()

        with mock.patch(
                "opentelemetry.exporter.cloud_monitoring.time_ns",
                lambda: NANOS_PER_SECOND,
        ):
            exporter = CloudMonitoringMetricsExporter(
                project_id=self.project_id, client=client)

        exporter.project_name = self.project_name

        exporter.export([
            ExportRecord(
                MockMetric(),
                (("label1", "value1"), ),
                UnsupportedAggregator(),
                Resource.get_empty(),
            )
        ])
        client.create_time_series.assert_not_called()

        client.create_metric_descriptor.return_value = MetricDescriptor(
            **{
                "name": None,
                "type": "custom.googleapis.com/OpenTelemetry/name",
                "display_name": "name",
                "description": "description",
                "labels": [
                    LabelDescriptor(key="label1", value_type="STRING"),
                    LabelDescriptor(key="label2", value_type="INT64"),
                ],
                "metric_kind": "CUMULATIVE",
                "value_type": "DOUBLE",
            })

        resource = Resource(
            attributes={
                "cloud.account.id": 123,
                "host.id": "host",
                "cloud.zone": "US",
                "cloud.provider": "gcp",
                "extra_info": "extra",
                "gcp.resource_type": "gce_instance",
                "not_gcp_resource": "value",
            })

        sum_agg_one = SumAggregator()
        sum_agg_one.checkpoint = 1
        sum_agg_one.last_update_timestamp = (WRITE_INTERVAL +
                                             1) * NANOS_PER_SECOND
        exporter.export([
            ExportRecord(
                MockMetric(meter=mock_meter()),
                (
                    ("label1", "value1"),
                    ("label2", 1),
                ),
                sum_agg_one,
                resource,
            ),
            ExportRecord(
                MockMetric(meter=mock_meter()),
                (
                    ("label1", "value2"),
                    ("label2", 2),
                ),
                sum_agg_one,
                resource,
            ),
        ])
        expected_resource = MonitoredResource(
            type="gce_instance",
            labels={
                "project_id": "123",
                "instance_id": "host",
                "zone": "US"
            },
        )

        series1 = TimeSeries(resource=expected_resource)
        series1.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
        series1.metric.type = "custom.googleapis.com/OpenTelemetry/name"
        series1.metric.labels["label1"] = "value1"
        series1.metric.labels["label2"] = "1"
        point = series1.points.add()
        point.value.int64_value = 1
        point.interval.end_time.seconds = WRITE_INTERVAL + 1
        point.interval.end_time.nanos = 0
        point.interval.start_time.seconds = 1
        point.interval.start_time.nanos = 0

        series2 = TimeSeries(resource=expected_resource)
        series2.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
        series2.metric.type = "custom.googleapis.com/OpenTelemetry/name"
        series2.metric.labels["label1"] = "value2"
        series2.metric.labels["label2"] = "2"
        point = series2.points.add()
        point.value.int64_value = 1
        point.interval.end_time.seconds = WRITE_INTERVAL + 1
        point.interval.end_time.nanos = 0
        point.interval.start_time.seconds = 1
        point.interval.start_time.nanos = 0

        client.create_time_series.assert_has_calls(
            [mock.call(self.project_name, [series1, series2])])

        # Attempting to export too soon after another export with the exact
        # same labels leads to it being dropped

        sum_agg_two = SumAggregator()
        sum_agg_two.checkpoint = 1
        sum_agg_two.last_update_timestamp = (WRITE_INTERVAL +
                                             2) * NANOS_PER_SECOND
        exporter.export([
            ExportRecord(
                MockMetric(),
                (
                    ("label1", "value1"),
                    ("label2", 1),
                ),
                sum_agg_two,
                Resource.get_empty(),
            ),
            ExportRecord(
                MockMetric(),
                (
                    ("label1", "value2"),
                    ("label2", 2),
                ),
                sum_agg_two,
                Resource.get_empty(),
            ),
        ])
        self.assertEqual(client.create_time_series.call_count, 1)

        # But exporting with different labels is fine
        sum_agg_two.checkpoint = 2
        exporter.export([
            ExportRecord(
                MockMetric(),
                (
                    ("label1", "changed_label"),
                    ("label2", 2),
                ),
                sum_agg_two,
                Resource.get_empty(),
            ),
        ])
        series3 = TimeSeries()
        series3.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
        series3.metric.type = "custom.googleapis.com/OpenTelemetry/name"
        series3.metric.labels["label1"] = "changed_label"
        series3.metric.labels["label2"] = "2"
        point = series3.points.add()
        point.value.int64_value = 2
        point.interval.end_time.seconds = WRITE_INTERVAL + 2
        point.interval.end_time.nanos = 0
        point.interval.start_time.seconds = 1
        point.interval.start_time.nanos = 0

        client.create_time_series.assert_has_calls([
            mock.call(self.project_name, [series1, series2]),
            mock.call(self.project_name, [series3]),
        ])
Example 15
    def test_get_metric_descriptor(self):
        client = mock.Mock()
        exporter = CloudMonitoringMetricsExporter(project_id=self.project_id,
                                                  client=client)
        exporter.project_name = self.project_name

        self.assertIsNone(
            exporter._get_metric_descriptor(
                ExportRecord(
                    MockMetric(),
                    (),
                    UnsupportedAggregator(),
                    Resource.get_empty(),
                )))

        record = ExportRecord(
            MockMetric(),
            (("label1", "value1"), ),
            SumAggregator(),
            Resource.get_empty(),
        )
        metric_descriptor = exporter._get_metric_descriptor(record)
        client.create_metric_descriptor.assert_called_with(
            self.project_name,
            MetricDescriptor(
                **{
                    "name": None,
                    "type": "custom.googleapis.com/OpenTelemetry/name",
                    "display_name": "name",
                    "description": "description",
                    "labels":
                    [LabelDescriptor(key="label1", value_type="STRING")],
                    "metric_kind": "CUMULATIVE",
                    "value_type": "INT64",
                }),
        )

        # Getting a cached metric descriptor shouldn't use another call
        cached_metric_descriptor = exporter._get_metric_descriptor(record)
        self.assertEqual(client.create_metric_descriptor.call_count, 1)
        self.assertEqual(metric_descriptor, cached_metric_descriptor)

        # Drop labels with values that aren't string, int or bool
        exporter._get_metric_descriptor(
            ExportRecord(
                MockMetric(name="name2", value_type=float),
                (
                    ("label1", "value1"),
                    ("label2", dict()),
                    ("label3", 3),
                    ("label4", False),
                ),
                SumAggregator(),
                Resource.get_empty(),
            ))
        client.create_metric_descriptor.assert_called_with(
            self.project_name,
            MetricDescriptor(
                **{
                    "name": None,
                    "type": "custom.googleapis.com/OpenTelemetry/name2",
                    "display_name": "name2",
                    "description": "description",
                    "labels": [
                        LabelDescriptor(key="label1", value_type="STRING"),
                        LabelDescriptor(key="label3", value_type="INT64"),
                        LabelDescriptor(key="label4", value_type="BOOL"),
                    ],
                    "metric_kind": "CUMULATIVE",
                    "value_type": "DOUBLE",
                }),
        )