Example no. 1
    def __init__(
        self,
        stateful=True,
        resource: Resource = Resource.create_empty(),
    ):
        self.stateful = stateful
        self.resource = resource
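
A side note on this recurring signature (an observation added here, not taken from the source): Python evaluates default-argument expressions once, when the def statement runs, so the Resource.create_empty() default is built a single time and shared by every call that omits resource. A minimal self-contained sketch of that behaviour:

class _Sentinel:
    pass

def make(resource=_Sentinel()):
    # The default object is created once, when "def" executes, not per call.
    return resource

assert make() is make()  # both calls receive the exact same default instance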
Example no. 2
    def test_extract_resources(self):
        exporter = CloudMonitoringMetricsExporter(project_id=self.project_id)

        self.assertIsNone(
            exporter._get_monitored_resource(Resource.create_empty())
        )
        resource = Resource(
            labels={
                "cloud.account.id": 123,
                "host.id": "host",
                "cloud.zone": "US",
                "cloud.provider": "gcp",
                "extra_info": "extra",
                "gcp.resource_type": "gce_instance",
                "not_gcp_resource": "value",
            }
        )
        expected_extract = MonitoredResource(
            type="gce_instance",
            labels={"project_id": "123", "instance_id": "host", "zone": "US"},
        )
        self.assertEqual(
            exporter._get_monitored_resource(resource), expected_extract
        )

        resource = Resource(
            labels={
                "cloud.account.id": "123",
                "host.id": "host",
                "extra_info": "extra",
                "not_gcp_resource": "value",
                "gcp.resource_type": "gce_instance",
                "cloud.provider": "gcp",
            }
        )
        # Should raise KeyError when a required GCP label (here cloud.zone) is missing
        self.assertRaises(KeyError, exporter._get_monitored_resource, resource)

        resource = Resource(
            labels={
                "cloud.account.id": "123",
                "host.id": "host",
                "extra_info": "extra",
                "not_gcp_resource": "value",
                "gcp.resource_type": "unsupported_gcp_resource",
                "cloud.provider": "gcp",
            }
        )
        self.assertIsNone(exporter._get_monitored_resource(resource))

        resource = Resource(
            labels={
                "cloud.account.id": "123",
                "host.id": "host",
                "extra_info": "extra",
                "not_gcp_resource": "value",
                "cloud.provider": "aws",
            }
        )
        self.assertIsNone(exporter._get_monitored_resource(resource))
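
For readability, the assertions above boil down to the mapping sketched below. This is an illustrative reimplementation inferred from the test, not the exporter's actual _get_monitored_resource, and it assumes the same MonitoredResource type the test imports:

def gce_monitored_resource(labels):
    # Only GCP resources of a supported type are converted; anything else yields None.
    if labels.get("cloud.provider") != "gcp":
        return None
    if labels.get("gcp.resource_type") != "gce_instance":
        return None
    # A missing required key (e.g. cloud.zone) propagates as KeyError,
    # which is exactly what the malformed-resource case asserts.
    return MonitoredResource(
        type="gce_instance",
        labels={
            "project_id": str(labels["cloud.account.id"]),
            "instance_id": str(labels["host.id"]),
            "zone": str(labels["cloud.zone"]),
        },
    )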
    def test_get_value_observer_metric_descriptor(self):
        client = mock.Mock()
        exporter = CloudMonitoringMetricsExporter(
            project_id=self.project_id, client=client)
        exporter.project_name = self.project_name
        record = ExportRecord(
            MockMetric(),
            (),
            ValueObserverAggregator(),
            Resource.create_empty(),
        )
        exporter._get_metric_descriptor(record)
        client.create_metric_descriptor.assert_called_with(
            self.project_name,
            MetricDescriptor(
                **{
                    "name": None,
                    "type": "custom.googleapis.com/OpenTelemetry/name",
                    "display_name": "name",
                    "description": "description",
                    "labels": [],
                    "metric_kind": "GAUGE",
                    "value_type": "INT64",
                }),
        )
    def test_unique_identifier(self):
        client = mock.Mock()
        exporter1 = CloudMonitoringMetricsExporter(
            project_id=self.project_id,
            client=client,
            add_unique_identifier=True,
        )
        exporter2 = CloudMonitoringMetricsExporter(
            project_id=self.project_id,
            client=client,
            add_unique_identifier=True,
        )
        exporter1.project_name = self.project_name
        exporter2.project_name = self.project_name

        client.create_metric_descriptor.return_value = MetricDescriptor(
            **{
                "name":
                None,
                "type":
                "custom.googleapis.com/OpenTelemetry/name",
                "display_name":
                "name",
                "description":
                "description",
                "labels": [
                    LabelDescriptor(key=UNIQUE_IDENTIFIER_KEY,
                                    value_type="STRING"),
                ],
                "metric_kind":
                "CUMULATIVE",
                "value_type":
                "DOUBLE",
            })

        sum_agg_one = SumAggregator()
        sum_agg_one.update(1)
        metric_record = ExportRecord(MockMetric(), (), sum_agg_one,
                                     Resource.create_empty())
        exporter1.export([metric_record])
        exporter2.export([metric_record])

        (
            first_call,
            second_call,
        ) = client.create_metric_descriptor.call_args_list
        self.assertEqual(first_call[0][1].labels[0].key, UNIQUE_IDENTIFIER_KEY)
        self.assertEqual(second_call[0][1].labels[0].key,
                         UNIQUE_IDENTIFIER_KEY)

        first_call, second_call = client.create_time_series.call_args_list
        self.assertNotEqual(
            first_call[0][1][0].metric.labels[UNIQUE_IDENTIFIER_KEY],
            second_call[0][1][0].metric.labels[UNIQUE_IDENTIFIER_KEY],
        )
Example no. 5
    def __init__(
        self,
        instrumentation_info: "InstrumentationInfo",
        stateful: bool,
        resource: Resource = Resource.create_empty(),
    ):
        self.instrumentation_info = instrumentation_info
        self.metrics = set()
        self.observers = set()
        self.batcher = UngroupedBatcher(stateful)
        self.observers_lock = threading.Lock()
        self.resource = resource

    def __init__(
        self,
        sampler: sampling.Sampler = trace_api.sampling.ALWAYS_ON,
        resource: Resource = Resource.create_empty(),
        shutdown_on_exit: bool = True,
    ):
        self._active_span_processor = MultiSpanProcessor()
        self.resource = resource
        self.sampler = sampler
        self._atexit_handler = None
        if shutdown_on_exit:
            self._atexit_handler = atexit.register(self.shutdown)
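
The shutdown_on_exit flag used here (and again in Example no. 8) follows the standard atexit pattern: register the provider's shutdown method so it runs at interpreter exit, and keep the returned handle so the hook can be removed if shutdown is called manually first. A minimal sketch with a hypothetical Provider class, using only the stdlib atexit API:

import atexit

class Provider:
    def __init__(self, shutdown_on_exit: bool = True):
        self._atexit_handler = None
        if shutdown_on_exit:
            # atexit.register returns the callable, so it can later be
            # passed back to atexit.unregister.
            self._atexit_handler = atexit.register(self.shutdown)

    def shutdown(self):
        # Flush exporters/processors here, then drop the exit hook so
        # shutdown does not run a second time at interpreter exit.
        if self._atexit_handler is not None:
            atexit.unregister(self._atexit_handler)
            self._atexit_handler = None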
Example no. 7
    def __init__(
        self,
        name: str,
        context: trace_api.SpanContext,
        parent: Optional[trace_api.SpanContext] = None,
        sampler: Optional[sampling.Sampler] = None,
        trace_config: None = None,  # TODO
        resource: Resource = Resource.create_empty(),
        attributes: types.Attributes = None,  # TODO
        events: Sequence[Event] = None,  # TODO
        links: Sequence[trace_api.Link] = (),
        kind: trace_api.SpanKind = trace_api.SpanKind.INTERNAL,
        span_processor: SpanProcessor = SpanProcessor(),
        instrumentation_info: InstrumentationInfo = None,
        set_status_on_exception: bool = True,
    ) -> None:

        self.name = name
        self.context = context
        self.parent = parent
        self.sampler = sampler
        self.trace_config = trace_config
        self.resource = resource
        self.kind = kind
        self._set_status_on_exception = set_status_on_exception

        self.span_processor = span_processor
        self.status = None
        self._lock = threading.Lock()

        self._filter_attribute_values(attributes)
        if not attributes:
            self.attributes = Span._empty_attributes
        else:
            self.attributes = BoundedDict.from_map(MAX_NUM_ATTRIBUTES,
                                                   attributes)

        if events is None:
            self.events = Span._empty_events
        else:
            self.events = BoundedList(MAX_NUM_EVENTS)
            for event in events:
                self._filter_attribute_values(event.attributes)
                self.events.append(event)

        if links is None:
            self.links = Span._empty_links
        else:
            self.links = BoundedList.from_seq(MAX_NUM_LINKS, links)

        self._end_time = None  # type: Optional[int]
        self._start_time = None  # type: Optional[int]
        self.instrumentation_info = instrumentation_info
Example no. 8
    def __init__(
        self,
        stateful=True,
        resource: Resource = Resource.create_empty(),
        shutdown_on_exit: bool = True,
    ):
        self.stateful = stateful
        self.resource = resource
        self._controllers = []
        self._exporters = set()
        self._atexit_handler = None
        if shutdown_on_exit:
            self._atexit_handler = atexit.register(self.shutdown)

    def __init__(
        self,
        sampler: sampling.Sampler = sampling.DEFAULT_ON,
        resource: Resource = Resource.create_empty(),
        shutdown_on_exit: bool = True,
        active_span_processor: Union[
            SynchronousMultiSpanProcessor, ConcurrentMultiSpanProcessor
        ] = None,
    ):
        self._active_span_processor = (
            active_span_processor or SynchronousMultiSpanProcessor()
        )
        self.resource = resource
        self.sampler = sampler
        self._atexit_handler = None
        if shutdown_on_exit:
            self._atexit_handler = atexit.register(self.shutdown)
    def test_export_value_observer(self):
        client = mock.Mock()

        with mock.patch(
                "opentelemetry.exporter.cloud_monitoring.time_ns",
                lambda: NANOS_PER_SECOND,
        ):
            exporter = CloudMonitoringMetricsExporter(
                project_id=self.project_id, client=client)

        exporter.project_name = self.project_name

        client.create_metric_descriptor.return_value = MetricDescriptor(
            **{
                "name": None,
                "type": "custom.googleapis.com/OpenTelemetry/name",
                "display_name": "name",
                "description": "description",
                "labels": [],
                "metric_kind": "GAUGE",
                "value_type": "INT64",
            })

        aggregator = ValueObserverAggregator()
        aggregator.checkpoint = aggregator._TYPE(1, 2, 3, 4, 5)
        aggregator.last_update_timestamp = (WRITE_INTERVAL +
                                            1) * NANOS_PER_SECOND
        exporter.export([
            ExportRecord(
                MockMetric(meter=mock_meter()),
                (),
                aggregator,
                Resource.create_empty(),
            )
        ])

        series = TimeSeries()
        series.metric_kind = MetricDescriptor.MetricKind.GAUGE
        series.metric.type = "custom.googleapis.com/OpenTelemetry/name"
        point = series.points.add()
        point.value.int64_value = 5
        point.interval.end_time.seconds = WRITE_INTERVAL + 1
        point.interval.end_time.nanos = 0
        point.interval.start_time.seconds = WRITE_INTERVAL + 1
        point.interval.start_time.nanos = 0
        client.create_time_series.assert_has_calls(
            [mock.call(self.project_name, [series])])
    def test_get_metric_descriptor(self):
        client = mock.Mock()
        exporter = CloudMonitoringMetricsExporter(project_id=self.project_id,
                                                  client=client)
        exporter.project_name = self.project_name

        self.assertIsNone(
            exporter._get_metric_descriptor(
                ExportRecord(
                    MockMetric(),
                    (),
                    UnsupportedAggregator(),
                    Resource.create_empty(),
                )))

        record = ExportRecord(
            MockMetric(),
            (("label1", "value1"), ),
            SumAggregator(),
            Resource.create_empty(),
        )
        metric_descriptor = exporter._get_metric_descriptor(record)
        client.create_metric_descriptor.assert_called_with(
            self.project_name,
            MetricDescriptor(
                **{
                    "name": None,
                    "type": "custom.googleapis.com/OpenTelemetry/name",
                    "display_name": "name",
                    "description": "description",
                    "labels":
                    [LabelDescriptor(key="label1", value_type="STRING")],
                    "metric_kind": "CUMULATIVE",
                    "value_type": "INT64",
                }),
        )

        # Getting a cached metric descriptor shouldn't use another call
        cached_metric_descriptor = exporter._get_metric_descriptor(record)
        self.assertEqual(client.create_metric_descriptor.call_count, 1)
        self.assertEqual(metric_descriptor, cached_metric_descriptor)

        # Drop labels with values that aren't string, int or bool
        exporter._get_metric_descriptor(
            ExportRecord(
                MockMetric(name="name2", value_type=float),
                (
                    ("label1", "value1"),
                    ("label2", dict()),
                    ("label3", 3),
                    ("label4", False),
                ),
                SumAggregator(),
                Resource.create_empty(),
            ))
        client.create_metric_descriptor.assert_called_with(
            self.project_name,
            MetricDescriptor(
                **{
                    "name":
                    None,
                    "type":
                    "custom.googleapis.com/OpenTelemetry/name2",
                    "display_name":
                    "name2",
                    "description":
                    "description",
                    "labels": [
                        LabelDescriptor(key="label1", value_type="STRING"),
                        LabelDescriptor(key="label3", value_type="INT64"),
                        LabelDescriptor(key="label4", value_type="BOOL"),
                    ],
                    "metric_kind":
                    "CUMULATIVE",
                    "value_type":
                    "DOUBLE",
                }),
        )
    def test_extract_empty_resources(self):
        self.assertEqual(_extract_resources(Resource.create_empty()), {})
    def test_export(self):
        client = mock.Mock()

        with mock.patch(
                "opentelemetry.exporter.cloud_monitoring.time_ns",
                lambda: NANOS_PER_SECOND,
        ):
            exporter = CloudMonitoringMetricsExporter(
                project_id=self.project_id, client=client)

        exporter.project_name = self.project_name

        exporter.export([
            ExportRecord(
                MockMetric(),
                (("label1", "value1"), ),
                UnsupportedAggregator(),
                Resource.create_empty(),
            )
        ])
        client.create_time_series.assert_not_called()

        client.create_metric_descriptor.return_value = MetricDescriptor(
            **{
                "name":
                None,
                "type":
                "custom.googleapis.com/OpenTelemetry/name",
                "display_name":
                "name",
                "description":
                "description",
                "labels": [
                    LabelDescriptor(key="label1", value_type="STRING"),
                    LabelDescriptor(key="label2", value_type="INT64"),
                ],
                "metric_kind":
                "CUMULATIVE",
                "value_type":
                "DOUBLE",
            })

        resource = Resource(
            attributes={
                "cloud.account.id": 123,
                "host.id": "host",
                "cloud.zone": "US",
                "cloud.provider": "gcp",
                "extra_info": "extra",
                "gcp.resource_type": "gce_instance",
                "not_gcp_resource": "value",
            })

        sum_agg_one = SumAggregator()
        sum_agg_one.checkpoint = 1
        sum_agg_one.last_update_timestamp = (WRITE_INTERVAL +
                                             1) * NANOS_PER_SECOND
        exporter.export([
            ExportRecord(
                MockMetric(meter=mock_meter()),
                (
                    ("label1", "value1"),
                    ("label2", 1),
                ),
                sum_agg_one,
                resource,
            ),
            ExportRecord(
                MockMetric(meter=mock_meter()),
                (
                    ("label1", "value2"),
                    ("label2", 2),
                ),
                sum_agg_one,
                resource,
            ),
        ])
        expected_resource = MonitoredResource(
            type="gce_instance",
            labels={
                "project_id": "123",
                "instance_id": "host",
                "zone": "US"
            },
        )

        series1 = TimeSeries(resource=expected_resource)
        series1.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
        series1.metric.type = "custom.googleapis.com/OpenTelemetry/name"
        series1.metric.labels["label1"] = "value1"
        series1.metric.labels["label2"] = "1"
        point = series1.points.add()
        point.value.int64_value = 1
        point.interval.end_time.seconds = WRITE_INTERVAL + 1
        point.interval.end_time.nanos = 0
        point.interval.start_time.seconds = 1
        point.interval.start_time.nanos = 0

        series2 = TimeSeries(resource=expected_resource)
        series2.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
        series2.metric.type = "custom.googleapis.com/OpenTelemetry/name"
        series2.metric.labels["label1"] = "value2"
        series2.metric.labels["label2"] = "2"
        point = series2.points.add()
        point.value.int64_value = 1
        point.interval.end_time.seconds = WRITE_INTERVAL + 1
        point.interval.end_time.nanos = 0
        point.interval.start_time.seconds = 1
        point.interval.start_time.nanos = 0

        client.create_time_series.assert_has_calls(
            [mock.call(self.project_name, [series1, series2])])

        # Attempting to export too soon after another export with the exact
        # same labels leads to it being dropped

        sum_agg_two = SumAggregator()
        sum_agg_two.checkpoint = 1
        sum_agg_two.last_update_timestamp = (WRITE_INTERVAL +
                                             2) * NANOS_PER_SECOND
        exporter.export([
            ExportRecord(
                MockMetric(),
                (
                    ("label1", "value1"),
                    ("label2", 1),
                ),
                sum_agg_two,
                Resource.create_empty(),
            ),
            ExportRecord(
                MockMetric(),
                (
                    ("label1", "value2"),
                    ("label2", 2),
                ),
                sum_agg_two,
                Resource.create_empty(),
            ),
        ])
        self.assertEqual(client.create_time_series.call_count, 1)

        # But exporting with different labels is fine
        sum_agg_two.checkpoint = 2
        exporter.export([
            ExportRecord(
                MockMetric(),
                (
                    ("label1", "changed_label"),
                    ("label2", 2),
                ),
                sum_agg_two,
                Resource.create_empty(),
            ),
        ])
        series3 = TimeSeries()
        series3.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
        series3.metric.type = "custom.googleapis.com/OpenTelemetry/name"
        series3.metric.labels["label1"] = "changed_label"
        series3.metric.labels["label2"] = "2"
        point = series3.points.add()
        point.value.int64_value = 2
        point.interval.end_time.seconds = WRITE_INTERVAL + 2
        point.interval.end_time.nanos = 0
        point.interval.start_time.seconds = 1
        point.interval.start_time.nanos = 0

        client.create_time_series.assert_has_calls([
            mock.call(self.project_name, [series1, series2]),
            mock.call(self.project_name, [series3]),
        ])
Example no. 14
    def __init__(self, resource=Resource.create_empty(), stateful=True):
        self.resource = resource
        self.batcher = MockBatcher(stateful)
Example no. 15
    def __init__(
        self,
        resource: Resource = Resource.create_empty(),
    ):
        self.resource = resource
    def test_export_histogram(self):
        client = mock.Mock()

        with mock.patch(
                "opentelemetry.exporter.cloud_monitoring.time_ns",
                lambda: NANOS_PER_SECOND,
        ):
            exporter = CloudMonitoringMetricsExporter(
                project_id=self.project_id, client=client)

        exporter.project_name = self.project_name

        client.create_metric_descriptor.return_value = MetricDescriptor(
            **{
                "name": None,
                "type": "custom.googleapis.com/OpenTelemetry/name",
                "display_name": "name",
                "description": "description",
                "labels": [],
                "metric_kind": "CUMULATIVE",
                "value_type": "DISTRIBUTION",
            })

        aggregator = HistogramAggregator(config={"bounds": [2, 4, 6]})
        aggregator.checkpoint = OrderedDict([(2, 1), (4, 2), (6, 4), (">", 3)])
        aggregator.last_update_timestamp = (WRITE_INTERVAL +
                                            1) * NANOS_PER_SECOND
        exporter.export([
            ExportRecord(
                MockMetric(meter=mock_meter()),
                (),
                aggregator,
                Resource.create_empty(),
            )
        ])

        series = TimeSeries()
        series.metric_kind = MetricDescriptor.MetricKind.CUMULATIVE
        series.metric.type = "custom.googleapis.com/OpenTelemetry/name"
        point = {
            "interval": {
                "start_time": {
                    "seconds": 1
                },
                "end_time": {
                    "seconds": 11
                },
            },
            "value": {
                "distribution_value": {
                    "count": 10,
                    "bucket_options": {
                        "explicit_buckets": {
                            "bounds": [2.0, 4.0, 6.0]
                        }
                    },
                    "bucket_counts": [1, 2, 4, 3],
                }
            },
        }
        series.points.add(**point)
        client.create_time_series.assert_has_calls(
            [mock.call(self.project_name, [series])])
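
A small arithmetic note on the expected point above (added for clarity): the distribution's count is simply the sum of the per-bucket counts taken from the aggregator checkpoint, with the ">" entry covering values beyond the last bound.

bucket_counts = [1, 2, 4, 3]     # counts from the checkpoint keys 2, 4, 6 and ">"
assert sum(bucket_counts) == 10  # matches "count": 10 in the expected distribution_value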
    def test_stateless_times(self):
        client = mock.Mock()
        with mock.patch(
                "opentelemetry.exporter.cloud_monitoring.time_ns",
                lambda: NANOS_PER_SECOND,
        ):
            exporter = CloudMonitoringMetricsExporter(
                project_id=self.project_id,
                client=client,
            )

        client.create_metric_descriptor.return_value = MetricDescriptor(
            **{
                "name":
                None,
                "type":
                "custom.googleapis.com/OpenTelemetry/name",
                "display_name":
                "name",
                "description":
                "description",
                "labels": [
                    LabelDescriptor(key=UNIQUE_IDENTIFIER_KEY,
                                    value_type="STRING"),
                ],
                "metric_kind":
                "CUMULATIVE",
                "value_type":
                "DOUBLE",
            })

        agg = SumAggregator()
        agg.checkpoint = 1
        agg.last_update_timestamp = (WRITE_INTERVAL + 1) * NANOS_PER_SECOND

        metric_record = ExportRecord(MockMetric(stateful=False), (), agg,
                                     Resource.create_empty())

        exporter.export([metric_record])

        exports_1 = client.create_time_series.call_args_list[0]

        # verify the first metric started at exporter start time
        self.assertEqual(
            exports_1[0][1][0].points[0].interval.start_time.seconds, 1)
        self.assertEqual(
            exports_1[0][1][0].points[0].interval.start_time.nanos, 0)

        self.assertEqual(
            exports_1[0][1][0].points[0].interval.end_time.seconds,
            WRITE_INTERVAL + 1,
        )

        agg.last_update_timestamp = (WRITE_INTERVAL * 2 + 2) * NANOS_PER_SECOND

        metric_record = ExportRecord(MockMetric(stateful=False), (), agg,
                                     Resource.create_empty())

        exporter.export([metric_record])

        exports_2 = client.create_time_series.call_args_list[1]

        # 1ms ahead of end time of last export
        self.assertEqual(
            exports_2[0][1][0].points[0].interval.start_time.seconds,
            WRITE_INTERVAL + 1,
        )
        self.assertEqual(
            exports_2[0][1][0].points[0].interval.start_time.nanos, 1e6)

        self.assertEqual(
            exports_2[0][1][0].points[0].interval.end_time.seconds,
            WRITE_INTERVAL * 2 + 2,
        )
    def test_checkpoint_set_empty(self):
        processor = Processor(True, Resource.create_empty())
        records = processor.checkpoint_set()
        self.assertEqual(len(records), 0)