def test_export_histogram(self):
    client = mock.Mock()
    with mock.patch(
        "opentelemetry.exporter.cloud_monitoring.time_ns",
        lambda: NANOS_PER_SECOND,
    ):
        exporter = CloudMonitoringMetricsExporter(
            project_id=self.project_id, client=client
        )
    exporter.project_name = self.project_name
    client.create_metric_descriptor.return_value = MetricDescriptor(
        **{
            "name": None,
            "type": "custom.googleapis.com/OpenTelemetry/name",
            "display_name": "name",
            "description": "description",
            "labels": [],
            "metric_kind": "CUMULATIVE",
            "value_type": "DISTRIBUTION",
        }
    )

    aggregator = HistogramAggregator(config={"bounds": [2, 4, 6]})
    aggregator.checkpoint = OrderedDict([(2, 1), (4, 2), (6, 4), (">", 3)])
    aggregator.last_update_timestamp = (WRITE_INTERVAL + 1) * NANOS_PER_SECOND
    exporter.export(
        [
            MetricRecord(
                MockMetric(meter=MockMeter()),
                (),
                aggregator,
            )
        ]
    )

    series = TimeSeries()
    series.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    point = {
        "interval": {
            "start_time": {"seconds": 1},
            "end_time": {"seconds": 11},
        },
        "value": {
            "distribution_value": {
                "count": 10,
                "bucket_options": {
                    "explicit_buckets": {"bounds": [2.0, 4.0, 6.0]}
                },
                "bucket_counts": [1, 2, 4, 3],
            }
        },
    }
    series.points.add(**point)
    client.create_time_series.assert_has_calls(
        [mock.call(self.project_name, [series])]
    )
def test_get_value_observer_metric_descriptor(self):
    client = mock.Mock()
    exporter = CloudMonitoringMetricsExporter(
        project_id=self.project_id, client=client
    )
    exporter.project_name = self.project_name
    record = ExportRecord(
        MockMetric(),
        (),
        ValueObserverAggregator(),
        Resource.create_empty(),
    )
    exporter._get_metric_descriptor(record)
    client.create_metric_descriptor.assert_called_with(
        self.project_name,
        MetricDescriptor(
            **{
                "name": None,
                "type": "custom.googleapis.com/OpenTelemetry/name",
                "display_name": "name",
                "description": "description",
                "labels": [],
                "metric_kind": "GAUGE",
                "value_type": "INT64",
            }
        ),
    )
def test_unique_identifier(self):
    client = mock.Mock()
    exporter1 = CloudMonitoringMetricsExporter(
        project_id=self.project_id,
        client=client,
        add_unique_identifier=True,
    )
    exporter2 = CloudMonitoringMetricsExporter(
        project_id=self.project_id,
        client=client,
        add_unique_identifier=True,
    )
    exporter1.project_name = self.project_name
    exporter2.project_name = self.project_name
    client.create_metric_descriptor.return_value = MetricDescriptor(
        **{
            "name": None,
            "type": "custom.googleapis.com/OpenTelemetry/name",
            "display_name": "name",
            "description": "description",
            "labels": [
                LabelDescriptor(
                    key=UNIQUE_IDENTIFIER_KEY, value_type="STRING"
                ),
            ],
            "metric_kind": "CUMULATIVE",
            "value_type": "DOUBLE",
        }
    )

    sum_agg_one = SumAggregator()
    sum_agg_one.update(1)
    metric_record = MetricRecord(
        MockMetric(),
        (),
        sum_agg_one,
    )
    exporter1.export([metric_record])
    exporter2.export([metric_record])

    (
        first_call,
        second_call,
    ) = client.create_metric_descriptor.call_args_list
    self.assertEqual(first_call[0][1].labels[0].key, UNIQUE_IDENTIFIER_KEY)
    self.assertEqual(second_call[0][1].labels[0].key, UNIQUE_IDENTIFIER_KEY)

    first_call, second_call = client.create_time_series.call_args_list
    self.assertNotEqual(
        first_call[0][1][0].metric.labels[UNIQUE_IDENTIFIER_KEY],
        second_call[0][1][0].metric.labels[UNIQUE_IDENTIFIER_KEY],
    )
def test_export_value_observer(self):
    client = mock.Mock()
    with mock.patch(
        "opentelemetry.exporter.cloud_monitoring.time_ns",
        lambda: NANOS_PER_SECOND,
    ):
        exporter = CloudMonitoringMetricsExporter(
            project_id=self.project_id, client=client
        )
    exporter.project_name = self.project_name
    client.create_metric_descriptor.return_value = MetricDescriptor(
        **{
            "name": None,
            "type": "custom.googleapis.com/OpenTelemetry/name",
            "display_name": "name",
            "description": "description",
            "labels": [],
            "metric_kind": "GAUGE",
            "value_type": "INT64",
        }
    )

    aggregator = ValueObserverAggregator()
    aggregator.checkpoint = aggregator._TYPE(1, 2, 3, 4, 5)
    aggregator.last_update_timestamp = (WRITE_INTERVAL + 1) * NANOS_PER_SECOND
    exporter.export(
        [
            ExportRecord(
                MockMetric(meter=MockMeter()),
                (),
                aggregator,
                Resource.create_empty(),
            )
        ]
    )

    series = TimeSeries()
    series.metric_kind = MetricDescriptor.MetricKind.GAUGE
    series.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    point = series.points.add()
    point.value.int64_value = 5
    point.interval.end_time.seconds = WRITE_INTERVAL + 1
    point.interval.end_time.nanos = 0
    point.interval.start_time.seconds = WRITE_INTERVAL + 1
    point.interval.start_time.nanos = 0
    client.create_time_series.assert_has_calls(
        [mock.call(self.project_name, [series])]
    )
def initialize_base_metrics_message(
    self,
    metric_name: str,
    labels: dict,
    metric_kind=MetricDescriptor.GAUGE,
    value_type=MetricDescriptor.INT64,
    unit=None,
) -> TimeSeries:
    """
    Creates a TimeSeries metrics object named metric_name with the given labels.

    :param metric_name: name of the custom metric, as in custom.googleapis.com/ + metric_name
    :param labels: metric labels to add
    :param metric_kind: the kind of measurement; describes how the data is reported
    :param value_type: type of the metric value
    :param unit: the unit in which the metric value is reported
    :return: google.cloud.monitoring_v3.types.TimeSeries
    """
    metric_descriptor_values = {
        "metric_kind": metric_kind,
        "value_type": value_type,
        "type": f"custom.googleapis.com/{metric_name}",
    }
    if unit is not None:
        metric_descriptor_values["unit"] = unit

    self.metrics_client.create_metric_descriptor(
        name=self.monitoring_project_path,
        metric_descriptor=MetricDescriptor(**metric_descriptor_values),
    )
    # If we send requests through metrics_client one right after another, we
    # receive an unclear error 500, probably due to Google's request throttling.
    sleep(1)

    series = self.metrics_type(metric_kind=metric_kind, value_type=value_type)
    series.resource.type = "global"
    series.metric.type = f"custom.googleapis.com/{metric_name}"
    series.metric.labels.update(labels)
    return series
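A minimal usage sketch of the helper above, assuming the surrounding object exposes the same `metrics_client`, `monitoring_project_path`, and `metrics_type` (a `google.cloud.monitoring_v3.types.TimeSeries` factory) attributes and the pre-2.0 positional `create_time_series(name, series)` client signature used elsewhere in this code. The `reporter` parameter, metric name, and label values are hypothetical.

from time import time

def report_active_jobs(reporter, active_jobs: int) -> None:
    # Build the base series with the default GAUGE/INT64 descriptor.
    series = reporter.initialize_base_metrics_message(
        "pipeline/active_jobs", labels={"pipeline": "nightly-sync"}
    )
    # Attach a single data point; a GAUGE interval only needs an end time.
    point = series.points.add()
    point.value.int64_value = active_jobs
    point.interval.end_time.seconds = int(time())
    # Write the series through the same client the helper used.
    reporter.metrics_client.create_time_series(
        reporter.monitoring_project_path, [series]
    )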
def _get_metric_descriptor(
    self, record: MetricRecord
) -> Optional[MetricDescriptor]:
    """
    We can map Metric to MetricDescriptor using Metric.name or
    MetricDescriptor.type. We create the MetricDescriptor if it doesn't
    exist already and cache it. Note that recreating a MetricDescriptor
    is a no-op if it already exists.

    :param record:
    :return:
    """
    instrument = record.instrument
    descriptor_type = "custom.googleapis.com/OpenTelemetry/{}".format(
        instrument.name
    )
    if descriptor_type in self._metric_descriptors:
        return self._metric_descriptors[descriptor_type]

    descriptor = {
        "name": None,
        "type": descriptor_type,
        "display_name": instrument.name,
        "description": instrument.description,
        "labels": [],
    }
    for key, value in record.labels:
        if isinstance(value, str):
            descriptor["labels"].append(
                LabelDescriptor(key=key, value_type="STRING")
            )
        elif isinstance(value, bool):
            descriptor["labels"].append(
                LabelDescriptor(key=key, value_type="BOOL")
            )
        elif isinstance(value, int):
            descriptor["labels"].append(
                LabelDescriptor(key=key, value_type="INT64")
            )
        else:
            logger.warning(
                "Label value %s is not a string, bool or integer, ignoring it",
                value,
            )

    if self.unique_identifier:
        descriptor["labels"].append(
            LabelDescriptor(key=UNIQUE_IDENTIFIER_KEY, value_type="STRING")
        )

    # SumAggregator is best represented as a cumulative, but it can't be
    # represented that way if it can decrement. So we need to make sure
    # that the instrument is not an UpDownCounter.
    if isinstance(record.aggregator, SumAggregator) and not isinstance(
        record.instrument, UpDownCounter
    ):
        descriptor["metric_kind"] = MetricDescriptor.MetricKind.CUMULATIVE
    elif isinstance(record.aggregator, ValueObserverAggregator):
        descriptor["metric_kind"] = MetricDescriptor.MetricKind.GAUGE
    elif isinstance(record.aggregator, HistogramAggregator):
        descriptor["metric_kind"] = MetricDescriptor.MetricKind.CUMULATIVE
    else:
        logger.warning(
            "Unsupported instrument/aggregator combo, types %s and %s, ignoring it",
            type(record.instrument).__name__,
            type(record.aggregator).__name__,
        )
        return None

    if isinstance(record.aggregator, HistogramAggregator):
        descriptor["value_type"] = MetricDescriptor.ValueType.DISTRIBUTION
    elif instrument.value_type == int:
        descriptor["value_type"] = MetricDescriptor.ValueType.INT64
    elif instrument.value_type == float:
        descriptor["value_type"] = MetricDescriptor.ValueType.DOUBLE

    proto_descriptor = MetricDescriptor(**descriptor)
    try:
        descriptor = self.client.create_metric_descriptor(
            self.project_name, proto_descriptor
        )
    # pylint: disable=broad-except
    except Exception as ex:
        logger.error(
            "Failed to create metric descriptor %s",
            proto_descriptor,
            exc_info=ex,
        )
        return None
    self._metric_descriptors[descriptor_type] = descriptor
    return descriptor
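For context, here is a simplified sketch of how an export path might consume `_get_metric_descriptor`; it is not the exporter's actual `export()` body, and `_records_to_time_series` is a hypothetical helper. Records whose descriptor cannot be created (unsupported aggregator or failed API call) return None and are skipped; the rest become TimeSeries protos tagged with the descriptor's kind and value type.

def _records_to_time_series(exporter, records):
    """Illustrative only: turn metric records into TimeSeries protos."""
    all_series = []
    for record in records:
        descriptor = exporter._get_metric_descriptor(record)
        if descriptor is None:
            # Unsupported instrument/aggregator combination or a failed
            # create_metric_descriptor call; the exporter already logged it.
            continue
        series = TimeSeries(
            metric_kind=descriptor.metric_kind,
            value_type=descriptor.value_type,
        )
        series.metric.type = descriptor.type
        for key, value in record.labels:
            # Cloud Monitoring metric labels are string-valued.
            series.metric.labels[key] = str(value)
        all_series.append(series)
    return all_series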
def test_stateless_times(self):
    client = mock.Mock()
    with mock.patch(
        "opentelemetry.exporter.cloud_monitoring.time_ns",
        lambda: NANOS_PER_SECOND,
    ):
        exporter = CloudMonitoringMetricsExporter(
            project_id=self.project_id,
            client=client,
        )
    client.create_metric_descriptor.return_value = MetricDescriptor(
        **{
            "name": None,
            "type": "custom.googleapis.com/OpenTelemetry/name",
            "display_name": "name",
            "description": "description",
            "labels": [
                LabelDescriptor(
                    key=UNIQUE_IDENTIFIER_KEY, value_type="STRING"
                ),
            ],
            "metric_kind": "CUMULATIVE",
            "value_type": "DOUBLE",
        }
    )

    agg = SumAggregator()
    agg.checkpoint = 1
    agg.last_update_timestamp = (WRITE_INTERVAL + 1) * NANOS_PER_SECOND
    metric_record = MetricRecord(MockMetric(stateful=False), (), agg)
    exporter.export([metric_record])

    exports_1 = client.create_time_series.call_args_list[0]
    # verify the first metric started at exporter start time
    self.assertEqual(
        exports_1[0][1][0].points[0].interval.start_time.seconds, 1
    )
    self.assertEqual(
        exports_1[0][1][0].points[0].interval.start_time.nanos, 0
    )
    self.assertEqual(
        exports_1[0][1][0].points[0].interval.end_time.seconds,
        WRITE_INTERVAL + 1,
    )

    agg.last_update_timestamp = (WRITE_INTERVAL * 2 + 2) * NANOS_PER_SECOND
    metric_record = MetricRecord(MockMetric(stateful=False), (), agg)
    exporter.export([metric_record])

    exports_2 = client.create_time_series.call_args_list[1]
    # 1ms ahead of end time of last export
    self.assertEqual(
        exports_2[0][1][0].points[0].interval.start_time.seconds,
        WRITE_INTERVAL + 1,
    )
    self.assertEqual(
        exports_2[0][1][0].points[0].interval.start_time.nanos, 1e6
    )
    self.assertEqual(
        exports_2[0][1][0].points[0].interval.end_time.seconds,
        WRITE_INTERVAL * 2 + 2,
    )
def test_export(self):
    client = mock.Mock()
    with mock.patch(
        "opentelemetry.exporter.cloud_monitoring.time_ns",
        lambda: NANOS_PER_SECOND,
    ):
        exporter = CloudMonitoringMetricsExporter(
            project_id=self.project_id, client=client
        )
    exporter.project_name = self.project_name

    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "value1"),),
                UnsupportedAggregator(),
            )
        ]
    )
    client.create_time_series.assert_not_called()

    client.create_metric_descriptor.return_value = MetricDescriptor(
        **{
            "name": None,
            "type": "custom.googleapis.com/OpenTelemetry/name",
            "display_name": "name",
            "description": "description",
            "labels": [
                LabelDescriptor(key="label1", value_type="STRING"),
                LabelDescriptor(key="label2", value_type="INT64"),
            ],
            "metric_kind": "CUMULATIVE",
            "value_type": "DOUBLE",
        }
    )

    resource = Resource(
        labels={
            "cloud.account.id": 123,
            "host.id": "host",
            "cloud.zone": "US",
            "cloud.provider": "gcp",
            "extra_info": "extra",
            "gcp.resource_type": "gce_instance",
            "not_gcp_resource": "value",
        }
    )

    sum_agg_one = SumAggregator()
    sum_agg_one.checkpoint = 1
    sum_agg_one.last_update_timestamp = (
        WRITE_INTERVAL + 1
    ) * NANOS_PER_SECOND
    exporter.export(
        [
            MetricRecord(
                MockMetric(meter=MockMeter(resource=resource)),
                (("label1", "value1"), ("label2", 1),),
                sum_agg_one,
            ),
            MetricRecord(
                MockMetric(meter=MockMeter(resource=resource)),
                (("label1", "value2"), ("label2", 2),),
                sum_agg_one,
            ),
        ]
    )

    expected_resource = MonitoredResource(
        type="gce_instance",
        labels={"project_id": "123", "instance_id": "host", "zone": "US"},
    )

    series1 = TimeSeries(resource=expected_resource)
    series1.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series1.metric.labels["label1"] = "value1"
    series1.metric.labels["label2"] = "1"
    point = series1.points.add()
    point.value.int64_value = 1
    point.interval.end_time.seconds = WRITE_INTERVAL + 1
    point.interval.end_time.nanos = 0
    point.interval.start_time.seconds = 1
    point.interval.start_time.nanos = 0

    series2 = TimeSeries(resource=expected_resource)
    series2.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series2.metric.labels["label1"] = "value2"
    series2.metric.labels["label2"] = "2"
    point = series2.points.add()
    point.value.int64_value = 1
    point.interval.end_time.seconds = WRITE_INTERVAL + 1
    point.interval.end_time.nanos = 0
    point.interval.start_time.seconds = 1
    point.interval.start_time.nanos = 0
    client.create_time_series.assert_has_calls(
        [mock.call(self.project_name, [series1, series2])]
    )

    # Attempting to export too soon after another export with the exact
    # same labels leads to it being dropped
    sum_agg_two = SumAggregator()
    sum_agg_two.checkpoint = 1
    sum_agg_two.last_update_timestamp = (
        WRITE_INTERVAL + 2
    ) * NANOS_PER_SECOND
    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "value1"), ("label2", 1),),
                sum_agg_two,
            ),
            MetricRecord(
                MockMetric(),
                (("label1", "value2"), ("label2", 2),),
                sum_agg_two,
            ),
        ]
    )
    self.assertEqual(client.create_time_series.call_count, 1)

    # But exporting with different labels is fine
    sum_agg_two.checkpoint = 2
    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "changed_label"), ("label2", 2),),
                sum_agg_two,
            ),
        ]
    )

    series3 = TimeSeries()
    series3.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series3.metric.labels["label1"] = "changed_label"
    series3.metric.labels["label2"] = "2"
    point = series3.points.add()
    point.value.int64_value = 2
    point.interval.end_time.seconds = WRITE_INTERVAL + 2
    point.interval.end_time.nanos = 0
    point.interval.start_time.seconds = 1
    point.interval.start_time.nanos = 0
    client.create_time_series.assert_has_calls(
        [
            mock.call(self.project_name, [series1, series2]),
            mock.call(self.project_name, [series3]),
        ]
    )
def test_get_metric_descriptor(self):
    client = mock.Mock()
    exporter = CloudMonitoringMetricsExporter(
        project_id=self.project_id, client=client
    )
    exporter.project_name = self.project_name
    self.assertIsNone(
        exporter._get_metric_descriptor(
            MetricRecord(MockMetric(), (), UnsupportedAggregator())
        )
    )

    record = MetricRecord(
        MockMetric(),
        (("label1", "value1"),),
        SumAggregator(),
    )
    metric_descriptor = exporter._get_metric_descriptor(record)
    client.create_metric_descriptor.assert_called_with(
        self.project_name,
        MetricDescriptor(
            **{
                "name": None,
                "type": "custom.googleapis.com/OpenTelemetry/name",
                "display_name": "name",
                "description": "description",
                "labels": [
                    LabelDescriptor(key="label1", value_type="STRING")
                ],
                "metric_kind": "CUMULATIVE",
                "value_type": "INT64",
            }
        ),
    )

    # Getting a cached metric descriptor shouldn't use another call
    cached_metric_descriptor = exporter._get_metric_descriptor(record)
    self.assertEqual(client.create_metric_descriptor.call_count, 1)
    self.assertEqual(metric_descriptor, cached_metric_descriptor)

    # Drop labels with values that aren't string, int or bool
    exporter._get_metric_descriptor(
        MetricRecord(
            MockMetric(name="name2", value_type=float),
            (
                ("label1", "value1"),
                ("label2", dict()),
                ("label3", 3),
                ("label4", False),
            ),
            SumAggregator(),
        )
    )
    client.create_metric_descriptor.assert_called_with(
        self.project_name,
        MetricDescriptor(
            **{
                "name": None,
                "type": "custom.googleapis.com/OpenTelemetry/name2",
                "display_name": "name2",
                "description": "description",
                "labels": [
                    LabelDescriptor(key="label1", value_type="STRING"),
                    LabelDescriptor(key="label3", value_type="INT64"),
                    LabelDescriptor(key="label4", value_type="BOOL"),
                ],
                "metric_kind": "CUMULATIVE",
                "value_type": "DOUBLE",
            }
        ),
    )
def test_export(self):
    client = mock.Mock()
    exporter = CloudMonitoringMetricsExporter(
        project_id=self.project_id, client=client
    )
    exporter.project_name = self.project_name

    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "value1"),),
                UnsupportedAggregator(),
            )
        ]
    )
    client.create_time_series.assert_not_called()

    client.create_metric_descriptor.return_value = MetricDescriptor(
        **{
            "name": None,
            "type": "custom.googleapis.com/OpenTelemetry/name",
            "display_name": "name",
            "description": "description",
            "labels": [
                LabelDescriptor(key="label1", value_type="STRING"),
                LabelDescriptor(key="label2", value_type="INT64"),
            ],
            "metric_kind": "GAUGE",
            "value_type": "DOUBLE",
        }
    )

    counter_one = CounterAggregator()
    counter_one.checkpoint = 1
    counter_one.last_update_timestamp = (WRITE_INTERVAL + 1) * 1e9
    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "value1"), ("label2", 1),),
                counter_one,
            ),
            MetricRecord(
                MockMetric(),
                (("label1", "value2"), ("label2", 2),),
                counter_one,
            ),
        ]
    )

    series1 = TimeSeries()
    series1.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series1.metric.labels["label1"] = "value1"
    series1.metric.labels["label2"] = "1"
    point = series1.points.add()
    point.value.int64_value = 1
    point.interval.end_time.seconds = WRITE_INTERVAL + 1
    point.interval.end_time.nanos = 0

    series2 = TimeSeries()
    series2.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series2.metric.labels["label1"] = "value2"
    series2.metric.labels["label2"] = "2"
    point = series2.points.add()
    point.value.int64_value = 1
    point.interval.end_time.seconds = WRITE_INTERVAL + 1
    point.interval.end_time.nanos = 0
    client.create_time_series.assert_has_calls(
        [mock.call(self.project_name, [series1, series2])]
    )

    # Attempting to export too soon after another export with the exact
    # same labels leads to it being dropped
    counter_two = CounterAggregator()
    counter_two.checkpoint = 1
    counter_two.last_update_timestamp = (WRITE_INTERVAL + 2) * 1e9
    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "value1"), ("label2", 1),),
                counter_two,
            ),
            MetricRecord(
                MockMetric(),
                (("label1", "value2"), ("label2", 2),),
                counter_two,
            ),
        ]
    )
    self.assertEqual(client.create_time_series.call_count, 1)

    # But exporting with different labels is fine
    counter_two.checkpoint = 2
    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "changed_label"), ("label2", 2),),
                counter_two,
            ),
        ]
    )

    series3 = TimeSeries()
    series3.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series3.metric.labels["label1"] = "changed_label"
    series3.metric.labels["label2"] = "2"
    point = series3.points.add()
    point.value.int64_value = 2
    point.interval.end_time.seconds = WRITE_INTERVAL + 2
    point.interval.end_time.nanos = 0
    client.create_time_series.assert_has_calls(
        [
            mock.call(self.project_name, [series1, series2]),
            mock.call(self.project_name, [series3]),
        ]
    )
def _get_metric_descriptor(
    self, record: MetricRecord
) -> Optional[MetricDescriptor]:
    """
    We can map Metric to MetricDescriptor using Metric.name or
    MetricDescriptor.type. We create the MetricDescriptor if it doesn't
    exist already and cache it. Note that recreating a MetricDescriptor
    is a no-op if it already exists.

    :param record:
    :return:
    """
    instrument = record.instrument
    descriptor_type = "custom.googleapis.com/OpenTelemetry/{}".format(
        instrument.name
    )
    if descriptor_type in self._metric_descriptors:
        return self._metric_descriptors[descriptor_type]

    descriptor = {
        "name": None,
        "type": descriptor_type,
        "display_name": instrument.name,
        "description": instrument.description,
        "labels": [],
    }
    for key, value in record.labels:
        if isinstance(value, str):
            descriptor["labels"].append(
                LabelDescriptor(key=key, value_type="STRING")
            )
        elif isinstance(value, bool):
            descriptor["labels"].append(
                LabelDescriptor(key=key, value_type="BOOL")
            )
        elif isinstance(value, int):
            descriptor["labels"].append(
                LabelDescriptor(key=key, value_type="INT64")
            )
        else:
            logger.warning(
                "Label value %s is not a string, bool or integer", value
            )

    if isinstance(record.aggregator, SumAggregator):
        descriptor["metric_kind"] = MetricDescriptor.MetricKind.GAUGE
    else:
        logger.warning(
            "Unsupported aggregation type %s, ignoring it",
            type(record.aggregator).__name__,
        )
        return None

    if instrument.value_type == int:
        descriptor["value_type"] = MetricDescriptor.ValueType.INT64
    elif instrument.value_type == float:
        descriptor["value_type"] = MetricDescriptor.ValueType.DOUBLE

    proto_descriptor = MetricDescriptor(**descriptor)
    try:
        descriptor = self.client.create_metric_descriptor(
            self.project_name, proto_descriptor
        )
    # pylint: disable=broad-except
    except Exception as ex:
        logger.error(
            "Failed to create metric descriptor %s",
            proto_descriptor,
            exc_info=ex,
        )
        return None
    self._metric_descriptors[descriptor_type] = descriptor
    return descriptor