def export(
    self, metric_records: Sequence[MetricRecord]
) -> "MetricsExportResult":
    all_series = []
    for record in metric_records:
        instrument = record.instrument
        metric_descriptor = self._get_metric_descriptor(record)
        if not metric_descriptor:
            continue

        series = TimeSeries(
            resource=self._get_monitored_resource(
                record.instrument.meter.resource
            )
        )
        series.metric.type = metric_descriptor.type
        for key, value in record.labels:
            series.metric.labels[key] = str(value)
        if self.unique_identifier:
            series.metric.labels[
                UNIQUE_IDENTIFIER_KEY
            ] = self.unique_identifier

        point = series.points.add()
        if isinstance(record.aggregator, SumAggregator):
            data_point = record.aggregator.checkpoint
        elif isinstance(record.aggregator, ValueObserverAggregator):
            data_point = record.aggregator.checkpoint.last

        if instrument.value_type == int:
            point.value.int64_value = data_point
        elif instrument.value_type == float:
            point.value.double_value = data_point

        seconds = (
            record.aggregator.last_update_timestamp // NANOS_PER_SECOND
        )

        # Cloud Monitoring API allows, for any combination of labels and
        # metric name, one update per WRITE_INTERVAL seconds
        updated_key = (metric_descriptor.type, record.labels)
        last_updated_time = self._last_updated.get(updated_key, 0)
        last_updated_time_seconds = last_updated_time // NANOS_PER_SECOND
        if seconds <= last_updated_time_seconds + WRITE_INTERVAL:
            continue

        self._set_start_end_times(point, record, metric_descriptor)
        all_series.append(series)
    try:
        self._batch_write(all_series)
    # pylint: disable=broad-except
    except Exception as ex:
        logger.error("Error while writing to Cloud Monitoring", exc_info=ex)
        return MetricsExportResult.FAILURE
    return MetricsExportResult.SUCCESS
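This version of export leans on several module-level names that never appear in this section. A minimal sketch of plausible definitions follows; the exact values of WRITE_INTERVAL and UNIQUE_IDENTIFIER_KEY are assumptions (the histogram test at the end of the section hard-codes an end time of 11 seconds for a timestamp of (WRITE_INTERVAL + 1) seconds, which implies WRITE_INTERVAL is 10).

# Assumed module-level definitions -- not shown in this section; the values
# are best guesses except where the tests pin them down.
import logging

logger = logging.getLogger(__name__)

NANOS_PER_SECOND = 10 ** 9

# Implied by the histogram test's hard-coded end time of 11 seconds for a
# point timestamped at (WRITE_INTERVAL + 1) seconds.
WRITE_INTERVAL = 10

# Label used to disambiguate multiple exporters writing the same series;
# the exact key string here is a guess.
UNIQUE_IDENTIFIER_KEY = "opentelemetry_id"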
def export(
    self, metric_records: Sequence[MetricRecord]
) -> "MetricsExportResult":
    all_series = []
    for record in metric_records:
        instrument = record.instrument
        metric_descriptor = self._get_metric_descriptor(record)
        if not metric_descriptor:
            continue

        series = TimeSeries()
        self._add_resource_info(series)
        series.metric.type = metric_descriptor.type
        for key, value in record.labels:
            series.metric.labels[key] = str(value)
        if self.unique_identifier:
            series.metric.labels[
                UNIQUE_IDENTIFIER_KEY
            ] = self.unique_identifier

        point = series.points.add()
        if instrument.value_type == int:
            point.value.int64_value = record.aggregator.checkpoint
        elif instrument.value_type == float:
            point.value.double_value = record.aggregator.checkpoint

        seconds, nanos = divmod(
            record.aggregator.last_update_timestamp, 1e9
        )

        # Cloud Monitoring API allows, for any combination of labels and
        # metric name, one update per WRITE_INTERVAL seconds
        updated_key = (metric_descriptor.type, record.labels)
        last_updated_seconds = self._last_updated.get(updated_key, 0)
        if seconds <= last_updated_seconds + WRITE_INTERVAL:
            continue

        self._last_updated[updated_key] = seconds

        point.interval.end_time.seconds = int(seconds)
        point.interval.end_time.nanos = int(nanos)
        all_series.append(series)
    try:
        self._batch_write(all_series)
    # pylint: disable=broad-except
    except Exception as ex:
        logger.error(
            "Error while writing to Cloud Monitoring", exc_info=ex
        )
        return MetricsExportResult.FAILURE
    return MetricsExportResult.SUCCESS
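The first export variant above delegates interval bookkeeping to self._set_start_end_times, which is not shown in this section. The sketch below is a plausible reconstruction, consistent with the inline interval logic in the second variant and with the intervals the tests expect: gauge points get start_time == end_time, while cumulative points reuse a fixed exporter start time. The _exporter_start_time_* attributes are hypothetical names for state captured with time_ns() at construction.

# Plausible sketch only -- reconstructed from the inline logic above and
# the interval values the tests below assert.
def _set_start_end_times(self, point, record, metric_descriptor):
    timestamp = record.aggregator.last_update_timestamp
    seconds, nanos = divmod(timestamp, NANOS_PER_SECOND)

    if (
        metric_descriptor.metric_kind
        == MetricDescriptor.MetricKind.CUMULATIVE
    ):
        # Cumulative series need a stable start time across writes;
        # _exporter_start_time_* are assumed attributes set in __init__
        # from time_ns() (the tests mock time_ns to one second).
        point.interval.start_time.seconds = self._exporter_start_time_seconds
        point.interval.start_time.nanos = self._exporter_start_time_nanos
    else:
        # Gauge points cover a zero-length interval.
        point.interval.start_time.seconds = seconds
        point.interval.start_time.nanos = nanos

    point.interval.end_time.seconds = seconds
    point.interval.end_time.nanos = nanos
    self._last_updated[
        (metric_descriptor.type, record.labels)
    ] = timestamp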
def test_export_value_observer(self):
    client = mock.Mock()

    with mock.patch(
        "opentelemetry.exporter.cloud_monitoring.time_ns",
        lambda: NANOS_PER_SECOND,
    ):
        exporter = CloudMonitoringMetricsExporter(
            project_id=self.project_id, client=client
        )

    exporter.project_name = self.project_name

    client.create_metric_descriptor.return_value = MetricDescriptor(
        **{
            "name": None,
            "type": "custom.googleapis.com/OpenTelemetry/name",
            "display_name": "name",
            "description": "description",
            "labels": [],
            "metric_kind": "GAUGE",
            "value_type": "INT64",
        }
    )

    aggregator = ValueObserverAggregator()
    aggregator.checkpoint = aggregator._TYPE(1, 2, 3, 4, 5)
    aggregator.last_update_timestamp = (
        WRITE_INTERVAL + 1
    ) * NANOS_PER_SECOND
    exporter.export(
        [MetricRecord(MockMetric(meter=MockMeter()), (), aggregator,)]
    )

    series = TimeSeries()
    series.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    point = series.points.add()
    point.value.int64_value = 5
    point.interval.end_time.seconds = WRITE_INTERVAL + 1
    point.interval.end_time.nanos = 0
    point.interval.start_time.seconds = WRITE_INTERVAL + 1
    point.interval.start_time.nanos = 0
    client.create_time_series.assert_has_calls(
        [mock.call(self.project_name, [series])]
    )
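The tests in this section share a few fixtures that are not reproduced here. The sketches below are minimal reconstructions consistent with how the tests use them; the constructor defaults and the Aggregator base class import are assumptions.

# Minimal fixture sketches -- reconstructed from usage, not the originals.
from opentelemetry.sdk.metrics.export.aggregate import Aggregator
from opentelemetry.sdk.resources import Resource


class UnsupportedAggregator(Aggregator):
    # An aggregator type the exporter has no descriptor mapping for, so
    # records carrying it are skipped before any series is built.
    pass


class MockMeter:
    def __init__(self, resource=Resource.create_empty()):
        self.resource = resource


class MockMetric:
    def __init__(
        self,
        name="name",
        description="description",
        value_type=int,
        meter=None,
    ):
        self.name = name
        self.description = description
        self.value_type = value_type
        self.meter = meter or MockMeter()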
def export(
    self, metric_records: Sequence[MetricRecord]
) -> "MetricsExportResult":
    all_series = []
    for record in metric_records:
        instrument = record.instrument
        metric_descriptor = self._get_metric_descriptor(record)
        if not metric_descriptor:
            continue

        series = TimeSeries(
            resource=self._get_monitored_resource(
                record.instrument.meter.resource
            )
        )
        series.metric.type = metric_descriptor.type
        for key, value in record.labels:
            series.metric.labels[key] = str(value)
        if self.unique_identifier:
            series.metric.labels[
                UNIQUE_IDENTIFIER_KEY
            ] = self.unique_identifier

        point_dict = {"interval": {}}

        if isinstance(record.aggregator, HistogramAggregator):
            bucket_bounds = list(record.aggregator.checkpoint.keys())
            bucket_values = list(record.aggregator.checkpoint.values())

            point_dict["value"] = {
                "distribution_value": Distribution(
                    count=sum(bucket_values),
                    bucket_counts=bucket_values,
                    bucket_options={
                        "explicit_buckets": {
                            # don't put in > bucket
                            "bounds": bucket_bounds[:-1]
                        }
                    },
                )
            }
        else:
            if isinstance(record.aggregator, SumAggregator):
                data_point = record.aggregator.checkpoint
            elif isinstance(record.aggregator, ValueObserverAggregator):
                data_point = record.aggregator.checkpoint.last

            if instrument.value_type == int:
                point_dict["value"] = {"int64_value": data_point}
            elif instrument.value_type == float:
                point_dict["value"] = {"double_value": data_point}

        seconds = (
            record.aggregator.last_update_timestamp // NANOS_PER_SECOND
        )

        # Cloud Monitoring API allows, for any combination of labels and
        # metric name, one update per WRITE_INTERVAL seconds
        updated_key = (metric_descriptor.type, record.labels)
        last_updated_time = self._last_updated.get(updated_key, 0)
        last_updated_time_seconds = last_updated_time // NANOS_PER_SECOND
        if seconds <= last_updated_time_seconds + WRITE_INTERVAL:
            continue

        self._set_start_end_times(point_dict, record, metric_descriptor)
        series.points.add(**point_dict)
        all_series.append(series)
    try:
        self._batch_write(all_series)
    # pylint: disable=broad-except
    except Exception as ex:
        logger.error(
            "Error while writing to Cloud Monitoring", exc_info=ex
        )
        return MetricsExportResult.FAILURE
    return MetricsExportResult.SUCCESS
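The histogram branch above flattens the aggregator checkpoint, an ordered mapping from bucket upper bounds to counts with a trailing ">" overflow key, into Cloud Monitoring's explicit-bucket Distribution. Below is a self-contained illustration of that mapping, using plain dicts in place of the protobuf types; the values mirror the histogram test at the end of this section.

# Standalone illustration of the checkpoint-to-Distribution mapping.
from collections import OrderedDict

checkpoint = OrderedDict([(2, 1), (4, 2), (6, 4), (">", 3)])

bucket_bounds = list(checkpoint.keys())    # [2, 4, 6, ">"]
bucket_values = list(checkpoint.values())  # [1, 2, 4, 3]

distribution = {
    "count": sum(bucket_values),     # 10 points in total
    "bucket_counts": bucket_values,  # overflow count stays: [1, 2, 4, 3]
    # The trailing ">" key is not a numeric bound, so it is dropped; Cloud
    # Monitoring infers the overflow bucket from the extra count.
    "bucket_options": {"explicit_buckets": {"bounds": bucket_bounds[:-1]}},
}

assert distribution["bucket_options"]["explicit_buckets"]["bounds"] == [2, 4, 6]
assert distribution["count"] == 10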
def test_export(self):
    client = mock.Mock()

    with mock.patch(
        "opentelemetry.exporter.cloud_monitoring.time_ns",
        lambda: NANOS_PER_SECOND,
    ):
        exporter = CloudMonitoringMetricsExporter(
            project_id=self.project_id, client=client
        )

    exporter.project_name = self.project_name

    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "value1"),),
                UnsupportedAggregator(),
            )
        ]
    )
    client.create_time_series.assert_not_called()

    client.create_metric_descriptor.return_value = MetricDescriptor(
        **{
            "name": None,
            "type": "custom.googleapis.com/OpenTelemetry/name",
            "display_name": "name",
            "description": "description",
            "labels": [
                LabelDescriptor(key="label1", value_type="STRING"),
                LabelDescriptor(key="label2", value_type="INT64"),
            ],
            "metric_kind": "CUMULATIVE",
            "value_type": "DOUBLE",
        }
    )

    resource = Resource(
        labels={
            "cloud.account.id": 123,
            "host.id": "host",
            "cloud.zone": "US",
            "cloud.provider": "gcp",
            "extra_info": "extra",
            "gcp.resource_type": "gce_instance",
            "not_gcp_resource": "value",
        }
    )

    sum_agg_one = SumAggregator()
    sum_agg_one.checkpoint = 1
    sum_agg_one.last_update_timestamp = (
        WRITE_INTERVAL + 1
    ) * NANOS_PER_SECOND
    exporter.export(
        [
            MetricRecord(
                MockMetric(meter=MockMeter(resource=resource)),
                (("label1", "value1"), ("label2", 1),),
                sum_agg_one,
            ),
            MetricRecord(
                MockMetric(meter=MockMeter(resource=resource)),
                (("label1", "value2"), ("label2", 2),),
                sum_agg_one,
            ),
        ]
    )

    expected_resource = MonitoredResource(
        type="gce_instance",
        labels={"project_id": "123", "instance_id": "host", "zone": "US"},
    )

    series1 = TimeSeries(resource=expected_resource)
    series1.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series1.metric.labels["label1"] = "value1"
    series1.metric.labels["label2"] = "1"
    point = series1.points.add()
    point.value.int64_value = 1
    point.interval.end_time.seconds = WRITE_INTERVAL + 1
    point.interval.end_time.nanos = 0
    point.interval.start_time.seconds = 1
    point.interval.start_time.nanos = 0

    series2 = TimeSeries(resource=expected_resource)
    series2.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series2.metric.labels["label1"] = "value2"
    series2.metric.labels["label2"] = "2"
    point = series2.points.add()
    point.value.int64_value = 1
    point.interval.end_time.seconds = WRITE_INTERVAL + 1
    point.interval.end_time.nanos = 0
    point.interval.start_time.seconds = 1
    point.interval.start_time.nanos = 0
    client.create_time_series.assert_has_calls(
        [mock.call(self.project_name, [series1, series2])]
    )

    # Attempting to export too soon after another export with the exact
    # same labels leads to it being dropped
    sum_agg_two = SumAggregator()
    sum_agg_two.checkpoint = 1
    sum_agg_two.last_update_timestamp = (
        WRITE_INTERVAL + 2
    ) * NANOS_PER_SECOND
    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "value1"), ("label2", 1),),
                sum_agg_two,
            ),
            MetricRecord(
                MockMetric(),
                (("label1", "value2"), ("label2", 2),),
                sum_agg_two,
            ),
        ]
    )
    self.assertEqual(client.create_time_series.call_count, 1)

    # But exporting with different labels is fine
    sum_agg_two.checkpoint = 2
    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "changed_label"), ("label2", 2),),
                sum_agg_two,
            ),
        ]
    )

    series3 = TimeSeries()
    series3.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series3.metric.labels["label1"] = "changed_label"
    series3.metric.labels["label2"] = "2"
    point = series3.points.add()
    point.value.int64_value = 2
    point.interval.end_time.seconds = WRITE_INTERVAL + 2
    point.interval.end_time.nanos = 0
    point.interval.start_time.seconds = 1
    point.interval.start_time.nanos = 0
    client.create_time_series.assert_has_calls(
        [
            mock.call(self.project_name, [series1, series2]),
            mock.call(self.project_name, [series3]),
        ]
    )
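self._get_monitored_resource is also never shown in this section. A plausible sketch follows, reconstructed from the mapping test_export asserts for a gce_instance resource (cloud.account.id, host.id, and cloud.zone become project_id, instance_id, and zone); the structure and the handling of other resource types are assumptions.

# Plausible sketch only -- reconstructed from the expected_resource that
# test_export builds, not from the original implementation.
def _get_monitored_resource(self, resource):
    resource_type = resource.labels.get("gcp.resource_type")
    if resource_type == "gce_instance":
        return MonitoredResource(
            type="gce_instance",
            labels={
                "project_id": str(resource.labels["cloud.account.id"]),
                "instance_id": str(resource.labels["host.id"]),
                "zone": str(resource.labels["cloud.zone"]),
            },
        )
    # Unrecognized or empty resources yield no monitored resource, so the
    # TimeSeries is written without one.
    return None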
def test_export(self):
    client = mock.Mock()
    exporter = CloudMonitoringMetricsExporter(
        project_id=self.project_id, client=client
    )
    exporter.project_name = self.project_name

    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "value1"),),
                UnsupportedAggregator(),
            )
        ]
    )
    client.create_time_series.assert_not_called()

    client.create_metric_descriptor.return_value = MetricDescriptor(
        **{
            "name": None,
            "type": "custom.googleapis.com/OpenTelemetry/name",
            "display_name": "name",
            "description": "description",
            "labels": [
                LabelDescriptor(key="label1", value_type="STRING"),
                LabelDescriptor(key="label2", value_type="INT64"),
            ],
            "metric_kind": "GAUGE",
            "value_type": "DOUBLE",
        }
    )

    counter_one = CounterAggregator()
    counter_one.checkpoint = 1
    counter_one.last_update_timestamp = (WRITE_INTERVAL + 1) * 1e9
    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "value1"), ("label2", 1),),
                counter_one,
            ),
            MetricRecord(
                MockMetric(),
                (("label1", "value2"), ("label2", 2),),
                counter_one,
            ),
        ]
    )

    series1 = TimeSeries()
    series1.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series1.metric.labels["label1"] = "value1"
    series1.metric.labels["label2"] = "1"
    point = series1.points.add()
    point.value.int64_value = 1
    point.interval.end_time.seconds = WRITE_INTERVAL + 1
    point.interval.end_time.nanos = 0

    series2 = TimeSeries()
    series2.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series2.metric.labels["label1"] = "value2"
    series2.metric.labels["label2"] = "2"
    point = series2.points.add()
    point.value.int64_value = 1
    point.interval.end_time.seconds = WRITE_INTERVAL + 1
    point.interval.end_time.nanos = 0
    client.create_time_series.assert_has_calls(
        [mock.call(self.project_name, [series1, series2])]
    )

    # Attempting to export too soon after another export with the exact
    # same labels leads to it being dropped
    counter_two = CounterAggregator()
    counter_two.checkpoint = 1
    counter_two.last_update_timestamp = (WRITE_INTERVAL + 2) * 1e9
    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "value1"), ("label2", 1),),
                counter_two,
            ),
            MetricRecord(
                MockMetric(),
                (("label1", "value2"), ("label2", 2),),
                counter_two,
            ),
        ]
    )
    self.assertEqual(client.create_time_series.call_count, 1)

    # But exporting with different labels is fine
    counter_two.checkpoint = 2
    exporter.export(
        [
            MetricRecord(
                MockMetric(),
                (("label1", "changed_label"), ("label2", 2),),
                counter_two,
            ),
        ]
    )

    series3 = TimeSeries()
    series3.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    series3.metric.labels["label1"] = "changed_label"
    series3.metric.labels["label2"] = "2"
    point = series3.points.add()
    point.value.int64_value = 2
    point.interval.end_time.seconds = WRITE_INTERVAL + 2
    point.interval.end_time.nanos = 0
    client.create_time_series.assert_has_calls(
        [
            mock.call(self.project_name, [series1, series2]),
            mock.call(self.project_name, [series3]),
        ]
    )
def test_export_histogram(self):
    client = mock.Mock()

    with mock.patch(
        "opentelemetry.exporter.cloud_monitoring.time_ns",
        lambda: NANOS_PER_SECOND,
    ):
        exporter = CloudMonitoringMetricsExporter(
            project_id=self.project_id, client=client
        )

    exporter.project_name = self.project_name

    client.create_metric_descriptor.return_value = MetricDescriptor(
        **{
            "name": None,
            "type": "custom.googleapis.com/OpenTelemetry/name",
            "display_name": "name",
            "description": "description",
            "labels": [],
            "metric_kind": "CUMULATIVE",
            "value_type": "DISTRIBUTION",
        }
    )

    aggregator = HistogramAggregator(config={"bounds": [2, 4, 6]})
    aggregator.checkpoint = OrderedDict(
        [(2, 1), (4, 2), (6, 4), (">", 3)]
    )
    aggregator.last_update_timestamp = (
        WRITE_INTERVAL + 1
    ) * NANOS_PER_SECOND
    exporter.export(
        [MetricRecord(MockMetric(meter=MockMeter()), (), aggregator,)]
    )

    series = TimeSeries()
    series.metric.type = "custom.googleapis.com/OpenTelemetry/name"
    point = {
        "interval": {
            "start_time": {"seconds": 1},
            "end_time": {"seconds": 11},
        },
        "value": {
            "distribution_value": {
                "count": 10,
                "bucket_options": {
                    "explicit_buckets": {"bounds": [2.0, 4.0, 6.0]}
                },
                "bucket_counts": [1, 2, 4, 3],
            }
        },
    }
    series.points.add(**point)
    client.create_time_series.assert_has_calls(
        [mock.call(self.project_name, [series])]
    )