def __init__(self, stateful: bool = False):
    """Wire in-memory span and metrics exporters onto the base telemetry object.

    Args:
        stateful: Forwarded to the base class; selects cumulative vs delta
            metric aggregation — confirm exact semantics against the base class.
    """
    super().__init__(span_processor=None, stateful=stateful)
    self.span_exporter = InMemorySpanExporter()
    self.metrics_exporter = InMemoryMetricsExporter()
    self.add_span_exporter(span_exporter=self.span_exporter)
    # Long export interval (10000 — presumably milliseconds) so exports only
    # happen when a test ticks the controller explicitly — TODO confirm units.
    self.add_metrics_exporter(metrics_exporter=self.metrics_exporter, interval=10000)
    # Tracks whether a metrics collection pass has been forced yet.
    self.collected = False
    # JSON formatter intended for pytest's caplog handler.
    self.caplog = JsonLogCaptureFormatter()
def create_meter_provider(**kwargs):
    """Create a ``MeterProvider`` alongside an in-memory metrics exporter.

    Note: the exporter is created but not attached here; callers are expected
    to wire the two together (e.g. via a push controller).

    Args:
        **kwargs: Forwarded verbatim to the ``MeterProvider`` constructor.

    Returns:
        A 2-tuple ``(meter_provider, memory_exporter)`` where
        ``memory_exporter`` is an ``InMemoryMetricsExporter``.
    """
    meter_provider = MeterProvider(**kwargs)
    memory_exporter = InMemoryMetricsExporter()
    return meter_provider, memory_exporter
def test_histogram_stateless(self):
    """Stateless histogram view: bucket counts reset between collection ticks."""
    # Use the meter type provided by the SDK package
    meter = metrics.MeterProvider(stateful=False).get_meter(__name__)
    exporter = InMemoryMetricsExporter()
    controller = PushController(meter, exporter, 30)
    requests_size = meter.create_metric(
        name="requests_size",
        description="size of requests",
        unit="1",
        value_type=int,
        metric_type=ValueRecorder,
    )
    meter.register_view(
        View(
            requests_size,
            HistogramAggregator,
            aggregator_config={"bounds": [20, 40, 60, 80, 100]},
            label_keys=["environment"],
            view_config=ViewConfig.LABEL_KEYS,
        )
    )
    expected_buckets = ((20, 1), (40, 1), (60, 0), (80, 0), (100, 0), (inf, 1))
    # Record identical values in two rounds; because the provider is
    # stateless, each tick must report the same non-accumulated bucket counts.
    for round_index in range(2):
        requests_size.record(25, {"environment": "staging", "test": "value"})
        requests_size.record(1, {"environment": "staging", "test": "value2"})
        requests_size.record(200, {"environment": "staging", "test": "value3"})
        controller.tick()
        exported = exporter.get_exported_metrics()
        self.assertEqual(len(exported), 1)
        self.assertEqual(
            tuple(exported[0].aggregator.checkpoint.items()), expected_buckets
        )
        if round_index == 0:
            exporter.clear()
def setUp(self):
    """Create a fresh stateless meter, in-memory exporter, and push controller."""
    self.meter = metrics.MeterProvider(stateful=False).get_meter(__name__)
    self.exporter = InMemoryMetricsExporter()
    # Interval of 30 (presumably seconds); tests in this file drive
    # collection explicitly via controller.tick() rather than waiting.
    self.controller = PushController(self.meter, self.exporter, 30)
class TestStateless(unittest.TestCase):
    """View behaviour against a stateless (delta-aggregating) MeterProvider."""

    def setUp(self):
        """Create a fresh stateless meter, in-memory exporter, and controller."""
        self.meter = metrics.MeterProvider(stateful=False).get_meter(__name__)
        self.exporter = InMemoryMetricsExporter()
        self.controller = PushController(self.meter, self.exporter, 30)

    def tearDown(self):
        """Stop the push controller started in setUp."""
        self.controller.shutdown()

    def _make_counter(self):
        # Shared instrument construction used by every test in this class.
        return self.meter.create_metric(
            name="test_counter",
            description="description",
            unit="By",
            value_type=int,
            metric_type=Counter,
        )

    def test_label_keys(self):
        """LABEL_KEYS view aggregates across labels outside the key set."""
        counter = self._make_counter()
        self.meter.register_view(
            View(
                counter,
                SumAggregator,
                label_keys=["environment"],
                view_config=ViewConfig.LABEL_KEYS,
            )
        )
        counter.add(6, {"environment": "production", "customer_id": 123})
        counter.add(5, {"environment": "production", "customer_id": 247})
        self.controller.tick()
        exported = self.exporter.get_exported_metrics()
        self.assertEqual(len(exported), 1)
        self.assertEqual(exported[0].labels, (("environment", "production"),))
        self.assertEqual(exported[0].aggregator.checkpoint, 11)

    def test_ungrouped(self):
        """UNGROUPED view keeps one record per distinct full label set."""
        counter = self._make_counter()
        self.meter.register_view(
            View(
                counter,
                SumAggregator,
                label_keys=["environment"],
                view_config=ViewConfig.UNGROUPED,
            )
        )
        counter.add(6, {"environment": "production", "customer_id": 123})
        counter.add(5, {"environment": "production", "customer_id": 247})
        self.controller.tick()
        exported = self.exporter.get_exported_metrics()
        self.assertEqual(len(exported), 2)
        # Export order is unspecified, so compare as a set.
        seen = {(record.labels, record.aggregator.checkpoint) for record in exported}
        self.assertIn(((("customer_id", 123), ("environment", "production")), 6), seen)
        self.assertIn(((("customer_id", 247), ("environment", "production")), 5), seen)

    def test_multiple_views(self):
        """Two views on one instrument each produce their own records."""
        counter = self._make_counter()
        self.meter.register_view(
            View(
                counter,
                SumAggregator,
                label_keys=["environment"],
                view_config=ViewConfig.UNGROUPED,
            )
        )
        self.meter.register_view(
            View(
                counter,
                MinMaxSumCountAggregator,
                label_keys=["environment"],
                view_config=ViewConfig.LABEL_KEYS,
            )
        )
        counter.add(6, {"environment": "production", "customer_id": 123})
        counter.add(5, {"environment": "production", "customer_id": 247})
        self.controller.tick()
        sums = set()
        mmsc_records = set()
        for record in self.exporter.get_exported_metrics():
            aggregator = record.aggregator
            if isinstance(aggregator, SumAggregator):
                sums.add((record.labels, aggregator.checkpoint))
            elif isinstance(aggregator, MinMaxSumCountAggregator):
                mmsc_records.add(record)
                # The mmsc view groups on "environment" only, so both adds
                # collapse into a single record summing to 6 + 5.
                self.assertEqual(record.labels, (("environment", "production"),))
                self.assertEqual(aggregator.checkpoint.sum, 11)
        # we have to assert this way because order is unknown
        self.assertEqual(len(sums), 2)
        self.assertEqual(len(mmsc_records), 1)
        self.assertIn(((("customer_id", 123), ("environment", "production")), 6), sums)
        self.assertIn(((("customer_id", 247), ("environment", "production")), 5), sums)
class TelemetryFixture(Telemetry):
    """Test double around ``Telemetry`` capturing spans, metrics and logs in memory.

    Spans go to an ``InMemorySpanExporter`` and metrics to an
    ``InMemoryMetricsExporter``; the query helpers below inspect the captured
    data and fail the current pytest test with a diagnostic when an expected
    metric is missing.
    """

    def __init__(self, stateful: bool = False):
        super().__init__(span_processor=None, stateful=stateful)
        self.span_exporter = InMemorySpanExporter()
        self.metrics_exporter = InMemoryMetricsExporter()
        self.add_span_exporter(span_exporter=self.span_exporter)
        # Long export interval (10000 — presumably ms) so exports only happen
        # when collect() ticks the controllers explicitly — TODO confirm units.
        self.add_metrics_exporter(metrics_exporter=self.metrics_exporter, interval=10000)
        # Set by collect(); _find_metric() triggers a collection pass lazily.
        self.collected = False
        self.caplog = JsonLogCaptureFormatter()

    def enable_log_record_capture(self, caplog: LogCaptureFixture):
        """Install the JSON log formatter on pytest's ``caplog`` fixture.

        Exposed to be called from a test because the caplog fixture is
        replaced for each phase of the test run (setup, run test), so all
        attempts to do this once during fixture initialization didn't work.
        """
        caplog.handler.setFormatter(self.caplog)

    def _get_labels(self, metric: ExportRecord):
        # Drop internal labels (keys starting with '_') from the record.
        return dict(filter(lambda label: not label[0].startswith('_'), metric.labels))

    def _find_metric(self, metric_type: Type, name: str,
                     labels: Optional[Dict[str, str]] = None) -> Optional[ExportRecord]:
        """Return the exported record matching type, name and labels exactly.

        Triggers a collection pass if one hasn't happened yet. When no exact
        match exists, fails the current test via ``pytest.fail`` with a
        listing of every recorded metric of ``metric_type``.
        """
        labels = labels or {}

        def fail_no_match(msg: str, candidates: Optional[List[ExportRecord]] = None):
            # Build a readable failure message listing every candidate record.
            if candidates is None:
                candidates = self.metrics_exporter.get_exported_metrics()
            msg = f"{msg}\n\nMetric:\n\t{name} {labels}\n\nRecorded {metric_type.__name__} metric(s):\n"
            if len(candidates) > 0:
                for m in candidates:
                    msg = f"{msg}\t{m.instrument.name} {self._get_labels(m)}\n"
            else:
                msg = f"{msg}\t(none)"
            return msg

        if not self.collected:
            self.collect()
        candidates = []
        for metric in self.metrics_exporter.get_exported_metrics():
            m: ExportRecord = metric
            # Exact type comparison (deliberately not isinstance) so a query
            # for one instrument type does not match records of a subtype.
            if type(m.instrument) != metric_type:
                continue
            candidates.append(m)
            if m.instrument.name == name:
                if self._get_labels(m) == labels:
                    return m  # exact match, return immediately
        pytest.fail(fail_no_match(f"No matching {metric_type.__name__} metric found!", candidates))

    def collect(self):
        """Force a metrics collection pass by ticking every push controller."""
        self.collected = True
        for controller in self.metrics.meter_provider._controllers:
            if isinstance(controller, PushController):
                controller.tick()

    def get_metrics(self,
                    type_filter: Callable[[Type], bool] = lambda v: True,
                    name_filter: Callable[[str], bool] = lambda v: True,
                    label_filter: Callable[[Dict[str, str]], bool] = lambda v: True,
                    instrumentor_filter: Callable[[str], bool] = lambda v: True) -> List[
        Union[CounterInfo, ValueRecorderInfo]]:
        """Return info objects for exported metrics passing all four filters.

        Note: does not trigger collect(); callers see whatever has already
        been exported. Counter records become CounterInfo, ValueRecorder
        records become ValueRecorderInfo; other instrument types are skipped.
        """
        metrics = []
        for metric in self.metrics_exporter.get_exported_metrics():
            m: ExportRecord = metric
            if not type_filter(type(m.instrument)) or not name_filter(m.instrument.name) or \
                    not label_filter(self._get_labels(m)) or not instrumentor_filter(
                    m.instrument.meter.instrumentation_info.name):
                continue
            if type(m.instrument) == Counter:
                metrics.append(CounterInfo(m.instrument.name, m.aggregator.checkpoint, self._get_labels(m)))
            elif type(m.instrument) == ValueRecorder:
                metrics.append(ValueRecorderInfo(m.instrument.name, m.aggregator.checkpoint.min,
                                                 m.aggregator.checkpoint.max, m.aggregator.checkpoint.sum,
                                                 m.aggregator.checkpoint.count, self._get_labels(m)))
            else:
                # TODO: other metric types?
                pass
        return metrics

    def get_counters(self, name_filter: Callable[[str], bool] = lambda v: True,
                     label_filter: Callable[[Dict[str, str]], bool] = lambda v: True) -> List[CounterInfo]:
        """Return all exported Counter metrics passing the filters."""
        return self.get_metrics(type_filter=lambda t: t == Counter, name_filter=name_filter,
                                label_filter=label_filter)

    def get_finished_spans(self, name_filter: Callable[[str], bool] = lambda v: True,
                           attribute_filter: Callable[[Optional[Mapping[str, AttributeValue]]], bool] = lambda v: True,
                           label_filter: Callable[[Dict[str, str]], bool] = lambda v: True) -> List[Span]:
        """Return finished spans whose qname, attributes and labels pass the filters."""
        spans = []
        for span in self.span_exporter.get_finished_spans():
            if not name_filter(f"{span.qname}") or not attribute_filter(
                    span.attributes) or not label_filter(span.labels):
                continue
            spans.append(span)
        return spans

    def get_value_recorders(self, name_filter: Callable[[str], bool] = lambda v: True,
                            label_filter: Callable[[Dict[str, str]], bool] = lambda v: True) -> List[ValueRecorderInfo]:
        """Return all exported ValueRecorder metrics passing the filters."""
        return self.get_metrics(type_filter=lambda t: t == ValueRecorder, name_filter=name_filter,
                                label_filter=label_filter)

    def get_counter(self, name: str, labels: Optional[Dict[str, str]] = None) -> Optional[CounterInfo]:
        """Return the exact-match Counter as a CounterInfo.

        _find_metric fails the test when nothing matches, so the None branch
        below is effectively a defensive fallback.
        """
        m = self._find_metric(Counter, name, labels)
        if m:
            return CounterInfo(m.instrument.name, m.aggregator.checkpoint, self._get_labels(m))
        else:
            return None

    def get_up_down_counter(self, name: str, labels: Optional[Dict[str, str]] = None) -> Optional[CounterInfo]:
        """Return the exact-match UpDownCounter as a CounterInfo."""
        m = self._find_metric(UpDownCounter, name, labels)
        if m:
            return CounterInfo(m.instrument.name, m.aggregator.checkpoint, self._get_labels(m))
        else:
            return None

    def get_value_recorder(self, name: str, labels: Optional[Dict[str, str]] = None) -> Optional[ValueRecorderInfo]:
        """Return the exact-match ValueRecorder as a ValueRecorderInfo."""
        m = self._find_metric(ValueRecorder, name, labels)
        if m:
            return ValueRecorderInfo(m.instrument.name, m.aggregator.checkpoint.min,
                                     m.aggregator.checkpoint.max, m.aggregator.checkpoint.sum,
                                     m.aggregator.checkpoint.count, self._get_labels(m))
        else:
            return None

    def get_gauge(self, name: str, labels: Optional[Dict[str, str]] = None) -> Optional[GaugeInfo]:
        """Return the exact-match ValueObserver as a GaugeInfo."""
        m = self._find_metric(ValueObserver, name, labels)
        if m:
            # NOTE(review): _find_metric guarantees m.instrument.name == name,
            # so this f-string yields "<name>.<name>" — confirm the doubled
            # name is intended and not a copy/paste slip.
            return GaugeInfo(f"{m.instrument.name}.{name}", m.aggregator.checkpoint.min,
                             m.aggregator.checkpoint.max, m.aggregator.checkpoint.sum,
                             m.aggregator.checkpoint.last, m.aggregator.checkpoint.count,
                             self._get_labels(m))
        else:
            return None