def Run(self):
  # We have to include all server metadata in the test context since server
  # code that uses the metrics runs within the context.
  non_test_metadata = list(
      itervalues(stats_collector_instance.Get().GetAllMetricsMetadata()))
  test_metadata = non_test_metadata + [
      stats_utils.CreateCounterMetadata(
          _TEST_COUNTER, docstring="Sample counter metric."),
      stats_utils.CreateGaugeMetadata(
          _TEST_GAUGE_METRIC, str, docstring="Sample gauge metric."),
      stats_utils.CreateEventMetadata(
          _TEST_EVENT_METRIC, docstring="Sample event metric."),
  ]
  stats_collector = default_stats_collector.DefaultStatsCollector(
      test_metadata)

  with stats_test_utils.FakeStatsContext(stats_collector):
    with aff4.FACTORY.Create(
        None, aff4_stats_store.StatsStore, mode="w",
        token=self.token) as stats_store:
      stats_store.WriteStats(process_id="worker_1")

    # We use mixins to run the same tests against multiple APIs.
    # Result-filtering is only needed for HTTP API tests.
    if isinstance(self, api_regression_http.HttpApiRegressionTestMixinBase):
      api_post_process_fn = self._PostProcessApiResult
    else:
      api_post_process_fn = None

    self.Check(
        "ListStatsStoreMetricsMetadata",
        args=stats_plugin.ApiListStatsStoreMetricsMetadataArgs(
            component="WORKER"),
        api_post_process_fn=api_post_process_fn)
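
# Illustrative sketch, not part of the original test: assuming the collector
# exposes the usual IncrementCounter / SetGaugeValue / RecordEvent methods, a
# hypothetical helper like this could record sample values inside the fake
# context before WriteStats, so the stored snapshot carries non-trivial data.
def _RecordSampleValues(stats_collector):
  # _TEST_COUNTER, _TEST_GAUGE_METRIC and _TEST_EVENT_METRIC are the
  # module-level metric names registered in Run() above.
  stats_collector.IncrementCounter(_TEST_COUNTER)
  stats_collector.SetGaugeValue(_TEST_GAUGE_METRIC, "example")
  stats_collector.RecordEvent(_TEST_EVENT_METRIC, 0.5)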
def Run(self):
  # Register sample metrics on a fresh collector so the test does not depend
  # on whatever metrics the server registered globally.
  stats_collector = stats.StatsCollector()
  stats_collector.RegisterCounterMetric(
      "sample_counter", docstring="Sample counter metric.")
  stats_collector.RegisterGaugeMetric(
      "sample_gauge_value", str, docstring="Sample gauge metric.")
  stats_collector.RegisterEventMetric(
      "sample_event", docstring="Sample event metric.")

  # Swap the global stats.STATS singleton for the test collector while the
  # stats are written and the API call is checked.
  with utils.Stubber(stats, "STATS", stats_collector):
    with aff4.FACTORY.Create(
        None, aff4_stats_store.StatsStore, mode="w",
        token=self.token) as stats_store:
      stats_store.WriteStats(process_id="worker_1")

    self.Check(
        "ListStatsStoreMetricsMetadata",
        args=stats_plugin.ApiListStatsStoreMetricsMetadataArgs(
            component="WORKER"))
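
# For context: utils.Stubber is a GRR test helper that replaces an attribute
# for the duration of a with-block and restores it on exit. Below is a
# minimal, hypothetical sketch of the same pattern, not GRR's implementation.
import contextlib


@contextlib.contextmanager
def _StubAttribute(module, name, replacement):
  # Save the original attribute, install the replacement, and restore the
  # original even if the with-block raises.
  original = getattr(module, name)
  setattr(module, name, replacement)
  try:
    yield
  finally:
    setattr(module, name, original)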