def test_repair_shard(self):
    """Repair across shards keeps valid metrics and purges spurious entries."""
    good_name = "a.b.test"
    self.metadata_cache.create_metric(bg_test_utils.make_metric(good_name))
    # Repair over both shards; a properly-created metric must survive.
    for shard_id in (0, 1):
        self.metadata_cache.repair(shard=shard_id, nshards=2)
    self.metadata_cache.get_metric(good_name)
    # Exactly one lookup so far, and it hit the cache.
    self.assertEqual(self.metadata_cache.hit_count, 1)
    self.assertEqual(self.metadata_cache.miss_count, 0)

    # Inject a bogus entry directly into the cache.
    fake_name = "a.b.fake"
    self.metadata_cache._cache_set(fake_name, bg_test_utils.make_metric(fake_name))

    # Repairing from a start key past "a" must leave the bogus entry alone.
    self.metadata_cache.repair(start_key="b")
    self.metadata_cache.get_metric(fake_name)
    self.assertEqual(self.metadata_cache.hit_count, 2)

    # Repairing from "a" across both shards must remove it.
    for shard_id in (0, 1):
        self.metadata_cache.repair(start_key="a", shard=shard_id, nshards=2)
    self.metadata_cache.get_metric(fake_name)
    # The entry is gone, so this last lookup is a miss.
    self.assertEqual(self.metadata_cache.hit_count, 2)
    self.assertEqual(self.metadata_cache.miss_count, 1)
def test_clean_not_expired(self):
    """clean() with a cutoff in the past must not delete live metrics."""
    if isinstance(self.accessor, bg_elasticsearch._ElasticSearchAccessor):
        # TODO (t.chataigner) Remove once clean is implemented.
        self.skipTest(
            "clean is not implemented for _ElasticSearchAccessor.")
    # Register the cleanup up-front so it runs even if an assertion below
    # fails (the original registered it last, leaking metrics on failure).
    self.addCleanup(self.accessor.drop_all_metrics)

    metric1 = bg_test_utils.make_metric("a.b.c.d.e.f")
    self.accessor.create_metric(metric1)
    metric2 = bg_test_utils.make_metric("g.h.i.j.k.l")
    self.accessor.create_metric(metric2)
    self.flush()

    # Check that the metrics exist before the cleanup.
    self.assertEqual(self.accessor.has_metric(metric1.name), True)
    self.assertEqual(self.accessor.has_metric(metric2.name), True)

    # Set cutoff time in the past to delete nothing.
    cutoff = 3600
    self.accessor.clean(cutoff)

    # Check that the metrics still exist after the cleanup.
    self.assertEqual(self.accessor.has_metric(metric1.name), True)
    self.assertEqual(self.accessor.has_metric(metric2.name), True)
def test_get_metric_doubledots(self):
    """Names with empty components ("..") are findable, but glob only shows the clean name."""
    self.accessor.create_metric(bg_test_utils.make_metric("a.b..c"))
    self.accessor.create_metric(bg_test_utils.make_metric("a.b.c"))
    self.flush()

    # Globbing surfaces only the sanitized name.
    self.assertEqual(["a.b.c"], list(self.accessor.glob_metric_names("a.b.*")))
    # Direct lookups with the raw (double-dot) name still succeed.
    self.assertEqual(True, self.accessor.has_metric("a.b..c"))
    self.assertNotEqual(None, self.accessor.get_metric("a.b..c"))
def test_cache_clean(self):
    """Check that the cache is cleared out of metrics older than the TTL."""
    with freeze_time("2014-01-01 00:00:00"):
        stale = bg_test_utils.make_metric("i.am.old")
        self.metadata_cache.create_metric(stale)
    with freeze_time("2015-01-01 00:00:00"):
        fresh = bg_test_utils.make_metric("i.am.new")
        self.metadata_cache.create_metric(fresh)
    with freeze_time("2015-01-01 20:00:00"):
        self.metadata_cache.clean()
    # Only the recently-created metric survives the clean.
    self.assertEqual(self.metadata_cache._cache_has(fresh.name), True)
    self.assertEqual(self.metadata_cache._cache_has(stale.name), False)
def test_carbon_protocol_read(self):
    """Reader merges carbonlink-provided points into the fetched series."""
    metric_name = "fake.name"
    # Custom aggregator to make sure all goes right.
    metric = bg_test_utils.make_metric(
        _METRIC_NAME, aggregator=bg_metric.Aggregator.minimum
    )
    self.accessor.create_metric(metric)
    self.accessor.flush()
    self.reader = bg_graphite.Reader(
        self.accessor, self.metadata_cache, self.carbonlink, metric_name
    )
    patcher = mock.patch("graphite.carbonlink.CarbonLinkPool.query")
    with patcher as carbonlink_query_mock:
        carbonlink_query_mock.return_value = [
            (864005.0, 100.0),
            (864065.0, 101.0),
            (864125.0, 102.0),
        ]
        (start, end, step), points = self.fetch(
            start_time=self._POINTS_START + 3,
            end_time=self._POINTS_END - 3,
            now=self._POINTS_END + 10,
        )
        # The window must come back at a 1-second resolution.
        self.assertEqual(start, self._POINTS_START + 3)
        self.assertEqual(end, self._POINTS_END - 3)
        self.assertEqual(step, 1)
        # One point per second over the fetched window.
        self.assertEqual(len(points), end - start)
        # A carbonlink value lands at its expected offset.
        self.assertEqual(points[864005 - start], 100.0)
def test_delete_metric_should_remove_metric_from_index(self):
    """Deleting one metric must not affect its siblings in the index."""
    prefix = "test_delete_metric_should_remove_metric_from_index"
    doomed = "%s.to_delete" % prefix
    survivor = "%s.not_to_delete" % prefix
    for name in (doomed, survivor):
        self.accessor.create_metric(bg_test_utils.make_metric(name))
    self.accessor.flush()

    self.accessor.delete_metric(doomed)
    self.accessor.flush()

    # Only the deleted metric disappears from the index.
    self.assertIsNone(self.accessor.get_metric(doomed))
    self.assertIsNotNone(self.accessor.get_metric(survivor))
def test_delete_directory_should_delete_all_metrics_in_the_given_directory(
        self):
    """delete_directory() removes exactly the metrics under the given prefix."""
    prefix = (
        "test_delete_directory_should_delete_all_metrics_in_the_given_directory"
    )
    doomed_dir = "%s.should_not_be_found" % prefix
    kept_dir = "%s.should_be_found" % prefix
    leaves = ["a", "b", "c"]
    doomed = ["%s.%s" % (doomed_dir, leaf) for leaf in leaves]
    kept = ["%s.%s" % (kept_dir, leaf) for leaf in leaves]
    for name in kept + doomed:
        self.accessor.create_metric(bg_test_utils.make_metric(name))
    self.accessor.flush()

    self.accessor.delete_directory(doomed_dir)
    self.accessor.flush()

    # Metrics outside the deleted directory are untouched...
    for name in kept:
        self.assertIsNotNone(self.accessor.get_metric(name))
    # ...while everything under it is gone.
    for name in doomed:
        self.assertIsNone(self.accessor.get_metric(name))
def test_carbon_protocol_read(self):
    """Reader merges carbonlink-provided points into the fetched series."""
    metric_name = "fake.name"
    metric = bg_test_utils.make_metric(_METRIC_NAME)
    # Custom aggregator to make sure all goes right.
    metric.metadata.aggregator = bg_metric.Aggregator.minimum
    self.accessor.create_metric(metric)
    self.accessor.flush()
    self.reader = bg_graphite.Reader(
        self.accessor, self.metadata_cache, self.carbonlink, metric_name
    )
    with mock.patch("graphite.carbonlink.CarbonLinkPool.query") as query_mock:
        query_mock.return_value = [
            (864005.0, 100.0),
            (864065.0, 101.0),
            (864125.0, 102.0),
        ]
        (start, end, step), points = self.fetch(
            start_time=self._POINTS_START + 3,
            end_time=self._POINTS_END - 3,
            now=self._POINTS_END + 10,
        )
        # Expect a 1-second resolution over the requested window.
        self.assertEqual(start, self._POINTS_START + 3)
        self.assertEqual(end, self._POINTS_END - 3)
        self.assertEqual(step, 1)
        # One point per second over the fetched window.
        self.assertEqual(len(points), end - start)
        # A carbonlink-provided point lands at the right offset.
        self.assertEqual(points[864005 - start], 100.0)
def test_graphite_glob(self):
    """graphite_glob() resolves wildcards, choices and globstars."""
    for name in self._metric_names:
        self.accessor.create_metric(bg_test_utils.make_metric(name))
    scenarii = [
        # Single character wildcard
        ("a?", ["aa"], []),
        # Component wildcard
        ("*", self._metrics_by_length[1], ["a", "x"]),
        ("*.*.c", ["a.b.c", "x.y.c"], []),
        ("a.*.*", ["a.a.a", "a.b.c", "a.b.d"], []),
        ("*.*.*", self._metrics_by_length[3], []),
        # Multi-character wildcard
        ("a*", ["a", "aa", "aaa"], ["a"]),
        # Choices
        ("*.{b,c,d,5}.?", ["a.b.c", "a.b.d"], []),
        # Globstar wildcard
        ("a.**", ["a.a.a", "a.b.c", "a.b.d"], ["a.a", "a.b"]),
    ]
    for glob, want_metrics, want_dirs in scenarii:
        metrics, directories = bg_glob.graphite_glob(self.accessor, glob)
        got_metrics = [m.name for m in metrics]
        got_dirs = [d.name for d in directories]
        self.assertEqual((want_metrics, want_dirs), (got_metrics, got_dirs))
def test_has_metric(self):
    """has_metric() flips from False to True once the metric is created."""
    metric = bg_test_utils.make_metric("a.b.c.d.e.f")
    self.assertEqual(False, self.accessor.has_metric(metric.name))
    self.accessor.create_metric(metric)
    self.flush()
    self.assertEqual(True, self.accessor.has_metric(metric.name))
def test_glob_directories(self):
    """glob_directory_names() matches directories at various depths."""
    is_elasticsearch = (
        self.ACCESSOR_SETTINGS.get("driver", "") == "elasticsearch"
    )
    for name in ("a", "a.b", "x.y.z"):
        self.accessor.create_metric(bg_test_utils.make_metric(name))
    self.flush()

    def assert_find(glob, expected_matches):
        # Check we can find the matches of a glob.
        self.assertEqual(
            expected_matches, list(self.accessor.glob_directory_names(glob))
        )

    assert_find("x.y", ["x.y"])  # Test exact match
    assert_find("A", [])  # Test case mismatch

    # Test various depths.
    assert_find("*", ["a", "x"])
    if is_elasticsearch:
        # NOTE(review): elasticsearch echoes the glob component verbatim
        # here; this mirrors the driver's current behavior.
        assert_find("*.*", ["*.y"])
    else:
        assert_find("*.*", ["x.y"])
    assert_find("*.*.*", [])

    self.accessor.drop_all_metrics()
    assert_find("*", [])
def test_delete_directory_should_delete_all_metrics_in_the_given_directory(self):
    """Only metrics under the deleted directory prefix disappear."""
    prefix = (
        "test_delete_directory_should_delete_all_metrics_in_the_given_directory"
    )
    removed_dir = "%s.should_not_be_found" % prefix
    retained_dir = "%s.should_be_found" % prefix
    suffixes = ["a", "b", "c"]
    removed_names = ["%s.%s" % (removed_dir, s) for s in suffixes]
    retained_names = ["%s.%s" % (retained_dir, s) for s in suffixes]
    for name in retained_names + removed_names:
        self.accessor.create_metric(bg_test_utils.make_metric(name))
    self.accessor.flush()

    self.accessor.delete_directory(removed_dir)
    self.accessor.flush()

    # Metrics outside the directory survive; those inside are gone.
    for name in retained_names:
        self.assertIsNotNone(self.accessor.get_metric(name))
    for name in removed_names:
        self.assertIsNone(self.accessor.get_metric(name))
def setUp(self):
    """Create a small metric tree and a Finder over it."""
    super(TestFinder, self).setUp()
    for metric_name in ("a", "a.a", "a.b.c", "x.y"):
        self.accessor.create_metric(bg_test_utils.make_metric(metric_name))
    self.finder = bg_graphite.Finder(
        accessor=self.accessor, metadata_cache=self.metadata_cache
    )
def test_create_async(self):
    """Async creation defers existence until _createOneMetric() runs."""
    metric_name = "a.b.c"
    metric = bg_test_utils.make_metric(metric_name)
    self._plugin._createAsyncOrig(metric, metric_name)
    # Queued but not yet created.
    self.assertFalse(self._plugin.exists(metric_name))
    self._plugin._createOneMetric()
    self.assertTrue(self._plugin.exists(metric_name))

    # See if we can update.
    metric = bg_test_utils.make_metric(metric_name)
    metric.metadata.retention = bg_metric.Retention([bg_metric.Stage(1, 1)])
    self._plugin._createAsyncOrig(metric, metric_name)
    self._plugin._createOneMetric()
    # The stored retention must reflect the update.
    retention = self._plugin.getMetadata(metric_name, "retention")
    self.assertEqual(retention, metric.metadata.retention)
def test_update_metric_should_update_metric_metadata(self):
    """update_metric() replaces metadata and bumps updated_on."""
    metric_name = "test_update_metric_should_update_metric_metadata.a.b.c"
    create_date = datetime.datetime(2014, 1, 1)
    update_date = datetime.datetime(2018, 1, 1)
    initial_metadata = MetricMetadata(
        aggregator=Aggregator.total,
        retention=Retention.from_string("42*1s"),
        carbon_xfilesfactor=0.5,
    )
    new_metadata = MetricMetadata(
        aggregator=Aggregator.average,
        retention=Retention.from_string("43*100s"),
        carbon_xfilesfactor=0.25,
    )

    with freezegun.freeze_time(create_date):
        self.accessor.create_metric(
            bg_test_utils.make_metric(metric_name, initial_metadata)
        )
        self.accessor.flush()

    with freezegun.freeze_time(update_date):
        self.accessor.update_metric(metric_name, new_metadata)
        self.accessor.flush()

    # The stored metric reflects the new metadata and the update time.
    metric = self.accessor.get_metric(metric_name)
    self.assertEqual(update_date, metric.updated_on)
    self.assertEqual(new_metadata, metric.metadata)
def test_map(self):
    """map() visits every metric and reports consistent progress."""
    for name in ("a.b.c.d.e.f", "g.h.i.j.k.l"):
        self.accessor.create_metric(bg_test_utils.make_metric(name))
    self.flush()

    def _callback(metric, done, total):
        # Each visited metric is real and progress never overshoots.
        self.assertIsNotNone(metric)
        self.assertTrue(done <= total)

    def _errback(name):
        self.assertIsNotNone(name)

    self.accessor.map(_callback, errback=_errback)
def test_fetch_empty(self):
    """Fetching after drop_all_metrics() yields no points."""
    no_such_metric = bg_test_utils.make_metric("no.such.metric")
    self.accessor.insert_points(_METRIC, _POINTS)
    self.flush()
    self.accessor.drop_all_metrics()
    # An unknown metric has no points.
    self.assertEqual(len(self.fetch(no_such_metric, _POINTS_START, _POINTS_END)), 0)
    # Fixed: the original `assertFalse(len(...), 0)` silently treated the
    # trailing 0 as the failure *message*; assertEqual states the intent.
    self.assertEqual(len(self.fetch(_METRIC, _POINTS_START, _POINTS_END)), 0)
def test_graphite_glob(self):
    """graphite_glob() resolves each glob to the expected metrics and dirs."""
    for name in self._metric_names:
        self.accessor.create_metric(bg_test_utils.make_metric(name))
    cases = [
        # Single character wildcard
        ("a?", ["aa"], []),
        # Component wildcard
        ("*", self._metrics_by_length[1], ["a", "x"]),
        ("*.*.c", ["a.b.c", "x.y.c"], []),
        ("a.*.*", ["a.a.a", "a.b.c", "a.b.d"], []),
        ("*.*.*", self._metrics_by_length[3], []),
        # Multi-character wildcard
        ("a*", ["a", "aa", "aaa"], ["a"]),
        # Choices
        ("*.{b,c,d,5}.?", ["a.b.c", "a.b.d"], []),
        # Globstar wildcard
        ("a.**", ["a.a.a", "a.b.c", "a.b.d"], ["a.a", "a.b"]),
    ]
    for glob, expected_metrics, expected_dirs in cases:
        metrics, dirs = bg_glob.graphite_glob(self.accessor, glob)
        self.assertEqual(
            (expected_metrics, expected_dirs),
            ([m.name for m in metrics], [d.name for d in dirs]),
        )
def test_glob_directories(self):
    """Directory globbing works at every depth and after a drop."""
    es_driver = self.ACCESSOR_SETTINGS.get("driver", "") == "elasticsearch"
    for name in ("a", "a.b", "x.y.z"):
        self.accessor.create_metric(bg_test_utils.make_metric(name))
    self.flush()

    def assert_find(glob, expected_matches):
        # Check we can find the matches of a glob.
        found = list(self.accessor.glob_directory_names(glob))
        self.assertEqual(expected_matches, found)

    assert_find("x.y", ["x.y"])  # Test exact match
    assert_find("A", [])  # Test case mismatch

    # Test various depths.
    assert_find("*", ["a", "x"])
    if es_driver:
        # NOTE(review): elasticsearch echoes the glob component verbatim
        # for the first component; mirrors current driver behavior.
        assert_find("*.*", ["*.y"])
    else:
        assert_find("*.*", ["x.y"])
    assert_find("*.*.*", [])

    self.accessor.drop_all_metrics()
    assert_find("*", [])
def test_run(self):
    """The list command finds metrics via plain and graphite-style globs."""
    self.accessor.drop_all_metrics()
    cmd = command_list.CommandList()
    parser = argparse.ArgumentParser()
    bg_settings.add_argparse_arguments(parser)
    cmd.add_arguments(parser)

    name = "foo.bar"
    metadata = bg_metric.MetricMetadata(
        retention=bg_metric.Retention.from_string("1440*60s")
    )
    self.accessor.create_metric(bg_test_utils.make_metric(name, metadata))

    # Plain glob.
    opts = parser.parse_args(["foo.*"])
    cmd.run(self.accessor, opts)
    metrics = command_list.list_metrics(self.accessor, opts.glob, opts.graphite)
    self.assertEqual(name, list(metrics)[0].name)

    # Graphite-style glob.
    opts = parser.parse_args(["--graphite", "foo.{bar}"])
    metrics = command_list.list_metrics(self.accessor, opts.glob, opts.graphite)
    self.assertEqual(name, list(metrics)[0].name)
def test_run(self, mock_stdout):
    """bgutil read prints every metric matching the glob."""
    self.accessor.drop_all_metrics()
    for name in self.metrics:
        self.accessor.create_metric(bg_test_utils.make_metric(name))

    bgutil.main(["--driver=memory", "read", "**"], self.accessor)

    printed = mock_stdout.getvalue()
    for name in self.metrics:
        self.assertIn(name, printed)
def setUp(self):
    """Populate a small metric tree, then build the Finder under test."""
    super(TestFinder, self).setUp()
    for name in ("a", "a.a", "a.b.c", "x.y"):
        metric = bg_test_utils.make_metric(name)
        self.accessor.create_metric(metric)
    self.finder = bg_graphite.Finder(
        accessor=self.accessor,
        metadata_cache=self.metadata_cache,
    )
def test_create_async(self):
    """Metrics queued via _createAsyncOrig only exist after _createOneMetric()."""
    metric_name = "a.b.c"
    self._plugin._createAsyncOrig(
        bg_test_utils.make_metric(metric_name), metric_name
    )
    # Queued, not yet created.
    self.assertFalse(self._plugin.exists(metric_name))
    self._plugin._createOneMetric()
    self.assertTrue(self._plugin.exists(metric_name))

    # See if we can update.
    metric = bg_test_utils.make_metric(
        metric_name, retention=bg_metric.Retention([bg_metric.Stage(1, 1)])
    )
    self._plugin._createAsyncOrig(metric, metric_name)
    self._plugin._createOneMetric()
    # The stored retention must reflect the updated metadata.
    retention = self._plugin.getMetadata(metric_name, "retention")
    self.assertEqual(retention, metric.metadata.retention)
def test_fetch_empty(self):
    """Fetching after drop_all_metrics() yields no points."""
    no_such_metric = bg_test_utils.make_metric("no.such.metric")
    self.accessor.insert_points(_METRIC, _POINTS)
    self.flush()
    self.accessor.drop_all_metrics()
    # An unknown metric has no points.
    self.assertEqual(
        len(self.fetch(no_such_metric, _POINTS_START, _POINTS_END)), 0)
    # Fixed: the original `assertFalse(len(...), 0)` silently treated the
    # trailing 0 as the failure *message*; assertEqual states the intent.
    self.assertEqual(
        len(self.fetch(_METRIC, _POINTS_START, _POINTS_END)), 0)
def test_glob_metrics_should_not_return_metrics_not_matching_glob(self):
    """A glob that matches nothing returns an empty result."""
    metric_name = "test_glob_metrics_should_not_return_metrics_not_matching_glob.a.b.c"
    self.accessor.create_metric(
        bg_test_utils.make_metric(metric_name, _create_default_metadata())
    )
    self.accessor.flush()

    found = self.accessor.glob_metrics("*.foo.bar")
    self.assertEqual(len(found), 0, "No metric should have been found")
def test_fetch_doubledots(self):
    """Points written under "a.b..c" are readable via "a.b.c" too."""
    raw = bg_test_utils.make_metric("a.b..c")
    sanitized = bg_test_utils.make_metric("a.b.c")
    points = [(1, 42)]
    self.accessor.create_metric(raw)
    self.accessor.create_metric(sanitized)
    self.flush()
    self.accessor.insert_points(raw, points)
    self.flush()

    # Both spellings of the name read back the same points.
    for lookup in (raw, sanitized):
        actual_points = self.accessor.fetch_points(
            lookup, 1, 2, stage=raw.retention[0]
        )
        self.assertEqual(points, list(actual_points))
def test_delete_metric_should_remove_metric_from_name(self):
    """A deleted metric can no longer be fetched by name."""
    metric_name = "test_delete_metric_should_remove_metric_from_name.a.b.c"
    self.accessor.create_metric(bg_test_utils.make_metric(metric_name))
    self.accessor.flush()

    self.accessor.delete_metric(metric_name)
    self.accessor.flush()

    self.assertIsNone(self.accessor.get_metric(metric_name))
def test_write_doubledots(self):
    """Plugin writes to "a.b..c" are readable under both raw and clean names."""
    raw = bg_test_utils.make_metric("a.b..c")
    sanitized = bg_test_utils.make_metric("a.b.c")
    points = [(1, 42)]
    self.accessor.create_metric(raw)
    self._plugin.write(raw.name, points)
    self.accessor.flush()

    # The raw name is still visible to lookups.
    self.assertEqual(True, self.accessor.has_metric("a.b..c"))
    self.assertNotEqual(None, self.accessor.get_metric("a.b..c"))

    # Both spellings of the name read back the written points.
    for lookup in (raw, sanitized):
        actual_points = self.accessor.fetch_points(
            lookup, 1, 2, stage=raw.retention[0]
        )
        self.assertEqual(points, list(actual_points))
def test_glob_metrics_should_return_metrics_matching_glob(self):
    """A glob matching an existing metric returns it."""
    metric_name = "test_glob_metrics_should_return_metrics_matching_glob.a.b.c"
    self.accessor.create_metric(
        bg_test_utils.make_metric(metric_name, _create_default_metadata())
    )
    self.accessor.flush()

    found_metrics = self.accessor.glob_metrics("*.a.b.c")
    self.assertGreaterEqual(len(found_metrics), 1, "Expected metric not found")
    self.assertEqual(found_metrics[0].name, metric_name)
def test_glob_too_many_directories(self):
    """Globbing raises TooManyMetrics once the per-pattern limit is exceeded."""
    for name in ("a", "a.b", "x.y.z"):
        self.accessor.create_metric(bg_test_utils.make_metric(name))
    self.flush()

    # Temporarily lower the limit so any glob overflows it. Restore it in
    # a finally block so a failed assertion cannot leak the lowered limit
    # into later tests (the original restored it unconditionally inline).
    old_value = self.accessor.max_metrics_per_pattern
    self.accessor.max_metrics_per_pattern = 1
    try:
        with self.assertRaises(bg_cassandra.TooManyMetrics):
            list(self.accessor.glob_directory_names("**"))
    finally:
        self.accessor.max_metrics_per_pattern = old_value
def test_stats(self):
    """stats() is non-empty both before and after creating a metric."""
    self.assertNotEqual(len(self.metadata_cache.stats()), 0)
    self.metadata_cache.create_metric(bg_test_utils.make_metric("a.b.test"))
    self.assertNotEqual(len(self.metadata_cache.stats()), 0)
def test_fetch_doubledots(self):
    """Points stored under "a.b..c" are readable via "a.b.c" as well."""
    raw_metric = bg_test_utils.make_metric("a.b..c")
    clean_metric = bg_test_utils.make_metric("a.b.c")
    points = [(1, 42)]
    self.accessor.create_metric(raw_metric)
    self.accessor.create_metric(clean_metric)
    self.flush()
    self.accessor.insert_points(raw_metric, points)
    self.flush()

    stage = raw_metric.retention[0]
    # The raw name reads the points back...
    fetched = self.accessor.fetch_points(raw_metric, 1, 2, stage=stage)
    self.assertEqual(points, list(fetched))
    # ...and so does the sanitized name.
    fetched = self.accessor.fetch_points(clean_metric, 1, 2, stage=stage)
    self.assertEqual(points, list(fetched))
def test_run(self):
    """The read command runs cleanly on an existing metric."""
    name = "foo.bar"
    metadata = bg_metric.MetricMetadata.create(
        retention=bg_metric.Retention.from_string("1440*60s")
    )
    self.accessor.create_metric(bg_test_utils.make_metric(name, metadata))

    cmd = command_read.CommandRead()
    parser = argparse.ArgumentParser()
    bg_settings.add_argparse_arguments(parser)
    cmd.add_arguments(parser)
    opts = parser.parse_args([name])
    cmd.run(self.accessor, opts)
def test_created_on_and_updated_on_are_set_upon_creation_date(self):
    """A new metric has created_on == updated_on == the (frozen) creation time."""
    metric_name = "test_created_on_is_set_upon_current_date.a.b.c"
    expected_created_on = datetime.datetime(2017, 1, 2)

    with freezegun.freeze_time(expected_created_on):
        self.accessor.create_metric(
            bg_test_utils.make_metric(metric_name, _create_default_metadata())
        )
        self.accessor.flush()

    metric = self.accessor.get_metric(metric_name)
    self.accessor.flush()
    self.assertEqual(metric.created_on, expected_created_on)
    self.assertEqual(metric.updated_on, expected_created_on)
def test_created_on_and_updated_on_are_set_upon_creation_date(self):
    """Both timestamps equal the frozen clock at creation time."""
    metric_name = "test_created_on_is_set_upon_current_date.a.b.c"
    creation_time = datetime.datetime(2017, 1, 2)

    with freezegun.freeze_time(creation_time):
        metric = bg_test_utils.make_metric(metric_name, _create_default_metadata())
        self.accessor.create_metric(metric)
        self.accessor.flush()

    stored = self.accessor.get_metric(metric_name)
    self.assertEqual(stored.created_on, creation_time)
    self.assertEqual(stored.updated_on, creation_time)
def test_run(self):
    """Running the read command against a known metric completes without error."""
    name = "foo.bar"
    retention = bg_metric.Retention.from_string("1440*60s")
    metadata = bg_metric.MetricMetadata(retention=retention)
    self.accessor.create_metric(bg_test_utils.make_metric(name, metadata))

    cmd = command_read.CommandRead()
    parser = argparse.ArgumentParser()
    bg_settings.add_argparse_arguments(parser)
    cmd.add_arguments(parser)
    cmd.run(self.accessor, parser.parse_args([name]))
def test_updated_on_is_set_upon_current_date_with_created_on_unchanged(self):
    """touch_metric() bumps updated_on but leaves created_on alone."""
    metric_name = "test_created_on_is_set_upon_current_date.a.b.c"
    created_at = datetime.datetime(2000, 1, 1)
    touched_at = datetime.datetime(2011, 1, 1)

    with freezegun.freeze_time(created_at):
        metric = bg_test_utils.make_metric(metric_name, _create_default_metadata())
        self.accessor.create_metric(metric)
        self.accessor.flush()

    with freezegun.freeze_time(touched_at):
        self.accessor.touch_metric(metric)
        self.accessor.flush()

    stored = self.accessor.get_metric(metric_name)
    self.assertEqual(created_at, stored.created_on)
    self.assertEqual(touched_at, stored.updated_on)
def test_delete_metric(self):
    """delete_metric() makes has_metric() report False again."""
    if isinstance(self.accessor, bg_elasticsearch._ElasticSearchAccessor):
        # TODO (t.chataigner) Remove once delete_metric is implemented.
        self.skipTest(
            "delete_metric is not implemented for _ElasticSearchAccessor.")
    metric = bg_test_utils.make_metric("a.b.c.d.e.f")
    self.accessor.create_metric(metric)
    self.flush()
    self.assertEqual(True, self.accessor.has_metric(metric.name))

    self.accessor.delete_metric(metric.name)
    self.flush()
    self.assertEqual(False, self.accessor.has_metric(metric.name))
def get_output(self, args, mock_stdout):
    """Run the du command with *args* and return what it printed."""
    self.accessor.drop_all_metrics()
    for name in self.metrics:
        self.accessor.create_metric(
            bg_test_utils.make_metric(name, self.metadata)
        )
    cmd = command_du.CommandDu()
    parser = argparse.ArgumentParser(add_help=False)
    bg_settings.add_argparse_arguments(parser)
    cmd.add_arguments(parser)
    cmd.run(self.accessor, parser.parse_args(args))
    return mock_stdout.getvalue()
def setUp(self):
    """Set up a delayed writer."""
    super(TestDelayedWriter, self).setUp()
    # Two stages: a fine-grained one and one 100x coarser.
    stage_spec = (self.DURATION, self.PRECISION,
                  self.DURATION, self.PRECISION * 100)
    retention = bg_metric.Retention.from_string("%d*%ds:%d*%ds" % stage_spec)
    self.stage_0 = retention.stages[0]
    self.stage_1 = retention.stages[1]
    metadata = bg_metric.MetricMetadata(
        aggregator=bg_metric.Aggregator.average,
        retention=retention,
    )
    self.metric = test_utils.make_metric(self.METRIC_NAME, metadata=metadata)
    self.accessor.create_metric(self.metric)
    self.dw = bg_dw.DelayedWriter(self.accessor, period_ms=self.WRITER_PERIOD)
def test_clean_not_expired(self):
    """clean() with a cutoff in the past must not delete live metrics."""
    if isinstance(self.accessor, bg_elasticsearch._ElasticSearchAccessor):
        # TODO (t.chataigner) Remove once clean is implemented.
        self.skipTest("clean is not implemented for _ElasticSearchAccessor.")
    # Register the cleanup up-front so it runs even if an assertion below
    # fails (the original registered it last, leaking metrics on failure).
    self.addCleanup(self.accessor.drop_all_metrics)

    metric1 = bg_test_utils.make_metric("a.b.c.d.e.f")
    self.accessor.create_metric(metric1)
    metric2 = bg_test_utils.make_metric("g.h.i.j.k.l")
    self.accessor.create_metric(metric2)
    self.flush()

    # Check that the metrics exist before the cleanup.
    self.assertEqual(self.accessor.has_metric(metric1.name), True)
    self.assertEqual(self.accessor.has_metric(metric2.name), True)

    # Set cutoff time in the past to delete nothing.
    cutoff = 3600
    self.accessor.clean(cutoff)

    # Check that the metrics still exist after the cleanup.
    self.assertEqual(self.accessor.has_metric(metric1.name), True)
    self.assertEqual(self.accessor.has_metric(metric2.name), True)
def test_create_metrics(self):
    """Created metrics round-trip their metadata through the accessor."""
    meta_dict = {
        "aggregator": bg_metric.Aggregator.last,
        "retention": bg_metric.Retention.from_string("60*1s:60*60s"),
        "carbon_xfilesfactor": 0.3,
    }
    metric = bg_test_utils.make_metric("a.b.c.d.e.f", **meta_dict)

    self.assertEqual(False, self.accessor.has_metric(metric.name))
    self.accessor.create_metric(metric)
    self.flush()
    self.assertEqual(True, self.accessor.has_metric(metric.name))

    # Every metadata field survives the round trip.
    metric_again = self.accessor.get_metric(metric.name)
    self.assertEqual(metric.name, metric_again.name)
    for key, value in meta_dict.items():
        self.assertEqual(value, getattr(metric_again.metadata, key))
def test_delete_metric(self):
    """After delete_metric(), the metric is no longer reported as existing."""
    if isinstance(self.accessor, bg_elasticsearch._ElasticSearchAccessor):
        # TODO (t.chataigner) Remove once delete_metric is implemented.
        self.skipTest(
            "delete_metric is not implemented for _ElasticSearchAccessor."
        )
    metric = bg_test_utils.make_metric("a.b.c.d.e.f")
    self.accessor.create_metric(metric)
    self.flush()
    # Present after creation...
    self.assertEqual(self.accessor.has_metric(metric.name), True)
    self.accessor.delete_metric(metric.name)
    self.flush()
    # ...and absent after deletion.
    self.assertEqual(self.accessor.has_metric(metric.name), False)
def get_output(self, args, mock_stdout):
    """Run the stats command with *args* and return captured stdout."""
    self.accessor.drop_all_metrics()
    for name in self.metrics:
        metric = bg_test_utils.make_metric(name, self.metadata)
        self.accessor.create_metric(metric)

    cmd = command_stats.CommandStats()
    parser = argparse.ArgumentParser(add_help=False)
    bg_settings.add_argparse_arguments(parser)
    cmd.add_arguments(parser)
    opts = parser.parse_args(args)
    cmd.run(self.accessor, opts)
    return mock_stdout.getvalue()