Example #1
    def test_aggregation_methods(self):
        conf = service.prepare_service([], default_config_files=[])

        ap = archive_policy.ArchivePolicy("foobar", 0, [], ["*"])
        self.assertEqual(
            archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS,
            ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar", 0, [], ["last"])
        self.assertEqual(set(["last"]), ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar", 0, [], ["*", "-mean"])
        self.assertEqual(
            (archive_policy.ArchivePolicy.VALID_AGGREGATION_METHODS -
             set(["mean"])), ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar", 0, [], ["-mean", "-last"])
        self.assertEqual(
            (set(conf.archive_policy.default_aggregation_methods) -
             set(["mean", "last"])), ap.aggregation_methods)

        ap = archive_policy.ArchivePolicy("foobar", 0, [], ["+12pct"])
        self.assertEqual(
            (set(conf.archive_policy.default_aggregation_methods).union(
                set(["12pct"]))), ap.aggregation_methods)
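A quick note on the selector syntax this example exercises: "*" expands to every valid aggregation method, "-x" removes a method from the selected set, and "+x" adds one to the configured defaults. A minimal sketch outside the test harness, with a placeholder policy name and assuming a Gnocchi version with the same ArchivePolicy constructor signature as above:

from gnocchi import archive_policy

# "*" selects every valid aggregation method; "-mean" then removes one.
ap = archive_policy.ArchivePolicy("example", 0, [], ["*", "-mean"])
print(ap.aggregation_methods)
# expected: ArchivePolicy.VALID_AGGREGATION_METHODS minus {"mean"}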
Example #2
    def test_rewrite_measures_corruption_bad_data(self):
        # Create an archive policy that spans several splits. Each split
        # holds 3600 points, so let's go for 36k points to get 10 splits.
        apname = str(uuid.uuid4())
        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
        self.index.create_archive_policy(ap)
        self.metric = storage.Metric(uuid.uuid4(), ap)
        self.index.create_metric(self.metric.id, str(uuid.uuid4()), apname)

        # First store some points scattered across different splits
        self.incoming.add_measures(self.metric, [
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()

        splits = {'1451520000.0', '1451736000.0', '1451952000.0'}
        self.assertEqual(
            splits,
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     60.0))

        if self.storage.WRITE_FULL:
            assertCompressedIfWriteFull = self.assertTrue
        else:
            assertCompressedIfWriteFull = self.assertFalse

        data = self.storage._get_measures(self.metric, '1451520000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451736000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451952000.0', "mean",
                                          60.0)
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
            (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
            (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
            (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
        ], self.storage.get_measures(self.metric, granularity=60.0))

        # Test what happens if we write garbage
        self.storage._store_metric_measures(self.metric, '1451952000.0',
                                            "mean", 60.0, b"oh really?")

        # Now store brand new points that should force a rewrite of one of the
        # splits (keep in mind the back window size is one hour here). We move
        # the BoundTimeSerie processing timeserie far away from its current
        # range.
        self.incoming.add_measures(self.metric, [
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46),
        ])
        self.trigger_processing()
Example #3
 def test_list_ap_rules_ordered(self):
     name = str(uuid.uuid4())
     self.index.create_archive_policy(
         archive_policy.ArchivePolicy(name, 0, {}))
     self.index.create_archive_policy_rule('rule1', 'abc.*', name)
     self.index.create_archive_policy_rule('rule2', 'abc.xyz.*', name)
     self.index.create_archive_policy_rule('rule3', 'abc.xyz', name)
     rules = self.index.list_archive_policy_rules()
     self.assertEqual(3, len(rules))
     self.assertEqual('abc.xyz.*', rules[0]['metric_pattern'])
     self.assertEqual('abc.xyz', rules[1]['metric_pattern'])
     self.assertEqual('abc.*', rules[2]['metric_pattern'])
Example #4
 def test_resize_policy(self):
     name = str(uuid.uuid4())
     ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
     self.index.create_archive_policy(ap)
     m = storage.Metric(uuid.uuid4(), ap)
     self.index.create_metric(m.id, str(uuid.uuid4()), str(uuid.uuid4()),
                              name)
     self.storage.add_measures(m, [
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 0), 1),
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1),
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 10), 1),
     ])
     self.storage.process_background_tasks(self.index, sync=True)
     self.assertEqual([
         (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
     ], self.storage.get_measures(m))
     # expand to more points
     self.index.update_archive_policy(
         name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
     self.storage.add_measures(m, [
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1),
     ])
     self.storage.process_background_tasks(self.index, sync=True)
     self.assertEqual([
         (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
     ], self.storage.get_measures(m))
     # shrink timespan
     self.index.update_archive_policy(
         name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
     # unchanged after update if no samples
     self.storage.process_background_tasks(self.index, sync=True)
     self.assertEqual([
         (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
     ], self.storage.get_measures(m))
     # drop points
     self.storage.add_measures(m, [
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 20), 1),
     ])
     self.storage.process_background_tasks(self.index, sync=True)
     self.assertEqual([
         (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 20), 5.0, 1.0),
     ], self.storage.get_measures(m))
Example #5
 def test_delete_archive_policy(self):
     name = str(uuid.uuid4())
     self.index.create_archive_policy(
         archive_policy.ArchivePolicy(name, 0, {}))
     self.index.delete_archive_policy(name)
     self.assertRaises(indexer.NoSuchArchivePolicy,
                       self.index.delete_archive_policy, name)
     self.assertRaises(indexer.NoSuchArchivePolicy,
                       self.index.delete_archive_policy, str(uuid.uuid4()))
     metric_id = uuid.uuid4()
     self.index.create_metric(metric_id, str(uuid.uuid4()),
                              str(uuid.uuid4()), "low")
     self.assertRaises(indexer.ArchivePolicyInUse,
                       self.index.delete_archive_policy, "low")
     self.index.delete_metric(metric_id)
Example #6
 def test_update_archive_policy(self):
     self.assertRaises(indexer.UnsupportedArchivePolicyChange,
                       self.index.update_archive_policy, "low",
                       [archive_policy.ArchivePolicyItem(granularity=300,
                                                         points=10)])
     self.assertRaises(indexer.UnsupportedArchivePolicyChange,
                       self.index.update_archive_policy, "low",
                       [archive_policy.ArchivePolicyItem(granularity=300,
                                                         points=12),
                        archive_policy.ArchivePolicyItem(granularity=3600,
                                                         points=12),
                        archive_policy.ArchivePolicyItem(granularity=5,
                                                         points=6)])
     apname = str(uuid.uuid4())
     self.index.create_archive_policy(archive_policy.ArchivePolicy(
         apname, 0, [(12, 300), (24, 3600), (30, 86400)]))
     ap = self.index.update_archive_policy(
         apname, [archive_policy.ArchivePolicyItem(granularity=300,
                                                   points=6),
                  archive_policy.ArchivePolicyItem(granularity=3600,
                                                   points=24),
                  archive_policy.ArchivePolicyItem(granularity=86400,
                                                   points=30)])
     self.assertEqual({
         'back_window': 0,
         'aggregation_methods':
         set(self.conf.archive_policy.default_aggregation_methods),
         'definition': [
             {u'granularity': 300, u'points': 6, u'timespan': 1800},
             {u'granularity': 3600, u'points': 24, u'timespan': 86400},
             {u'granularity': 86400, u'points': 30, u'timespan': 2592000}],
         'name': apname}, dict(ap))
     ap = self.index.update_archive_policy(
         apname, [archive_policy.ArchivePolicyItem(granularity=300,
                                                   points=12),
                  archive_policy.ArchivePolicyItem(granularity=3600,
                                                   points=24),
                  archive_policy.ArchivePolicyItem(granularity=86400,
                                                   points=30)])
     self.assertEqual({
         'back_window': 0,
         'aggregation_methods':
         set(self.conf.archive_policy.default_aggregation_methods),
         'definition': [
             {u'granularity': 300, u'points': 12, u'timespan': 3600},
             {u'granularity': 3600, u'points': 24, u'timespan': 86400},
             {u'granularity': 86400, u'points': 30, u'timespan': 2592000}],
         'name': apname}, dict(ap))
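In the expected dictionaries above, each definition's timespan is simply granularity multiplied by points; a quick arithmetic check of the first asserted definition, nothing Gnocchi-specific:

# timespan = granularity * points for the first updated definition above
for granularity, points in [(300, 6), (3600, 24), (86400, 30)]:
    print(granularity * points)
# 1800, 86400, 2592000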
Example #7
 def test_resize_policy(self):
     name = str(uuid.uuid4())
     ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
     self.index.create_archive_policy(ap)
     m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()),
                                  str(uuid.uuid4()), name)
     m = self.index.list_metrics(ids=[m.id])[0]
     self.storage.add_measures(m, [
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 0), 1),
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1),
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 10), 1),
     ])
     self.trigger_processing(self.storage, self.index)
     self.assertEqual([
         (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
     ], self.storage.get_measures(m))
     # expand to more points
     self.index.update_archive_policy(
         name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
     m = self.index.list_metrics(ids=[m.id])[0]
     self.storage.add_measures(m, [
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1),
     ])
     self.trigger_processing(self.storage, self.index)
     self.assertEqual([
         (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
     ], self.storage.get_measures(m))
     # shrink timespan
     self.index.update_archive_policy(
         name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
     m = self.index.list_metrics(ids=[m.id])[0]
     self.assertEqual([
         (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
     ], self.storage.get_measures(m))
Example #8
 def test_resize_policy(self):
     name = str(uuid.uuid4())
     ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
     self.index.create_archive_policy(ap)
     m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), name)
     m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
     self.incoming.add_measures(m.id, [
         incoming.Measure(datetime64(2014, 1, 1, 12, 0, 0), 1),
         incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 1),
         incoming.Measure(datetime64(2014, 1, 1, 12, 0, 10), 1),
     ])
     self.trigger_processing([str(m.id)])
     self.assertEqual([
         (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(5, 's'), 1),
         (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1),
         (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
     ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')]))
     # expand to more points
     self.index.update_archive_policy(
         name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
     m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
     self.incoming.add_measures(m.id, [
         incoming.Measure(datetime64(2014, 1, 1, 12, 0, 15), 1),
     ])
     self.trigger_processing([str(m.id)])
     self.assertEqual([
         (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(5, 's'), 1),
         (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1),
         (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
         (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1),
     ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')]))
     # shrink timespan
     self.index.update_archive_policy(
         name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
     m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
     self.assertEqual([
         (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
         (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1),
     ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')]))
Example #9
    def test_list_ap_rules_ordered(self):
        name = str(uuid.uuid4())
        self.index.create_archive_policy(
            archive_policy.ArchivePolicy(name, 0, {}))
        self.index.create_archive_policy_rule('rule1', 'abc.*', name)
        self.index.create_archive_policy_rule('rule2', 'abc.xyz.*', name)
        self.index.create_archive_policy_rule('rule3', 'abc.xyz', name)
        rules = self.index.list_archive_policy_rules()
        # NOTE(jd) The test is not isolated, there might be more than 3 rules
        found = 0
        for r in rules:
            if r['metric_pattern'] == 'abc.xyz.*':
                found = 1
            if found == 1 and r['metric_pattern'] == 'abc.xyz':
                found = 2
            if found == 2 and r['metric_pattern'] == 'abc.*':
                break
        else:
            self.fail("Metric patterns are not ordered")

        # Ensure we can't delete the archive policy
        self.assertRaises(indexer.ArchivePolicyInUse,
                          self.index.delete_archive_policy, name)
Example #10
    def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self):
        """See LP#1655422"""
        # Create an archive policy that spans several splits. Each split
        # holds 3600 points, so let's go for 36k points to get 10 splits.
        apname = str(uuid.uuid4())
        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
        self.index.create_archive_policy(ap)
        self.metric = storage.Metric(uuid.uuid4(), ap)
        self.index.create_metric(self.metric.id, str(uuid.uuid4()), apname)

        # First store some points scattered across different splits
        self.incoming.add_measures(self.metric, [
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()

        splits = {'1451520000.0', '1451736000.0', '1451952000.0'}
        self.assertEqual(
            splits,
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     60.0))

        if self.storage.WRITE_FULL:
            assertCompressedIfWriteFull = self.assertTrue
        else:
            assertCompressedIfWriteFull = self.assertFalse

        data = self.storage._get_measures(self.metric, '1451520000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451736000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451952000.0', "mean",
                                          60.0)
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
            (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
            (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
            (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
        ], self.storage.get_measures(self.metric, granularity=60.0))

        # Now store brand new points that should force a rewrite of one of the
        # splits (keep in mind the back window size is one hour here). We move
        # the BoundTimeSerie processing timeserie far away from its current
        # range.

        # Here we test a special case where the oldest_mutable_timestamp will
        # be 2016-01-10T00:00:00 = 1452384000.0, our new split key.
        self.incoming.add_measures(self.metric, [
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 0, 12), 45),
        ])
        self.trigger_processing()

        self.assertEqual(
            {'1452384000.0', '1451736000.0', '1451520000.0', '1451952000.0'},
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     60.0))
        data = self.storage._get_measures(self.metric, '1451520000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451736000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451952000.0', "mean",
                                          60.0)
        # Now this one is compressed because it has been rewritten!
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1452384000.0', "mean",
                                          60.0)
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
            (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
            (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
            (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
            (utils.datetime_utc(2016, 1, 10, 0, 12), 60.0, 45),
        ], self.storage.get_measures(self.metric, granularity=60.0))
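The split keys in these rewrite tests are epoch timestamps on 3600-point boundaries (3600 points at 60-second granularity, i.e. 216000 s per split), which is why 1452384000.0 both starts a new split and equals 2016-01-10T00:00:00; a quick standard-library check:

import datetime

split_span = 3600 * 60  # points per split * granularity, in seconds
print((1452384000 - 1451952000) / split_span)  # 2.0 -> two split spans later
print(datetime.datetime.fromtimestamp(1452384000, tz=datetime.timezone.utc))
# 2016-01-10 00:00:00+00:00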
Example #11
class TestCase(BaseTestCase):

    REDIS_DB_INDEX = 0
    REDIS_DB_LOCK = threading.Lock()

    ARCHIVE_POLICIES = {
        'no_granularity_match': archive_policy.ArchivePolicy(
            "no_granularity_match",
            0, [
                # 2 second resolution for a day
                archive_policy.ArchivePolicyItem(
                    granularity=numpy.timedelta64(2, 's'),
                    timespan=numpy.timedelta64(1, 'D'),
                ),
            ],
        ),
        'low': archive_policy.ArchivePolicy(
            "low", 0, [
                # 5 minutes resolution for an hour
                archive_policy.ArchivePolicyItem(
                    granularity=numpy.timedelta64(5, 'm'), points=12),
                # 1 hour resolution for a day
                archive_policy.ArchivePolicyItem(
                    granularity=numpy.timedelta64(1, 'h'), points=24),
                # 1 day resolution for a month
                archive_policy.ArchivePolicyItem(
                    granularity=numpy.timedelta64(1, 'D'), points=30),
            ],
        ),
        'medium': archive_policy.ArchivePolicy(
            "medium", 0, [
                # 1 minute resolution for a day
                archive_policy.ArchivePolicyItem(
                    granularity=numpy.timedelta64(1, 'm'), points=60 * 24),
                # 1 hour resolution for a week
                archive_policy.ArchivePolicyItem(
                    granularity=numpy.timedelta64(1, 'h'), points=7 * 24),
                # 1 day resolution for a year
                archive_policy.ArchivePolicyItem(
                    granularity=numpy.timedelta64(1, 'D'), points=365),
            ],
        ),
        'high': archive_policy.ArchivePolicy(
            "high", 0, [
                # 1 second resolution for an hour
                archive_policy.ArchivePolicyItem(
                    granularity=numpy.timedelta64(1, 's'), points=3600),
                # 1 minute resolution for a week
                archive_policy.ArchivePolicyItem(
                    granularity=numpy.timedelta64(1, 'm'), points=60 * 24 * 7),
                # 1 hour resolution for a year
                archive_policy.ArchivePolicyItem(
                    granularity=numpy.timedelta64(1, 'h'), points=365 * 24),
            ],
        ),
    }

    def setUp(self):
        super(TestCase, self).setUp()

        self.conf = service.prepare_service(
            [], conf=utils.prepare_conf(),
            default_config_files=[],
            logging_level=logging.DEBUG,
            skip_log_opts=True)

        self.index = indexer.get_driver(self.conf)

        self.coord = metricd.get_coordinator_and_start(
            str(uuid.uuid4()),
            self.conf.coordination_url)

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all their
        # tables in a single transaction even with checkfirst=True, so we
        # force the upgrade code path to be sequential to avoid race
        # conditions as the tests run in parallel.
        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        py_root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                               '..',))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.yaml'),
                               group="oslo_policy")

        # NOTE(jd) This allows testing S3 on AWS
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id', "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key', "anythingworks",
                                   group="storage")

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')

        if swexc:
            self.useFixture(fixtures.MockPatch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'ceph':
            self.conf.set_override('ceph_conffile',
                                   os.getenv("CEPH_CONF"),
                                   'storage')
            self.ceph_pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call(("ceph -c %s osd pool create %s "
                                 "16 16 replicated") % (
                    os.getenv("CEPH_CONF"), self.ceph_pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
                subprocess.call(("ceph -c %s osd pool application "
                                 "enable %s rbd") % (
                    os.getenv("CEPH_CONF"), self.ceph_pool_name), shell=True,
                    stdout=f, stderr=subprocess.STDOUT)
            self.conf.set_override('ceph_pool', self.ceph_pool_name, 'storage')

        # Override the bucket prefix to be unique to avoid concurrent access
        # with any other test
        self.conf.set_override("s3_bucket_prefix", str(uuid.uuid4())[:26],
                               "storage")

        self.storage = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)

        if self.conf.storage.driver == 'redis':
            # Create one prefix per test
            self.storage.STORAGE_PREFIX = str(uuid.uuid4()).encode()

        if self.conf.incoming.driver == 'redis':
            self.incoming.SACK_NAME_FORMAT = (
                str(uuid.uuid4()) + incoming.IncomingDriver.SACK_NAME_FORMAT
            )

        self.storage.upgrade()
        self.incoming.upgrade(3)
        self.chef = chef.Chef(
            self.coord, self.incoming, self.index, self.storage)

    def tearDown(self):
        self.index.disconnect()
        self.coord.stop()

        if self.conf.storage.driver == 'ceph':
            with open(os.devnull, 'w') as f:
                ceph_rmpool_command = "ceph -c %s osd pool delete %s %s \
--yes-i-really-really-mean-it" % (os.getenv("CEPH_CONF"), self.ceph_pool_name,
                                  self.ceph_pool_name)
                subprocess.call(ceph_rmpool_command, shell=True,
                                stdout=f, stderr=subprocess.STDOUT)

        super(TestCase, self).tearDown()

    def _create_metric(self, archive_policy_name="low"):
        """Create a metric and return it"""
        m = indexer.Metric(uuid.uuid4(),
                           self.archive_policies[archive_policy_name])
        m_sql = self.index.create_metric(m.id, str(uuid.uuid4()),
                                         archive_policy_name)
        return m, m_sql

    def trigger_processing(self, metrics=None):
        if metrics is None:
            self.chef.process_new_measures_for_sack(
                self.incoming.sack_for_metric(self.metric.id),
                blocking=True, sync=True)
        else:
            self.chef.refresh_metrics(metrics, timeout=True, sync=True)
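Each ArchivePolicyItem in the fixture above pairs a granularity with either a timespan or a point count; when points are given, the retained timespan is granularity times points, which is what the inline comments ("5 minutes resolution for an hour", and so on) summarize. A short check of the 'low' policy, assuming only numpy's timedelta arithmetic:

import numpy

# granularity * points gives the retained timespan for each 'low' item
for granularity, points in [(numpy.timedelta64(5, 'm'), 12),
                            (numpy.timedelta64(1, 'h'), 24),
                            (numpy.timedelta64(1, 'D'), 30)]:
    print(granularity * points)
# 60 minutes, 24 hours, 30 days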
Example #12
File: base.py, Project: shushen/gnocchi
class TestCase(base.BaseTestCase):

    ARCHIVE_POLICIES = {
        'no_granularity_match': archive_policy.ArchivePolicy(
            "no_granularity_match",
            0,
            [
                # 2 second resolution for a day
                archive_policy.ArchivePolicyItem(
                    granularity=2, points=3600 * 24),
                ],
        ),
    }

    @staticmethod
    def path_get(project_file=None):
        root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            '..',
                                            '..',
                                            )
                               )
        if project_file:
            return os.path.join(root, project_file)
        return root

    @classmethod
    def setUpClass(self):
        super(TestCase, self).setUpClass()
        self.conf = service.prepare_service([],
                                            default_config_files=[])
        self.conf.set_override('policy_file',
                               self.path_get('etc/gnocchi/policy.json'),
                               group="oslo_policy")

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all their
        # tables in a single transaction even with checkfirst=True, so we
        # force the upgrade code path to be sequential to avoid race
        # conditions as the tests run in parallel.
        self.coord = coordination.get_coordinator(
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start(start_heart=True)

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        self.archive_policies.update(archive_policy.DEFAULT_ARCHIVE_POLICIES)
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"),
            'storage')

    def setUp(self):
        super(TestCase, self).setUp()
        if swexc:
            self.useFixture(mockpatch.Patch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados',
                                        FakeRadosModule()))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')

        self.storage = storage.get_driver(self.conf)
        # NOTE(jd) Do not upgrade the storage. We don't really need the
        # storage upgrade for now, and the code that upgrades from pre-1.3
        # (TimeSerieArchive) uses a lot of parallel locks, which makes tooz
        # explode because MySQL does not support that many connections in
        # real life.
        # self.storage.upgrade(self.index)

    def tearDown(self):
        self.index.disconnect()
        self.storage.stop()
        super(TestCase, self).tearDown()
Example #13
 def test_create_archive_policy_already_exists(self):
     # NOTE(jd) This archive policy
     # is created by gnocchi.tests on setUp() :)
     self.assertRaises(indexer.ArchivePolicyAlreadyExists,
                       self.index.create_archive_policy,
                       archive_policy.ArchivePolicy("high", 0, {}))
Example #14
    def test_rewrite_measures_corruption_missing_file(self):
        # Create an archive policy that spans several splits. Each split
        # holds 3600 points, so let's go for 36k points to get 10 splits.
        apname = str(uuid.uuid4())
        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
        self.index.create_archive_policy(ap)
        self.metric = indexer.Metric(uuid.uuid4(), ap)
        self.index.create_metric(self.metric.id, str(uuid.uuid4()), apname)

        # First store some points scattered across different splits
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69),
            incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42),
            incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4),
            incoming.Measure(datetime64(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()

        self.assertEqual(
            {
                carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
            },
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     numpy.timedelta64(1,
                                                                       'm')))

        if self.storage.WRITE_FULL:
            assertCompressedIfWriteFull = self.assertTrue
        else:
            assertCompressedIfWriteFull = self.assertFalse

        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(
                numpy.datetime64(1451520000, 's'),
                numpy.timedelta64(1, 'm'),
            )
        ], "mean")[0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(numpy.datetime64(1451736000, 's'),
                               numpy.timedelta64(1, 'm'))
        ], "mean")[0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(1, 'm'),
            )
        ], "mean")[0]
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
            (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
            (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
            (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
        ],
                         self.storage.get_measures(
                             self.metric,
                             granularities=[numpy.timedelta64(60, 's')]))

        # Test what happens if we delete the latest split and then need to
        # compress it!
        self.storage._delete_metric_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(1, 'm'),
            ), 'mean')

        # Now store brand new points that should force a rewrite of one of the
        # splits (keep in mind the back window size is one hour here). We move
        # the BoundTimeSerie processing timeserie far away from its current
        # range.
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2016, 1, 10, 16, 18, 45), 45),
            incoming.Measure(datetime64(2016, 1, 10, 17, 12, 45), 46),
        ])
        self.trigger_processing()
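In this newer API the split keys are numpy.datetime64 boundaries rather than the epoch floats seen earlier, but they name the same instants: the ISO keys listed in the assertion match the integer-second keys passed to _get_measures. A quick check, assuming nothing beyond numpy:

import numpy

# The ISO split keys equal the epoch-second keys used with _get_measures.
for ts in ('2015-12-31T00:00:00', '2016-01-02T12:00:00', '2016-01-05T00:00:00'):
    print(int(numpy.datetime64(ts).astype('datetime64[s]').astype('int64')))
# 1451520000, 1451736000, 1451952000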
Example #15
 def test_max_block_size(self):
     ap = archive_policy.ArchivePolicy("foobar", 0, [(20, 60), (10, 300),
                                                     (10, 5)],
                                       ["-mean", "-last"])
     self.assertEqual(ap.max_block_size, 300)
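Given the (points, granularity) tuples used throughout these examples, the assertion suggests that max_block_size is the coarsest granularity in the definition; a minimal reading of the test data rather than of the Gnocchi implementation itself:

# (points, granularity) pairs from the policy above; the coarsest
# granularity, 300 seconds, matches the asserted max_block_size.
definition = [(20, 60), (10, 300), (10, 5)]
print(max(granularity for _points, granularity in definition))  # 300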
Example #16
    def test_rewrite_measures(self):
        # Create an archive policy that spans several splits. Each split
        # holds 3600 points, so let's go for 36k points to get 10 splits.
        apname = str(uuid.uuid4())
        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
        self.index.create_archive_policy(ap)
        self.metric = storage.Metric(uuid.uuid4(), ap)
        self.index.create_metric(self.metric.id, str(uuid.uuid4()), apname)

        # First store some points scattered across different splits
        self.incoming.add_measures(self.metric, [
            storage.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69),
            storage.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42),
            storage.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4),
            storage.Measure(datetime64(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()

        self.assertEqual(
            {
                carbonara.SplitKey(numpy.datetime64(1451520000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451736000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451952000, 's'),
                                   numpy.timedelta64(1, 'm')),
            },
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     numpy.timedelta64(1,
                                                                       'm')))

        if self.storage.WRITE_FULL:
            assertCompressedIfWriteFull = self.assertTrue
        else:
            assertCompressedIfWriteFull = self.assertFalse

        data = self.storage._get_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1451520000, 's'),
                numpy.timedelta64(1, 'm'),
            ), "mean")
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1451736000, 's'),
                numpy.timedelta64(60, 's'),
            ), "mean")
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(60, 's'),
            ), "mean")
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
            (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
            (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
            (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
        ],
                         self.storage.get_measures(
                             self.metric,
                             granularity=numpy.timedelta64(1, 'm')))

        # Now store brand new points that should force a rewrite of one of the
        # splits (keep in mind the back window size is one hour here). We move
        # the BoundTimeSerie processing timeserie far away from its current
        # range.
        self.incoming.add_measures(self.metric, [
            storage.Measure(datetime64(2016, 1, 10, 16, 18, 45), 45),
            storage.Measure(datetime64(2016, 1, 10, 17, 12, 45), 46),
        ])
        self.trigger_processing()

        self.assertEqual(
            {
                carbonara.SplitKey(numpy.datetime64(1452384000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451736000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451520000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451952000, 's'),
                                   numpy.timedelta64(1, 'm')),
            },
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     numpy.timedelta64(1,
                                                                       'm')))
        data = self.storage._get_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1451520000, 's'),
                numpy.timedelta64(60, 's'),
            ), "mean")
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1451736000, 's'),
                numpy.timedelta64(60, 's'),
            ), "mean")
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(1, 'm'),
            ), "mean")
        # Now this one is compressed because it has been rewritten!
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1452384000, 's'),
                numpy.timedelta64(60, 's'),
            ), "mean")
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
            (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
            (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
            (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
            (datetime64(2016, 1, 10, 16, 18), numpy.timedelta64(1, 'm'), 45),
            (datetime64(2016, 1, 10, 17, 12), numpy.timedelta64(1, 'm'), 46),
        ],
                         self.storage.get_measures(
                             self.metric,
                             granularity=numpy.timedelta64(1, 'm')))
Example #17
class TestCase(base.BaseTestCase):

    ARCHIVE_POLICIES = {
        'no_granularity_match': archive_policy.ArchivePolicy(
            "no_granularity_match",
            0,
            [
                # 2 second resolution for a day
                archive_policy.ArchivePolicyItem(
                    granularity=2, points=3600 * 24),
                ],
        ),
    }

    @staticmethod
    def path_get(project_file=None):
        root = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            '..',
                                            '..',
                                            )
                               )
        if project_file:
            return os.path.join(root, project_file)
        return root

    def setUp(self):
        super(TestCase, self).setUp()
        self.conf = service.prepare_service([],
                                            default_config_files=[])
        self.conf.set_override('policy_file',
                               self.path_get('etc/gnocchi/policy.json'),
                               group="oslo_policy")

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all their
        # tables in a single transaction even with checkfirst=True, so we
        # force the upgrade code path to be sequential to avoid race
        # conditions as the tests run in parallel.
        self.coord = coordination.get_coordinator(
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start()

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            # Force upgrading using Alembic rather than creating the
            # database from scratch so we are sure we don't miss anything
            # in the Alembic upgrades. We have a test to check that
            # upgrades == create but it misses things such as custom CHECK
            # constraints.
            self.index.upgrade(nocreate=True)

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        self.archive_policies.update(archive_policy.DEFAULT_ARCHIVE_POLICIES)
        # Used in gnocchi.gendoc
        if not getattr(self, "skip_archive_policies_creation", False):
            for name, ap in six.iteritems(self.archive_policies):
                # Create basic archive policies
                try:
                    self.index.create_archive_policy(ap)
                except indexer.ArchivePolicyAlreadyExists:
                    pass

        if swexc:
            self.useFixture(mockpatch.Patch(
                'swiftclient.client.Connection',
                FakeSwiftClient))

        self.useFixture(mockpatch.Patch('gnocchi.storage.ceph.rados',
                                        FakeRadosModule()))

        self.conf.set_override(
            'driver',
            os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "null"),
            'storage')

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath',
                                   tempdir.path,
                                   'storage')
        elif self.conf.storage.driver == 'influxdb':
            self.conf.set_override('influxdb_block_until_data_ingested', True,
                                   'storage')
            self.conf.set_override('influxdb_database', 'test', 'storage')
            self.conf.set_override('influxdb_password', 'root', 'storage')
            self.conf.set_override('influxdb_port',
                                   os.getenv("GNOCCHI_TEST_INFLUXDB_PORT",
                                             51234), 'storage')
            # NOTE(ityaptin) Creating a unique database for every test may
            # cause tests to fail by timeout, but it may be useful in some
            # cases.
            if os.getenv("GNOCCHI_TEST_INFLUXDB_UNIQUE_DATABASES"):
                self.conf.set_override("influxdb_database",
                                       "gnocchi_%s" % uuid.uuid4().hex,
                                       'storage')

        self.storage = storage.get_driver(self.conf)
        # NOTE(jd) Do not upgrade the storage. We don't really need the
        # storage upgrade for now, and the code that upgrades from pre-1.3
        # (TimeSerieArchive) uses a lot of parallel locks, which makes tooz
        # explode because MySQL does not support that many connections in
        # real life.
        # self.storage.upgrade(self.index)

        self.mgr = extension.ExtensionManager('gnocchi.aggregates',
                                              invoke_on_load=True)
        self.custom_agg = dict((x.name, x.obj) for x in self.mgr)

    def tearDown(self):
        self.index.disconnect()
        self.storage.stop()
        super(TestCase, self).tearDown()
Example #18
class TestCase(BaseTestCase):

    REDIS_DB_INDEX = 0
    REDIS_DB_LOCK = threading.Lock()

    ARCHIVE_POLICIES = {
        'no_granularity_match':
        archive_policy.ArchivePolicy(
            "no_granularity_match",
            0,
            [
                # 2 second resolution for a day
                archive_policy.ArchivePolicyItem(granularity=2,
                                                 points=3600 * 24),
            ],
        ),
        'low':
        archive_policy.ArchivePolicy(
            "low",
            0,
            [
                # 5 minutes resolution for an hour
                archive_policy.ArchivePolicyItem(granularity=300, points=12),
                # 1 hour resolution for a day
                archive_policy.ArchivePolicyItem(granularity=3600, points=24),
                # 1 day resolution for a month
                archive_policy.ArchivePolicyItem(granularity=3600 * 24,
                                                 points=30),
            ],
        ),
        'medium':
        archive_policy.ArchivePolicy(
            "medium",
            0,
            [
                # 1 minute resolution for a day
                archive_policy.ArchivePolicyItem(granularity=60,
                                                 points=60 * 24),
                # 1 hour resolution for a week
                archive_policy.ArchivePolicyItem(granularity=3600,
                                                 points=7 * 24),
                # 1 day resolution for a year
                archive_policy.ArchivePolicyItem(granularity=3600 * 24,
                                                 points=365),
            ],
        ),
        'high':
        archive_policy.ArchivePolicy(
            "high",
            0,
            [
                # 1 second resolution for an hour
                archive_policy.ArchivePolicyItem(granularity=1, points=3600),
                # 1 minute resolution for a week
                archive_policy.ArchivePolicyItem(granularity=60,
                                                 points=60 * 24 * 7),
                # 1 hour resolution for a year
                archive_policy.ArchivePolicyItem(granularity=3600,
                                                 points=365 * 24),
            ],
        ),
    }

    @classmethod
    def setUpClass(self):
        super(TestCase, self).setUpClass()
        self.conf = service.prepare_service([], default_config_files=[])
        if not os.getenv("GNOCCHI_TEST_DEBUG"):
            daiquiri.setup(outputs=[])

        py_root = os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                '..',
            ))
        self.conf.set_override('paste_config',
                               os.path.join(py_root, 'rest', 'api-paste.ini'),
                               group="api")
        self.conf.set_override('policy_file',
                               os.path.join(py_root, 'rest', 'policy.json'),
                               group="oslo_policy")

        # NOTE(jd) This allows testing S3 on AWS
        if not os.getenv("AWS_ACCESS_KEY_ID"):
            self.conf.set_override('s3_endpoint_url',
                                   os.getenv("GNOCCHI_STORAGE_HTTP_URL"),
                                   group="storage")
            self.conf.set_override('s3_access_key_id',
                                   "gnocchi",
                                   group="storage")
            self.conf.set_override('s3_secret_access_key',
                                   "anythingworks",
                                   group="storage")

        self.index = indexer.get_driver(self.conf)
        self.index.connect()

        # NOTE(jd) Some drivers, at least SQLAlchemy, can't create all their
        # tables in a single transaction even with checkfirst=True, so we
        # force the upgrade code path to be sequential to avoid race
        # conditions as the tests run in parallel.
        self.coord = coordination.get_coordinator(
            self.conf.storage.coordination_url,
            str(uuid.uuid4()).encode('ascii'))

        self.coord.start(start_heart=True)

        with self.coord.get_lock(b"gnocchi-tests-db-lock"):
            self.index.upgrade()

        self.coord.stop()

        self.archive_policies = self.ARCHIVE_POLICIES.copy()
        for name, ap in six.iteritems(self.archive_policies):
            # Create basic archive policies
            try:
                self.index.create_archive_policy(ap)
            except indexer.ArchivePolicyAlreadyExists:
                pass

        storage_driver = os.getenv("GNOCCHI_TEST_STORAGE_DRIVER", "file")
        self.conf.set_override('driver', storage_driver, 'storage')
        if storage_driver == 'ceph':
            self.conf.set_override('ceph_conffile', os.getenv("CEPH_CONF"),
                                   'storage')

    def setUp(self):
        super(TestCase, self).setUp()
        if swexc:
            self.useFixture(
                fixtures.MockPatch('swiftclient.client.Connection',
                                   FakeSwiftClient))

        if self.conf.storage.driver == 'file':
            tempdir = self.useFixture(fixtures.TempDir())
            self.conf.set_override('file_basepath', tempdir.path, 'storage')
        elif self.conf.storage.driver == 'ceph':
            pool_name = uuid.uuid4().hex
            with open(os.devnull, 'w') as f:
                subprocess.call("rados -c %s mkpool %s" %
                                (os.getenv("CEPH_CONF"), pool_name),
                                shell=True,
                                stdout=f,
                                stderr=subprocess.STDOUT)
            self.conf.set_override('ceph_pool', pool_name, 'storage')

        # Override the bucket prefix to be unique to avoid concurrent access
        # with any other test
        self.conf.set_override("s3_bucket_prefix",
                               str(uuid.uuid4())[:26], "storage")

        self.storage = storage.get_driver(self.conf)
        self.incoming = incoming.get_driver(self.conf)

        if self.conf.storage.driver == 'redis':
            # Create one prefix per test
            self.storage.STORAGE_PREFIX = str(uuid.uuid4())

        if self.conf.incoming.driver == 'redis':
            self.incoming.SACK_PREFIX = str(uuid.uuid4())

        self.storage.upgrade()
        self.incoming.upgrade(128)

    def tearDown(self):
        self.index.disconnect()
        self.storage.stop()
        super(TestCase, self).tearDown()