Example 1
 def test_get_cross_metric_measures_empty_metrics_no_overlap(self):
     self.assertRaises(
         cross_metric.MetricUnaggregatable,
         cross_metric.get_cross_metric_measures, self.storage, [
             storage.Metric(uuid.uuid4(), self.archive_policies['low']),
             storage.Metric(uuid.uuid4(), self.archive_policies['low'])
         ])
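
Note: Examples 1, 8, 9 and 10 all use the callable form of assertRaises. As a self-contained reminder of how that form works (standard library only, no Gnocchi code involved), it is equivalent to the context-manager form:

    import unittest

    class AssertRaisesForms(unittest.TestCase):
        def test_both_forms(self):
            # Callable form: assertRaises(exc, callable, *args) invokes
            # callable(*args) and passes only if exc is raised.
            self.assertRaises(ZeroDivisionError, divmod, 1, 0)
            # Context-manager form, equivalent to the line above.
            with self.assertRaises(ZeroDivisionError):
                divmod(1, 0)

    if __name__ == "__main__":
        unittest.main()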
Example 2
 def test_get_cross_metric_measures_unknown_metric(self):
     self.assertEqual([],
                      self.storage.get_cross_metric_measures(
                          [storage.Metric(uuid.uuid4(),
                                          self.archive_policies['low']),
                           storage.Metric(uuid.uuid4(),
                                          self.archive_policies['low'])]))
Example 3
    def test_rewrite_measures_corruption_bad_data(self):
        # Create an archive policy that spans several splits. Each split
        # holds 3600 points, so let's go for 36k points to get 10 splits.
        apname = str(uuid.uuid4())
        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
        self.index.create_archive_policy(ap)
        self.metric = storage.Metric(uuid.uuid4(), ap)
        self.index.create_metric(self.metric.id, str(uuid.uuid4()), apname)

        # First store some points scattered across different splits
        self.incoming.add_measures(self.metric, [
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()

        splits = {'1451520000.0', '1451736000.0', '1451952000.0'}
        self.assertEqual(
            splits,
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     60.0))

        if self.storage.WRITE_FULL:
            assertCompressedIfWriteFull = self.assertTrue
        else:
            assertCompressedIfWriteFull = self.assertFalse
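        # The assertions below verify compression behaviour: WRITE_FULL
        # drivers store every split compressed, while append-capable drivers
        # leave the most recent split uncompressed until it is rewritten.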

        data = self.storage._get_measures(self.metric, '1451520000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451736000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451952000.0', "mean",
                                          60.0)
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
            (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
            (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
            (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
        ], self.storage.get_measures(self.metric, granularity=60.0))

        # Test what happens if we write garbage
        self.storage._store_metric_measures(self.metric, '1451952000.0',
                                            "mean", 60.0, b"oh really?")

        # Now store brand new points that should force a rewrite of one of
        # the splits (keep in mind the back window size is one hour here).
        # We move the BoundTimeSerie processing timeserie far away from its
        # current range.
        self.incoming.add_measures(self.metric, [
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46),
        ])
        self.trigger_processing()
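
The three split keys asserted above follow from simple arithmetic. A minimal sketch, assuming split keys are the measure's epoch timestamp floored to the split span (the 3600-points-per-split figure comes from the comment above; split_key below is a hypothetical helper, not Gnocchi API):

    import datetime

    SPLIT_SPAN = 3600 * 60  # seconds covered by one split at 60 s granularity

    def split_key(*args):
        """Epoch-second split key containing the given UTC timestamp."""
        dt = datetime.datetime(*args, tzinfo=datetime.timezone.utc)
        epoch = int(dt.timestamp())
        return float(epoch - epoch % SPLIT_SPAN)

    assert split_key(2016, 1, 1, 12, 0, 1) == 1451520000.0
    assert split_key(2016, 1, 2, 13, 7, 31) == 1451736000.0
    assert split_key(2016, 1, 4, 14, 9, 31) == 1451736000.0  # same split
    assert split_key(2016, 1, 6, 15, 12, 45) == 1451952000.0

Four measures but only three distinct keys, which is why the test expects three splits.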
Example 4
 def _create_metric(self, archive_policy_name="low"):
     """Create a metric and return it"""
     m = storage.Metric(uuid.uuid4(),
                        self.archive_policies[archive_policy_name])
     m_sql = self.index.create_metric(m.id, str(uuid.uuid4()),
                                      archive_policy_name)
     return m, m_sql
Example 5
 def test_resize_policy(self):
     name = str(uuid.uuid4())
     ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
     self.index.create_archive_policy(ap)
     m = storage.Metric(uuid.uuid4(), ap)
     self.index.create_metric(m.id, str(uuid.uuid4()), str(uuid.uuid4()),
                              name)
     self.storage.add_measures(m, [
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 0), 1),
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1),
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 10), 1),
     ])
     self.storage.process_background_tasks(self.index, sync=True)
     self.assertEqual([
         (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
     ], self.storage.get_measures(m))
     # expand to more points
     self.index.update_archive_policy(
         name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
     self.storage.add_measures(m, [
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1),
     ])
     self.storage.process_background_tasks(self.index, sync=True)
     self.assertEqual([
         (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
     ], self.storage.get_measures(m))
     # shrink timespan
     self.index.update_archive_policy(
         name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
     # unchanged after update if no samples
     self.storage.process_background_tasks(self.index, sync=True)
     self.assertEqual([
         (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
     ], self.storage.get_measures(m))
     # drop points
     self.storage.add_measures(m, [
         storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 20), 1),
     ])
     self.storage.process_background_tasks(self.index, sync=True)
     self.assertEqual([
         (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 20), 5.0, 1.0),
     ], self.storage.get_measures(m))
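
The arithmetic behind the final assertion, as a plain-Python sketch (no Gnocchi API involved): shrinking the policy to points=2 at 5 s granularity keeps a 2 * 5 = 10 s timespan, so once the 12:00:20 measure lands only the two newest buckets survive.

    POINTS, GRANULARITY = 2, 5
    buckets = [0, 5, 10, 15, 20]          # second offsets of stored buckets
    assert buckets[-POINTS:] == [15, 20]  # only 12:00:15 and 12:00:20 remain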
Example 6
    def _test_create_metric_and_data(self, data, spacing):
        metric = storage.Metric(uuid.uuid4(), self.archive_policies['medium'])
        start_time = datetime.datetime(2014, 1, 1, 12)
        incr = datetime.timedelta(seconds=spacing)
        measures = [
            storage.Measure(start_time + incr * n, val)
            for n, val in enumerate(data)
        ]
        self.index.create_metric(metric.id, str(uuid.uuid4()),
                                 str(uuid.uuid4()), 'medium')
        self.storage.add_measures(metric, measures)
        self.storage.process_background_tasks(self.index, sync=True)

        return metric
Example 7
    def _test_create_metric_and_data(self, data, spacing):
        metric = storage.Metric(uuid.uuid4(), self.archive_policies['medium'])
        start_time = utils.datetime_utc(2014, 1, 1, 12)
        incr = datetime.timedelta(seconds=spacing)
        measures = [
            storage.Measure(utils.dt_in_unix_ns(start_time + incr * n), val)
            for n, val in enumerate(data)
        ]
        self.index.create_metric(metric.id, str(uuid.uuid4()), 'medium')
        self.storage.incoming.add_measures(metric, measures)
        metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming)
        self.storage.process_background_tasks(self.index, metrics, sync=True)

        return metric
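
Examples 6 and 7 are the same helper from two API vintages: the newer version wraps timestamps with utils.dt_in_unix_ns, drops the creator argument from create_metric, and routes measures through storage.incoming. The timestamp-spacing idiom they share, as a standalone sketch (the data values are made up for illustration):

    import datetime

    start = datetime.datetime(2014, 1, 1, 12)
    incr = datetime.timedelta(seconds=5)
    data = [69, 42, 4, 44]
    # enumerate() pairs each value with its index, spacing measures evenly.
    stamps = [start + incr * n for n, _ in enumerate(data)]
    assert stamps[-1] == datetime.datetime(2014, 1, 1, 12, 0, 15)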
Example 8
 def test_get_cross_metric_measures_unknown_granularity(self):
     metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low'])
     self.incoming.add_measures(self.metric, [
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
     ])
     self.incoming.add_measures(metric2, [
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
     ])
     self.assertRaises(storage.GranularityDoesNotExist,
                       self.storage.get_cross_metric_measures,
                       [self.metric, metric2],
                       granularity=12345.456)
Example 9
 def test_get_cross_metric_measures_unknown_aggregation(self):
     metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low'])
     self.incoming.add_measures(self.metric, [
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
     ])
     self.incoming.add_measures(metric2, [
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
         storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
     ])
     self.assertRaises(storage.AggregationDoesNotExist,
                       self.storage.get_cross_metric_measures,
                       [self.metric, metric2],
                       aggregation='last')
Example 10
 def test_get_cross_metric_measures_unknown_granularity(self):
     metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low'])
     self.incoming.add_measures(self.metric, [
         storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
         storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
         storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
         storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
     ])
     self.incoming.add_measures(metric2, [
         storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
         storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
         storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
         storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
     ])
     self.assertRaises(storage.GranularityDoesNotExist,
                       cross_metric.get_cross_metric_measures,
                       self.storage, [self.metric, metric2],
                       granularity=numpy.timedelta64(12345456, 'ms'))
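
Example 10 is the numpy-era version of Example 8: the granularity argument is a numpy.timedelta64 rather than a float number of seconds. A standalone check (numpy only) that the two values are the same quantity:

    import numpy

    # 12345456 ms is the 12345.456 s float that Example 8 passes.
    ratio = numpy.timedelta64(12345456, 'ms') / numpy.timedelta64(1, 's')
    assert ratio == 12345.456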
Example 11
    def test_add_and_get_cross_metric_measures_different_archives(self):
        metric2 = storage.Metric(uuid.uuid4(),
                                 self.archive_policies['no_granularity_match'])
        self.incoming.add_measures(self.metric, [
            storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
            storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
            storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
            storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
        ])
        self.incoming.add_measures(metric2, [
            storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
            storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
            storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
            storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
        ])

        self.assertRaises(storage.MetricUnaggregatable,
                          self.storage.get_cross_metric_measures,
                          [self.metric, metric2])
Example 12
    def setUp(self):
        super(TestCarbonaraMigration, self).setUp()
        if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage):
            self.skipTest("This driver is not based on Carbonara")

        self.metric = storage.Metric(uuid.uuid4(),
                                     self.archive_policies['low'])

        archive = carbonara.TimeSerieArchive.from_definitions([
            (v.granularity, v.points)
            for v in self.metric.archive_policy.definition
        ])

        archive_max = carbonara.TimeSerieArchive.from_definitions(
            [(v.granularity, v.points)
             for v in self.metric.archive_policy.definition],
            aggregation_method='max',
        )

        for a in (archive, archive_max):
            a.update(
                carbonara.TimeSerie.from_data([
                    datetime.datetime(2014, 1, 1, 12, 0, 0),
                    datetime.datetime(2014, 1, 1, 12, 0, 4),
                    datetime.datetime(2014, 1, 1, 12, 0, 9)
                ], [4, 5, 6]))

        self.storage._create_metric(self.metric)

        # serialise in old format
        with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.to_dict',
                        autospec=True) as f:
            f.side_effect = _to_dict_v1_3

            self.storage._store_metric_archive(
                self.metric, archive.agg_timeseries[0].aggregation_method,
                archive.serialize())

            self.storage._store_metric_archive(
                self.metric, archive_max.agg_timeseries[0].aggregation_method,
                archive_max.serialize())
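
The mock.patch(..., autospec=True) pattern above temporarily replaces an attribute with a spec-matching mock whose side_effect supplies the legacy behaviour. A minimal standalone illustration of the same pattern (json.dumps is an arbitrary stand-in target, not the Gnocchi method):

    from unittest import mock
    import json

    with mock.patch('json.dumps', autospec=True) as f:
        f.side_effect = lambda obj: 'legacy'  # stand-in for _to_dict_v1_3
        assert json.dumps({}) == 'legacy'     # patched inside the block
    assert json.dumps({}) == '{}'             # original restored afterwards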
Example 13
    def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self):
        """See LP#1655422"""
        # Create an archive policy that spans several splits. Each split
        # holds 3600 points, so let's go for 36k points to get 10 splits.
        apname = str(uuid.uuid4())
        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
        self.index.create_archive_policy(ap)
        self.metric = storage.Metric(uuid.uuid4(), ap)
        self.index.create_metric(self.metric.id, str(uuid.uuid4()), apname)

        # First store some points scattered across different splits
        self.incoming.add_measures(self.metric, [
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4),
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()

        splits = {'1451520000.0', '1451736000.0', '1451952000.0'}
        self.assertEqual(
            splits,
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     60.0))

        if self.storage.WRITE_FULL:
            assertCompressedIfWriteFull = self.assertTrue
        else:
            assertCompressedIfWriteFull = self.assertFalse

        data = self.storage._get_measures(self.metric, '1451520000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451736000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451952000.0', "mean",
                                          60.0)
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
            (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
            (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
            (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
        ], self.storage.get_measures(self.metric, granularity=60.0))

        # Now store brand new points that should force a rewrite of one of
        # the splits (keep in mind the back window size is one hour here).
        # We move the BoundTimeSerie processing timeserie far away from its
        # current range.

        # Here we test a special case where the oldest_mutable_timestamp will
        # be 2016-01-10T00:00:00 = 1452384000.0, our new split key.
        self.incoming.add_measures(self.metric, [
            storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 0, 12), 45),
        ])
        self.trigger_processing()

        self.assertEqual(
            {'1452384000.0', '1451736000.0', '1451520000.0', '1451952000.0'},
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     60.0))
        data = self.storage._get_measures(self.metric, '1451520000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451736000.0', "mean",
                                          60.0)
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1451952000.0', "mean",
                                          60.0)
        # Now this one is compressed because it has been rewritten!
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, '1452384000.0', "mean",
                                          60.0)
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
            (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
            (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
            (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
            (utils.datetime_utc(2016, 1, 10, 0, 12), 60.0, 45),
        ], self.storage.get_measures(self.metric, granularity=60.0))
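
The LP#1655422 edge case in numbers: the comment's claim that 2016-01-10T00:00:00 equals 1452384000.0 checks out, and that epoch is an exact multiple of the 216000 s split span, so the oldest mutable timestamp coincides with the next split key (same assumed floor arithmetic as the sketch after Example 3):

    import datetime

    epoch = int(datetime.datetime(
        2016, 1, 10, tzinfo=datetime.timezone.utc).timestamp())
    assert epoch == 1452384000
    assert epoch % (3600 * 60) == 0  # lands exactly on a split boundary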
Example 14
 def setUp(self):
     super(TestIncomingDriver, self).setUp()
     # A lot of tests want a metric, so create one
     self.metric = storage.Metric(uuid.uuid4(),
                                  self.archive_policies["low"])
Example 15
    def test_rewrite_measures_corruption_missing_file(self):
        # Create an archive policy that spans several splits. Each split
        # holds 3600 points, so let's go for 36k points to get 10 splits.
        apname = str(uuid.uuid4())
        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
        self.index.create_archive_policy(ap)
        self.metric = storage.Metric(uuid.uuid4(), ap)
        self.index.create_metric(self.metric.id, str(uuid.uuid4()), apname)

        # First store some points scattered across different splits
        self.incoming.add_measures(self.metric, [
            storage.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69),
            storage.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42),
            storage.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4),
            storage.Measure(datetime64(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()

        self.assertEqual(
            {
                carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
            },
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     numpy.timedelta64(1,
                                                                       'm')))

        if self.storage.WRITE_FULL:
            assertCompressedIfWriteFull = self.assertTrue
        else:
            assertCompressedIfWriteFull = self.assertFalse

        data = self.storage._get_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1451520000, 's'),
                numpy.timedelta64(1, 'm'),
            ), "mean")
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric,
            carbonara.SplitKey(numpy.datetime64(1451736000, 's'),
                               numpy.timedelta64(1, 'm')), "mean")
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(1, 'm'),
            ), "mean")
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
            (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
            (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
            (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
        ],
                         self.storage.get_measures(
                             self.metric,
                             granularity=numpy.timedelta64(60, 's')))

        # Test what happens if we delete the latest split and then need to
        # compress it!
        self.storage._delete_metric_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(1, 'm'),
            ), 'mean')

        # Now store brand new points that should force a rewrite of one of
        # the splits (keep in mind the back window size is one hour here).
        # We move the BoundTimeSerie processing timeserie far away from its
        # current range.
        self.incoming.add_measures(self.metric, [
            storage.Measure(datetime64(2016, 1, 10, 16, 18, 45), 45),
            storage.Measure(datetime64(2016, 1, 10, 17, 12, 45), 46),
        ])
        self.trigger_processing()
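
Example 15 is the carbonara.SplitKey-era counterpart of Example 3: the keys are the same boundaries, now expressed as numpy datetimes rather than epoch-second floats. A standalone equivalence check (numpy only):

    import numpy

    assert (numpy.datetime64(1451520000, 's')
            == numpy.datetime64('2015-12-31T00:00:00'))
    assert (numpy.datetime64(1451736000, 's')
            == numpy.datetime64('2016-01-02T12:00:00'))
    assert (numpy.datetime64(1451952000, 's')
            == numpy.datetime64('2016-01-05T00:00:00'))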