def test_get_cross_metric_measures_empty_metrics_no_overlap(self):
    self.assertRaises(
        cross_metric.MetricUnaggregatable,
        cross_metric.get_cross_metric_measures, self.storage, [
            indexer.Metric(uuid.uuid4(), self.archive_policies['low']),
            indexer.Metric(uuid.uuid4(), self.archive_policies['low'])
        ])
Example #2
def _create_metric(self, archive_policy_name="low"):
    """Create a metric and return it."""
    m = indexer.Metric(uuid.uuid4(),
                       self.archive_policies[archive_policy_name])
    m_sql = self.index.create_metric(m.id, str(uuid.uuid4()),
                                     archive_policy_name)
    return m, m_sql
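
A hypothetical call site for the helper above (illustrative only, not from the source; it assumes the indexed metric keeps the UUID it was created with):

    # e.g. inside a test method of the same class:
    metric, metric_sql = self._create_metric(archive_policy_name="medium")
    self.assertEqual(metric.id, metric_sql.id)
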
Example #3
    def _test_create_metric_and_data(self, data, spacing):
        metric = indexer.Metric(
            uuid.uuid4(), self.archive_policies['medium'])
        start_time = utils.datetime_utc(2014, 1, 1, 12)
        incr = datetime.timedelta(seconds=spacing)
        measures = [incoming.Measure(
            utils.dt_in_unix_ns(start_time + incr * n), val)
            for n, val in enumerate(data)]
        self.index.create_metric(metric.id, str(uuid.uuid4()), 'medium')
        self.incoming.add_measures(metric, measures)
        metrics = tests_utils.list_all_incoming_metrics(self.incoming)
        self.storage.process_new_measures(
            self.index, self.incoming, metrics, sync=True)

        return metric
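
A hypothetical invocation of the helper above (illustrative only; it assumes the 'medium' policy includes a one-minute granularity): four values spaced 60 seconds apart, so each lands in its own aggregation bucket:

    metric = self._test_create_metric_and_data([69, 42, 4, 44], spacing=60)
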
Example #4
def test_get_cross_metric_measures_unknown_granularity(self):
    metric2 = indexer.Metric(uuid.uuid4(), self.archive_policies['low'])
    self.incoming.add_measures(self.metric, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.incoming.add_measures(metric2, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.assertRaises(storage.GranularityDoesNotExist,
                      cross_metric.get_cross_metric_measures,
                      self.storage, [self.metric, metric2],
                      granularity=numpy.timedelta64(12345456, 'ms'))
Example #5
def test_get_cross_metric_measures_unknown_aggregation(self):
    metric2 = indexer.Metric(uuid.uuid4(), self.archive_policies['low'])
    self.incoming.add_measures(self.metric, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.incoming.add_measures(metric2, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.assertRaises(storage.AggregationDoesNotExist,
                      cross_metric.get_cross_metric_measures,
                      self.storage, [self.metric, metric2],
                      aggregation='last')
Example #6
    def test_add_and_get_cross_metric_measures_different_archives(self):
        metric2 = indexer.Metric(uuid.uuid4(),
                                 self.archive_policies['no_granularity_match'])
        self.incoming.add_measures(self.metric, [
            incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
            incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
            incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
            incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
        ])
        self.incoming.add_measures(metric2, [
            incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
            incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
            incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
            incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
        ])

        self.assertRaises(cross_metric.MetricUnaggregatable,
                          cross_metric.get_cross_metric_measures, self.storage,
                          [self.metric, metric2])
Example #7
def setUp(self):
    super(TestIncomingDriver, self).setUp()
    # A lot of tests want a metric, so create one
    self.metric = indexer.Metric(uuid.uuid4(),
                                 self.archive_policies["low"])
Example #8
    def test_rewrite_measures_corruption_bad_data(self):
        # Create an archive policy that spans several splits. Each split
        # holds 3600 points, so let's go for 36k points to get 10 splits.
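        # (3600 points x 60 s granularity = a 216000 s, i.e. 2.5 day, span
        # per split; the split keys asserted below are multiples of that span.)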
        apname = str(uuid.uuid4())
        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
        self.index.create_archive_policy(ap)
        self.metric = indexer.Metric(uuid.uuid4(), ap)
        self.index.create_metric(self.metric.id, str(uuid.uuid4()), apname)

        # First store some points scattered across different splits
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69),
            incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42),
            incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4),
            incoming.Measure(datetime64(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()

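        # The four points land in three 216000 s splits:
        #   2016-01-01 12:00         -> split 2015-12-31T00:00 (1451520000)
        #   2016-01-02 13:07 and
        #   2016-01-04 14:09         -> split 2016-01-02T12:00 (1451736000)
        #   2016-01-06 15:12         -> split 2016-01-05T00:00 (1451952000)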
        self.assertEqual(
            {
                carbonara.SplitKey(numpy.datetime64(1451520000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451736000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451952000, 's'),
                                   numpy.timedelta64(1, 'm')),
            },
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     numpy.timedelta64(1,
                                                                       'm')))

        if self.storage.WRITE_FULL:
            assertCompressedIfWriteFull = self.assertTrue
        else:
            assertCompressedIfWriteFull = self.assertFalse
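        # WRITE_FULL drivers rewrite a whole split object on every write, so
        # even the still-active split ends up compressed; append-capable
        # drivers leave it uncompressed until a rewrite occurs, which is what
        # assertCompressedIfWriteFull checks on the newest split below.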

        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(
                numpy.datetime64(1451520000, 's'),
                numpy.timedelta64(60, 's'),
            )
        ], "mean")[0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(
                numpy.datetime64(1451736000, 's'),
                numpy.timedelta64(1, 'm'),
            )
        ], "mean")[0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(1, 'm'),
            )
        ], "mean")[0]
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
            (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
            (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
            (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
        ],
                         self.storage.get_measures(
                             self.metric,
                             granularities=[numpy.timedelta64(1, 'm')]))

        # Test what happens if we write garbage
        self.storage._store_metric_measures(
            self.metric,
            carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(1, 'm'),
            ), "mean", b"oh really?")

        # Now store brand new points that should force a rewrite of one of
        # the splits (keep in mind the back window size is one hour here).
        # We move the BoundTimeSerie processing timeserie far away from its
        # current range.
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2016, 1, 10, 16, 18, 45), 45),
            incoming.Measure(datetime64(2016, 1, 10, 17, 12, 45), 46),
        ])
        self.trigger_processing()
Example #9
    def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self):
        """See LP#1655422"""
        # Create an archive policy that spans several splits. Each split
        # holds 3600 points, so let's go for 36k points to get 10 splits.
        apname = str(uuid.uuid4())
        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
        self.index.create_archive_policy(ap)
        self.metric = indexer.Metric(uuid.uuid4(), ap)
        self.index.create_metric(self.metric.id, str(uuid.uuid4()), apname)

        # First store some points scattered across different splits
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69),
            incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42),
            incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4),
            incoming.Measure(datetime64(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()

        self.assertEqual(
            {
                carbonara.SplitKey(numpy.datetime64(1451520000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451736000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451952000, 's'),
                                   numpy.timedelta64(1, 'm')),
            },
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     numpy.timedelta64(1,
                                                                       'm')))

        if self.storage.WRITE_FULL:
            assertCompressedIfWriteFull = self.assertTrue
        else:
            assertCompressedIfWriteFull = self.assertFalse

        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(
                numpy.datetime64(1451520000, 's'),
                numpy.timedelta64(1, 'm'),
            )
        ], "mean")[0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(
                numpy.datetime64(1451736000, 's'),
                numpy.timedelta64(1, 'm'),
            )
        ], "mean")[0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(numpy.datetime64(1451952000, 's'),
                               numpy.timedelta64(1, 'm'))
        ], "mean")[0]
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
            (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
            (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
            (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
        ],
                         self.storage.get_measures(
                             self.metric,
                             granularities=[numpy.timedelta64(60, 's')]))

        # Now store brand new points that should force a rewrite of one of
        # the splits (keep in mind the back window size is one hour here).
        # We move the BoundTimeSerie processing timeserie far away from its
        # current range.

        # Here we test a special case where the oldest_mutable_timestamp will
        # be 2016-01-10T00:00:00 = 1452384000.0, our new split key.
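        # (1452384000 = 1451520000 + 4 * 216000, i.e. exactly on a split
        # boundary.)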
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2016, 1, 10, 0, 12), 45),
        ])
        self.trigger_processing()

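        # The new point opens the 2016-01-10T00:00:00 split; no measures fall
        # in the 2016-01-07T12:00:00 window, so no split exists for it.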
        self.assertEqual(
            {
                carbonara.SplitKey(numpy.datetime64('2016-01-10T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
            },
            self.storage._list_split_keys_for_metric(self.metric, "mean",
                                                     numpy.timedelta64(1,
                                                                       'm')))
        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(
                numpy.datetime64(1451520000, 's'),
                numpy.timedelta64(1, 'm'),
            )
        ], "mean")[0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(
                numpy.datetime64(1451736000, 's'),
                numpy.timedelta64(1, 'm'),
            )
        ], "mean")[0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(numpy.datetime64(1451952000, 's'),
                               numpy.timedelta64(60, 's'))
        ], "mean")[0]
        # Now this one is compressed because it has been rewritten!
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_measures(self.metric, [
            carbonara.SplitKey(
                numpy.datetime64(1452384000, 's'),
                numpy.timedelta64(1, 'm'),
            )
        ], "mean")[0]
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))

        self.assertEqual([
            (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
            (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
            (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
            (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
            (datetime64(2016, 1, 10, 0, 12), numpy.timedelta64(1, 'm'), 45),
        ],
                         self.storage.get_measures(
                             self.metric,
                             granularities=[numpy.timedelta64(60, 's')]))
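
The split keys asserted in the last two examples follow from simple alignment arithmetic. Here is a minimal, self-contained sketch of that arithmetic, assuming 3600 points per split as the comments above state (POINTS_PER_SPLIT and split_start are local names for illustration, not necessarily the library's API):

    import numpy

    POINTS_PER_SPLIT = 3600  # per the comments in the examples above

    def split_start(timestamp, granularity):
        """Align a timestamp down to the start of its split.

        A split covers POINTS_PER_SPLIT * granularity seconds; split keys
        are multiples of that span counted from the epoch.
        """
        span = POINTS_PER_SPLIT * granularity.astype('timedelta64[s]').astype(int)
        ts = timestamp.astype('datetime64[s]').astype(int)
        return numpy.datetime64(ts // span * span, 's')

    # With a 60 s granularity a split spans 216000 s (2.5 days):
    print(split_start(numpy.datetime64('2016-01-01T12:00:01'),
                      numpy.timedelta64(1, 'm')))
    # -> 2015-12-31T00:00:00, i.e. numpy.datetime64(1451520000, 's'),
    #    matching the first SplitKey asserted above.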