def test_add_and_get_cross_metric_measures_with_holes(self):
    metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low'])
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 5, 31), 8),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.storage.add_measures(metric2, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 2),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2),
    ])
    with mock.patch.object(self.index, 'get_metrics') as f:
        f.return_value = [self.metric, metric2]
        self.storage.process_background_tasks(self.index)
    values = self.storage.get_cross_metric_measures([self.metric, metric2])
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 18.875),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 18.875),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 11.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 22.0),
    ], values)
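# NOTE: a worked example of the expected cross-metric values above (my
# arithmetic, not from the source): at the daily granularity the first
# metric aggregates to (69 + 42 + 8 + 4 + 42) / 5 = 33 and the second to
# (9 + 2 + 6 + 2) / 4 = 4.75; the cross-metric mean of those two
# aggregates is (33 + 4.75) / 2 = 18.875, matching the assertion.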
def test_corrupted_data(self, logger):
    if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage):
        self.skipTest("This driver is not based on Carbonara")
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
    ])
    with mock.patch.object(self.index, 'get_metrics') as f:
        f.return_value = [self.metric]
        self.storage.process_background_tasks(self.index, True)
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 13, 0, 1), 1),
    ])
    with mock.patch.object(self.index, 'get_metrics',
                           return_value=[self.metric]):
        with mock.patch('gnocchi.carbonara.msgpack.unpack',
                        side_effect=ValueError("boom!")):
            with mock.patch('gnocchi.carbonara.msgpack.loads',
                            side_effect=ValueError("boom!")):
                self.storage.process_background_tasks(self.index, True)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 1),
        (utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1),
        (utils.datetime_utc(2014, 1, 1, 13), 300.0, 1),
    ], self.storage.get_measures(self.metric))
def test_add_and_get_cross_metric_measures_with_holes(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric, [
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.incoming.add_measures(metric2, [
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 13, 10), 2),
    ])
    self.trigger_processing([str(self.metric.id), str(metric2.id)])
    values = self.storage.get_cross_metric_measures([self.metric, metric2])
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 18.875),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 18.875),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 11.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 22.0),
    ], values)
def test_add_and_get_cross_metric_measures_with_holes(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric, [
        storage.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime64(2014, 1, 1, 12, 5, 31), 8),
        storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime64(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.incoming.add_measures(metric2, [
        storage.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9),
        storage.Measure(datetime64(2014, 1, 1, 12, 7, 31), 2),
        storage.Measure(datetime64(2014, 1, 1, 12, 9, 31), 6),
        storage.Measure(datetime64(2014, 1, 1, 12, 13, 10), 2),
    ])
    self.trigger_processing([str(self.metric.id), str(metric2.id)])
    values = cross_metric.get_cross_metric_measures(
        self.storage, [self.metric, metric2])
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 0, 0, 0),
         numpy.timedelta64(1, 'D'), 18.875),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0),
         numpy.timedelta64(1, 'h'), 18.875),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0),
         numpy.timedelta64(5, 'm'), 39.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5, 0),
         numpy.timedelta64(5, 'm'), 11.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10, 0),
         numpy.timedelta64(5, 'm'), 22.0),
    ], values)
def test_compute_moving_average(self):
    metric = self._test_create_metric_and_data([69, 42, 6, 44, 7],
                                               spacing=20)
    agg_obj = self.custom_agg['moving-average']
    window = '120s'
    center = 'False'
    result = agg_obj.compute(self.storage, metric, start=None, stop=None,
                             window=window, center=center)
    expected = [(utils.datetime_utc(2014, 1, 1, 12), 120.0, 32.25)]
    self.assertEqual(expected, result)
    center = 'True'
    result = agg_obj.compute(self.storage, metric, start=None, stop=None,
                             window=window, center=center)
    expected = [(utils.datetime_utc(2014, 1, 1, 12, 1), 120.0, 28.875)]
    self.assertEqual(expected, result)
    # FIXME(atmalagon): doing a centered average when there are only two
    # points in the retrieved data seems weird. Better to raise an error
    # or return NaN in this case?
    self.storage.delete_metric(metric)
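# NOTE: a sketch of the non-centered expectation above (my arithmetic,
# assuming the 'medium' policy aggregates at a 60 s grain here): with
# spacing=20 the five samples fall into two one-minute buckets whose
# means are (69 + 42 + 6) / 3 = 39 and (44 + 7) / 2 = 25.5; a 120 s
# window then averages them to (39 + 25.5) / 2 = 32.25.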
def test_add_and_get_cross_metric_measures_with_holes(self):
    metric2, __ = self._create_metric()
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 5, 31), 8),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.storage.add_measures(metric2, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 2),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    values = self.storage.get_cross_metric_measures([self.metric, metric2])
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 18.875),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 18.875),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 11.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 22.0),
    ], values)
def test_rewrite_measures_corruption_bad_data(self):
    # Create an archive policy that spans several splits. Each split
    # being 3600 points, let's go for 36k points so we have 10 splits.
    apname = str(uuid.uuid4())
    ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
    self.index.create_archive_policy(ap)
    self.metric = storage.Metric(uuid.uuid4(), ap)
    self.index.create_metric(self.metric.id, str(uuid.uuid4()), apname)

    # First store some points scattered across different splits
    self.incoming.add_measures(self.metric, [
        storage.Measure(utils.dt_to_unix_ns(2016, 1, 1, 12, 0, 1), 69),
        storage.Measure(utils.dt_to_unix_ns(2016, 1, 2, 13, 7, 31), 42),
        storage.Measure(utils.dt_to_unix_ns(2016, 1, 4, 14, 9, 31), 4),
        storage.Measure(utils.dt_to_unix_ns(2016, 1, 6, 15, 12, 45), 44),
    ])
    self.trigger_processing()

    splits = {'1451520000.0', '1451736000.0', '1451952000.0'}
    self.assertEqual(
        splits,
        self.storage._list_split_keys_for_metric(
            self.metric, "mean", 60.0))

    if self.storage.WRITE_FULL:
        assertCompressedIfWriteFull = self.assertTrue
    else:
        assertCompressedIfWriteFull = self.assertFalse

    data = self.storage._get_measures(
        self.metric, '1451520000.0', "mean", 60.0)
    self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
    data = self.storage._get_measures(
        self.metric, '1451736000.0', "mean", 60.0)
    self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
    data = self.storage._get_measures(
        self.metric, '1451952000.0', "mean", 60.0)
    assertCompressedIfWriteFull(
        carbonara.AggregatedTimeSerie.is_compressed(data))

    self.assertEqual([
        (utils.datetime_utc(2016, 1, 1, 12), 60.0, 69),
        (utils.datetime_utc(2016, 1, 2, 13, 7), 60.0, 42),
        (utils.datetime_utc(2016, 1, 4, 14, 9), 60.0, 4),
        (utils.datetime_utc(2016, 1, 6, 15, 12), 60.0, 44),
    ], self.storage.get_measures(self.metric, granularity=60.0))

    # Test what happens if we write garbage
    self.storage._store_metric_measures(
        self.metric, '1451952000.0', "mean", 60.0, b"oh really?")

    # Now store brand new points that should force a rewrite of one of
    # the splits (keep in mind the back window size is one hour here).
    # We move the BoundTimeSerie processing timeserie far away from its
    # current range.
    self.incoming.add_measures(self.metric, [
        storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 16, 18, 45), 45),
        storage.Measure(utils.dt_to_unix_ns(2016, 1, 10, 17, 12, 45), 46),
    ])
    self.trigger_processing()
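# NOTE: the split keys asserted above are 216000 seconds apart
# (3600 points per split x 60 s granularity), which is why four measures
# spread over six days only touch three splits: the 2016-01-02 and
# 2016-01-04 points land in the same 1451736000.0 split.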
def test_resample_no_metric(self):
    """https://github.com/gnocchixyz/gnocchi/issues/69"""
    self.assertEqual([], self.storage.get_measures(
        self.metric,
        utils.datetime_utc(2014, 1, 1),
        utils.datetime_utc(2015, 1, 1),
        granularity=300,
        resample=3600))
def test_delete_old_measures(self):
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
    ], self.storage.get_measures(self.metric))

    # One year later…
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2015, 1, 1, 12, 0, 1), 69),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2015, 1, 1), 86400.0, 69),
        (utils.datetime_utc(2015, 1, 1, 12), 3600.0, 69),
        (utils.datetime_utc(2015, 1, 1, 12), 300.0, 69),
    ], self.storage.get_measures(self.metric))
def test_delete_old_measures(self):
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.trigger_processing(self.storage, self.index)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
    ], self.storage.get_measures(self.metric))

    # One year later…
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2015, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing(self.storage, self.index)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2015, 1, 1), 86400.0, 69),
        (utils.datetime_utc(2015, 1, 1, 12), 3600.0, 69),
        (utils.datetime_utc(2015, 1, 1, 12), 300.0, 69),
    ], self.storage.get_measures(self.metric))
def _test_gauge_or_ms(self, metric_type, utcnow):
    metric_name = "test_gauge_or_ms"
    metric_key = metric_name + "|" + metric_type
    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36)
    self.server.datagram_received(
        ("%s:1|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()

    r = self.stats.indexer.get_resource('generic',
                                        self.conf.statsd.resource_id,
                                        with_metrics=True)
    metric = r.get_metric(metric_key)
    self.trigger_processing([metric])
    measures = self.storage.get_aggregated_measures(
        {metric: self.aggregations})[metric]
    measures = get_measures_list(measures)
    self.assertEqual({
        "mean": [
            (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.0),
            (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0),
            (datetime64(2015, 1, 7, 13, 58),
             numpy.timedelta64(1, 'm'), 1.0),
        ]
    }, measures)

    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37)
    # This one is going to be ignored
    self.server.datagram_received(
        ("%s:45|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.server.datagram_received(
        ("%s:2|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()
    self.trigger_processing([metric])
    measures = self.storage.get_aggregated_measures(
        {metric: self.aggregations})[metric]
    measures = get_measures_list(measures)
    self.assertEqual({
        "mean": [
            (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.5),
            (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.5),
            (datetime64(2015, 1, 7, 13, 58),
             numpy.timedelta64(1, 'm'), 1.0),
            (datetime64(2015, 1, 7, 13, 59),
             numpy.timedelta64(1, 'm'), 2.0),
        ]
    }, measures)
def test_counter(self, utcnow):
    metric_name = "test_counter"
    metric_key = metric_name + "|c"
    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36)
    self.server.datagram_received(
        ("%s:1|c" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()

    r = self.stats.indexer.get_resource('generic',
                                        self.conf.statsd.resource_id,
                                        with_metrics=True)
    metric = r.get_metric(metric_key)
    self.assertIsNotNone(metric)
    self.trigger_processing([metric])
    measures = self.storage.get_aggregated_measures(
        {metric: self.aggregations})[metric]
    measures = get_measures_list(measures)
    self.assertEqual({
        "mean": [
            (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.0),
            (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0),
            (datetime64(2015, 1, 7, 13, 58),
             numpy.timedelta64(1, 'm'), 1.0),
        ]
    }, measures)

    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37)
    self.server.datagram_received(
        ("%s:45|c" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.server.datagram_received(
        ("%s:2|c|@0.2" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()
    self.trigger_processing([metric])
    measures = self.storage.get_aggregated_measures(
        {metric: self.aggregations})[metric]
    measures = get_measures_list(measures)
    self.assertEqual({
        "mean": [
            (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 28),
            (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 28),
            (datetime64(2015, 1, 7, 13, 58),
             numpy.timedelta64(1, 'm'), 1.0),
            (datetime64(2015, 1, 7, 13, 59),
             numpy.timedelta64(1, 'm'), 55.0),
        ]
    }, measures)
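# NOTE: the 55.0 above follows from statsd sample-rate scaling (my
# arithmetic, not from the source): the "2|c|@0.2" datagram is counted
# as 2 / 0.2 = 10, so the 13:59 bucket holds 45 + 10 = 55, and the hour
# and day means become (1 + 55) / 2 = 28.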
def _test_gauge_or_ms(self, metric_type, utcnow):
    metric_name = "test_gauge_or_ms"
    metric_key = metric_name + "|" + metric_type
    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36)
    self.server.datagram_received(
        ("%s:1|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()

    r = self.stats.indexer.get_resource('generic',
                                        self.conf.statsd.resource_id,
                                        with_metrics=True)
    metric = r.get_metric(metric_key)
    self.storage.process_new_measures(self.stats.indexer,
                                      self.stats.incoming,
                                      [str(metric.id)], sync=True)
    measures = self.storage.get_measures(metric, self.granularities)
    self.assertEqual(
        [(datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.0),
         (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0),
         (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0)],
        measures)

    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37)
    # This one is going to be ignored
    self.server.datagram_received(
        ("%s:45|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.server.datagram_received(
        ("%s:2|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()
    self.storage.process_new_measures(self.stats.indexer,
                                      self.stats.incoming,
                                      [str(metric.id)], sync=True)
    measures = self.storage.get_measures(metric, self.granularities)
    self.assertEqual(
        [(datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.5),
         (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.5),
         (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0),
         (datetime64(2015, 1, 7, 13, 59), numpy.timedelta64(1, 'm'), 2.0)],
        measures)
def test_list_resources_by_duration(self):
    r1 = uuid.uuid4()
    user = str(uuid.uuid4())
    project = str(uuid.uuid4())
    g = self.index.create_resource(
        'generic', r1, user, project,
        user_id=user, project_id=project,
        started_at=utils.datetime_utc(2010, 1, 1, 12, 0),
        ended_at=utils.datetime_utc(2010, 1, 1, 13, 0))
    resources = self.index.list_resources(
        'generic',
        attribute_filter={"and": [
            {"=": {"project_id": project}},
            {">": {"lifespan": 1800}},
        ]})
    self.assertEqual(1, len(resources))
    self.assertEqual(g, resources[0])
    resources = self.index.list_resources(
        'generic',
        attribute_filter={"and": [
            {"=": {"project_id": project}},
            {">": {"lifespan": 7200}},
        ]})
    self.assertEqual(0, len(resources))
def test_create_resource_with_start_timestamp(self):
    r1 = uuid.uuid4()
    ts = utils.datetime_utc(2014, 1, 1, 23, 34, 23, 1234)
    user = str(uuid.uuid4())
    project = str(uuid.uuid4())
    rc = self.index.create_resource('generic', r1, user, project,
                                    started_at=ts)
    self.assertEqual({
        "id": r1,
        "revision_start": rc.revision_start,
        "revision_end": None,
        "created_by_user_id": user,
        "created_by_project_id": project,
        "user_id": None,
        "project_id": None,
        "started_at": ts,
        "ended_at": None,
        "original_resource_id": None,
        "type": "generic",
        "metrics": {},
    }, rc.jsonify())
    r = self.index.get_resource('generic', r1, with_metrics=True)
    self.assertEqual(rc, r)
def test_search_value(self):
    metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low'])
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 5, 31), 8),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.storage.add_measures(metric2, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 2),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2),
    ])
    with mock.patch.object(self.index, 'get_metrics') as f:
        f.return_value = [self.metric, metric2]
        self.storage.process_background_tasks(self.index)
    self.assertEqual(
        {metric2: [],
         self.metric: [(utils.datetime_utc(2014, 1, 1, 12), 300, 69)]},
        self.storage.search_value([metric2, self.metric], {u"≥": 50}))
    self.assertEqual(
        {metric2: [], self.metric: []},
        self.storage.search_value(
            [metric2, self.metric],
            {u"∧": [{u"eq": 100}, {u"≠": 50}]}))
def _test_gauge(self, metrics, utcnow):
    utcnow.return_value = utils.datetime_utc(2017, 1, 10, 13, 58, 36)
    self.server.process_collectd_message(metrics[0])
    self.stats.flush()
    metric_in_json = ujson.loads(metrics[0])
    metric_name = amqp1d.CollectdStats.serialize_identifier(
        0, metric_in_json[0])
    host = metric_in_json[0]["host"]
    resources = self.stats.indexer.list_resources(
        self.conf.amqp1d.resource_name,
        attribute_filter={"=": {"host": host}})
    self.assertIsNotNone(resources)
    resource = self.stats.indexer.get_resource(
        self.conf.amqp1d.resource_name, resources[0].id,
        with_metrics=True)
    self.assertIsNotNone(resource)
    metric = resource.get_metric(metric_name)
    self.assertIsNotNone(metric)
    self.storage.process_new_measures(self.stats.indexer,
                                      self.stats.incoming,
                                      [str(metric.id)], sync=True)
    measures = self.storage.get_measures(metric)
    self.assertEqual([
        (datetime64(2017, 1, 10), numpy.timedelta64(1, 'D'), 129),
        (datetime64(2017, 1, 10, 13), numpy.timedelta64(1, 'h'), 129),
        (datetime64(2017, 1, 10, 13, 58), numpy.timedelta64(1, 'm'), 129),
    ], measures)
def test_amqp1d(self, utcnow):
    utcnow.return_value = utils.datetime_utc(2017, 1, 10, 13, 58, 36)
    metrics = json.dumps([{
        u'dstypes': [u'gauge'],
        u'plugin': u'memory',
        u'dsnames': [u'value'],
        u'interval': 10.0,
        u'host': u'www.gnocchi.test.com',
        u'values': [9],
        u'time': 1506712460.824,
        u'plugin_instance': u'',
        u'type_instance': u'free',
        u'type': u'memory',
    }, {
        u'dstypes': [u'derive', u'derive'],
        u'plugin': u'interface',
        u'dsnames': [u'rx', u'tx'],
        u'interval': 10.0,
        u'host': u'www.gnocchi.test.com',
        u'values': [2, 5],
        u'time': 1506712460.824,
        u'plugin_instance': u'ens2f1',
        u'type_instance': u'',
        u'type': u'if_errors',
    }])
    self.server.on_message(mock.Mock(message=mock.Mock(body=metrics)))
    self.server.processor.flush()

    resources = self.index.list_resources(
        self.conf.amqp1d.resource_type,
        attribute_filter={"=": {"host": "www.gnocchi.test.com"}})
    self.assertEqual(1, len(resources))
    self.assertEqual("www.gnocchi.test.com", resources[0].host)

    metrics = self.index.list_metrics(
        attribute_filter={'=': {"resource_id": resources[0].id}})
    self.assertEqual(3, len(metrics))

    self.trigger_processing(metrics)

    expected_measures = {
        "memory@memory-free": [
            (datetime64(2017, 1, 10, 13, 58),
             numpy.timedelta64(1, 'm'), 9)],
        "interface-ens2f1@if_errors-rx": [
            (datetime64(2017, 1, 10, 13, 58),
             numpy.timedelta64(1, 'm'), 2)],
        "interface-ens2f1@if_errors-tx": [
            (datetime64(2017, 1, 10, 13, 58),
             numpy.timedelta64(1, 'm'), 5)],
    }
    for metric in metrics:
        aggregation = metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(1, 'm'))
        measures = self.storage.get_measures(metric, [aggregation])
        self.assertEqual(expected_measures[metric.name], measures["mean"])
def test_counter(self, utcnow):
    metric_name = "test_counter"
    metric_key = metric_name + "|c"
    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36)
    self.server.datagram_received(
        ("%s:1|c" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()

    r = self.stats.indexer.get_resource('generic',
                                        self.conf.statsd.resource_id,
                                        with_metrics=True)
    metric = r.get_metric(metric_key)
    self.assertIsNotNone(metric)
    self.storage.process_new_measures(
        self.stats.indexer, self.stats.incoming,
        [str(metric.id)], sync=True)
    measures = self.storage.get_measures(metric)
    self.assertEqual([
        (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.0),
        (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0),
        (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0),
    ], measures)

    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37)
    self.server.datagram_received(
        ("%s:45|c" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.server.datagram_received(
        ("%s:2|c|@0.2" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()
    self.storage.process_new_measures(
        self.stats.indexer, self.stats.incoming,
        [str(metric.id)], sync=True)
    measures = self.storage.get_measures(metric)
    self.assertEqual([
        (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 28),
        (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 28),
        (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0),
        (datetime64(2015, 1, 7, 13, 59), numpy.timedelta64(1, 'm'), 55.0),
    ], measures)
def test_search_value(self):
    metric2, __ = self._create_metric()
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 5, 31), 8),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.storage.add_measures(metric2, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 2),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual(
        {metric2: [],
         self.metric: [
             (utils.datetime_utc(2014, 1, 1), 86400, 33),
             (utils.datetime_utc(2014, 1, 1, 12), 3600, 33),
             (utils.datetime_utc(2014, 1, 1, 12), 300, 69),
             (utils.datetime_utc(2014, 1, 1, 12, 10), 300, 42),
         ]},
        self.storage.search_value([metric2, self.metric], {u"≥": 30}))
    self.assertEqual(
        {metric2: [], self.metric: []},
        self.storage.search_value(
            [metric2, self.metric],
            {u"∧": [{u"eq": 100}, {u"≠": 50}]}))
def test_search_value(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric, [
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 5, 31), 8),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.incoming.add_measures(metric2, [
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 5), 9),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 2),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 6),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 13, 10), 2),
    ])
    self.trigger_processing([str(self.metric.id), str(metric2.id)])
    self.assertEqual(
        {metric2: [],
         self.metric: [
             (utils.datetime_utc(2014, 1, 1), 86400, 33),
             (utils.datetime_utc(2014, 1, 1, 12), 3600, 33),
             (utils.datetime_utc(2014, 1, 1, 12), 300, 69),
             (utils.datetime_utc(2014, 1, 1, 12, 10), 300, 42),
         ]},
        self.storage.search_value([metric2, self.metric], {u"≥": 30}))
    self.assertEqual(
        {metric2: [], self.metric: []},
        self.storage.search_value(
            [metric2, self.metric],
            {u"∧": [{u"eq": 100}, {u"≠": 50}]}))
def test_update_resource_ended_at_fail(self):
    r1 = uuid.uuid4()
    user = str(uuid.uuid4())
    project = str(uuid.uuid4())
    self.index.create_resource('generic', r1, user, project)
    self.assertRaises(
        indexer.ResourceValueError,
        self.index.update_resource,
        'generic', r1,
        ended_at=utils.datetime_utc(2010, 1, 1, 1, 1, 1))
def _test_gauge_or_ms(self, metric_type, utcnow):
    metric_name = "test_gauge_or_ms"
    metric_key = metric_name + "|" + metric_type
    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36)
    self.server.datagram_received(
        ("%s:1|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()

    r = self.stats.indexer.get_resource('generic',
                                        self.conf.statsd.resource_id,
                                        with_metrics=True)
    metric = r.get_metric(metric_key)
    self.trigger_processing([metric])
    measures = self.storage.get_measures(metric, self.aggregations)
    self.assertEqual({"mean": [
        (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.0),
        (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0),
        (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0),
    ]}, measures)

    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37)
    # This one is going to be ignored
    self.server.datagram_received(
        ("%s:45|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.server.datagram_received(
        ("%s:2|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()
    self.trigger_processing([metric])
    measures = self.storage.get_measures(metric, self.aggregations)
    self.assertEqual({"mean": [
        (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.5),
        (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.5),
        (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0),
        (datetime64(2015, 1, 7, 13, 59), numpy.timedelta64(1, 'm'), 2.0),
    ]}, measures)
def test_aborted_initial_processing(self):
    self.incoming.add_measures(self.metric, [
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 5),
    ])
    with mock.patch.object(self.storage, '_store_unaggregated_timeserie',
                           side_effect=Exception):
        try:
            self.trigger_processing()
        except Exception:
            pass

    with mock.patch('gnocchi.storage._carbonara.LOG') as LOG:
        self.trigger_processing()
    self.assertFalse(LOG.error.called)

    m = self.storage.get_measures(self.metric)
    self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 5.0), m)
    self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 3600.0, 5.0), m)
    self.assertIn((utils.datetime_utc(2014, 1, 1, 12), 300.0, 5.0), m)
def test_counter(self, utcnow):
    metric_name = "test_counter"
    metric_key = metric_name + "|c"
    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36)
    self.server.datagram_received(
        ("%s:1|c" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()

    r = self.stats.indexer.get_resource('generic',
                                        self.conf.statsd.resource_id,
                                        with_metrics=True)
    metric = r.get_metric(metric_key)
    self.assertIsNotNone(metric)
    self.trigger_processing([metric])
    measures = self.storage.get_measures(metric, self.aggregations)
    self.assertEqual({"mean": [
        (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 1.0),
        (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 1.0),
        (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0),
    ]}, measures)

    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37)
    self.server.datagram_received(
        ("%s:45|c" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.server.datagram_received(
        ("%s:2|c|@0.2" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()
    self.trigger_processing([metric])
    measures = self.storage.get_measures(metric, self.aggregations)
    self.assertEqual({"mean": [
        (datetime64(2015, 1, 7), numpy.timedelta64(1, 'D'), 28),
        (datetime64(2015, 1, 7, 13), numpy.timedelta64(1, 'h'), 28),
        (datetime64(2015, 1, 7, 13, 58), numpy.timedelta64(1, 'm'), 1.0),
        (datetime64(2015, 1, 7, 13, 59), numpy.timedelta64(1, 'm'), 55.0),
    ]}, measures)
def _test_gauge_or_ms(self, metric_type, utcnow):
    metric_name = "test_gauge_or_ms"
    metric_key = metric_name + "|" + metric_type
    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 58, 36)
    self.server.datagram_received(
        ("%s:1|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()

    r = self.stats.indexer.get_resource('generic',
                                        self.conf.statsd.resource_id,
                                        with_metrics=True)
    metric = r.get_metric(metric_key)
    self.stats.storage.process_background_tasks(self.stats.indexer,
                                                [str(metric.id)],
                                                sync=True)
    measures = self.stats.storage.get_measures(metric)
    self.assertEqual([(utils.datetime_utc(2015, 1, 7), 86400.0, 1.0),
                      (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0),
                      (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0)],
                     measures)

    utcnow.return_value = utils.datetime_utc(2015, 1, 7, 13, 59, 37)
    # This one is going to be ignored
    self.server.datagram_received(
        ("%s:45|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.server.datagram_received(
        ("%s:2|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()
    self.stats.storage.process_background_tasks(self.stats.indexer,
                                                [str(metric.id)],
                                                sync=True)
    measures = self.stats.storage.get_measures(metric)
    self.assertEqual([(utils.datetime_utc(2015, 1, 7), 86400.0, 1.5),
                      (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.5),
                      (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0),
                      (utils.datetime_utc(2015, 1, 7, 13, 59), 60.0, 2.0)],
                     measures)
def test_update_resource_end_timestamp(self):
    r1 = uuid.uuid4()
    user = str(uuid.uuid4())
    project = str(uuid.uuid4())
    self.index.create_resource('generic', r1, user, project)
    self.index.update_resource(
        'generic', r1, ended_at=utils.datetime_utc(2043, 1, 1, 2, 3, 4))
    r = self.index.get_resource('generic', r1, with_metrics=True)
    self.assertIsNotNone(r.started_at)
    self.assertIsNone(r.user_id)
    self.assertIsNone(r.project_id)
    self.assertIsNone(r.revision_end)
    self.assertIsNotNone(r.revision_start)
    self.assertEqual(r1, r.id)
    self.assertEqual(user, r.created_by_user_id)
    self.assertEqual(project, r.created_by_project_id)
    self.assertEqual(utils.datetime_utc(2043, 1, 1, 2, 3, 4), r.ended_at)
    self.assertEqual("generic", r.type)
    self.assertEqual(0, len(r.metrics))

    self.index.update_resource('generic', r1, ended_at=None)
    r = self.index.get_resource('generic', r1, with_metrics=True)
    self.assertIsNotNone(r.started_at)
    self.assertIsNotNone(r.revision_start)
    self.assertEqual({"id": r1,
                      "revision_start": r.revision_start,
                      "revision_end": None,
                      "ended_at": None,
                      "created_by_user_id": user,
                      "created_by_project_id": project,
                      "user_id": None,
                      "project_id": None,
                      "type": "generic",
                      "started_at": r.started_at,
                      "original_resource_id": None,
                      "metrics": {}}, r.jsonify())
def test_corrupted_data(self, logger):
    if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage):
        self.skipTest("This driver is not based on Carbonara")

    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
    ])
    with mock.patch.object(self.index, 'get_metrics') as f:
        f.return_value = [self.metric]
        self.storage.process_background_tasks(self.index, True)

    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 13, 0, 1), 1),
    ])
    with mock.patch.object(self.index, 'get_metrics',
                           return_value=[self.metric]):
        with mock.patch('gnocchi.carbonara.msgpack.unpack',
                        side_effect=ValueError("boom!")):
            with mock.patch('gnocchi.carbonara.msgpack.loads',
                            side_effect=ValueError("boom!")):
                self.storage.process_background_tasks(self.index, True)

    expected_calls = [
        mock.call.debug('Processing measures for %s' % self.metric.id),
        mock.call.debug('Processing measures for %s' % self.metric.id),
    ]
    aggs = ["none"] + self.conf.archive_policy.default_aggregation_methods
    for agg in aggs:
        expected_calls.append(mock.call.error(
            'Data are corrupted for metric %s and aggregation %s, '
            'recreating an empty timeserie.' % (self.metric.id, agg)))
    logger.assert_has_calls(expected_calls, any_order=True)

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 1),
        (utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1),
        (utils.datetime_utc(2014, 1, 1, 13), 300.0, 1),
    ], self.storage.get_measures(self.metric))
def test_corrupted_data(self, logger):
    if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage):
        self.skipTest("This driver is not based on Carbonara")
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing(self.storage, self.index)
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 13, 0, 1), 1),
    ])
    with mock.patch('gnocchi.carbonara.msgpack.unpack',
                    side_effect=ValueError("boom!")):
        with mock.patch('gnocchi.carbonara.msgpack.loads',
                        side_effect=ValueError("boom!")):
            self.trigger_processing(self.storage, self.index)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 1),
        (utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1),
        (utils.datetime_utc(2014, 1, 1, 13), 300.0, 1),
    ], self.storage.get_measures(self.metric))
def test_update_resource_ended_at_fail(self):
    r1 = uuid.uuid4()
    user = uuid.uuid4()
    project = uuid.uuid4()
    self.index.create_resource('instance', r1, user, project,
                               flavor_id="1",
                               image_ref="http://foo/bar",
                               host="foo",
                               display_name="lol")
    self.assertRaises(
        indexer.ResourceValueError,
        self.index.update_resource,
        'instance', r1,
        ended_at=utils.datetime_utc(2010, 1, 1, 1, 1, 1))
def _test_create_metric_and_data(self, data, spacing):
    metric = storage.Metric(uuid.uuid4(), self.archive_policies['medium'])
    start_time = utils.datetime_utc(2014, 1, 1, 12)
    incr = datetime.timedelta(seconds=spacing)
    measures = [
        storage.Measure(utils.dt_in_unix_ns(start_time + incr * n), val)
        for n, val in enumerate(data)
    ]
    self.index.create_metric(metric.id, str(uuid.uuid4()), 'medium')
    self.storage.incoming.add_measures(metric, measures)
    metrics = tests_utils.list_all_incoming_metrics(self.storage.incoming)
    self.storage.process_background_tasks(self.index, metrics, sync=True)
    return metric
def test_corrupted_data(self):
    if not isinstance(self.storage, _carbonara.CarbonaraBasedStorage):
        self.skipTest("This driver is not based on Carbonara")

    self.incoming.add_measures(self.metric, [
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing()

    self.incoming.add_measures(self.metric, [
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 13, 0, 1), 1),
    ])
    with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize',
                    side_effect=carbonara.InvalidData()):
        with mock.patch('gnocchi.carbonara.BoundTimeSerie.unserialize',
                        side_effect=carbonara.InvalidData()):
            self.trigger_processing()

    m = self.storage.get_measures(self.metric)
    self.assertIn((utils.datetime_utc(2014, 1, 1), 86400.0, 1), m)
    self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 3600.0, 1), m)
    self.assertIn((utils.datetime_utc(2014, 1, 1, 13), 300.0, 1), m)
def test_add_and_get_cross_metric_measures_with_holes(self):
    metric2, __ = self._create_metric()
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 5, 31), 8),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.storage.add_measures(metric2, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 2),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    values = self.storage.get_cross_metric_measures([self.metric, metric2])
    self.assertEqual(
        [(utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 18.875),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 18.875),
         (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
         (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 11.0),
         (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 22.0)],
        values)
def test_search_value(self):
    metric2, __ = self._create_metric()
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 5, 31), 8),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.storage.add_measures(metric2, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 2),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 6),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 2),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual(
        {metric2: [],
         self.metric: [
             (utils.datetime_utc(2014, 1, 1), 86400, 33),
             (utils.datetime_utc(2014, 1, 1, 12), 3600, 33),
             (utils.datetime_utc(2014, 1, 1, 12), 300, 69),
             (utils.datetime_utc(2014, 1, 1, 12, 10), 300, 42)]},
        self.storage.search_value(
            [metric2, self.metric], {u"≥": 30}))
    self.assertEqual(
        {metric2: [], self.metric: []},
        self.storage.search_value(
            [metric2, self.metric],
            {u"∧": [{u"eq": 100}, {u"≠": 50}]}))
def test_list_resources_by_duration(self):
    r1 = uuid.uuid4()
    user = uuid.uuid4()
    project = uuid.uuid4()
    g = self.index.create_resource(
        'generic', r1, user, project,
        user_id=user, project_id=project,
        started_at=utils.datetime_utc(2010, 1, 1, 12, 0),
        ended_at=utils.datetime_utc(2010, 1, 1, 13, 0))
    resources = self.index.list_resources(
        'generic',
        attribute_filter={"and": [
            {"=": {"project_id": project}},
            {">": {"lifespan": 1800}},
        ]})
    self.assertEqual(1, len(resources))
    self.assertEqual(g, resources[0])
    resources = self.index.list_resources(
        'generic',
        attribute_filter={"and": [
            {"=": {"project_id": project}},
            {">": {"lifespan": 7200}},
        ]})
    self.assertEqual(0, len(resources))
def test_resize_policy(self):
    name = str(uuid.uuid4())
    ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
    self.index.create_archive_policy(ap)
    m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()),
                                 str(uuid.uuid4()), name)
    m = self.index.list_metrics(ids=[m.id])[0]
    self.storage.add_measures(m, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 0), 1),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 10), 1),
    ])
    self.trigger_processing(self.storage, self.index)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
    ], self.storage.get_measures(m))
    # expand to more points
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
    m = self.index.list_metrics(ids=[m.id])[0]
    self.storage.add_measures(m, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1),
    ])
    self.trigger_processing(self.storage, self.index)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
    ], self.storage.get_measures(m))
    # shrink timespan
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
    m = self.index.list_metrics(ids=[m.id])[0]
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
    ], self.storage.get_measures(m))
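# NOTE: the shrink case above follows from retention = points x
# granularity (my reading of the assertion, not stated in the source):
# dropping the policy from 6 points to 2 at a 5 s grain keeps only the
# last 10 s of data, i.e. the 12:00:10 and 12:00:15 buckets.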
def _test_gauge_or_ms(self, metric_type, utcnow):
    metric_name = "test_gauge_or_ms"
    metric_key = metric_name + "|" + metric_type
    utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 58, 36)
    self.server.datagram_received(
        ("%s:1|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()

    r = self.stats.indexer.get_resource('generic',
                                        self.conf.statsd.resource_id,
                                        with_metrics=True)
    metric = r.get_metric(metric_key)
    metrics = self.stats.storage.list_metric_with_measures_to_process(
        None, None, full=True)
    self.stats.storage.process_background_tasks(
        self.stats.indexer, metrics, sync=True)
    measures = self.stats.storage.get_measures(metric)
    self.assertEqual([
        (utils.datetime_utc(2015, 1, 7), 86400.0, 1.0),
        (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0),
        (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0),
    ], measures)

    utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 59, 37)
    # This one is going to be ignored
    self.server.datagram_received(
        ("%s:45|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.server.datagram_received(
        ("%s:2|%s" % (metric_name, metric_type)).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()
    metrics = self.stats.storage.list_metric_with_measures_to_process(
        None, None, full=True)
    self.stats.storage.process_background_tasks(
        self.stats.indexer, metrics, sync=True)
    measures = self.stats.storage.get_measures(metric)
    self.assertEqual([
        (utils.datetime_utc(2015, 1, 7), 86400.0, 1.5),
        (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.5),
        (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0),
        (utils.datetime_utc(2015, 1, 7, 13, 59), 60.0, 2.0),
    ], measures)
def test_delete_old_measures(self):
    self.incoming.add_measures(self.metric, [
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(utils.dt_to_unix_ns(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.trigger_processing()
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
    ], self.storage.get_measures(self.metric))

    # One year later…
    self.incoming.add_measures(self.metric, [
        storage.Measure(utils.dt_to_unix_ns(2015, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing()
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2015, 1, 1), 86400.0, 69),
        (utils.datetime_utc(2015, 1, 1, 12), 3600.0, 69),
        (utils.datetime_utc(2015, 1, 1, 12), 300.0, 69),
    ], self.storage.get_measures(self.metric))

    self.assertEqual({"1244160000.0"},
                     self.storage._list_split_keys_for_metric(
                         self.metric, "mean", 86400.0))
    self.assertEqual({"1412640000.0"},
                     self.storage._list_split_keys_for_metric(
                         self.metric, "mean", 3600.0))
    self.assertEqual({"1419120000.0"},
                     self.storage._list_split_keys_for_metric(
                         self.metric, "mean", 300.0))
def test_update_resource_ended_at_fail(self):
    r1 = uuid.uuid4()
    user = str(uuid.uuid4())
    project = str(uuid.uuid4())
    self.index.create_resource('instance', r1, user, project,
                               flavor_id="1",
                               image_ref="http://foo/bar",
                               host="foo",
                               display_name="lol")
    self.assertRaises(indexer.ResourceValueError,
                      self.index.update_resource,
                      'instance', r1,
                      ended_at=utils.datetime_utc(2010, 1, 1, 1, 1, 1))
def test_counter(self, utcnow):
    metric_name = "test_counter"
    metric_key = metric_name + "|c"
    utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 58, 36)
    self.server.datagram_received(
        ("%s:1|c" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()

    r = self.stats.indexer.get_resource('generic',
                                        self.conf.statsd.resource_id,
                                        with_metrics=True)
    metric = r.get_metric(metric_key)
    self.assertIsNotNone(metric)
    metrics = self.stats.storage.list_metric_with_measures_to_process(
        None, None, full=True)
    self.stats.storage.process_background_tasks(self.stats.indexer,
                                                metrics, sync=True)
    measures = self.stats.storage.get_measures(metric)
    self.assertEqual([(utils.datetime_utc(2015, 1, 7), 86400.0, 1.0),
                      (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0),
                      (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0)],
                     measures)

    utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 59, 37)
    self.server.datagram_received(
        ("%s:45|c" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.server.datagram_received(
        ("%s:2|c|@0.2" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()
    metrics = self.stats.storage.list_metric_with_measures_to_process(
        None, None, full=True)
    self.stats.storage.process_background_tasks(self.stats.indexer,
                                                metrics, sync=True)
    measures = self.stats.storage.get_measures(metric)
    self.assertEqual(
        [(utils.datetime_utc(2015, 1, 7), 86400.0, 28),
         (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 28),
         (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0),
         (utils.datetime_utc(2015, 1, 7, 13, 59), 60.0, 55.0)],
        measures)
def test_counter(self, utcnow):
    metric_name = "test_counter"
    metric_key = metric_name + "|c"
    utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 58, 36)
    self.server.datagram_received(
        ("%s:1|c" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()

    r = self.stats.indexer.get_resource('generic',
                                        self.conf.statsd.resource_id,
                                        with_metrics=True)
    metric = r.get_metric(metric_key)
    self.assertIsNotNone(metric)
    metrics = self.stats.storage.list_metric_with_measures_to_process(
        None, None, full=True)
    self.stats.storage.process_background_tasks(
        self.stats.indexer, metrics, sync=True)
    measures = self.stats.storage.get_measures(metric)
    self.assertEqual([
        (utils.datetime_utc(2015, 1, 7), 86400.0, 1.0),
        (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 1.0),
        (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0)], measures)

    utcnow.return_value = datetime.datetime(2015, 1, 7, 13, 59, 37)
    self.server.datagram_received(
        ("%s:45|c" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.server.datagram_received(
        ("%s:2|c|@0.2" % metric_name).encode('ascii'),
        ("127.0.0.1", 12345))
    self.stats.flush()
    metrics = self.stats.storage.list_metric_with_measures_to_process(
        None, None, full=True)
    self.stats.storage.process_background_tasks(
        self.stats.indexer, metrics, sync=True)
    measures = self.stats.storage.get_measures(metric)
    self.assertEqual([
        (utils.datetime_utc(2015, 1, 7), 86400.0, 28),
        (utils.datetime_utc(2015, 1, 7, 13), 3600.0, 28),
        (utils.datetime_utc(2015, 1, 7, 13, 58), 60.0, 1.0),
        (utils.datetime_utc(2015, 1, 7, 13, 59), 60.0, 55.0)], measures)
def _test_counters(self, metrics, utcnow):
    """This creates two metric names for derived types (collectd counter)."""
    utcnow.return_value = utils.datetime_utc(2017, 1, 10, 13, 58, 36)
    self.server.process_collectd_message(metrics[0])
    self.stats.flush()
    metric_in_json = ujson.loads(metrics[0])
    metric_names = []
    for metric in metric_in_json:
        for index, value in enumerate(metric["values"]):
            metric_names.append(
                amqp1d.CollectdStats.serialize_identifier(index, metric))
    host = metric_in_json[0]["host"]
    resources = self.stats.indexer.list_resources(
        self.conf.amqp1d.resource_name,
        attribute_filter={"=": {"host": host}})
    self.assertIsNotNone(resources)
    resource = self.stats.indexer.get_resource(
        self.conf.amqp1d.resource_name, resources[0].id,
        with_metrics=True)
    self.assertIsNotNone(resource)
    for metric_name in metric_names:
        metric = resource.get_metric(metric_name)
        self.assertIsNotNone(metric)
        self.storage.process_new_measures(self.stats.indexer,
                                          self.stats.incoming,
                                          [str(metric.id)], sync=True)
        measures = self.storage.get_measures(metric)
        self.assertEqual([
            (datetime64(2017, 1, 10), numpy.timedelta64(1, 'D'), 0),
            (datetime64(2017, 1, 10, 13), numpy.timedelta64(1, 'h'), 0),
            (datetime64(2017, 1, 10, 13, 58),
             numpy.timedelta64(1, 'm'), 0),
        ], measures)
def test_create_resource_with_start_timestamp(self):
    r1 = uuid.uuid4()
    ts = utils.datetime_utc(2014, 1, 1, 23, 34, 23, 1234)
    user = uuid.uuid4()
    project = uuid.uuid4()
    rc = self.index.create_resource(
        'generic', r1, user, project, started_at=ts)
    self.assertEqual({"id": r1,
                      "revision_start": rc.revision_start,
                      "revision_end": None,
                      "created_by_user_id": user,
                      "created_by_project_id": project,
                      "user_id": None,
                      "project_id": None,
                      "started_at": ts,
                      "ended_at": None,
                      "type": "generic",
                      "metrics": {}}, rc.jsonify())
    r = self.index.get_resource('generic', r1, with_metrics=True)
    self.assertEqual(rc, r)
def test_get_measures(self):
    # This is to make gordc safer
    self.assertIsNotNone(
        self.storage._get_metric_archive(self.metric, "mean"))
    self.upgrade()
    self.assertEqual([(utils.datetime_utc(2014, 1, 1), 86400, 5),
                      (utils.datetime_utc(2014, 1, 1, 12), 3600, 5),
                      (utils.datetime_utc(2014, 1, 1, 12), 300, 5)],
                     self.storage.get_measures(self.metric))
    self.assertEqual([(utils.datetime_utc(2014, 1, 1), 86400, 6),
                      (utils.datetime_utc(2014, 1, 1, 12), 3600, 6),
                      (utils.datetime_utc(2014, 1, 1, 12), 300, 6)],
                     self.storage.get_measures(self.metric,
                                               aggregation='max'))
    self.assertRaises(storage.AggregationDoesNotExist,
                      self.storage._get_metric_archive,
                      self.metric, "mean")
def test_get_measures(self):
    # This is to make gordc safer
    self.assertIsNotNone(self.storage._get_metric_archive(
        self.metric, "mean"))
    self.upgrade()
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400, 5),
        (utils.datetime_utc(2014, 1, 1, 12), 3600, 5),
        (utils.datetime_utc(2014, 1, 1, 12), 300, 5),
    ], self.storage.get_measures(self.metric))
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400, 6),
        (utils.datetime_utc(2014, 1, 1, 12), 3600, 6),
        (utils.datetime_utc(2014, 1, 1, 12), 300, 6),
    ], self.storage.get_measures(self.metric, aggregation='max'))
    self.assertRaises(
        storage.AggregationDoesNotExist,
        self.storage._get_metric_archive,
        self.metric, "mean")
def test_resize_policy(self):
    name = str(uuid.uuid4())
    ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
    self.index.create_archive_policy(ap)
    m = storage.Metric(uuid.uuid4(), ap)
    self.index.create_metric(m.id, str(uuid.uuid4()), str(uuid.uuid4()),
                             name)
    self.storage.add_measures(m, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 0), 1),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 1),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 10), 1),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
    ], self.storage.get_measures(m))
    # expand to more points
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
    self.storage.add_measures(m, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 15), 1),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
    ], self.storage.get_measures(m))
    # shrink timespan
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
    # unchanged after update if no samples
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 5), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 10), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
    ], self.storage.get_measures(m))
    # drop points
    self.storage.add_measures(m, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 20), 1),
    ])
    self.storage.process_background_tasks(self.index, sync=True)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 15), 5.0, 1.0),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 20), 5.0, 1.0),
    ], self.storage.get_measures(m))
def test_add_and_get_measures(self):
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.storage.process_background_tasks(self.index, sync=True)

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
    ], self.storage.get_measures(self.metric))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
    ], self.storage.get_measures(
        self.metric,
        from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 0)))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
    ], self.storage.get_measures(
        self.metric,
        to_timestamp=datetime.datetime(2014, 1, 1, 12, 6, 0)))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
    ], self.storage.get_measures(
        self.metric,
        to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10),
        from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10)))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
    ], self.storage.get_measures(
        self.metric,
        from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0),
        to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2)))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
    ], self.storage.get_measures(
        self.metric,
        from_timestamp=timeutils.parse_isotime("2014-1-1 13:00:00+01:00"),
        to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2)))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
    ], self.storage.get_measures(
        self.metric,
        from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0),
        to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2),
        granularity=3600.0))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
    ], self.storage.get_measures(
        self.metric,
        from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0),
        to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2),
        granularity=300.0))

    self.assertRaises(storage.GranularityDoesNotExist,
                      self.storage.get_measures,
                      self.metric,
                      granularity=42)
def test_list_resources_started_after_ended_before(self):
    # NOTE(jd) So this test is a bit fuzzy right now as we use the same
    # database for all tests and the tests are running concurrently, but
    # for now it'll be better than nothing.
    r1 = uuid.uuid4()
    user = str(uuid.uuid4())
    project = str(uuid.uuid4())
    g = self.index.create_resource(
        'generic', r1, user, project,
        started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23),
        ended_at=utils.datetime_utc(2000, 1, 3, 23, 23, 23))
    r2 = uuid.uuid4()
    mgr = self.index.get_resource_type_schema()
    resource_type = str(uuid.uuid4())
    self.index.create_resource_type(
        mgr.resource_type_from_dict(resource_type, {}, 'creating'))
    i = self.index.create_resource(
        resource_type, r2, user, project,
        started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23),
        ended_at=utils.datetime_utc(2000, 1, 4, 23, 23, 23))
    resources = self.index.list_resources(
        'generic',
        attribute_filter={
            "and": [
                {">=": {"started_at":
                        utils.datetime_utc(2000, 1, 1, 23, 23, 23)}},
                {"<": {"ended_at":
                       utils.datetime_utc(2000, 1, 5, 23, 23, 23)}},
            ]})
    self.assertGreaterEqual(len(resources), 2)
    g_found = False
    i_found = False
    for r in resources:
        if r.id == r1:
            self.assertEqual(g, r)
            g_found = True
        elif r.id == r2:
            i_found = True
        if i_found and g_found:
            break
    else:
        self.fail("Some resources were not found")

    resources = self.index.list_resources(
        resource_type,
        attribute_filter={
            ">=": {
                "started_at": datetime.datetime(2000, 1, 1, 23, 23, 23),
            },
        })
    self.assertGreaterEqual(len(resources), 1)
    for r in resources:
        if r.id == r2:
            self.assertEqual(i, r)
            break
    else:
        self.fail("Some resources were not found")

    resources = self.index.list_resources(
        'generic',
        attribute_filter={
            "<": {
                "ended_at": datetime.datetime(1999, 1, 1, 23, 23, 23),
            },
        })
    self.assertEqual(0, len(resources))
def test_add_and_get_cross_metric_measures(self):
    metric2, __ = self._create_metric()
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.storage.add_measures(metric2, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 41), 2),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 10, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 4),
    ])
    self.storage.process_background_tasks(self.index, sync=True)

    values = self.storage.get_cross_metric_measures([self.metric, metric2])
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 12.5),
        (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0),
    ], values)

    values = self.storage.get_cross_metric_measures(
        [self.metric, metric2],
        from_timestamp=utils.to_timestamp('2014-01-01 12:10:00'))
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0),
    ], values)

    values = self.storage.get_cross_metric_measures(
        [self.metric, metric2],
        to_timestamp=utils.to_timestamp('2014-01-01 12:05:00'))
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
    ], values)

    values = self.storage.get_cross_metric_measures(
        [self.metric, metric2],
        to_timestamp=utils.to_timestamp('2014-01-01 12:10:10'),
        from_timestamp=utils.to_timestamp('2014-01-01 12:10:10'))
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 24.0),
    ], values)

    values = self.storage.get_cross_metric_measures(
        [self.metric, metric2],
        from_timestamp=utils.to_timestamp('2014-01-01 12:00:00'),
        to_timestamp=utils.to_timestamp('2014-01-01 12:00:01'))
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
    ], values)

    values = self.storage.get_cross_metric_measures(
        [self.metric, metric2],
        from_timestamp=utils.to_timestamp('2014-01-01 12:00:00'),
        to_timestamp=utils.to_timestamp('2014-01-01 12:00:01'),
        granularity=300.0)
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
    ], values)

def test_updated_measures(self):
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
    ])
    self.trigger_processing(self.storage, self.index)

    # Posting more measures into already-aggregated periods must update
    # the existing aggregates for every stored aggregation method.
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.trigger_processing(self.storage, self.index)

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
    ], self.storage.get_measures(self.metric))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 69),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 69.0),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 42.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
    ], self.storage.get_measures(self.metric, aggregation='max'))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 4),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 4),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 4.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
    ], self.storage.get_measures(self.metric, aggregation='min'))

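# trigger_processing() is called above but not defined in this excerpt.
# Below is a minimal sketch of what such a helper could look like,
# assuming the same process_background_tasks(index, sync=True) API that
# the other tests here exercise directly; the actual helper in the test
# base class may differ.
def trigger_processing(self, storage, index):
    with mock.patch.object(index, 'get_metrics',
                           return_value=[self.metric]):
        # Process the posted measures synchronously so the assertions
        # that follow observe fully aggregated data.
        storage.process_background_tasks(index, sync=True)
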
def test_list_resources_started_after_ended_before(self):
    # NOTE(jd) So this test is a bit fuzzy right now as we use the same
    # database for all tests and the tests are running concurrently, but
    # for now it'll be better than nothing.
    r1 = uuid.uuid4()
    user = uuid.uuid4()
    project = uuid.uuid4()
    g = self.index.create_resource(
        'generic', r1, user, project,
        started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23),
        ended_at=utils.datetime_utc(2000, 1, 3, 23, 23, 23))
    r2 = uuid.uuid4()
    i = self.index.create_resource(
        'instance', r2, user, project,
        flavor_id="123",
        image_ref="foo",
        host="dwq",
        display_name="foobar",
        started_at=utils.datetime_utc(2000, 1, 1, 23, 23, 23),
        ended_at=utils.datetime_utc(2000, 1, 4, 23, 23, 23))
    resources = self.index.list_resources(
        'generic',
        attribute_filter={
            "and": [
                {">=": {"started_at":
                        utils.datetime_utc(2000, 1, 1, 23, 23, 23)}},
                {"<": {"ended_at":
                       utils.datetime_utc(2000, 1, 5, 23, 23, 23)}},
            ]})
    self.assertGreaterEqual(len(resources), 2)

    g_found = False
    i_found = False
    for r in resources:
        if r.id == r1:
            self.assertEqual(g, r)
            g_found = True
        elif r.id == r2:
            i_found = True
        if i_found and g_found:
            break
    else:
        self.fail("Some resources were not found")

    resources = self.index.list_resources(
        'instance',
        attribute_filter={
            ">=": {
                "started_at": datetime.datetime(2000, 1, 1, 23, 23, 23)
            },
        })
    self.assertGreaterEqual(len(resources), 1)
    for r in resources:
        if r.id == r2:
            self.assertEqual(i, r)
            break
    else:
        self.fail("Some resources were not found")

    resources = self.index.list_resources(
        'generic',
        attribute_filter={
            "<": {
                "ended_at": datetime.datetime(1999, 1, 1, 23, 23, 23)
            },
        })
    self.assertEqual(0, len(resources))

def test_add_and_get_measures(self):
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
    ])
    with mock.patch.object(self.index, 'get_metrics') as f:
        f.return_value = [self.metric]
        self.storage.process_background_tasks(self.index)

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
    ], self.storage.get_measures(self.metric))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 10), 300.0, 44.0),
    ], self.storage.get_measures(
        self.metric,
        from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 0)))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1), 86400.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5), 300.0, 23.0),
    ], self.storage.get_measures(
        self.metric,
        to_timestamp=datetime.datetime(2014, 1, 1, 12, 6, 0)))

    self.assertEqual(
        [],
        self.storage.get_measures(
            self.metric,
            to_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10),
            from_timestamp=datetime.datetime(2014, 1, 1, 12, 10, 10)))

    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12), 3600.0, 39.75),
        (utils.datetime_utc(2014, 1, 1, 12), 300.0, 69.0),
    ], self.storage.get_measures(
        self.metric,
        from_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 0),
        to_timestamp=datetime.datetime(2014, 1, 1, 12, 0, 2)))

def test_add_and_get_cross_metric_measures(self):
    metric2 = storage.Metric(uuid.uuid4(), self.archive_policies['low'])
    self.storage.add_measures(self.metric, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 1), 69),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 31), 42),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 9, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.storage.add_measures(metric2, [
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 0, 5), 9),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 7, 41), 2),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 10, 31), 4),
        storage.Measure(datetime.datetime(2014, 1, 1, 12, 13, 10), 4),
    ])
    with mock.patch.object(self.index, 'get_metrics') as f:
        f.return_value = [self.metric, metric2]
        self.storage.process_background_tasks(self.index)

    values = self.storage.get_cross_metric_measures([self.metric, metric2])
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
        (utils.datetime_utc(2014, 1, 1, 12, 5, 0), 300.0, 12.5),
        (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0),
    ], values)

    values = self.storage.get_cross_metric_measures(
        [self.metric, metric2],
        from_timestamp='2014-01-01 12:10:00')
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 10, 0), 300.0, 24.0),
    ], values)

    values = self.storage.get_cross_metric_measures(
        [self.metric, metric2],
        to_timestamp='2014-01-01 12:05:00')
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 0, 0, 0), 86400.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
    ], values)

    values = self.storage.get_cross_metric_measures(
        [self.metric, metric2],
        to_timestamp='2014-01-01 12:10:10',
        from_timestamp='2014-01-01 12:10:10')
    self.assertEqual([], values)

    values = self.storage.get_cross_metric_measures(
        [self.metric, metric2],
        from_timestamp='2014-01-01 12:00:00',
        to_timestamp='2014-01-01 12:00:01')
    self.assertEqual([
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 3600.0, 22.25),
        (utils.datetime_utc(2014, 1, 1, 12, 0, 0), 300.0, 39.0),
    ], values)