def test_corrupted_data(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing()

    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 13, 0, 1), 1),
    ])

    with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize',
                    side_effect=carbonara.InvalidData()):
        with mock.patch('gnocchi.carbonara.BoundTimeSerie.unserialize',
                        side_effect=carbonara.InvalidData()):
            self.trigger_processing()

    granularities = [
        numpy.timedelta64(1, 'D'),
        numpy.timedelta64(1, 'h'),
        numpy.timedelta64(5, 'm'),
    ]
    m = self.storage.get_measures(self.metric, granularities)
    self.assertIn(
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 1), m)
    self.assertIn(
        (datetime64(2014, 1, 1, 13), numpy.timedelta64(1, 'h'), 1), m)
    self.assertIn(
        (datetime64(2014, 1, 1, 13), numpy.timedelta64(5, 'm'), 1), m)
def treat_metric(self, metric_name, metric_type, value, sampling):
    metric_name += "|" + metric_type
    if metric_type == "ms":
        if sampling is not None:
            raise ValueError(
                "Invalid sampling for ms: `%d`, should be none"
                % sampling)
        self.times[metric_name] = incoming.Measure(
            utils.dt_in_unix_ns(utils.utcnow()), value)
    elif metric_type == "g":
        if sampling is not None:
            raise ValueError(
                "Invalid sampling for g: `%d`, should be none"
                % sampling)
        self.gauges[metric_name] = incoming.Measure(
            utils.dt_in_unix_ns(utils.utcnow()), value)
    elif metric_type == "c":
        sampling = 1 if sampling is None else sampling
        if metric_name in self.counters:
            current_value = self.counters[metric_name].value
        else:
            current_value = 0
        self.counters[metric_name] = incoming.Measure(
            utils.dt_in_unix_ns(utils.utcnow()),
            current_value + (value * (1 / sampling)))
    # TODO(jd) Support "set" type
    # elif metric_type == "s":
    #     pass
    else:
        raise ValueError("Unknown metric type `%s'" % metric_type)
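# A minimal sketch (hypothetical, not part of the module) of how a raw
# statsd packet could be parsed into treat_metric() calls; parse_packet()
# and its argument names are assumptions for illustration only.
def parse_packet(packet, handler):
    for line in packet.splitlines():
        # statsd line format: <name>:<value>|<type>[|@<sample_rate>]
        name, rest = line.split(":", 1)
        parts = rest.split("|")
        sampling = float(parts[2][1:]) if len(parts) > 2 else None
        handler.treat_metric(name, parts[1], float(parts[0]), sampling)

# e.g. parse_packet("cpu:2|c|@0.1\nlatency:320|ms", handler) increments
# the counter stored under "cpu|c" by 2 * (1 / 0.1) == 20 and records a
# timing measure under "latency|ms".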
def treat_metric(self, host, metric_name, metric_type, value):
    """Collectd.

    Statistics in collectd consist of a value list, whose values can be
    one of:

    * Derive: used for values where the change since the value was last
      read is of interest; can be used to calculate and store a rate.
    * Counter: similar to derive values, but takes the possibility of a
      counter wrap-around into consideration.
    * Gauge: used for values that are stored as is.
    * Absolute: used for counters that are reset after reading.
    """
    if metric_type == "absolute":
        if host not in self.absolute:
            self.absolute[host] = {}
        self.absolute[host][metric_name] = incoming.Measure(
            utils.dt_in_unix_ns(utils.utcnow()), value)
    elif metric_type == "gauge":
        if host not in self.gauges:
            self.gauges[host] = {}
        self.gauges[host][metric_name] = incoming.Measure(
            utils.dt_in_unix_ns(utils.utcnow()), value)
    elif metric_type in ("counter", "derive"):
        if host not in self.counters:
            self.counters[host] = {}
        self.counters[host][metric_name] = incoming.Measure(
            utils.dt_in_unix_ns(utils.utcnow()), value)
    else:
        raise ValueError("Unknown metric type '%s'" % metric_type)
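# Illustrative calls (the handler object is assumed); each metric type
# lands in its own per-host dictionary:
#
#   handler.treat_metric("web01", "cpu.0.idle", "gauge", 98.2)
#   handler.treat_metric("web01", "if.eth0.rx", "derive", 1024)
#
# The first stores a Measure under self.gauges["web01"]["cpu.0.idle"],
# the second under self.counters["web01"]["if.eth0.rx"].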
def test_delete_old_measures(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.trigger_processing()

    granularities = [
        numpy.timedelta64(1, 'D'),
        numpy.timedelta64(1, 'h'),
        numpy.timedelta64(5, 'm'),
    ]
    self.assertEqual([
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ], self.storage.get_measures(self.metric, granularities))

    # One year later…
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2015, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing()

    self.assertEqual([
        (datetime64(2015, 1, 1), numpy.timedelta64(1, 'D'), 69),
        (datetime64(2015, 1, 1, 12), numpy.timedelta64(1, 'h'), 69),
        (datetime64(2015, 1, 1, 12), numpy.timedelta64(5, 'm'), 69),
    ], self.storage.get_measures(self.metric, granularities))

    self.assertEqual(
        {carbonara.SplitKey(numpy.datetime64(1244160000, 's'),
                            numpy.timedelta64(1, 'D'))},
        self.storage._list_split_keys_for_metric(
            self.metric, "mean", numpy.timedelta64(1, 'D')))
    self.assertEqual(
        {carbonara.SplitKey(numpy.datetime64(1412640000, 's'),
                            numpy.timedelta64(1, 'h'))},
        self.storage._list_split_keys_for_metric(
            self.metric, "mean", numpy.timedelta64(1, 'h')))
    self.assertEqual(
        {carbonara.SplitKey(numpy.datetime64(1419120000, 's'),
                            numpy.timedelta64(5, 'm'))},
        self.storage._list_split_keys_for_metric(
            self.metric, "mean", numpy.timedelta64(5, 'm')))
def test_updated_measures(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
    ])
    self.trigger_processing()

    granularities = [
        numpy.timedelta64(1, 'D'),
        numpy.timedelta64(1, 'h'),
        numpy.timedelta64(5, 'm'),
    ]
    self.assertEqual([
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 55.5),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 55.5),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 42.0),
    ], self.storage.get_measures(self.metric, granularities))

    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.trigger_processing()

    self.assertEqual([
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ], self.storage.get_measures(self.metric, granularities))

    self.assertEqual([
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 69),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 69.0),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 42.0),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ], self.storage.get_measures(self.metric, granularities,
                                 aggregation='max'))

    self.assertEqual([
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 4),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 4),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 4.0),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ], self.storage.get_measures(self.metric, granularities,
                                 aggregation='min'))
def test_add_and_get_cross_metric_measures_with_holes(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 5, 31), 8),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.incoming.add_measures(metric2, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 2),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 6),
        incoming.Measure(datetime64(2014, 1, 1, 12, 13, 10), 2),
    ])
    self.trigger_processing([str(self.metric.id), str(metric2.id)])

    values = cross_metric.get_cross_metric_measures(
        self.storage, [self.metric, metric2])
    self.assertEqual([
        (datetime64(2014, 1, 1, 0, 0, 0),
         numpy.timedelta64(1, 'D'), 18.875),
        (datetime64(2014, 1, 1, 12, 0, 0),
         numpy.timedelta64(1, 'h'), 18.875),
        (datetime64(2014, 1, 1, 12, 0, 0),
         numpy.timedelta64(5, 'm'), 39.0),
        (datetime64(2014, 1, 1, 12, 5, 0),
         numpy.timedelta64(5, 'm'), 11.0),
        (datetime64(2014, 1, 1, 12, 10, 0),
         numpy.timedelta64(5, 'm'), 22.0),
    ], values)
def test_get_measure_unknown_aggregation(self):
    self.incoming.add_measures(self.metric, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.assertRaises(storage.AggregationDoesNotExist,
                      self.storage.get_measures,
                      self.metric, aggregation='last')
def test_get_measure_unknown_aggregation(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    granularities = [
        numpy.timedelta64(1, 'D'),
        numpy.timedelta64(1, 'h'),
        numpy.timedelta64(5, 'm'),
    ]
    self.assertEqual(
        [],
        self.storage.get_measures(self.metric, granularities,
                                  aggregation='last'))
def test_aborted_initial_processing(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 5),
    ])
    with mock.patch.object(self.storage, '_store_unaggregated_timeserie',
                           side_effect=Exception):
        try:
            self.trigger_processing()
        except Exception:
            pass

    with mock.patch('gnocchi.storage.LOG') as LOG:
        self.trigger_processing()
    self.assertFalse(LOG.error.called)

    granularities = [
        numpy.timedelta64(1, 'D'),
        numpy.timedelta64(1, 'h'),
        numpy.timedelta64(5, 'm'),
    ]
    m = self.storage.get_measures(self.metric, granularities)
    self.assertIn(
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 5.0), m)
    self.assertIn(
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 5.0), m)
    self.assertIn(
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 5.0), m)
def test_iter_on_sacks_to_process(self):
    if (self.incoming.iter_on_sacks_to_process ==
            incoming.IncomingDriver.iter_on_sacks_to_process):
        self.skipTest("Incoming driver does not implement "
                      "iter_on_sacks_to_process")

    found = threading.Event()

    sack_to_find = self.incoming.sack_for_metric(self.metric.id)

    def _iter_on_sacks_to_process():
        for sack in self.incoming.iter_on_sacks_to_process():
            self.assertIsInstance(sack, int)
            if sack == sack_to_find:
                found.set()
                break

    finder = threading.Thread(target=_iter_on_sacks_to_process)
    finder.daemon = True
    finder.start()

    # Try for 30s to get a notification about this sack
    for _ in range(30):
        if found.wait(timeout=1):
            break
        # NOTE(jd) Retry to send measures. It cannot be done only once as
        # there might be a race condition between the threads
        self.incoming.finish_sack_processing(sack_to_find)
        self.incoming.add_measures(self.metric, [
            incoming.Measure(numpy.datetime64("2014-01-01 12:00:01"), 69),
        ])
    else:
        self.fail("Notification for metric not received")
def test_add_measures_big(self):
    m, __ = self._create_metric('high')
    self.incoming.add_measures(m, [
        incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100)
        for i in six.moves.range(0, 60)
        for j in six.moves.range(0, 60)])
    self.trigger_processing([str(m.id)])

    self.assertEqual(3661, len(self.storage.get_measures(m)))
def test_delete_nonempty_metric_unprocessed(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
    ])
    self.index.delete_metric(self.metric.id)
    self.trigger_processing()
    __, __, details = self.incoming._build_report(True)
    self.assertNotIn(str(self.metric.id), details)
    self.chef.expunge_metrics(10000, sync=True)
def test_delete_expunge_metric(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing()
    self.index.delete_metric(self.metric.id)
    self.storage.expunge_metrics(self.incoming, self.index, sync=True)
    self.assertRaises(indexer.NoSuchMetric, self.index.delete_metric,
                      self.metric.id)
def _inject(inc, coord, store, idx, metrics, measures,
            archive_policy_name="low", process=False, interval=None):
    LOG.info("Creating %d metrics", metrics)
    with utils.StopWatch() as sw:
        metric_ids = [
            idx.create_metric(uuid.uuid4(), "admin",
                              archive_policy_name).id
            for _ in range(metrics)
        ]
    LOG.info("Created %d metrics in %.2fs", metrics, sw.elapsed())

    LOG.info("Generating %d measures per metric for %d metrics… ",
             measures, metrics)
    now = numpy.datetime64(utils.utcnow())
    with utils.StopWatch() as sw:
        measures = {
            m_id: [
                # numpy.timedelta64 takes (value, unit) positionally;
                # it has no `seconds=` keyword.
                incoming.Measure(now + numpy.timedelta64(s, 's'),
                                 random.randint(-999999, 999999))
                for s in range(measures)
            ]
            for m_id in metric_ids
        }
    LOG.info("… done in %.2fs", sw.elapsed())

    interval_timer = utils.StopWatch().start()

    while True:
        interval_timer.reset()
        with utils.StopWatch() as sw:
            inc.add_measures_batch(measures)
        total_measures = sum(map(len, measures.values()))
        LOG.info("Pushed %d measures in %.2fs",
                 total_measures, sw.elapsed())

        if process:
            c = chef.Chef(coord, inc, idx, store)

            with utils.StopWatch() as sw:
                for s in inc.iter_sacks():
                    c.process_new_measures_for_sack(s, blocking=True)
            LOG.info("Processed %d sacks in %.2fs",
                     inc.NUM_SACKS, sw.elapsed())
            LOG.info("Speed: %.2f measures/s",
                     float(total_measures) / sw.elapsed())

        if interval is None:
            break
        time.sleep(max(0, interval - interval_timer.elapsed()))
    return total_measures
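# Hypothetical invocation (the driver objects are assumed to have been
# built from configuration elsewhere):
#
#   _inject(incoming_drv, coordinator, storage_drv, indexer_drv,
#           metrics=100, measures=60, process=True)
#
# This creates 100 metrics, batches 60 measures into each, processes
# every sack once, logs the measured throughput, and returns the number
# of measures pushed in the last iteration.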
def test_measures_reporting(self):
    m2, __ = self._create_metric('medium')
    for i in six.moves.range(60):
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, 0, i), 69),
        ])
        self.incoming.add_measures(m2.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, 0, i), 69),
        ])
    report = self.incoming.measures_report(True)
    self.assertIsInstance(report, dict)
    self.assertEqual(2, report['summary']['metrics'])
    self.assertEqual(120, report['summary']['measures'])
    self.assertIn('details', report)
    self.assertIsInstance(report['details'], dict)

    report = self.incoming.measures_report(False)
    self.assertIsInstance(report, dict)
    self.assertEqual(2, report['summary']['metrics'])
    self.assertEqual(120, report['summary']['measures'])
def test_search_value(self):
    metric2, __ = self._create_metric()
    self.incoming.add_measures(self.metric, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 5, 31), 8),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 42),
    ])
    self.incoming.add_measures(metric2, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 9),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 2),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 6),
        incoming.Measure(datetime64(2014, 1, 1, 12, 13, 10), 2),
    ])
    self.trigger_processing([str(self.metric.id), str(metric2.id)])

    self.assertEqual(
        {metric2: [],
         self.metric: [
             (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 33),
             (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 33),
             (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69),
             (datetime64(2014, 1, 1, 12, 10),
              numpy.timedelta64(5, 'm'), 42),
         ]},
        self.storage.search_value([metric2, self.metric], {u"≥": 30}))

    self.assertEqual(
        {metric2: [], self.metric: []},
        self.storage.search_value(
            [metric2, self.metric],
            {u"∧": [{u"eq": 100}, {u"≠": 50}]}))
def test_delete_nonempty_metric(self):
    self.incoming.add_measures(self.metric, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing()
    self.storage.delete_metric(self.incoming, self.metric, sync=True)
    self.trigger_processing()
    self.assertEqual([], self.storage.get_measures(self.metric))
    self.assertRaises(storage.MetricDoesNotExist,
                      self.storage._get_unaggregated_timeserie,
                      self.metric)
def test_add_measures_update_subset(self):
    m, m_sql = self._create_metric('medium')
    measures = [
        incoming.Measure(datetime64(2014, 1, 6, i, j, 0), 100)
        for i in six.moves.range(2)
        for j in six.moves.range(0, 60, 2)
    ]
    self.incoming.add_measures(m.id, measures)
    self.trigger_processing([str(m.id)])

    # add measure to end, in same aggregate time as last point.
    new_point = datetime64(2014, 1, 6, 1, 58, 1)
    self.incoming.add_measures(m.id, [incoming.Measure(new_point, 100)])

    with mock.patch.object(self.incoming, 'add_measures') as c:
        self.trigger_processing([str(m.id)])
    for __, args, __ in c.mock_calls:
        self.assertEqual(
            list(args[3])[0][0],
            carbonara.round_timestamp(new_point,
                                      args[1].granularity * 10e8))
def test_list_metric_with_measures_to_process(self):
    metrics = tests_utils.list_all_incoming_metrics(self.incoming)
    self.assertEqual(set(), metrics)
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
    ])
    metrics = tests_utils.list_all_incoming_metrics(self.incoming)
    self.assertEqual(set([str(self.metric.id)]), metrics)
    self.trigger_processing()
    metrics = tests_utils.list_all_incoming_metrics(self.incoming)
    self.assertEqual(set([]), metrics)
def todo():
    metric = index.create_metric(
        uuid.uuid4(),
        creator=conf.creator,
        archive_policy_name=conf.archive_policy_name)

    for _ in six.moves.range(conf.batch_of_measures):
        measures = [
            incoming.Measure(
                utils.dt_in_unix_ns(utils.utcnow()),
                random.random())
            for __ in six.moves.range(conf.measures_per_batch)]
        instore.add_measures(metric, measures)
def on_message(self, event):
    json_message = ujson.loads(event.message.body)
    timestamp = utils.dt_in_unix_ns(utils.utcnow())
    measures_by_host_and_name = sorted((
        (message["host"],
         self._serialize_identifier(index, message),
         value)
        for message in json_message
        for index, value in enumerate(message["values"])
    ))
    for (host, name), values in itertools.groupby(
            measures_by_host_and_name, key=lambda x: x[0:2]):
        measures = (incoming.Measure(timestamp, v[2]) for v in values)
        self.processor.add_measures(host, name, measures)
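# For illustration only: a payload shape this handler would accept,
# inferred from the keys it reads ("host", "values"); real collectd JSON
# messages carry further fields (plugin, type, time, …) that
# _serialize_identifier() may consume.
#
#   [{"host": "web01", "values": [0.2, 0.8], ...},
#    {"host": "web02", "values": [42], ...}]
#
# Each element of "values" becomes one incoming.Measure, and measures
# are grouped by (host, serialized metric name) before being handed to
# the processor.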
def test_resize_policy(self):
    name = str(uuid.uuid4())
    ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
    self.index.create_archive_policy(ap)
    m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), name)
    m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
    self.incoming.add_measures(m.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 0), 1),
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 1),
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 10), 1),
    ])
    self.trigger_processing([str(m.id)])
    self.assertEqual([
        (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
    ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')]))

    # expand to more points
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
    m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
    self.incoming.add_measures(m.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 15), 1),
    ])
    self.trigger_processing([str(m.id)])
    self.assertEqual([
        (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1),
    ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')]))

    # shrink timespan
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
    m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
    self.assertEqual([
        (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1),
    ], self.storage.get_measures(m, [numpy.timedelta64(5, 's')]))
def _test_create_metric_and_data(self, data, spacing):
    metric = indexer.Metric(uuid.uuid4(), self.archive_policies['medium'])
    start_time = utils.datetime_utc(2014, 1, 1, 12)
    incr = datetime.timedelta(seconds=spacing)
    measures = [
        incoming.Measure(utils.dt_in_unix_ns(start_time + incr * n), val)
        for n, val in enumerate(data)
    ]
    self.index.create_metric(metric.id, str(uuid.uuid4()), 'medium')
    self.incoming.add_measures(metric, measures)
    metrics = tests_utils.list_all_incoming_metrics(self.incoming)
    self.storage.process_new_measures(
        self.index, self.incoming, metrics, sync=True)
    return metric
def test_add_measures_update_subset_split(self):
    m, m_sql = self._create_metric('medium')
    measures = [
        incoming.Measure(datetime64(2014, 1, 6, i, j, 0), 100)
        for i in six.moves.range(2)
        for j in six.moves.range(0, 60, 2)
    ]
    self.incoming.add_measures(m.id, measures)
    self.trigger_processing([str(m.id)])

    # add measure to end, in same aggregate time as last point.
    self.incoming.add_measures(
        m.id, [incoming.Measure(datetime64(2014, 1, 6, 1, 58, 1), 100)])

    with mock.patch.object(self.storage, '_store_metric_measures') as c:
        # should only resample last aggregate
        self.trigger_processing([str(m.id)])
    count = 0
    for call in c.mock_calls:
        # policy is 60 points and split is 48. should only update
        # 2nd half
        args = call[1]
        if (args[0] == m_sql
                and args[2] == 'mean'
                and args[1].sampling == numpy.timedelta64(1, 'm')):
            count += 1
    self.assertEqual(1, count)
def test_delete_nonempty_metric(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
    ])
    self.trigger_processing()
    self.storage._delete_metric(self.metric)
    self.trigger_processing()

    self.assertEqual([], self.storage.get_measures(self.metric, [
        numpy.timedelta64(1, 'D'),
        numpy.timedelta64(1, 'h'),
        numpy.timedelta64(5, 'm'),
    ]))
    self.assertRaises(storage.MetricDoesNotExist,
                      self.storage._get_unaggregated_timeserie,
                      self.metric)
def test_get_cross_metric_measures_unknown_aggregation(self):
    metric2 = indexer.Metric(uuid.uuid4(), self.archive_policies['low'])
    self.incoming.add_measures(self.metric, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.incoming.add_measures(metric2, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.assertRaises(storage.AggregationDoesNotExist,
                      cross_metric.get_cross_metric_measures,
                      self.storage, [self.metric, metric2],
                      aggregation='last')
def test_get_cross_metric_measures_unknown_granularity(self):
    metric2 = indexer.Metric(uuid.uuid4(), self.archive_policies['low'])
    self.incoming.add_measures(self.metric, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.incoming.add_measures(metric2, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.assertRaises(storage.GranularityDoesNotExist,
                      cross_metric.get_cross_metric_measures,
                      self.storage, [self.metric, metric2],
                      granularity=numpy.timedelta64(12345456, 'ms'))
def test_add_and_get_cross_metric_measures_different_archives(self):
    metric2 = indexer.Metric(
        uuid.uuid4(), self.archive_policies['no_granularity_match'])
    self.incoming.add_measures(self.metric, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.incoming.add_measures(metric2, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.assertRaises(cross_metric.MetricUnaggregatable,
                      cross_metric.get_cross_metric_measures,
                      self.storage, [self.metric, metric2])
def post_write(self, db="influxdb"):
    creator = pecan.request.auth_helper.get_current_user(pecan.request)
    tag_to_rid = pecan.request.headers.get(
        "X-Gnocchi-InfluxDB-Tag-Resource-ID",
        self.DEFAULT_TAG_RESOURCE_ID)

    while True:
        encoding, chunk = self._write_get_lines()

        # If chunk is empty then this is over.
        if not chunk:
            break

        # Compute now on a per-chunk basis (10e8 == 1e9, i.e. seconds
        # to nanoseconds)
        now = numpy.datetime64(int(time.time() * 10e8), 'ns')

        # resources = { resource_id: {
        #     metric_name: [ incoming.Measure(t, v), …], …
        # }, … }
        resources = collections.defaultdict(
            lambda: collections.defaultdict(list))

        for line_number, line in enumerate(chunk.split(b"\n")):
            # Ignore empty lines
            if not line:
                continue

            try:
                measurement, tags, fields, timestamp = (
                    line_protocol.parseString(line.decode()))
            except (UnicodeDecodeError, SyntaxError,
                    pyparsing.ParseException):
                api.abort(400, {
                    "cause": "Value error",
                    "detail": "line",
                    "reason": "Unable to parse line %d"
                              % (line_number + 1),
                })

            if timestamp is None:
                timestamp = now

            try:
                resource_id = tags.pop(tag_to_rid)
            except KeyError:
                api.abort(400, {
                    "cause": "Value error",
                    "detail": "key",
                    "reason": "Unable to find key `%s' in tags"
                              % (tag_to_rid),
                })

            tags_str = (("@" if tags else "")
                        + ",".join("%s=%s" % (k, tags[k])
                                   for k in sorted(tags)))

            for field_name, field_value in six.iteritems(fields):
                if isinstance(field_value, str):
                    # We do not support field values that are not
                    # numerical
                    continue

                # Metric name is:
                #   <measurement>.<field_key>@<tag_key>=<tag_value>,…
                # with tags ordered.
                # Replace "/" with "_" because Gnocchi does not
                # support / in metric names
                metric_name = (
                    measurement + "." + field_name + tags_str
                ).replace("/", "_")
                resources[resource_id][metric_name].append(
                    incoming.Measure(timestamp, field_value))

        measures_to_batch = {}
        for resource_name, metrics_and_measures in six.iteritems(
                resources):
            resource_id = utils.ResourceUUID(
                resource_name, creator=creator)
            LOG.debug("Getting metrics from resource `%s'",
                      resource_name)
            timeout = pecan.request.conf.api.operation_timeout
            metrics = (
                api.get_or_create_resource_and_metrics.retry_with(
                    stop=tenacity.stop_after_delay(timeout))(
                        creator, resource_id, resource_name,
                        metrics_and_measures.keys(), {}, db))

            for metric in metrics:
                api.enforce("post measures", metric)

            measures_to_batch.update(
                dict((metric.id, metrics_and_measures[metric.name])
                     for metric in metrics
                     if metric.name in metrics_and_measures))

        LOG.debug("Add measures batch for %d metrics",
                  len(measures_to_batch))
        pecan.request.incoming.add_measures_batch(measures_to_batch)
        pecan.response.status = 204

        if encoding != "chunked":
            return
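# A worked example of the naming scheme above (the input line is
# illustrative, not taken from the module). Assuming tag_to_rid resolves
# to "host", the line protocol payload
#
#   mem,host=web01,path=/var used=42,free=7
#
# names the resource "web01"; since the line carries no timestamp, both
# fields are stamped with the per-chunk `now`, and the resulting metric
# names are
#
#   mem.used@path=_var
#   mem.free@path=_var
#
# ("/" replaced by "_" because Gnocchi does not allow it in metric
# names).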
def test_add_and_get_measures(self):
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.trigger_processing()

    granularities = [
        numpy.timedelta64(1, 'D'),
        numpy.timedelta64(1, 'h'),
        numpy.timedelta64(5, 'm'),
    ]
    self.assertEqual([
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ], self.storage.get_measures(self.metric, granularities))

    self.assertEqual([
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ], self.storage.get_measures(
        self.metric, granularities,
        from_timestamp=datetime64(2014, 1, 1, 12, 10, 0)))

    self.assertEqual([
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0),
    ], self.storage.get_measures(
        self.metric, granularities,
        to_timestamp=datetime64(2014, 1, 1, 12, 6, 0)))

    self.assertEqual([
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ], self.storage.get_measures(
        self.metric, granularities,
        to_timestamp=datetime64(2014, 1, 1, 12, 10, 10),
        from_timestamp=datetime64(2014, 1, 1, 12, 10, 10)))

    self.assertEqual([
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
    ], self.storage.get_measures(
        self.metric, granularities,
        from_timestamp=datetime64(2014, 1, 1, 12, 0, 0),
        to_timestamp=datetime64(2014, 1, 1, 12, 0, 2)))

    self.assertEqual([
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
    ], self.storage.get_measures(
        self.metric, granularities,
        from_timestamp=datetime64(2014, 1, 1, 12),
        to_timestamp=datetime64(2014, 1, 1, 12, 0, 2)))

    self.assertEqual([
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
    ], self.storage.get_measures(
        self.metric,
        from_timestamp=datetime64(2014, 1, 1, 12, 0, 0),
        to_timestamp=datetime64(2014, 1, 1, 12, 0, 2),
        granularities=[numpy.timedelta64(1, 'h')]))

    self.assertEqual([
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
    ], self.storage.get_measures(
        self.metric,
        from_timestamp=datetime64(2014, 1, 1, 12, 0, 0),
        to_timestamp=datetime64(2014, 1, 1, 12, 0, 2),
        granularities=[numpy.timedelta64(5, 'm')]))

    self.assertRaises(
        storage.AggregationDoesNotExist,
        self.storage.get_measures, self.metric,
        granularities=[numpy.timedelta64(42, 's')])