def test_fetch_basic(self):
    """Fetch all points, then re-fetch from several equivalent timestamps."""
    ts = carbonara.AggregatedTimeSerie.from_data(
        timestamps=[datetime64(2014, 1, 1, 12, 0, 0),
                    datetime64(2014, 1, 1, 12, 0, 4),
                    datetime64(2014, 1, 1, 12, 0, 9)],
        values=[3, 5, 6],
        aggregation=carbonara.Aggregation(
            "mean", numpy.timedelta64(1, 's'), None))
    self.assertEqual(
        [(datetime64(2014, 1, 1, 12), 3),
         (datetime64(2014, 1, 1, 12, 0, 4), 5),
         (datetime64(2014, 1, 1, 12, 0, 9), 6)],
        list(ts.fetch()))
    # The same tail is expected whatever the representation of the
    # starting point: native datetime64, naive ISO 8601, or an
    # offset-bearing ISO 8601 that resolves to the same UTC instant.
    expected_tail = [(datetime64(2014, 1, 1, 12, 0, 4), 5),
                     (datetime64(2014, 1, 1, 12, 0, 9), 6)]
    for start in (
            datetime64(2014, 1, 1, 12, 0, 4),
            numpy.datetime64(iso8601.parse_date(
                "2014-01-01 12:00:04")),
            numpy.datetime64(iso8601.parse_date(
                "2014-01-01 13:00:04+01:00"))):
        self.assertEqual(expected_tail,
                         list(ts.fetch(from_timestamp=start)))
def _resample(ts, sampling, agg, derived=False):
    """Group *ts* by *sampling* and aggregate it with method *agg*.

    When *derived* is true, the grouped serie is converted to its
    rate of change before aggregation.
    """
    grouped_serie = ts.group_serie(sampling)
    if derived:
        grouped_serie = grouped_serie.derived()
    return carbonara.AggregatedTimeSerie.from_grouped_serie(
        grouped_serie, carbonara.Aggregation(agg, sampling, None))
def test_different_length_in_timestamps_and_data(self):
    """from_data must raise when timestamps and values differ in length."""
    timestamps = [datetime64(2014, 1, 1, 12, 0, 0),
                  datetime64(2014, 1, 1, 12, 0, 4),
                  datetime64(2014, 1, 1, 12, 0, 9)]
    # Only two values for three timestamps.
    self.assertRaises(
        ValueError,
        carbonara.AggregatedTimeSerie.from_data,
        carbonara.Aggregation('mean', numpy.timedelta64(3, 's'), None),
        timestamps,
        [3, 5])
def _resample_and_merge(ts, agg_dict):
    """Helper method that mimics _compute_splits_operations workflow."""
    previous = agg_dict.get('return')
    # Build a fresh aggregated serie for this batch of measures.
    agg_dict['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
        ts.group_serie(agg_dict['sampling']),
        carbonara.Aggregation(
            agg_dict['agg'], agg_dict['sampling'], None))
    # Fold the new serie into any previously computed result.
    if previous:
        previous.merge(agg_dict['return'])
        agg_dict['return'] = previous
def _get_measures_timeserie(storage, ref, granularity, *args, **kwargs):
    """Fetch the aggregated timeserie for *ref* at *granularity*.

    Falls back to an empty AggregatedTimeSerie when the storage backend
    reports the metric does not exist. Returns a ``(ref, data)`` tuple.
    """
    agg = ref.metric.archive_policy.get_aggregation(
        ref.aggregation, granularity)
    try:
        measures = storage.get_aggregated_measures(
            {ref.metric: [agg]}, *args, **kwargs)
        data = measures[ref.metric][agg]
    except gnocchi_storage.MetricDoesNotExist:
        data = carbonara.AggregatedTimeSerie(
            carbonara.Aggregation(ref.aggregation, granularity, None))
    return (ref, data)
def get_aggregations_for_method(self, method):
    """Return a list of aggregation for a method.

    List is sorted by granularity, desc.

    :param method: Aggregation method.
    """
    definitions = sorted(self.definition,
                         key=ATTRGETTER_GRANULARITY, reverse=True)
    return [carbonara.Aggregation(method, d.granularity, d.timespan)
            for d in definitions]
def handle_resample(agg, granularity, timestamps, values, is_aggregated,
                    references, sampling):
    """Resample each column of *values* to the new *sampling* period.

    Returns ``(sampling, timestamps, values, is_aggregated)`` where the
    timestamps and values reflect the resampled series.
    """
    # TODO(sileht): make a more optimised version that
    # compute the data across the whole matrix
    stacked = None
    result_timestamps = timestamps
    for column in values.T:
        serie = carbonara.AggregatedTimeSerie.from_data(
            carbonara.Aggregation(agg, None, None), timestamps, column)
        resampled = serie.resample(sampling)
        # Every column yields the same resampled grid; keep the last one.
        result_timestamps = resampled["timestamps"]
        row = [resampled["values"]]
        stacked = (numpy.array(row) if stacked is None
                   else numpy.concatenate((stacked, row)))
    return sampling, result_timestamps, stacked.T, is_aggregated
def get_aggregation(self, method, granularity):
    """Return the Aggregation for *method* at *granularity*.

    The timespan comes from the archive policy definition whose
    granularity matches; None is returned when no definition matches.
    """
    for definition in self.definition:
        if definition.granularity == granularity:
            return carbonara.Aggregation(
                method, definition.granularity, definition.timespan)
def aggregations(self):
    """Return every Aggregation of this policy.

    One entry per (definition, method) pair, ordered by granularity
    ascending.
    """
    ordered_definitions = sorted(self.definition,
                                 key=ATTRGETTER_GRANULARITY)
    return [carbonara.Aggregation(method, d.granularity, d.timespan)
            for d in ordered_definitions
            for method in self.aggregation_methods]