Example #1
    def _store_unaggregated_timeseries(self, metrics_and_data, version=3):
        """Store unaggregated timeseries.

        :param metrics_and_data: A list of (metric, serialized_data) tuples
        :param version: Storage engine data format version
        """
        utils.parallel_map(
            utils.return_none_on_failure(
                self._store_unaggregated_timeseries_unbatched),
            ((metric, data, version) for metric, data in metrics_and_data))
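Example #1 shields each unbatched store behind `utils.return_none_on_failure`, so one failing item cannot abort the rest of the batch. A minimal sketch of such a decorator, assuming it only needs to log the exception and substitute None (the logger name and message are assumptions, not Gnocchi's exact code):

import functools
import logging

LOG = logging.getLogger(__name__)


def return_none_on_failure(f):
    @functools.wraps(f)
    def _wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            # Deliberately broad: log the failure and keep going so
            # parallel_map can finish the remaining items.
            LOG.exception("Unexpected error, returning None")
            return None
    return _wrapper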
Example #2
    def add_measures_batch(self, metrics_and_measures):
        """Add a batch of measures for some metrics.

        :param metrics_and_measures: A dict where keys are metric objects
                                     and values are a list of
                                     :py:class:`gnocchi.incoming.Measure`.
        """
        utils.parallel_map(
            self._store_new_measures,
            ((metric, self._encode_measures(measures))
             for metric, measures in six.iteritems(metrics_and_measures)))
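A hypothetical call site for add_measures_batch, assuming `gnocchi.incoming.Measure` is a (timestamp, value) pair; `incoming_driver`, `metric_a` and `metric_b` stand in for a previously obtained driver and metric objects:

import datetime

from gnocchi import incoming

now = datetime.datetime.utcnow()
incoming_driver.add_measures_batch({
    metric_a: [incoming.Measure(now, 41.0),
               incoming.Measure(now, 42.0)],
    metric_b: [incoming.Measure(now, 3.14)],
})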
Example #3
    def add_measures_batch(self, metrics_and_measures):
        """Add a batch of measures for some metrics.

        :param metrics_and_measures: A dict where keys are metric objects
                                     and values are lists of
                                     :py:class:`gnocchi.incoming.Measure`.
        """
        utils.parallel_map(
            self._store_new_measures,
            ((metric, self._encode_measures(measures))
             for metric, measures
             in six.iteritems(metrics_and_measures)))
Example #4
    def _delete_metric_splits(self, metrics_keys_aggregations, version=3):
        """Delete splits of metrics.

        :param metrics_keys_aggregations: A dict where keys are
                                          `storage.Metric` and values are
                                          lists of (key, aggregation) tuples.
        """
        utils.parallel_map(
            utils.return_none_on_failure(self._delete_metric_splits_unbatched),
            ((metric, key, aggregation)
             for metric, keys_and_aggregations
             in six.iteritems(metrics_keys_aggregations)
             for key, aggregation in keys_and_aggregations))
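The nested generator expression in Example #4 flattens the dict into one (metric, key, aggregation) tuple per split, so parallel_map fans out per split rather than per metric. The same flattening on plain data:

splits = {"metric-1": [("key-1", "mean"), ("key-2", "max")],
          "metric-2": [("key-3", "mean")]}
flat = [(metric, key, agg)
        for metric, pairs in splits.items()
        for key, agg in pairs]
# [('metric-1', 'key-1', 'mean'), ('metric-1', 'key-2', 'max'),
#  ('metric-2', 'key-3', 'mean')]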
Example #5
    def test_parallel_map_four(self):
        utils.parallel_map.MAX_WORKERS = 4
        starmap = itertools.starmap
        with mock.patch("itertools.starmap") as sm:
            sm.side_effect = starmap
            self.assertEqual([1, 2, 3],
                             utils.parallel_map(lambda x: x, [[1], [2], [3]]))
            sm.assert_not_called()
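This test pins down parallel_map's contract: it applies the callable to each argument tuple, returns the results in input order, exposes a MAX_WORKERS knob, and does not go through itertools.starmap when a worker pool is in use. A minimal sketch consistent with that contract, assuming a concurrent.futures thread pool with a serial starmap fallback (the exact fallback condition is an assumption):

import itertools
from concurrent import futures


def parallel_map(fn, list_of_args):
    # Apply fn(*args) to every argument tuple, preserving input order.
    if parallel_map.MAX_WORKERS <= 1:
        # Serial fallback; this is the only path through
        # itertools.starmap, which is why the test above can assert
        # that starmap is never called when MAX_WORKERS is 4.
        return list(itertools.starmap(fn, list_of_args))
    with futures.ThreadPoolExecutor(
            max_workers=parallel_map.MAX_WORKERS) as executor:
        # executor.map yields results in submission order.
        return list(executor.map(lambda args: fn(*args), list_of_args))


parallel_map.MAX_WORKERS = 4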
Example #6
        def _map_add_measures(bound_timeserie):
            # NOTE(gordc): bound_timeserie is the entire set of
            # unaggregated measures matching the largest
            # granularity. The following takes only the points
            # affected by the new measures for each granularity.
            tstamp = max(bound_timeserie.first, measures['timestamps'][0])
            new_first_block_timestamp = bound_timeserie.first_block_timestamp()
            computed_points['number'] = len(bound_timeserie)
            for d in definition:
                ts = bound_timeserie.group_serie(
                    d.granularity,
                    carbonara.round_timestamp(tstamp, d.granularity))

                utils.parallel_map(
                    self._add_measures,
                    ((aggregation, d, metric, ts,
                      current_first_block_timestamp, new_first_block_timestamp)
                     for aggregation in agg_methods))
Example #7
    def _store_metric_splits(self, metrics_keys_aggregations_data_offset,
                             version=3):
        """Store metric splits.

        Store a bunch of splits for some metrics.

        :param metrics_keys_aggregations_data_offset: A dict where keys are
                                                      `storage.Metric` and
                                                      values are a list of
                                                      (key, aggregation,
                                                       data, offset) tuples.
        :param version: Storage engine format version.
        """
        utils.parallel_map(
            self._store_metric_splits_unbatched,
            ((metric, key, aggregation, data, offset, version)
             for metric, keys_aggregations_data_offset
             in six.iteritems(metrics_keys_aggregations_data_offset)
             for key, aggregation, data, offset
             in keys_aggregations_data_offset))
Example #8
    def _get_splits(self, metrics_aggregations_keys, version=3):
        results = collections.defaultdict(
            lambda: collections.defaultdict(list))
        for metric, aggregation, split in utils.parallel_map(
                lambda m, k, a, v: (m, a, self._get_splits_unbatched(m, k, a, v)),  # noqa
                ((metric, key, aggregation, version)
                 for metric, aggregations_and_keys
                 in six.iteritems(metrics_aggregations_keys)
                 for aggregation, keys
                 in six.iteritems(aggregations_and_keys)
                 for key in keys)):
            results[metric][aggregation].append(split)
        return results
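The double defaultdict in Example #8 lets results be appended under results[metric][aggregation] without first checking whether either key exists. The same grouping idiom on plain data:

import collections

results = collections.defaultdict(lambda: collections.defaultdict(list))
for metric, aggregation, split in [("m1", "mean", "s1"),
                                   ("m1", "mean", "s2"),
                                   ("m1", "max", "s3")]:
    # Missing outer and inner keys are created on first access.
    results[metric][aggregation].append(split)
# results["m1"] == {"mean": ["s1", "s2"], "max": ["s3"]}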
Example #9
    def _get_measures_timeserie(self,
                                metric,
                                aggregation,
                                granularity,
                                from_timestamp=None,
                                to_timestamp=None):

        # Find the number of points for this granularity
        for d in metric.archive_policy.definition:
            if d.granularity == granularity:
                points = d.points
                break
        else:
            raise GranularityDoesNotExist(metric, granularity)

        all_keys = None
        try:
            all_keys = self._list_split_keys_for_metric(
                metric, aggregation, granularity)
        except MetricDoesNotExist:
            # No data yet: return an empty timeserie. `points` is
            # already set, since the granularity was validated by the
            # for/else above.
            return carbonara.AggregatedTimeSerie(
                sampling=granularity,
                aggregation_method=aggregation,
                max_size=points)

        if from_timestamp:
            from_timestamp = carbonara.SplitKey.from_timestamp_and_sampling(
                from_timestamp, granularity)

        if to_timestamp:
            to_timestamp = carbonara.SplitKey.from_timestamp_and_sampling(
                to_timestamp, granularity)

        timeseries = list(
            filter(
                lambda x: x is not None,
                utils.parallel_map(
                    self._get_measures_and_unserialize,
                    ((metric, key, aggregation) for key in sorted(all_keys)
                     if ((not from_timestamp or key >= from_timestamp) and (
                         not to_timestamp or key <= to_timestamp))))))

        return carbonara.AggregatedTimeSerie.from_timeseries(
            sampling=granularity,
            aggregation_method=aggregation,
            timeseries=timeseries,
            max_size=points)
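The for/else at the top of _get_measures_timeserie is easy to misread: the else clause runs only when the loop finishes without hitting break, i.e. when no archive policy definition matches the requested granularity. A compact standalone illustration of the construct:

def find_points(definitions, granularity):
    # definitions: a list of (granularity, points) pairs.
    for g, points in definitions:
        if g == granularity:
            break
    else:
        # Reached only when the loop completed without `break`,
        # i.e. no definition matched.
        raise LookupError("granularity %r does not exist" % granularity)
    return points


find_points([(300, 12), (3600, 24)], 3600)  # -> 24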
Example #10
    def _get_or_create_unaggregated_timeseries(self, metrics, version=3):
        """Get the unaggregated timeserie of metrics.

        If a metric does not exist, it is created.

        :param metrics: A list of metrics.
        :param version: The storage format version number.
        """
        return dict(
            six.moves.zip(
                metrics,
                utils.parallel_map(
                    utils.return_none_on_failure(
                        self._get_or_create_unaggregated_timeseries_unbatched),
                    ((metric, version) for metric in metrics))))
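Example #10 quietly relies on parallel_map returning results in input order (the property asserted by the test in Example #5): zip then pairs each metric with its own result. In miniature:

metrics = ["m1", "m2", "m3"]
results = [10, None, 30]  # parallel results, in input order
assert dict(zip(metrics, results)) == {"m1": 10, "m2": None, "m3": 30}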
Example #11
    def get_aggregated_measures(self, metric, aggregations,
                                from_timestamp=None, to_timestamp=None):
        """Get aggregated measures from a metric.

        :param metric: The metric measured.
        :param aggregations: The aggregations to retrieve.
        :param from_timestamp: The timestamp to get the measure from.
        :param to_timestamp: The timestamp to get the measure to.
        """
        keys = self._list_split_keys(metric, aggregations)
        timeseries = utils.parallel_map(
            self._get_measures_timeserie,
            ((metric, agg, keys[agg], from_timestamp, to_timestamp)
             for agg in aggregations))
        return {
            agg: ts.fetch(from_timestamp, to_timestamp)
            for agg, ts in six.moves.zip(aggregations, timeseries)
        }
Example #12
    def get_measures(self,
                     metric,
                     from_timestamp=None,
                     to_timestamp=None,
                     aggregation='mean',
                     granularity=None,
                     resample=None):
        """Get a measure to a metric.

        :param metric: The metric measured.
        :param from timestamp: The timestamp to get the measure from.
        :param to timestamp: The timestamp to get the measure to.
        :param aggregation: The type of aggregation to retrieve.
        :param granularity: The granularity to retrieve.
        :param resample: The granularity to resample to.
        """
        if aggregation not in metric.archive_policy.aggregation_methods:
            if granularity is None:
                granularity = metric.archive_policy.definition[0].granularity
            raise AggregationDoesNotExist(metric, aggregation, granularity)

        if granularity is None:
            agg_timeseries = utils.parallel_map(
                self._get_measures_timeserie,
                ((metric, aggregation, ap.granularity, from_timestamp,
                  to_timestamp)
                 for ap in reversed(metric.archive_policy.definition)))
        else:
            agg_timeseries = [
                self._get_measures_timeserie(metric, aggregation, granularity,
                                             from_timestamp, to_timestamp)
            ]

        if resample and granularity:
            agg_timeseries = list(
                map(lambda agg: agg.resample(resample), agg_timeseries))

        return list(
            itertools.chain(*[
                ts.fetch(from_timestamp, to_timestamp) for ts in agg_timeseries
            ]))
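The closing itertools.chain(*[...]) merges the per-granularity measure lists into one flat list, in the order the timeseries were produced. The flattening itself, on plain data:

import itertools

per_granularity = [[(0, 3600, 1.0)],
                   [(0, 300, 1.5), (300, 300, 2.0)]]
flat = list(itertools.chain(*per_granularity))
# [(0, 3600, 1.0), (0, 300, 1.5), (300, 300, 2.0)]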
Example #13
    def get_measures(self,
                     metric,
                     granularities,
                     from_timestamp=None,
                     to_timestamp=None,
                     aggregation='mean',
                     resample=None):
        """Get a measure to a metric.

        :param metric: The metric measured.
        :param granularities: The granularities to retrieve.
        :param from timestamp: The timestamp to get the measure from.
        :param to timestamp: The timestamp to get the measure to.
        :param aggregation: The type of aggregation to retrieve.
        :param resample: The granularity to resample to.
        """

        aggregations = []
        for g in sorted(granularities, reverse=True):
            agg = metric.archive_policy.get_aggregation(aggregation, g)
            if agg is None:
                raise AggregationDoesNotExist(metric, aggregation, g)
            aggregations.append(agg)

        agg_timeseries = utils.parallel_map(
            self._get_measures_timeserie,
            ((metric, ag, from_timestamp, to_timestamp)
             for ag in aggregations))

        if resample:
            agg_timeseries = list(
                map(lambda agg: agg.resample(resample), agg_timeseries))

        return list(
            itertools.chain(*[
                ts.fetch(from_timestamp, to_timestamp) for ts in agg_timeseries
            ]))
Example #14
    def search_value(self,
                     metrics,
                     query,
                     from_timestamp=None,
                     to_timestamp=None,
                     aggregation='mean',
                     granularity=None):
        """Search for an aggregated value that realizes a predicate.

        :param metrics: The list of metrics to look into.
        :param query: The query being sent.
        :param from_timestamp: The timestamp to get the measure from.
        :param to_timestamp: The timestamp to get the measure to.
        :param aggregation: The type of aggregation to retrieve.
        :param granularity: The granularity to retrieve.
        """

        granularity = granularity or []
        predicate = MeasureQuery(query)

        results = utils.parallel_map(
            self._find_measure,
            [(metric, aggregation, gran, predicate, from_timestamp,
              to_timestamp) for metric in metrics for gran in granularity or (
                  defin.granularity
                  for defin in metric.archive_policy.definition)])
        result = collections.defaultdict(list)
        for r in results:
            for metric, metric_result in six.iteritems(r):
                result[metric].extend(metric_result)

        # Sort the result
        for metric, r in six.iteritems(result):
            # Sort by timestamp asc, granularity desc
            r.sort(key=lambda t: (t[0], -t[1]))

        return result
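The sort key (t[0], -t[1]) in Example #14 orders results by timestamp ascending and then, for equal timestamps, by granularity descending, by negating the second tuple element. On sample (timestamp, granularity, value) triples:

r = [(60, 300, 1.0), (0, 300, 2.0), (0, 3600, 3.0)]
r.sort(key=lambda t: (t[0], -t[1]))
# [(0, 3600, 3.0), (0, 300, 2.0), (60, 300, 1.0)]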
Example #15
def get_measures(storage, references, operations,
                 from_timestamp=None, to_timestamp=None,
                 granularities=None, needed_overlap=100.0,
                 fill=None):
    """Get aggregated measures of multiple entities.

    :param storage: The storage driver.
    :param references: A list of metric references (metric plus
                       aggregation method) to aggregate.
    :param from_timestamp: The timestamp to get the measure from.
    :param to_timestamp: The timestamp to get the measure to.
    :param granularities: The granularities to retrieve.
    :param fill: The value to use to fill in missing data in series.
    """

    if granularities is None:
        all_granularities = (
            definition.granularity
            for ref in references
            for definition in ref.metric.archive_policy.definition
        )
        # granularities_in_common
        granularities = [
            g
            for g, occurrence in six.iteritems(
                collections.Counter(all_granularities))
            if occurrence == len(references)
        ]

        if not granularities:
            raise exceptions.UnAggregableTimeseries(
                list((ref.name, ref.aggregation)
                     for ref in references),
                'No granularity match')

    references_with_missing_granularity = []
    for ref in references:
        if (ref.aggregation not in
                ref.metric.archive_policy.aggregation_methods):
            raise gnocchi_storage.AggregationDoesNotExist(
                ref.metric, ref.aggregation,
                # Use the first granularity, that should be good enough since
                # they are all missing anyway
                ref.metric.archive_policy.definition[0].granularity)

        available_granularities = [
            d.granularity
            for d in ref.metric.archive_policy.definition
        ]
        for g in granularities:
            if g not in available_granularities:
                references_with_missing_granularity.append(
                    (ref.name, ref.aggregation, g))
                break

    if references_with_missing_granularity:
        raise exceptions.UnAggregableTimeseries(
            references_with_missing_granularity,
            "Granularities are missing")

    tss = utils.parallel_map(_get_measures_timeserie,
                             [(storage, ref, g, from_timestamp, to_timestamp)
                              for ref in references
                              for g in granularities])

    return aggregated(tss, operations, from_timestamp, to_timestamp,
                      needed_overlap, fill)
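The collections.Counter trick above keeps a granularity only when its occurrence count equals the number of references, i.e. when every archive policy provides it (this implicitly assumes no policy lists the same granularity twice). Standalone:

import collections

policies = [[300, 3600], [300, 86400], [300, 3600]]
all_granularities = (g for policy in policies for g in policy)
common = [g for g, occurrence
          in collections.Counter(all_granularities).items()
          if occurrence == len(policies)]
# common == [300]: the only granularity present in all three policies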
Example #16
def get_measures(storage,
                 references,
                 operations,
                 from_timestamp=None,
                 to_timestamp=None,
                 granularities=None,
                 needed_overlap=100.0,
                 fill=None):
    """Get aggregated measures of multiple entities.

    :param storage: The storage driver.
    :param references: A list of metric references (metric plus
                       aggregation method) to aggregate.
    :param from_timestamp: The timestamp to get the measure from.
    :param to_timestamp: The timestamp to get the measure to.
    :param granularities: The granularities to retrieve.
    :param fill: The value to use to fill in missing data in series.
    """

    if granularities is None:
        all_granularities = (
            definition.granularity for ref in references
            for definition in ref.metric.archive_policy.definition)
        # granularities_in_common
        granularities = [
            g for g, occurrence in six.iteritems(
                collections.Counter(all_granularities))
            if occurrence == len(references)
        ]

        if not granularities:
            raise exceptions.UnAggregableTimeseries(
                list((ref.name, ref.aggregation) for ref in references),
                'No granularity match')

    references_with_missing_granularity = []
    for ref in references:
        if (ref.aggregation
                not in ref.metric.archive_policy.aggregation_methods):
            raise gnocchi_storage.AggregationDoesNotExist(
                ref.metric,
                ref.aggregation,
                # Use the first granularity, that should be good enough since
                # they are all missing anyway
                ref.metric.archive_policy.definition[0].granularity)

        available_granularities = [
            d.granularity for d in ref.metric.archive_policy.definition
        ]
        for g in granularities:
            if g not in available_granularities:
                references_with_missing_granularity.append(
                    (ref.name, ref.aggregation, g))
                break

    if references_with_missing_granularity:
        raise exceptions.UnAggregableTimeseries(
            references_with_missing_granularity, "Granularities are missing")

    tss = utils.parallel_map(_get_measures_timeserie,
                             [(storage, ref, g, from_timestamp, to_timestamp)
                              for ref in references for g in granularities])

    return aggregated(tss, operations, from_timestamp, to_timestamp,
                      needed_overlap, fill)
Example #17
def get_measures(storage,
                 metrics_and_aggregations,
                 operations,
                 from_timestamp=None,
                 to_timestamp=None,
                 granularity=None,
                 needed_overlap=100.0,
                 fill=None,
                 ref_identifier="id"):
    """Get aggregated measures of multiple entities.

    :param storage: The storage driver.
    :param metrics_and_aggregations: List of metric+agg_method tuple
                                     measured to aggregate.
    :param from_timestamp: The timestamp to get the measure from.
    :param to_timestamp: The timestamp to get the measure to.
    :param granularity: The granularity to retrieve.
    :param fill: The value to use to fill in missing data in series.
    """

    references_with_missing_granularity = []
    for (metric, aggregation) in metrics_and_aggregations:
        if aggregation not in metric.archive_policy.aggregation_methods:
            raise gnocchi_storage.AggregationDoesNotExist(metric, aggregation)
        if granularity is not None:
            for d in metric.archive_policy.definition:
                if d.granularity == granularity:
                    break
            else:
                references_with_missing_granularity.append(
                    (getattr(metric, ref_identifier), aggregation))

    if references_with_missing_granularity:
        raise exceptions.UnAggregableTimeseries(
            references_with_missing_granularity,
            "granularity '%d' is missing" %
            utils.timespan_total_seconds(granularity))

    if granularity is None:
        granularities = (definition.granularity
                         for (metric, aggregation) in metrics_and_aggregations
                         for definition in metric.archive_policy.definition)
        granularities_in_common = [
            g for g, occurrence in six.iteritems(
                collections.Counter(granularities))
            if occurrence == len(metrics_and_aggregations)
        ]

        if not granularities_in_common:
            raise exceptions.UnAggregableTimeseries(
                list((str(getattr(m, ref_identifier)), a)
                     for (m, a) in metrics_and_aggregations),
                'No granularity match')
    else:
        granularities_in_common = [granularity]

    tss = utils.parallel_map(
        _get_measures_timeserie,
        [(storage, metric, aggregation, ref_identifier, g, from_timestamp,
          to_timestamp) for (metric, aggregation) in metrics_and_aggregations
         for g in granularities_in_common])

    return aggregated(tss, operations, from_timestamp, to_timestamp,
                      needed_overlap, fill)
Example #18
    def test_parallel_map_four(self):
        utils.parallel_map.NUM_WORKERS = 4
        self.assertEqual([1, 2, 3],
                         utils.parallel_map(lambda x: x, [[1], [2], [3]]))
Example #19
    def _get_measures(self, metric, keys, aggregation, version=3):
        return utils.parallel_map(self._get_measures_unbatched,
                                  ((metric, key, aggregation, version)
                                   for key in keys))
Example #20
    def _delete_metric_splits(self, metric, keys_and_aggregations, version=3):
        utils.parallel_map(
            utils.return_none_on_failure(
                self._delete_metric_splits_unbatched),
            ((metric, key, aggregation)
             for key, aggregation in keys_and_aggregations))