Example 1
Ceph (rados) driver: reads the legacy TimeSerieArchive object for a metric and maps a missing object to AggregationDoesNotExist.
 def _get_metric_archive(self, metric, aggregation):
     """Retrieve data in the place we used to store TimeSerieArchive."""
     try:
         return self._get_object_content(
             str("gnocchi_%s_%s" % (metric.id, aggregation)))
     except rados.ObjectNotFound:
         raise storage.AggregationDoesNotExist(metric, aggregation)
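Every snippet in this listing raises the same two exception types from gnocchi.storage. As a minimal sketch of the contract the call sites rely on (the real classes carry more context; the bodies below are assumptions for illustration, not Gnocchi's actual definitions):

# Illustrative sketch of the two exception types used throughout this
# listing; only the constructor shapes visible at the call sites are kept.
class StorageError(Exception):
    pass


class MetricDoesNotExist(StorageError):
    """The metric has no data stored at all."""

    def __init__(self, metric):
        self.metric = metric
        super(MetricDoesNotExist, self).__init__(
            "Metric %s does not exist" % metric)


class AggregationDoesNotExist(StorageError):
    """The metric exists, but the requested aggregate does not."""

    def __init__(self, metric, method, granularity=None):
        self.metric = metric
        self.method = method
        self.granularity = granularity
        super(AggregationDoesNotExist, self).__init__(
            "Aggregation '%s' (granularity %s) does not exist for %s"
            % (method, granularity, metric))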
Example 2
Redis driver: each split lives in a per-metric hash; when HGET misses, an EXISTS check on the hash distinguishes a missing metric from a missing aggregate.
 def _get_measures(self, metric, key, aggregation, version=3):
     redis_key = self._metric_key(metric)
     field = self._aggregated_field_for_split(aggregation, key, version)
     data = self._client.hget(redis_key, field)
     if data is None:
         if not self._client.exists(redis_key):
             raise storage.MetricDoesNotExist(metric)
         raise storage.AggregationDoesNotExist(metric, aggregation)
     return data
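Keeping every split of a metric as a field of a single hash is what makes the EXISTS check above a cheap disambiguation. The two helpers the driver calls are not shown; a hypothetical sketch of their shape (the key and field encodings here are assumptions, not the driver's real layout):

# Hypothetical helper methods of the Redis driver; encodings are illustrative.
def _metric_key(self, metric):
    # One Redis hash per metric.
    return "gnocchi_%s" % metric.id


def _aggregated_field_for_split(self, aggregation, key, version):
    # One hash field per (aggregation, split key, version) triple.
    return "agg_%s_%s_v%s" % (aggregation, key, version)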
Example 3
Swift driver: reads the legacy archive object and turns an HTTP 404 into AggregationDoesNotExist, re-raising anything else.
 def _get_metric_archive(self, metric, aggregation):
     """Retrieve data in the place we used to store TimeSerieArchive."""
     try:
         headers, contents = self.swift.get_object(
             self._container_name(metric), aggregation)
     except swclient.ClientException as e:
         if e.http_status == 404:
             raise storage.AggregationDoesNotExist(metric, aggregation)
         raise
     return contents
Example 4
Ceph driver, per-split layout: on ObjectNotFound, the presence of the metric's container object decides between AggregationDoesNotExist and MetricDoesNotExist.
 def _get_measures(self, metric, timestamp_key, aggregation, granularity):
     try:
         name = self._get_object_name(metric, timestamp_key, aggregation,
                                      granularity)
         return self._get_object_content(name)
     except rados.ObjectNotFound:
         if self._object_exists("gnocchi_%s_container" % metric.id):
             raise storage.AggregationDoesNotExist(metric, aggregation)
         else:
             raise storage.MetricDoesNotExist(metric)
Example 5
Ceph driver, versioned layout: same disambiguation as above, but keyed by version and checked against the unaggregated timeserie object.
 def _get_measures(self, metric, key, aggregation, version=3):
     try:
         name = self._get_object_name(metric, key, aggregation, version)
         return self._get_object_content(name)
     except rados.ObjectNotFound:
         if self._object_exists(
                 self._build_unaggregated_timeserie_path(metric, 3)):
             raise storage.AggregationDoesNotExist(metric, aggregation)
         else:
             raise storage.MetricDoesNotExist(metric)
Example 6
File driver: reads the legacy archive from disk; ENOENT plus a check on the metric directory separates the two error cases.
 def _get_metric_archive(self, metric, aggregation):
     """Retrieve data in the place we used to store TimeSerieArchive."""
     path = self._build_metric_archive_path(metric, aggregation)
     try:
         with open(path, 'rb') as aggregation_file:
             return aggregation_file.read()
     except IOError as e:
         if e.errno == errno.ENOENT:
             if os.path.exists(self._build_metric_dir(metric)):
                 raise storage.AggregationDoesNotExist(metric, aggregation)
             raise storage.MetricDoesNotExist(metric)
         raise
Example 7
File driver, per-split layout: identical ENOENT handling for a split addressed by timestamp key and granularity.
 def _get_measures(self, metric, timestamp_key, aggregation, granularity):
     path = self._build_metric_path_for_split(metric, aggregation,
                                              timestamp_key, granularity)
     try:
         with open(path, 'rb') as aggregation_file:
             return aggregation_file.read()
     except IOError as e:
         if e.errno == errno.ENOENT:
             if os.path.exists(self._build_metric_dir(metric)):
                 raise storage.AggregationDoesNotExist(metric, aggregation)
             raise storage.MetricDoesNotExist(metric)
         raise
Example 8
File driver, unbatched read: same pattern again, but the missing-aggregate error also carries the split key's sampling.
 def _get_measures_unbatched(self, metric, key, aggregation, version=3):
     path = self._build_metric_path_for_split(metric, aggregation, key,
                                              version)
     try:
         with open(path, 'rb') as aggregation_file:
             return aggregation_file.read()
     except IOError as e:
         if e.errno == errno.ENOENT:
             if os.path.exists(self._build_metric_dir(metric)):
                 raise storage.AggregationDoesNotExist(
                     metric, aggregation, key.sampling)
             raise storage.MetricDoesNotExist(metric)
         raise
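Example 8 passes key.sampling to the exception, so the split key is more than a plain timestamp string. A rough sketch of the shape the code assumes (the real carbonara split key is richer; this is illustrative only):

# Rough sketch of the split-key shape assumed by example 8: a timestamp-like
# value plus the sampling (granularity) the split belongs to.
class SplitKey(object):
    def __init__(self, value, sampling):
        self.value = value        # first timestamp covered by the split
        self.sampling = sampling  # granularity of the aggregated points

    def __str__(self):
        return str(self.value)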
Example 9
Redis driver, batched read: HMGET fetches several split fields at once, and the first missing one triggers the usual disambiguation.
 def _get_measures(self, metric, keys, aggregation, version=3):
     redis_key = self._metric_key(metric)
     fields = [
         self._aggregated_field_for_split(aggregation, key, version)
         for key in keys
     ]
     results = self._client.hmget(redis_key, fields)
     for key, data in six.moves.zip(keys, results):
         if data is None:
             if not self._client.exists(redis_key):
                 raise storage.MetricDoesNotExist(metric)
             raise storage.AggregationDoesNotExist(metric, aggregation,
                                                   key.sampling)
     return results
Example 10
Swift driver, per-split layout: a 404 on the object is followed by a HEAD on the metric's container to tell the two cases apart.
 def _get_measures(self, metric, key, aggregation, version=3):
     try:
         headers, contents = self.swift.get_object(
             self._container_name(metric), self._object_name(
                 key, aggregation, version))
     except swclient.ClientException as e:
         if e.http_status == 404:
             try:
                 self.swift.head_container(self._container_name(metric))
             except swclient.ClientException as e:
                 if e.http_status == 404:
                     raise storage.MetricDoesNotExist(metric)
                 raise
             raise storage.AggregationDoesNotExist(metric, aggregation)
         raise
     return contents
Example 11
S3 driver: the same two-step check using NoSuchKey and a prefix listing of the metric's objects.
 def _get_measures(self, metric, key, aggregation, version=3):
     try:
         response = self.s3.get_object(
             Bucket=self._bucket_name,
             Key=self._prefix(metric) + self._object_name(
                 key, aggregation, version))
     except botocore.exceptions.ClientError as e:
         if e.response['Error'].get('Code') == 'NoSuchKey':
             try:
                 response = self.s3.list_objects_v2(
                     Bucket=self._bucket_name, Prefix=self._prefix(metric))
             except botocore.exceptions.ClientError as e:
                 if e.response['Error'].get('Code') == 'NoSuchKey':
                     raise storage.MetricDoesNotExist(metric)
                 raise
             raise storage.AggregationDoesNotExist(metric, aggregation)
         raise
     return response['Body'].read()
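Whatever the backend, callers only ever see the same two exception types, so no driver-specific handling leaks upward. A minimal caller sketch, reusing the exception classes sketched after example 1 (read_split and its empty-series fallback are assumptions, not Gnocchi API):

# Illustrative caller: the backend-specific 404 handling above collapses
# into two uniform cases.
def read_split(driver, metric, key, aggregation):
    try:
        return driver._get_measures(metric, key, aggregation)
    except MetricDoesNotExist:
        # Nothing is stored for this metric at all: propagate as a hard error.
        raise
    except AggregationDoesNotExist:
        # The metric exists but this aggregate was never computed: treat
        # the split as empty rather than failing.
        return b""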
Example 12
Cross-metric aggregation helper: validates the aggregation and granularity for every metric, computes the granularities common to all of them, then reaggregates the resulting timeseries.
def get_cross_metric_measures(storage,
                              metrics,
                              from_timestamp=None,
                              to_timestamp=None,
                              aggregation='mean',
                              reaggregation=None,
                              granularity=None,
                              needed_overlap=100.0,
                              fill=None,
                              transform=None):
    """Get aggregated measures of multiple entities.

    :param storage: The storage driver.
    :param metrics: The metrics to aggregate.
    :param from_timestamp: The timestamp to get the measures from.
    :param to_timestamp: The timestamp to get the measures to.
    :param granularity: The granularity to retrieve.
    :param aggregation: The type of aggregation to retrieve.
    :param reaggregation: The type of aggregation to compute
                          on the retrieved measures.
    :param needed_overlap: Percentage of timestamps the series must share
                           for the aggregation to be computed.
    :param fill: The value to use to fill in missing data in series.
    :param transform: List of transformations to apply to the series.
    """
    for metric in metrics:
        if aggregation not in metric.archive_policy.aggregation_methods:
            raise gnocchi_storage.AggregationDoesNotExist(metric, aggregation)
        if granularity is not None:
            for d in metric.archive_policy.definition:
                if d.granularity == granularity:
                    break
            else:
                raise gnocchi_storage.GranularityDoesNotExist(
                    metric, granularity)

    if reaggregation is None:
        reaggregation = aggregation

    if granularity is None:
        granularities = (definition.granularity for metric in metrics
                         for definition in metric.archive_policy.definition)
        granularities_in_common = [
            g for g, occurrence in six.iteritems(
                collections.Counter(granularities))
            if occurrence == len(metrics)
        ]

        if not granularities_in_common:
            raise MetricUnaggregatable(metrics, 'No granularity match')
    else:
        granularities_in_common = [granularity]

    tss = storage._map_in_thread(
        storage._get_measures_timeserie,
        [(metric, aggregation, g, from_timestamp, to_timestamp)
         for metric in metrics for g in granularities_in_common])

    if transform is not None:
        tss = list(map(lambda ts: ts.transform(transform), tss))

    try:
        return [(timestamp.replace(tzinfo=iso8601.iso8601.UTC), r, v)
                for timestamp, r, v in
                aggregated(tss, reaggregation, from_timestamp, to_timestamp,
                           needed_overlap, fill)]
    except UnAggregableTimeseries as e:
        raise MetricUnaggregatable(metrics, e.reason)
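A hypothetical call site for the function above; cpu_metric and mem_metric stand for indexer metric objects obtained elsewhere, and the values are illustrative:

# Read back the hourly mean of two metrics and combine the series
# point-by-point with 'max'. 3600.0 must be a granularity present in
# both archive policies for the call to succeed.
measures = get_cross_metric_measures(
    storage, [cpu_metric, mem_metric],
    aggregation='mean',      # per-metric aggregate to read
    reaggregation='max',     # how the retrieved series are combined
    granularity=3600.0,
    needed_overlap=100.0)    # every timestamp must appear in both series
for timestamp, granularity, value in measures:
    print(timestamp, granularity, value)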
Example 13
Reference-based variant of the aggregation helper: granularity checks are collected per reference and reported together instead of failing on the first one.
def get_measures(storage,
                 references,
                 operations,
                 from_timestamp=None,
                 to_timestamp=None,
                 granularities=None,
                 needed_overlap=100.0,
                 fill=None):
    """Get aggregated measures of multiple entities.

    :param storage: The storage driver.
    :param references: The metric references to aggregate; each exposes a
                       metric, an aggregation method and a name.
    :param operations: The aggregation operations to compute on the
                       retrieved measures.
    :param from_timestamp: The timestamp to get the measures from.
    :param to_timestamp: The timestamp to get the measures to.
    :param granularities: The granularities to retrieve.
    :param needed_overlap: Percentage of timestamps the series must share
                           for the aggregation to be computed.
    :param fill: The value to use to fill in missing data in series.
    """

    if granularities is None:
        all_granularities = (
            definition.granularity for ref in references
            for definition in ref.metric.archive_policy.definition)
        # Keep only the granularities present in every reference.
        granularities = [
            g for g, occurrence in six.iteritems(
                collections.Counter(all_granularities))
            if occurrence == len(references)
        ]

        if not granularities:
            raise exceptions.UnAggregableTimeseries(
                list((ref.name, ref.aggregation) for ref in references),
                'No granularity match')

    references_with_missing_granularity = []
    for ref in references:
        if (ref.aggregation
                not in ref.metric.archive_policy.aggregation_methods):
            raise gnocchi_storage.AggregationDoesNotExist(
                ref.metric,
                ref.aggregation,
                # Use the first granularity: the aggregation method is
                # missing at every granularity anyway.
                ref.metric.archive_policy.definition[0].granularity)

        available_granularities = [
            d.granularity for d in ref.metric.archive_policy.definition
        ]
        for g in granularities:
            if g not in available_granularities:
                references_with_missing_granularity.append(
                    (ref.name, ref.aggregation, g))
                break

    if references_with_missing_granularity:
        raise exceptions.UnAggregableTimeseries(
            references_with_missing_granularity, "Granularities are missing")

    tss = utils.parallel_map(_get_measures_timeserie,
                             [(storage, ref, g, from_timestamp, to_timestamp)
                              for ref in references for g in granularities])

    return aggregated(tss, operations, from_timestamp, to_timestamp,
                      needed_overlap, fill)
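The references argument above only needs to expose .name, .metric and .aggregation; a namedtuple is enough to satisfy the code. This shape is an assumption for illustration; the real reference type may differ:

# Minimal reference shape assumed by get_measures in example 13.
import collections

MetricReference = collections.namedtuple(
    "MetricReference", ["name", "metric", "aggregation"])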
Example 14
Tuple-based variant: the same validation driven by (metric, aggregation) pairs, with ref_identifier selecting how metrics are named in error messages.
def get_measures(storage,
                 metrics_and_aggregations,
                 operations,
                 from_timestamp=None,
                 to_timestamp=None,
                 granularity=None,
                 needed_overlap=100.0,
                 fill=None,
                 ref_identifier="id"):
    """Get aggregated measures of multiple entities.

    :param storage: The storage driver.
    :param metrics_and_aggregations: List of (metric, aggregation method)
                                     tuples to aggregate.
    :param operations: The aggregation operations to compute on the
                       retrieved measures.
    :param from_timestamp: The timestamp to get the measures from.
    :param to_timestamp: The timestamp to get the measures to.
    :param granularity: The granularity to retrieve.
    :param needed_overlap: Percentage of timestamps the series must share
                           for the aggregation to be computed.
    :param fill: The value to use to fill in missing data in series.
    :param ref_identifier: The metric attribute used to identify each
                           series in error reports.
    """

    references_with_missing_granularity = []
    for (metric, aggregation) in metrics_and_aggregations:
        if aggregation not in metric.archive_policy.aggregation_methods:
            raise gnocchi_storage.AggregationDoesNotExist(metric, aggregation)
        if granularity is not None:
            for d in metric.archive_policy.definition:
                if d.granularity == granularity:
                    break
            else:
                references_with_missing_granularity.append(
                    (getattr(metric, ref_identifier), aggregation))

    if references_with_missing_granularity:
        raise exceptions.UnAggregableTimeseries(
            references_with_missing_granularity,
            "granularity '%s' is missing" %
            utils.timespan_total_seconds(granularity))

    if granularity is None:
        granularities = (definition.granularity
                         for (metric, aggregation) in metrics_and_aggregations
                         for definition in metric.archive_policy.definition)
        granularities_in_common = [
            g for g, occurrence in six.iteritems(
                collections.Counter(granularities))
            if occurrence == len(metrics_and_aggregations)
        ]

        if not granularities_in_common:
            raise exceptions.UnAggregableTimeseries(
                list((str(getattr(m, ref_identifier)), a)
                     for (m, a) in metrics_and_aggregations),
                'No granularity match')
    else:
        granularities_in_common = [granularity]

    tss = utils.parallel_map(
        _get_measures_timeserie,
        [(storage, metric, aggregation, ref_identifier, g, from_timestamp,
          to_timestamp) for (metric, aggregation) in metrics_and_aggregations
         for g in granularities_in_common])

    return aggregated(tss, operations, from_timestamp, to_timestamp,
                      needed_overlap, fill)