Beispiel #1
0
    def _list_split_keys_for_metric(self,
                                    metric,
                                    aggregation,
                                    granularity,
                                    version=3):
        """List split keys stored for one aggregation of a metric.

        Reads the omap of the metric's unaggregated-timeserie object in
        Ceph and keeps the entries matching *aggregation*, *granularity*
        and the storage format *version*.

        :param metric: the metric whose split keys are listed
        :param aggregation: aggregation method name to match
        :param granularity: granularity (seconds) to match
        :param version: storage format version used by the name check
        :return: a set of split-key strings
        :raises storage.MetricDoesNotExist: if the backing object is gone
        """
        with rados.ReadOpCtx() as op:
            # `omaps` is a lazy iterator; it only yields values once
            # operate_read_op() below has actually executed the read op.
            omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1)
            try:
                self.ioctx.operate_read_op(
                    op, self._build_unaggregated_timeserie_path(metric, 3))
            except rados.ObjectNotFound:
                raise storage.MetricDoesNotExist(metric)

            # NOTE(sileht): after reading the libradospy, I'm
            # not sure that ret will have the correct value
            # get_omap_vals transforms the C int to python int
            # before operate_read_op is called, I dunno if the int
            # content is copied during this transformation or if
            # this is a pointer to the C int, I think it's copied...
            try:
                ceph.errno_to_exception(ret)
            except rados.ObjectNotFound:
                raise storage.MetricDoesNotExist(metric)

            keys = set()
            # Omap names are "_"-separated; as the indexing below shows,
            # field 2 is the split key, field 3 the aggregation method and
            # field 4 the granularity.
            for name, value in omaps:
                meta = name.split('_')
                if (aggregation == meta[3] and granularity == float(meta[4])
                        and self._version_check(name, version)):
                    keys.add(meta[2])
            return keys
Beispiel #2
0
 def _list_split_keys(self, metric, aggregation, granularity, version=3):
     """Return the set of split keys stored in S3 for one aggregation.

     Pages through the bucket listing under the metric's prefix and
     collects the split-key field of every matching object name.

     :raises storage.MetricDoesNotExist: on a NoSuchKey error from S3
     """
     found = set()
     listing = {}
     # Keep requesting pages until S3 reports the listing is complete.
     while listing.get('IsTruncated', True):
         token = listing.get('NextContinuationToken')
         extra = {'ContinuationToken': token} if token is not None else {}
         try:
             listing = self.s3.list_objects_v2(
                 Bucket=self._bucket_name,
                 Prefix=self._prefix(metric) + '%s_%s' % (aggregation,
                                                          granularity),
                 **extra)
         except botocore.exceptions.ClientError as e:
             if e.response['Error'].get('Code') == "NoSuchKey":
                 raise storage.MetricDoesNotExist(metric)
             raise
         for obj in listing.get('Contents', ()):
             try:
                 fields = obj['Key'].split('_')
                 if (self._version_check(obj['Key'], version)):
                     found.add(fields[2])
             except (ValueError, IndexError):
                 # Might be "none", or any other file. Be resilient.
                 continue
     return found
Beispiel #3
0
 def _list_split_keys(self, metric, aggregations, version=3):
     """Fetch split keys for several aggregations in one Redis pipeline.

     :return: dict mapping each aggregation to a set of carbonara.SplitKey
     :raises storage.MetricDoesNotExist: if the metric hash is absent
     """
     redis_key = self._metric_key(metric)
     pipe = self._client.pipeline(transaction=False)
     pipe.exists(redis_key)
     for agg in aggregations:
         field_glob = self._aggregated_field_for_split(
             agg.method, "*", version, agg.granularity)
         self._scripts["list_split_keys"](
             keys=[redis_key], args=[field_glob], client=pipe,
         )
     replies = pipe.execute()
     # The first reply is the EXISTS result for the metric hash.
     if not replies.pop(0):
         raise storage.MetricDoesNotExist(metric)
     keys = {}
     for agg, entries in six.moves.zip(aggregations, replies):
         if not entries:
             keys[agg] = set()
             continue
         stamps, _methods, spans = list(zip(*entries))
         stamps = utils.to_timestamps(stamps)
         spans = map(utils.to_timespan, spans)
         keys[agg] = {
             carbonara.SplitKey(stamp, sampling=span)
             for stamp, span in six.moves.zip(stamps, spans)
         }
     return keys
Beispiel #4
0
 def _list_split_keys(self, metric, aggregations, version=3):
     """List on-disk split keys, grouped per requested aggregation.

     :return: defaultdict mapping aggregation -> set of carbonara.SplitKey
     :raises storage.MetricDoesNotExist: if the metric directory is gone
     """
     keys = collections.defaultdict(set)
     sorted_aggs = sorted(aggregations, key=ATTRGETTER_METHOD)
     for method, aggs_for_method in itertools.groupby(
             sorted_aggs, ATTRGETTER_METHOD):
         try:
             entries = os.listdir(self._build_metric_path(metric, method))
         except OSError as e:
             # A missing directory means the metric itself no longer exists.
             if e.errno == errno.ENOENT:
                 raise storage.MetricDoesNotExist(metric)
             raise
         split_fields = [f.split("_") for f in entries
                         if self._version_check(f, version)]
         if not split_fields:
             continue
         columns = list(zip(*split_fields))
         timestamps = utils.to_timestamps(columns[0])
         granularities = list(map(utils.to_timespan, columns[1]))
         aggs_for_method = list(aggs_for_method)
         for stamp, gran in six.moves.zip(timestamps, granularities):
             for agg in aggs_for_method:
                 if gran == agg.granularity:
                     keys[agg].add(
                         carbonara.SplitKey(stamp, sampling=gran))
                     break
     return keys
Beispiel #5
0
    def _list_split_keys(self, metric, aggregations, version=3):
        """Return, per aggregation, the SplitKeys found in Swift.

        :raises storage.MetricDoesNotExist: if the container is missing
        """
        try:
            headers, objects = self.swift.get_container(
                self._container_name(metric), full_listing=True)
        except swclient.ClientException as e:
            # A 404 on the container means the metric was never stored.
            if e.http_status == 404:
                raise storage.MetricDoesNotExist(metric)
            raise

        names = (o['name'] for o in objects
                 if self._version_check(o['name'], version)
                 and not o['name'].startswith('none'))
        split_fields = [name.split("_") for name in names]
        keys = collections.defaultdict(set)
        if not split_fields:
            return keys
        columns = list(zip(*split_fields))
        timestamps = utils.to_timestamps(columns[0])
        methods = columns[1]
        granularities = list(map(utils.to_timespan, columns[2]))

        for stamp, method, gran in six.moves.zip(
                timestamps, methods, granularities):
            for agg in aggregations:
                if agg.method == method and agg.granularity == gran:
                    keys[agg].add(
                        carbonara.SplitKey(stamp, sampling=gran))
                    break
        return keys
Beispiel #6
0
 def _get_unaggregated_timeserie(self, metric):
     """Fetch the raw (unaggregated) serie for *metric* from Swift.

     :raises storage.MetricDoesNotExist: if the "none" object is missing
     """
     container = self._container_name(metric)
     try:
         headers, body = self.swift.get_object(container, "none")
     except swclient.ClientException as e:
         if e.http_status != 404:
             raise
         # The "none" object holds the unaggregated serie; a 404 means
         # the metric does not exist.
         raise storage.MetricDoesNotExist(metric)
     return body
Beispiel #7
0
 def _get_unaggregated_timeserie(self, metric):
     """Read the raw serie file for *metric* from the local filesystem.

     :raises storage.MetricDoesNotExist: if the file does not exist
     """
     serie_path = self._build_unaggregated_timeserie_path(metric)
     try:
         with open(serie_path, 'rb') as serie_file:
             return serie_file.read()
     except IOError as e:
         if e.errno != errno.ENOENT:
             raise
         raise storage.MetricDoesNotExist(metric)
Beispiel #8
0
 def _get_measures(self, metric, key, aggregation, version=3):
     """Return one serialized aggregated split from the Redis hash.

     :raises storage.MetricDoesNotExist: if the metric hash is absent
     :raises storage.AggregationDoesNotExist: if only the field is absent
     """
     hash_key = self._metric_key(metric)
     field = self._aggregated_field_for_split(aggregation, key, version)
     payload = self._client.hget(hash_key, field)
     if payload is not None:
         return payload
     # Distinguish "metric gone" from "aggregation missing".
     if not self._client.exists(hash_key):
         raise storage.MetricDoesNotExist(metric)
     raise storage.AggregationDoesNotExist(metric, aggregation)
Beispiel #9
0
 def _get_measures(self, metric, timestamp_key, aggregation, granularity):
     """Read one aggregated split object from Ceph.

     :raises storage.MetricDoesNotExist: if the metric's container object
         is gone as well
     :raises storage.AggregationDoesNotExist: if only the split is missing
     """
     try:
         object_name = self._get_object_name(metric, timestamp_key,
                                             aggregation, granularity)
         return self._get_object_content(object_name)
     except rados.ObjectNotFound:
         # The container object marks the metric's existence.
         if not self._object_exists("gnocchi_%s_container" % metric.id):
             raise storage.MetricDoesNotExist(metric)
         raise storage.AggregationDoesNotExist(metric, aggregation)
Beispiel #10
0
 def _get_measures(self, metric, key, aggregation, version=3):
     """Read one aggregated split object from Ceph (versioned naming).

     :raises storage.MetricDoesNotExist: if the unaggregated serie object
         is gone as well
     :raises storage.AggregationDoesNotExist: if only the split is missing
     """
     try:
         object_name = self._get_object_name(metric, key, aggregation,
                                             version)
         return self._get_object_content(object_name)
     except rados.ObjectNotFound:
         # The unaggregated serie object marks the metric's existence.
         unagg_path = self._build_unaggregated_timeserie_path(metric, 3)
         if not self._object_exists(unagg_path):
             raise storage.MetricDoesNotExist(metric)
         raise storage.AggregationDoesNotExist(metric, aggregation)
Beispiel #11
0
 def _get_unaggregated_timeserie(self, metric, version=3):
     """Download the raw serie object for *metric* from S3.

     :raises storage.MetricDoesNotExist: on a NoSuchKey error
     """
     object_key = self._build_unaggregated_timeserie_path(metric, version)
     try:
         obj = self.s3.get_object(Bucket=self._bucket_name, Key=object_key)
     except botocore.exceptions.ClientError as e:
         if e.response['Error'].get('Code') != "NoSuchKey":
             raise
         raise storage.MetricDoesNotExist(metric)
     return obj['Body'].read()
Beispiel #12
0
    def _list_split_keys(self, metric, aggregations, version=3):
        """List split keys stored in Ceph for the given aggregations.

        Reads the omap of the metric's unaggregated-timeserie object and
        groups matching entries per aggregation.

        :param metric: the metric whose split keys are listed
        :param aggregations: iterable of aggregation objects to match
        :param version: storage format version used by the name check
        :return: defaultdict mapping aggregation -> set of
            carbonara.SplitKey
        :raises storage.MetricDoesNotExist: if the backing object is gone
        """
        with rados.ReadOpCtx() as op:
            # `omaps` is a lazy iterator; it only yields values once
            # operate_read_op() below has actually executed the read op.
            omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1)
            try:
                self.ioctx.operate_read_op(
                    op, self._build_unaggregated_timeserie_path(metric, 3))
            except rados.ObjectNotFound:
                raise storage.MetricDoesNotExist(metric)

            # NOTE(sileht): after reading the libradospy, I'm
            # not sure that ret will have the correct value
            # get_omap_vals transforms the C int to python int
            # before operate_read_op is called, I dunno if the int
            # content is copied during this transformation or if
            # this is a pointer to the C int, I think it's copied...
            try:
                ceph.errno_to_exception(ret)
            except rados.ObjectNotFound:
                raise storage.MetricDoesNotExist(metric)

            # Omap names are "_"-separated; as the indexing below shows,
            # field 2 is the timestamp, field 3 the aggregation method
            # and field 4 the granularity.
            raw_keys = [
                name.split("_") for name, value in omaps
                if self._version_check(name, version)
            ]
            keys = collections.defaultdict(set)
            if not raw_keys:
                return keys
            zipped = list(zip(*raw_keys))
            k_timestamps = utils.to_timestamps(zipped[2])
            k_methods = zipped[3]
            k_granularities = list(map(utils.to_timespan, zipped[4]))

            for timestamp, method, granularity in six.moves.zip(
                    k_timestamps, k_methods, k_granularities):
                for aggregation in aggregations:
                    if (aggregation.method == method
                            and aggregation.granularity == granularity):
                        keys[aggregation].add(
                            carbonara.SplitKey(timestamp,
                                               sampling=granularity))
                        break
            return keys
Beispiel #13
0
 def _list_split_keys(self, metric, aggregation, granularity, version=3):
     """Scan the metric's Redis hash for split keys of one aggregation.

     :raises storage.MetricDoesNotExist: if the metric hash is absent
     """
     hash_key = self._metric_key(metric)
     if not self._client.exists(hash_key):
         raise storage.MetricDoesNotExist(metric)
     match = self._aggregated_field_for_split(
         aggregation, '*', version, granularity)
     # Field names start with the split key followed by FIELD_SEP;
     # keep only that first part.
     return {
         field.decode("utf8").split(self.FIELD_SEP, 1)[0]
         for field, _value in self._client.hscan_iter(hash_key, match=match)
     }
Beispiel #14
0
 def _get_measures(self, metric, timestamp_key, aggregation, granularity):
     """Read one aggregated split file from the local filesystem.

     :raises storage.MetricDoesNotExist: if the metric directory is gone
     :raises storage.AggregationDoesNotExist: if only the file is missing
     """
     split_path = self._build_metric_path_for_split(metric, aggregation,
                                                    timestamp_key,
                                                    granularity)
     try:
         with open(split_path, 'rb') as split_file:
             return split_file.read()
     except IOError as e:
         if e.errno != errno.ENOENT:
             raise
         # Missing file: decide whether the whole metric is gone.
         if os.path.exists(self._build_metric_dir(metric)):
             raise storage.AggregationDoesNotExist(metric, aggregation)
         raise storage.MetricDoesNotExist(metric)
Beispiel #15
0
 def _get_metric_archive(self, metric, aggregation):
     """Retrieve data in the place we used to store TimeSerieArchive.

     :raises storage.MetricDoesNotExist: if the metric directory is gone
     :raises storage.AggregationDoesNotExist: if only the file is missing
     """
     archive_path = self._build_metric_archive_path(metric, aggregation)
     try:
         with open(archive_path, 'rb') as archive_file:
             return archive_file.read()
     except IOError as e:
         if e.errno != errno.ENOENT:
             raise
         # Missing file: decide whether the whole metric is gone.
         if os.path.exists(self._build_metric_dir(metric)):
             raise storage.AggregationDoesNotExist(metric, aggregation)
         raise storage.MetricDoesNotExist(metric)
Beispiel #16
0
    def _list_split_keys_for_metric(self, metric, aggregation, granularity):
        """List split keys recorded as xattrs on the container object.

        :raises storage.MetricDoesNotExist: if the container is missing
        """
        try:
            xattrs = self.ioctx.get_xattrs("gnocchi_%s_container" % metric.id)
        except rados.ObjectNotFound:
            raise storage.MetricDoesNotExist(metric)
        keys = []
        # xattr names follow gnocchi_<metric id>_<key>_<agg>_<granularity>.
        for name, _value in xattrs:
            _, _metric_id, split_key, agg, gran = name.split('_', 4)
            if agg == aggregation and float(gran) == granularity:
                keys.append(split_key)

        return keys
Beispiel #17
0
 def _list_split_keys_for_metric(self, metric, aggregation, granularity):
     """List on-disk split keys for one aggregation and granularity.

     :raises storage.MetricDoesNotExist: if the metric directory is gone
     """
     try:
         entries = os.listdir(self._build_metric_path(metric, aggregation))
     except OSError as e:
         if e.errno != errno.ENOENT:
             raise
         raise storage.MetricDoesNotExist(metric)
     wanted = str(granularity)
     keys = []
     # File names are "<key>_<granularity>".
     for entry in entries:
         split_key, _sep, file_granularity = entry.partition("_")
         if file_granularity == wanted:
             keys.append(split_key)
     return keys
Beispiel #18
0
 def _list_split_keys(self, metric, aggregation, granularity, version=3):
     """Return the set of on-disk split keys matching granularity/version.

     :raises storage.MetricDoesNotExist: if the metric directory is gone
     """
     try:
         entries = os.listdir(self._build_metric_path(metric, aggregation))
     except OSError as e:
         if e.errno != errno.ENOENT:
             raise
         raise storage.MetricDoesNotExist(metric)
     wanted = str(granularity)
     keys = set()
     # File names are "_"-separated: first field is the split key,
     # second the granularity.
     for entry in entries:
         fields = entry.split("_")
         if fields[1] == wanted and self._version_check(entry, version):
             keys.add(fields[0])
     return keys
Beispiel #19
0
 def _get_measures_unbatched(self, metric, key, aggregation, version=3):
     """Read a single aggregated split file from disk.

     :raises storage.MetricDoesNotExist: if the metric directory is gone
     :raises storage.AggregationDoesNotExist: if only the file is missing
     """
     split_path = self._build_metric_path_for_split(metric, aggregation,
                                                    key, version)
     try:
         with open(split_path, 'rb') as split_file:
             return split_file.read()
     except IOError as e:
         if e.errno != errno.ENOENT:
             raise
         # Missing file: decide whether the whole metric is gone.
         if not os.path.exists(self._build_metric_dir(metric)):
             raise storage.MetricDoesNotExist(metric)
         raise storage.AggregationDoesNotExist(
             metric, aggregation, key.sampling)
Beispiel #20
0
 def _get_measures(self, metric, keys, aggregation, version=3):
     """Fetch several aggregated splits from the metric's Redis hash.

     :param metric: the metric whose splits are read
     :param keys: iterable of split keys to fetch
     :param aggregation: aggregation method name
     :param version: storage format version used in field names
     :return: list of serialized payloads, in the same order as *keys*
     :raises storage.MetricDoesNotExist: if the metric hash is absent
     :raises storage.AggregationDoesNotExist: if a split is missing while
         the metric itself still exists
     """
     redis_key = self._metric_key(metric)
     fields = [
         self._aggregated_field_for_split(aggregation, key, version)
         for key in keys
     ]
     if not fields:
         # HMGET with an empty field list is a Redis protocol error
         # ("wrong number of arguments"); nothing was requested, so
         # return an empty result instead of letting the client fail.
         return []
     results = self._client.hmget(redis_key, fields)
     for key, data in six.moves.zip(keys, results):
         if data is None:
             # Distinguish "metric gone" from "aggregation missing".
             if not self._client.exists(redis_key):
                 raise storage.MetricDoesNotExist(metric)
             raise storage.AggregationDoesNotExist(metric, aggregation,
                                                   key.sampling)
     return results
Beispiel #21
0
 def _get_measures(self, metric, key, aggregation, version=3):
     """Download one aggregated split object from Swift.

     :raises storage.MetricDoesNotExist: if the container is gone too
     :raises storage.AggregationDoesNotExist: if only the object is missing
     """
     container = self._container_name(metric)
     try:
         headers, body = self.swift.get_object(
             container, self._object_name(key, aggregation, version))
     except swclient.ClientException as e:
         if e.http_status != 404:
             raise
         # Object missing: probe the container to see whether the
         # metric itself still exists.
         try:
             self.swift.head_container(container)
         except swclient.ClientException as head_exc:
             if head_exc.http_status == 404:
                 raise storage.MetricDoesNotExist(metric)
             raise
         raise storage.AggregationDoesNotExist(metric, aggregation)
     return body
Beispiel #22
0
 def _get_measures(self, metric, key, aggregation, version=3):
     """Download one aggregated split object from S3.

     :raises storage.MetricDoesNotExist: if listing the metric prefix
         fails with NoSuchKey as well
     :raises storage.AggregationDoesNotExist: if only the object is missing
     """
     try:
         obj = self.s3.get_object(
             Bucket=self._bucket_name,
             Key=self._prefix(metric) + self._object_name(
                 key, aggregation, version))
     except botocore.exceptions.ClientError as e:
         if e.response['Error'].get('Code') != 'NoSuchKey':
             raise
         # Object missing: probe the metric's prefix to see whether the
         # metric itself still exists.
         try:
             self.s3.list_objects_v2(
                 Bucket=self._bucket_name, Prefix=self._prefix(metric))
         except botocore.exceptions.ClientError as list_exc:
             if list_exc.response['Error'].get('Code') == 'NoSuchKey':
                 raise storage.MetricDoesNotExist(metric)
             raise
         raise storage.AggregationDoesNotExist(metric, aggregation)
     return obj['Body'].read()
Beispiel #23
0
 def _list_split_keys(self, metrics_and_aggregations, version=3):
     """List split keys for many metrics with a single Redis pipeline.

     For each metric the pipeline issues one EXISTS plus one
     list_split_keys script call per aggregation; the flat result list
     is then sliced back per metric.

     :param metrics_and_aggregations: mapping of metric -> iterable of
         aggregation objects
     :param version: storage format version used in field names
     :return: defaultdict mapping metric -> {aggregation: set of
         carbonara.SplitKey}
     :raises storage.MetricDoesNotExist: if any metric hash is absent
     """
     pipe = self._client.pipeline(transaction=False)
     # Keep an ordered list of metrics
     metrics = list(metrics_and_aggregations.keys())
     for metric in metrics:
         key = self._metric_key(metric)
         pipe.exists(key)
         aggregations = metrics_and_aggregations[metric]
         for aggregation in aggregations:
             self._scripts["list_split_keys"](
                 keys=[key],
                 args=[
                     self._aggregated_field_for_split(
                         aggregation.method, "*", version,
                         aggregation.granularity)
                 ],
                 client=pipe,
             )
     results = pipe.execute()
     keys = collections.defaultdict(dict)
     # `start` indexes into the flat pipeline results: each metric
     # contributes one EXISTS reply followed by one reply per
     # aggregation, in the same order they were queued above.
     start = 0
     for metric in metrics:
         metric_exists_p = results[start]
         if not metric_exists_p:
             raise storage.MetricDoesNotExist(metric)
         aggregations = metrics_and_aggregations[metric]
         number_of_aggregations = len(aggregations)
         keys_for_aggregations = results[start + 1:start + 1 +
                                         number_of_aggregations]
         start += 1 + number_of_aggregations  # 1 for metric_exists_p
         for aggregation, k in six.moves.zip(aggregations,
                                             keys_for_aggregations):
             if not k:
                 keys[metric][aggregation] = set()
                 continue
             timestamps, methods, granularities = list(zip(*k))
             timestamps = utils.to_timestamps(timestamps)
             granularities = map(utils.to_timespan, granularities)
             keys[metric][aggregation] = {
                 carbonara.SplitKey(timestamp, sampling=granularity)
                 for timestamp, granularity in six.moves.zip(
                     timestamps, granularities)
             }
     return keys
Beispiel #24
0
 def _list_split_keys_for_metric(self, metric, aggregation, granularity):
     """List split keys in the metric's Swift container.

     :raises storage.MetricDoesNotExist: if the container is missing
     """
     try:
         headers, objects = self.swift.get_container(
             self._container_name(metric), full_listing=True)
     except swclient.ClientException as e:
         if e.http_status != 404:
             raise
         raise storage.MetricDoesNotExist(metric)
     keys = []
     for obj in objects:
         try:
             split_key, agg, gran = obj['name'].split('_', 2)
         except ValueError:
             # Might be "none", or any other file. Be resilient.
             continue
         if agg == aggregation and float(gran) == granularity:
             keys.append(split_key)
     return keys
Beispiel #25
0
 def _list_split_keys(self, metric, aggregation, granularity, version=3):
     """Return the set of split keys found in the Swift container.

     :raises storage.MetricDoesNotExist: if the container is missing
     """
     try:
         headers, objects = self.swift.get_container(
             self._container_name(metric), full_listing=True)
     except swclient.ClientException as e:
         if e.http_status != 404:
             raise
         raise storage.MetricDoesNotExist(metric)
     keys = set()
     for obj in objects:
         try:
             fields = obj['name'].split('_')
             if (aggregation == fields[1]
                     and granularity == float(fields[2])
                     and self._version_check(obj['name'], version)):
                 keys.add(fields[0])
         except (ValueError, IndexError):
             # Might be "none", or any other file. Be resilient.
             continue
     return keys
Beispiel #26
0
 def _list_split_keys_unbatched(self, metric, aggregations, version=3):
     """Build, per aggregation, the set of SplitKeys stored in S3.

     :raises storage.MetricDoesNotExist: if a page comes back empty and
         the metric's marker object is gone
     """
     keys = {}
     for agg in aggregations:
         keys[agg] = set()
         prefix = self._prefix(metric) + '%s_%s' % (
             agg.method,
             utils.timespan_total_seconds(agg.granularity),
         )
         listing = {}
         # Page through the listing until S3 reports no truncation.
         while listing.get('IsTruncated', True):
             token = listing.get('NextContinuationToken')
             extra = ({'ContinuationToken': token}
                      if token is not None else {})
             listing = self.s3.list_objects_v2(
                 Bucket=self._bucket_name, Prefix=prefix, **extra)
             # If response is empty then check that the metric exists
             contents = listing.get('Contents', ())
             if not contents and not self._metric_exists_p(metric,
                                                           version):
                 raise storage.MetricDoesNotExist(metric)
             for obj in contents:
                 try:
                     if self._version_check(obj['Key'], version):
                         fields = obj['Key'].split('_')
                         keys[agg].add(
                             carbonara.SplitKey(
                                 utils.to_timestamp(fields[2]),
                                 sampling=agg.granularity))
                 except (ValueError, IndexError):
                     # Might be "none", or any other file. Be resilient.
                     continue
     return keys
Beispiel #27
0
 def _get_unaggregated_timeserie(self, metric, version=3):
     """Read the raw serie payload from the metric's Redis hash.

     :raises storage.MetricDoesNotExist: if the field is absent
     """
     payload = self._client.hget(self._metric_key(metric),
                                 self._unaggregated_field(version))
     if payload is None:
         raise storage.MetricDoesNotExist(metric)
     return payload
Beispiel #28
0
 def _get_unaggregated_timeserie(self, metric, version=3):
     """Read the raw serie object for *metric* from Ceph.

     :raises storage.MetricDoesNotExist: if the object is missing
     """
     serie_path = self._build_unaggregated_timeserie_path(metric, version)
     try:
         return self._get_object_content(serie_path)
     except rados.ObjectNotFound:
         raise storage.MetricDoesNotExist(metric)
Beispiel #29
0
 def _get_unaggregated_timeserie(self, metric):
     """Read the legacy "none" object holding the metric's raw serie.

     :raises storage.MetricDoesNotExist: if the object is missing
     """
     object_name = "gnocchi_%s_none" % metric.id
     try:
         return self._get_object_content(object_name)
     except rados.ObjectNotFound:
         raise storage.MetricDoesNotExist(metric)