Code example #1
File: file.py  Project: luo-zn/gnocchi
    def _list_split_keys_unbatched(self, metric, aggregations, version=3):
        keys = collections.defaultdict(set)
        for method, grouped_aggregations in itertools.groupby(
                sorted(aggregations, key=ATTRGETTER_METHOD),
                ATTRGETTER_METHOD):
            try:
                files = os.listdir(
                    self._build_metric_path(metric, method))
            except OSError as e:
                if e.errno == errno.ENOENT:
                    raise storage.MetricDoesNotExist(metric)
                raise
            raw_keys = list(map(
                lambda k: k.split("_"),
                filter(
                    lambda f: self._version_check(f, version),
                    files)))
            if not raw_keys:
                continue
            zipped = list(zip(*raw_keys))
            k_timestamps = utils.to_timestamps(zipped[0])
            k_granularities = list(map(utils.to_timespan, zipped[1]))
            grouped_aggregations = list(grouped_aggregations)
            for timestamp, granularity in six.moves.zip(
                    k_timestamps, k_granularities):
                for agg in grouped_aggregations:
                    if granularity == agg.granularity:
                        keys[agg].add(carbonara.SplitKey(
                            timestamp,
                            sampling=granularity))
                        break
        return keys
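
The file-driver parsing above hinges on one trick: each split key is stored as a `<timestamp>_<granularity>` filename inside a per-method directory, and `zip(*raw_keys)` transposes the list of split filenames into one column per field. A minimal standalone sketch of that pattern, with hypothetical filenames (the real code then converts the columns with `utils.to_timestamps` and `utils.to_timespan`):

    # Hypothetical split-key filenames: <timestamp>_<granularity>.
    files = ["1500000000.0_300.0", "1500000300.0_300.0", "1500000000.0_3600.0"]

    raw_keys = [f.split("_") for f in files]
    # Transpose rows into columns: one tuple of timestamps, one of granularities.
    timestamps, granularities = zip(*raw_keys)
    print(timestamps)     # ('1500000000.0', '1500000300.0', '1500000000.0')
    print(granularities)  # ('300.0', '300.0', '3600.0')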
Code example #2
    def _list_split_keys(self, metric, aggregations, version=3):
        key = self._metric_key(metric)
        pipe = self._client.pipeline(transaction=False)
        pipe.exists(key)
        for aggregation in aggregations:
            self._scripts["list_split_keys"](
                keys=[key], args=[self._aggregated_field_for_split(
                    aggregation.method, "*",
                    version, aggregation.granularity)],
                client=pipe,
            )
        results = pipe.execute()
        metric_exists_p = results.pop(0)
        if not metric_exists_p:
            raise storage.MetricDoesNotExist(metric)
        keys = {}
        for aggregation, k in six.moves.zip(aggregations, results):
            if not k:
                keys[aggregation] = set()
                continue
            timestamps, methods, granularities = list(zip(*k))
            timestamps = utils.to_timestamps(timestamps)
            granularities = map(utils.to_timespan, granularities)
            keys[aggregation] = {
                carbonara.SplitKey(timestamp,
                                   sampling=granularity)
                for timestamp, granularity
                in six.moves.zip(timestamps, granularities)
            }
        return keys
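
A Redis pipeline returns one reply per queued command, in order, so the `EXISTS` reply comes first and the remaining replies line up one-to-one with `aggregations`; that is why `results.pop(0)` yields the existence flag and the leftover list zips cleanly. A minimal sketch of that alignment, using hypothetical reply values:

    # Hypothetical pipeline replies: the EXISTS result, then one list of
    # matching fields per aggregation (empty when nothing matched).
    results = [1, [("1500000000.0", "mean", "300.0")], []]
    aggregations = ["mean@300s", "max@3600s"]  # stand-ins for Aggregation objects

    metric_exists_p = results.pop(0)
    assert metric_exists_p  # the metric key exists

    for aggregation, k in zip(aggregations, results):
        print(aggregation, "->", k if k else set())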
Code example #3
File: file.py  Project: thomasgoirand/gnocchi
    def _list_split_keys(self, metric, aggregations, version=3):
        keys = collections.defaultdict(set)
        for method, grouped_aggregations in itertools.groupby(
                sorted(aggregations, key=ATTRGETTER_METHOD),
                ATTRGETTER_METHOD):
            try:
                files = os.listdir(self._build_metric_path(metric, method))
            except OSError as e:
                if e.errno == errno.ENOENT:
                    raise storage.MetricDoesNotExist(metric)
                raise
            raw_keys = list(
                map(lambda k: k.split("_"),
                    filter(lambda f: self._version_check(f, version), files)))
            if not raw_keys:
                continue
            zipped = list(zip(*raw_keys))
            k_timestamps = utils.to_timestamps(zipped[0])
            k_granularities = list(map(utils.to_timespan, zipped[1]))
            grouped_aggregations = list(grouped_aggregations)
            for timestamp, granularity in six.moves.zip(
                    k_timestamps, k_granularities):
                for agg in grouped_aggregations:
                    if granularity == agg.granularity:
                        keys[agg].add(
                            carbonara.SplitKey(timestamp,
                                               sampling=granularity))
                        break
        return keys
Code example #4
    def _list_split_keys(self, metric, aggregations, version=3):
        container = self._container_name(metric)
        try:
            headers, files = self.swift.get_container(container,
                                                      full_listing=True)
        except swclient.ClientException as e:
            if e.http_status == 404:
                raise storage.MetricDoesNotExist(metric)
            raise

        raw_keys = list(
            map(lambda k: k.split("_"),
                (f['name']
                 for f in files if self._version_check(f['name'], version)
                 and not f['name'].startswith('none'))))
        keys = collections.defaultdict(set)
        if not raw_keys:
            return keys
        zipped = list(zip(*raw_keys))
        k_timestamps = utils.to_timestamps(zipped[0])
        k_methods = zipped[1]
        k_granularities = list(map(utils.to_timespan, zipped[2]))

        for timestamp, method, granularity in six.moves.zip(
                k_timestamps, k_methods, k_granularities):
            for aggregation in aggregations:
                if (aggregation.method == method
                        and aggregation.granularity == granularity):
                    keys[aggregation].add(
                        carbonara.SplitKey(timestamp, sampling=granularity))
                    break
        return keys
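
Unlike the file driver, which encodes the method in the directory name, the Swift driver lists a single container per metric, so the object names carry all three fields and the split yields `timestamp`, `method`, and `granularity` in that order. A toy sketch of that indexing, assuming a hypothetical object name (the trailing version marker is what `_version_check` inspects, and `none`-prefixed unaggregated objects are skipped):

    # Hypothetical object name: <timestamp>_<method>_<granularity>_<version>.
    name = "1500000000.0_mean_300.0_v3"

    timestamp, method, granularity = name.split("_")[:3]
    print(timestamp, method, granularity)  # 1500000000.0 mean 300.0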
Code example #5
File: swift.py  Project: luo-zn/gnocchi
    def _list_split_keys_unbatched(self, metric, aggregations, version=3):
        container = self._container_name(metric)
        try:
            headers, files = self.swift.get_container(
                container, full_listing=True)
        except swclient.ClientException as e:
            if e.http_status == 404:
                raise storage.MetricDoesNotExist(metric)
            raise

        raw_keys = list(map(
            lambda k: k.split("_"),
            (f['name'] for f in files
             if self._version_check(f['name'], version)
             and not f['name'].startswith('none'))))
        keys = collections.defaultdict(set)
        if not raw_keys:
            return keys
        zipped = list(zip(*raw_keys))
        k_timestamps = utils.to_timestamps(zipped[0])
        k_methods = zipped[1]
        k_granularities = list(map(utils.to_timespan, zipped[2]))

        for timestamp, method, granularity in six.moves.zip(
                k_timestamps, k_methods, k_granularities):
            for aggregation in aggregations:
                if (aggregation.method == method
                   and aggregation.granularity == granularity):
                    keys[aggregation].add(carbonara.SplitKey(
                        timestamp,
                        sampling=granularity))
                    break
        return keys
Code example #6
File: redis.py  Project: yungjinzhou/gnocchi
    def _list_split_keys(self, metrics_and_aggregations, version=3):
        pipe = self._client.pipeline(transaction=False)
        # Keep an ordered list of metrics
        metrics = list(metrics_and_aggregations.keys())
        for metric in metrics:
            key = self._metric_key(metric)
            pipe.exists(key)
            aggregations = metrics_and_aggregations[metric]
            for aggregation in aggregations:
                self._scripts["list_split_keys"](
                    keys=[key],
                    args=[
                        self._aggregated_field_for_split(
                            aggregation.method, "*", version,
                            aggregation.granularity)
                    ],
                    client=pipe,
                )
        results = pipe.execute()
        keys = collections.defaultdict(dict)
        start = 0
        for metric in metrics:
            metric_exists_p = results[start]
            if not metric_exists_p:
                raise storage.MetricDoesNotExist(metric)
            aggregations = metrics_and_aggregations[metric]
            number_of_aggregations = len(aggregations)
            keys_for_aggregations = results[start + 1:start + 1 +
                                            number_of_aggregations]
            start += 1 + number_of_aggregations  # 1 for metric_exists_p
            for aggregation, k in six.moves.zip(aggregations,
                                                keys_for_aggregations):
                if not k:
                    keys[metric][aggregation] = set()
                    continue
                timestamps, methods, granularities = list(zip(*k))
                timestamps = utils.to_timestamps(timestamps)
                granularities = map(utils.to_timespan, granularities)
                keys[metric][aggregation] = {
                    carbonara.SplitKey(timestamp, sampling=granularity)
                    for timestamp, granularity in six.moves.zip(
                        timestamps, granularities)
                }
        return keys
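
Because every metric shares one pipeline in this batched variant, the replies come back as a single flat list, and the `start` pointer walks it in chunks of `1 + len(aggregations)` entries per metric: one `EXISTS` flag, then one reply per aggregation. A standalone sketch of that bookkeeping with hypothetical replies for two metrics:

    # Flat reply list for two metrics: (exists, one reply per aggregation, ...).
    metrics_and_aggregations = {"m1": ["mean", "max"], "m2": ["sum"]}
    results = [1, ["key-a"], [], 1, ["key-b"]]

    start = 0
    for metric, aggregations in metrics_and_aggregations.items():
        exists = results[start]
        n = len(aggregations)
        replies = results[start + 1:start + 1 + n]
        start += 1 + n  # skip the exists flag plus this metric's replies
        print(metric, exists, replies)
    # m1 1 [['key-a'], []]
    # m2 1 [['key-b']]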
Code example #7
File: ceph.py  Project: thomasgoirand/gnocchi
    def _list_split_keys(self, metric, aggregations, version=3):
        with rados.ReadOpCtx() as op:
            omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1)
            try:
                self.ioctx.operate_read_op(
                    op, self._build_unaggregated_timeserie_path(metric, 3))
            except rados.ObjectNotFound:
                raise storage.MetricDoesNotExist(metric)

            # NOTE(sileht): after reading the libradospy, I'm
            # not sure that ret will have the correct value
            # get_omap_vals transforms the C int to python int
            # before operate_read_op is called, I dunno if the int
            # content is copied during this transformation or if
            # this is a pointer to the C int, I think it's copied...
            try:
                ceph.errno_to_exception(ret)
            except rados.ObjectNotFound:
                raise storage.MetricDoesNotExist(metric)

            raw_keys = [
                name.split("_") for name, value in omaps
                if self._version_check(name, version)
            ]
            keys = collections.defaultdict(set)
            if not raw_keys:
                return keys
            zipped = list(zip(*raw_keys))
            k_timestamps = utils.to_timestamps(zipped[2])
            k_methods = zipped[3]
            k_granularities = list(map(utils.to_timespan, zipped[4]))

            for timestamp, method, granularity in six.moves.zip(
                    k_timestamps, k_methods, k_granularities):
                for aggregation in aggregations:
                    if (aggregation.method == method
                            and aggregation.granularity == granularity):
                        keys[aggregation].add(
                            carbonara.SplitKey(timestamp,
                                               sampling=granularity))
                        break
            return keys
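
The Ceph driver stores split keys as omap entries on the metric's unaggregated object, and the entry names carry two leading prefix fields before the interesting ones, which is why the code above reads indices 2, 3, and 4 of the split name. A toy sketch, assuming a hypothetical name layout inferred from those indices:

    # Hypothetical omap entry name with two prefix fields before the
    # timestamp/method/granularity triple used by the code above.
    name = "gnocchi_42_1500000000.0_mean_300.0_v3"

    parts = name.split("_")
    timestamp, method, granularity = parts[2], parts[3], parts[4]
    print(timestamp, method, granularity)  # 1500000000.0 mean 300.0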
Code example #8
File: ceph.py  Project: luo-zn/gnocchi
    def _list_split_keys_unbatched(self, metric, aggregations, version=3):
        with rados.ReadOpCtx() as op:
            omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1)
            try:
                self.ioctx.operate_read_op(
                    op, self._build_unaggregated_timeserie_path(metric, 3))
            except rados.ObjectNotFound:
                raise storage.MetricDoesNotExist(metric)

            # NOTE(sileht): after reading the libradospy, I'm
            # not sure that ret will have the correct value
            # get_omap_vals transforms the C int to python int
            # before operate_read_op is called, I dunno if the int
            # content is copied during this transformation or if
            # this is a pointer to the C int, I think it's copied...
            try:
                ceph.errno_to_exception(ret)
            except rados.ObjectNotFound:
                raise storage.MetricDoesNotExist(metric)

            raw_keys = [name.split("_")
                        for name, value in omaps
                        if self._version_check(name, version)]
            keys = collections.defaultdict(set)
            if not raw_keys:
                return keys
            zipped = list(zip(*raw_keys))
            k_timestamps = utils.to_timestamps(zipped[2])
            k_methods = zipped[3]
            k_granularities = list(map(utils.to_timespan, zipped[4]))

            for timestamp, method, granularity in six.moves.zip(
                    k_timestamps, k_methods, k_granularities):
                for aggregation in aggregations:
                    if (aggregation.method == method
                       and aggregation.granularity == granularity):
                        keys[aggregation].add(carbonara.SplitKey(
                            timestamp,
                            sampling=granularity))
                        break
            return keys
Code example #9
File: redis.py  Project: luo-zn/gnocchi
    def _list_split_keys(self, metrics_and_aggregations, version=3):
        pipe = self._client.pipeline(transaction=False)
        # Keep an ordered list of metrics
        metrics = list(metrics_and_aggregations.keys())
        for metric in metrics:
            key = self._metric_key(metric)
            pipe.exists(key)
            aggregations = metrics_and_aggregations[metric]
            for aggregation in aggregations:
                self._scripts["list_split_keys"](
                    keys=[key], args=[self._aggregated_field_for_split(
                        aggregation.method, "*",
                        version, aggregation.granularity)],
                    client=pipe,
                )
        results = pipe.execute()
        keys = collections.defaultdict(dict)
        start = 0
        for metric in metrics:
            metric_exists_p = results[start]
            if not metric_exists_p:
                raise storage.MetricDoesNotExist(metric)
            aggregations = metrics_and_aggregations[metric]
            number_of_aggregations = len(aggregations)
            # Slice this metric's replies out of the flat result list:
            # entry `start` is the EXISTS flag, followed by one reply
            # per aggregation.
            keys_for_aggregations = results[start + 1:start + 1 +
                                            number_of_aggregations]
            start += 1 + number_of_aggregations
            for aggregation, k in six.moves.zip(
                    aggregations, keys_for_aggregations):
                if not k:
                    keys[metric][aggregation] = set()
                    continue
                timestamps, methods, granularities = list(zip(*k))
                timestamps = utils.to_timestamps(timestamps)
                granularities = map(utils.to_timespan, granularities)
                keys[metric][aggregation] = {
                    carbonara.SplitKey(timestamp,
                                       sampling=granularity)
                    for timestamp, granularity
                    in six.moves.zip(timestamps, granularities)
                }
        return keys
Code example #10
File: test_utils.py  Project: yungjinzhou/gnocchi
    def test_to_timestamp_empty(self):
        self.assertEqual([], utils.to_timestamps([]))