The examples below, collected from the gnocchi project and its forks, show how the utils.timespan_total_seconds helper is used throughout the codebase.

Example #1
 def serialize(self):
     return {
         'timespan': None
         if self.timespan is None
         else float(utils.timespan_total_seconds(self.timespan)),
         'granularity': float(
             utils.timespan_total_seconds(self.granularity)),
         'points': self.points,
     }
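Every example on this page ultimately calls the same helper, whose definition is not shown here. A minimal sketch of what it is assumed to do, namely wrap datetime.timedelta.total_seconds():

    import datetime

    def timespan_total_seconds(timespan):
        # Assumed behaviour, not taken from this page: convert a
        # datetime.timedelta into a float number of seconds.
        return timespan.total_seconds()

    print(timespan_total_seconds(datetime.timedelta(minutes=5)))  # 300.0

Under that assumption, serialize() above renders granularity (and timespan, when set) as plain floats suitable for JSON.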
Example #2
 def jsonify(self):
     """Return a dict representation with human readable values."""
     return {
         'timespan': six.text_type(
             datetime.timedelta(
                 seconds=utils.timespan_total_seconds(
                     self.timespan)))
         if self.timespan is not None
         else None,
         'granularity': six.text_type(
             datetime.timedelta(
                 seconds=utils.timespan_total_seconds(
                     self.granularity))),
         'points': self.points,
     }
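jsonify() trades the raw floats of serialize() for human-readable strings by routing the seconds back through datetime.timedelta before stringifying; six.text_type is the Python 2/3 compatible spelling of str. A quick illustration with a made-up value:

    import datetime

    seconds = 86700.0  # hypothetical timespan
    print(seconds)                                   # 86700.0
    print(str(datetime.timedelta(seconds=seconds)))  # 1 day, 0:05:00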
Example #3
File: s3.py Project: zaitcev/gnocchi
 def _list_split_keys(self, metric, aggregation, granularity, version=3):
     bucket = self._bucket_name
     keys = set()
     response = {}
     while response.get('IsTruncated', True):
         if 'NextContinuationToken' in response:
             kwargs = {
                 'ContinuationToken': response['NextContinuationToken']
             }
         else:
             kwargs = {}
         try:
             response = self.s3.list_objects_v2(
                 Bucket=bucket,
                 Prefix=self._prefix(metric) + '%s_%s' % (
                     aggregation,
                     utils.timespan_total_seconds(granularity),
                 ),
                 **kwargs)
         except botocore.exceptions.ClientError as e:
             if e.response['Error'].get('Code') == "NoSuchKey":
                 raise storage.MetricDoesNotExist(metric)
             raise
         for f in response.get('Contents', ()):
             try:
                 meta = f['Key'].split('_')
                 if self._version_check(f['Key'], version):
                     keys.add(meta[2])
             except (ValueError, IndexError):
                 # Might be "none", or any other file. Be resilient.
                 continue
     return keys
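The split('_') indexing only makes sense given the object naming convention of the matching _object_name helper (Example #5 below): aggregation, granularity in seconds, split key, and an optional version suffix, joined by underscores. A hypothetical key, ignoring any bucket prefix:

    key = 'mean_300.0_1500000000.0_v3'  # made-up values
    meta = key.split('_')
    # meta[0] -> aggregation, meta[1] -> granularity, meta[2] -> split key
    print(meta[2])  # 1500000000.0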
Example #4
    def __init__(self, name, back_window, definition,
                 aggregation_methods=None):
        self.name = name
        self.back_window = back_window
        self.definition = []
        for d in definition:
            if isinstance(d, ArchivePolicyItem):
                self.definition.append(d)
            elif isinstance(d, dict):
                self.definition.append(ArchivePolicyItem(**d))
            elif len(d) == 2:
                self.definition.append(
                    ArchivePolicyItem(points=d[0], granularity=d[1]))
            else:
                raise ValueError(
                    "Unable to understand policy definition %s" % d)

        duplicate_granularities = [
            granularity
            for granularity, count in collections.Counter(
                d.granularity for d in self.definition).items()
            if count > 1
        ]
        if duplicate_granularities:
            raise ValueError(
                "More than one archive policy "
                "uses granularity `%s'"
                % utils.timespan_total_seconds(duplicate_granularities[0])
            )

        if aggregation_methods is None:
            self.aggregation_methods = self.DEFAULT_AGGREGATION_METHODS
        else:
            self.aggregation_methods = aggregation_methods
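The duplicate check leans on collections.Counter over the parsed definition; the same step in isolation, with made-up values:

    import collections

    granularities = [300.0, 60.0, 300.0]  # hypothetical policy definition
    duplicates = [g for g, count in
                  collections.Counter(granularities).items() if count > 1]
    print(duplicates)  # [300.0]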
Example #5
File: s3.py Project: luo-zn/gnocchi
 def _object_name(split_key, aggregation, version=3):
     name = '%s_%s_%s' % (
         aggregation,
         utils.timespan_total_seconds(split_key.sampling),
         split_key,
     )
     return name + '_v%s' % version if version else name
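Note the precedence on the last line: the conditional expression binds more loosely than + and %, so it reads (name + '_v%s' % version) if version else name. With hypothetical inputs (aggregation 'mean', 300-second sampling, split key '1500000000.0'), this s3 layout and the swift layout in the next example differ only in field order:

    # s3.py:    'mean_300.0_1500000000.0_v3'
    # swift.py: '1500000000.0_mean_300.0_v3'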
Example #6
File: swift.py Project: sum12/gnocchi
 def _object_name(split_key, aggregation, version=3):
     name = '%s_%s_%s' % (
         split_key,
         aggregation,
         utils.timespan_total_seconds(split_key.sampling),
     )
     return name + '_v%s' % version if version else name
Example #7
    def __init__(self, name, back_window, definition,
                 aggregation_methods=None):
        self.name = name
        self.back_window = back_window
        self.definition = []
        for d in definition:
            if isinstance(d, ArchivePolicyItem):
                self.definition.append(d)
            elif isinstance(d, dict):
                self.definition.append(ArchivePolicyItem(**d))
            elif len(d) == 2:
                self.definition.append(
                    ArchivePolicyItem(points=d[0], granularity=d[1]))
            else:
                raise ValueError(
                    "Unable to understand policy definition %s" % d)

        duplicate_granularities = [
            granularity
            for granularity, count in collections.Counter(
                d.granularity for d in self.definition).items()
            if count > 1
        ]
        if duplicate_granularities:
            raise ValueError(
                "More than one archive policy "
                "uses granularity `%s'"
                % utils.timespan_total_seconds(duplicate_granularities[0])
            )

        if aggregation_methods is None:
            self.aggregation_methods = self.DEFAULT_AGGREGATION_METHODS
        else:
            self.aggregation_methods = set(aggregation_methods)
Example #8
File: ceph.py Project: Asu4ni/gnocchi
    def _list_split_keys(self, metric, aggregation, granularity, version=3):
        with rados.ReadOpCtx() as op:
            omaps, ret = self.ioctx.get_omap_vals(op, "", "", -1)
            try:
                self.ioctx.operate_read_op(
                    op, self._build_unaggregated_timeserie_path(metric, 3))
            except rados.ObjectNotFound:
                raise storage.MetricDoesNotExist(metric)

            # NOTE(sileht): after reading the librados Python bindings, I'm
            # not sure that ret will have the correct value here:
            # get_omap_vals converts the C int to a Python int before
            # operate_read_op is called, and I don't know whether the int's
            # content is copied during that conversion or whether it remains
            # a pointer to the C int; I think it's copied...
            try:
                ceph.errno_to_exception(ret)
            except rados.ObjectNotFound:
                raise storage.MetricDoesNotExist(metric)

            keys = set()
            granularity = str(utils.timespan_total_seconds(granularity))
            for name, value in omaps:
                meta = name.split('_')
                if (aggregation == meta[3] and granularity == meta[4]
                        and self._version_check(name, version)):
                    keys.add(meta[2])
            return keys
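The indices meta[2], meta[3] and meta[4] line up with the names produced by the ceph _get_object_name helper (the final example on this page): 'gnocchi_<metric.id>_<key>_<aggregation>_<granularity>'. A hypothetical name split the same way:

    name = 'gnocchi_8f2b_1500000000.0_mean_300.0_v3'  # made-up values
    meta = name.split('_')
    # meta[2] -> split key, meta[3] -> aggregation, meta[4] -> granularity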
Example #9
File: file.py Project: luo-zn/gnocchi
 def _build_metric_path_for_split(self, metric, aggregation,
                                  key, version=3):
     path = os.path.join(
         self._build_metric_path(metric, aggregation),
         str(key)
         + "_"
         + str(utils.timespan_total_seconds(key.sampling)))
     return path + '_v%s' % version if version else path
Example #10
 def __init__(self, metric, method, granularity):
     self.metric = metric
     self.method = method
     self.granularity = granularity
     super(AggregationDoesNotExist, self).__init__(
         "Aggregation method '%s' at granularity '%s' "
         "for metric %s does not exist" %
         (method, utils.timespan_total_seconds(granularity), metric))
Example #11
 def jsonify(self):
     return {
         "cause": "Aggregation does not exist",
         "detail": {
             # FIXME(jd) Pecan does not use our JSON renderer for errors
             # So we need to convert this
             "granularity": utils.timespan_total_seconds(self.granularity),
             "aggregation_method": self.method,
         },
     }
Example #12
    def retrieve_data(storage_obj, metric, start, stop, window):
        """Retrieves finest-res data available from storage."""
        window_seconds = utils.timespan_total_seconds(window)
        try:
            min_grain = min(
                ap.granularity for ap in metric.archive_policy.definition
                if (window_seconds %
                    utils.timespan_total_seconds(ap.granularity) == 0))
        except ValueError:
            msg = ("No data available that is either full-res or "
                   "of a granularity that factors into the window size "
                   "you specified.")
            raise aggregates.CustomAggFailure(msg)

        data = list(
            zip(*storage_obj.get_measures(
                metric, start, stop, granularity=min_grain)))

        return (min_grain,
                pandas.Series(data[2], data[0]) if data else pandas.Series())
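The modulo filter keeps only granularities that divide the requested window evenly, and min() then picks the finest of them. The same selection in isolation, with made-up numbers:

    window_seconds = 3600.0
    granularities = [60.0, 300.0, 7.0]  # hypothetical archive policy
    usable = [g for g in granularities if window_seconds % g == 0]
    print(min(usable))  # 60.0, the finest granularity that fits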
Example #13
File: file.py Project: yjLiInSky/gnocchi
 def _list_split_keys(self, metric, aggregation, granularity, version=3):
     try:
         files = os.listdir(self._build_metric_path(metric, aggregation))
     except OSError as e:
         if e.errno == errno.ENOENT:
             raise storage.MetricDoesNotExist(metric)
         raise
     keys = set()
     granularity = str(utils.timespan_total_seconds(granularity))
     for f in files:
         meta = f.split("_")
         if meta[1] == granularity and self._version_check(f, version):
             keys.add(meta[0])
     return keys
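Here meta[0] (the split key) and meta[1] (the granularity) mirror the '<key>_<granularity>[_v<version>]' file names built by _build_metric_path_for_split in Example #9.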
Example #14
File: swift.py Project: sum12/gnocchi
 def _list_split_keys(self, metric, aggregation, granularity, version=3):
     container = self._container_name(metric)
     try:
         headers, files = self.swift.get_container(container,
                                                   full_listing=True)
     except swclient.ClientException as e:
         if e.http_status == 404:
             raise storage.MetricDoesNotExist(metric)
         raise
     keys = set()
     granularity = str(utils.timespan_total_seconds(granularity))
     for f in files:
         try:
             meta = f['name'].split('_')
             if (aggregation == meta[1] and granularity == meta[2]
                     and self._version_check(f['name'], version)):
                 keys.add(meta[0])
         except (ValueError, IndexError):
             # Might be "none", or any other file. Be resilient.
             continue
     return keys
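Likewise, meta[1] (aggregation) and meta[2] (granularity) follow the '<key>_<aggregation>_<granularity>' names built by the swift _object_name helper in Example #6.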
Example #15
File: s3.py Project: luo-zn/gnocchi
 def _list_split_keys_unbatched(self, metric, aggregations, version=3):
     bucket = self._bucket_name
     keys = {}
     for aggregation in aggregations:
         keys[aggregation] = set()
         response = {}
         while response.get('IsTruncated', True):
             if 'NextContinuationToken' in response:
                 kwargs = {
                     'ContinuationToken': response['NextContinuationToken']
                 }
             else:
                 kwargs = {}
             response = self.s3.list_objects_v2(
                 Bucket=bucket,
                 Prefix=self._prefix(metric) + '%s_%s' % (
                     aggregation.method,
                     utils.timespan_total_seconds(
                         aggregation.granularity),
                 ),
                 **kwargs)
             # If response is empty then check that the metric exists
             contents = response.get('Contents', ())
             if not contents and not self._metric_exists_p(metric, version):
                 raise storage.MetricDoesNotExist(metric)
             for f in contents:
                 try:
                      if self._version_check(f['Key'], version):
                         meta = f['Key'].split('_')
                         keys[aggregation].add(carbonara.SplitKey(
                             utils.to_timestamp(meta[2]),
                             sampling=aggregation.granularity))
                 except (ValueError, IndexError):
                     # Might be "none", or any other file. Be resilient.
                     continue
     return keys
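Unlike the older _list_split_keys in Example #3, this unbatched variant receives Aggregation objects carrying .method and .granularity attributes rather than separate strings, and it returns carbonara.SplitKey instances grouped per aggregation instead of a flat set of key strings.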
Example #16
 def __init__(self, metric, granularity):
     self.metric = metric
     self.granularity = granularity
     super(GranularityDoesNotExist, self).__init__(
         "Granularity '%s' for metric %s does not exist" %
         (utils.timespan_total_seconds(granularity), metric))
Example #17
File: redis.py Project: luo-zn/gnocchi
 def _aggregated_field_for_split(cls, aggregation, key, version=3,
                                 granularity=None):
     path = cls.FIELD_SEP.join([
         str(key), aggregation,
         str(utils.timespan_total_seconds(granularity or key.sampling))])
     return path + '_v%s' % version if version else path
Example #18
def get_measures(storage,
                 metrics_and_aggregations,
                 operations,
                 from_timestamp=None,
                 to_timestamp=None,
                 granularity=None,
                 needed_overlap=100.0,
                 fill=None,
                 ref_identifier="id"):
    """Get aggregated measures of multiple entities.

    :param storage: The storage driver.
    :param metrics_and_aggregations: List of (metric, aggregation method)
                                     tuples to aggregate.
    :param from_timestamp: The timestamp to get the measures from.
    :param to_timestamp: The timestamp to get the measures to.
    :param granularity: The granularity to retrieve.
    :param fill: The value to use to fill in missing data in series.
    """

    references_with_missing_granularity = []
    for (metric, aggregation) in metrics_and_aggregations:
        if aggregation not in metric.archive_policy.aggregation_methods:
            raise gnocchi_storage.AggregationDoesNotExist(metric, aggregation)
        if granularity is not None:
            for d in metric.archive_policy.definition:
                if d.granularity == granularity:
                    break
            else:
                references_with_missing_granularity.append(
                    (getattr(metric, ref_identifier), aggregation))

    if references_with_missing_granularity:
        raise exceptions.UnAggregableTimeseries(
            references_with_missing_granularity,
            "granularity '%d' is missing" %
            utils.timespan_total_seconds(granularity))

    if granularity is None:
        granularities = (definition.granularity
                         for (metric, aggregation) in metrics_and_aggregations
                         for definition in metric.archive_policy.definition)
        granularities_in_common = [
            g for g, occurrence in six.iteritems(
                collections.Counter(granularities))
            if occurrence == len(metrics_and_aggregations)
        ]

        if not granularities_in_common:
            raise exceptions.UnAggregableTimeseries(
                list((str(getattr(m, ref_identifier)), a)
                     for (m, a) in metrics_and_aggregations),
                'No granularity match')
    else:
        granularities_in_common = [granularity]

    tss = utils.parallel_map(
        _get_measures_timeserie,
        [(storage, metric, aggregation, ref_identifier, g, from_timestamp,
          to_timestamp) for (metric, aggregation) in metrics_and_aggregations
         for g in granularities_in_common])

    return aggregated(tss, operations, from_timestamp, to_timestamp,
                      needed_overlap, fill)
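Counting occurrences works as a set intersection here only because an archive policy cannot define the same granularity twice (enforced in Examples #4 and #7): each granularity is counted at most once per metric, so a count equal to len(metrics_and_aggregations) means every metric defines it. In isolation, with made-up values:

    import collections

    # Hypothetical: the flattened granularities of three metrics' policies
    granularities = [60.0, 300.0,   # metric A
                     60.0, 3600.0,  # metric B
                     60.0, 300.0]   # metric C
    common = [g for g, n in collections.Counter(granularities).items()
              if n == 3]
    print(common)  # [60.0]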
Example #19
File: ceph.py Project: luo-zn/gnocchi
 def _get_object_name(metric, key, aggregation, version=3):
     name = str("gnocchi_%s_%s_%s_%s" % (
         metric.id, key, aggregation,
         utils.timespan_total_seconds(key.sampling)),
     )
     return name + '_v%s' % version if version else name