Example #1
    def _get_aggregate_functions(aggregate):
        if not aggregate:
            return [f for f in STANDARD_AGGREGATES.values()]

        functions = []

        for a in aggregate:
            if a.func in STANDARD_AGGREGATES:
                functions.append(STANDARD_AGGREGATES[a.func])
            elif a.func in UNPARAMETERIZED_AGGREGATES:
                functions.append(UNPARAMETERIZED_AGGREGATES[a.func])
            elif a.func in PARAMETERIZED_AGGREGATES['compute']:
                validate = PARAMETERIZED_AGGREGATES['validate'].get(a.func)
                if not (validate and validate(a.param)):
                    raise storage.StorageBadAggregate('Bad aggregate: %s.%s' %
                                                      (a.func, a.param))
                compute = PARAMETERIZED_AGGREGATES['compute'][a.func]
                functions.append(compute(a.param))
            else:
                # NOTE(zqfan): We already have checked at API level, but
                # still leave it here in case of directly storage calls.
                msg = _('Invalid aggregation function: %s') % a.func
                raise storage.StorageBadAggregate(msg)

        return functions
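
The lookup tables used above are defined elsewhere in the driver and are not part of this snippet. Below is a minimal sketch of one plausible shape for them, showing how the helper would behave; the map contents, the Aggregate namedtuple and the resulting return value are illustrative assumptions, not Ceilometer's actual definitions.

    import collections

    # Hypothetical aggregate descriptor: the code above only reads the
    # .func and .param attributes of each item.
    Aggregate = collections.namedtuple('Aggregate', ['func', 'param'])

    # Assumed shapes for the three lookup tables (illustration only).
    STANDARD_AGGREGATES = {'avg': 'AVG', 'sum': 'SUM', 'count': 'COUNT'}
    UNPARAMETERIZED_AGGREGATES = {'stddev': 'STDDEV'}
    PARAMETERIZED_AGGREGATES = {
        'compute': {'cardinality': lambda param: 'CARDINALITY(%s)' % param},
        'validate': {'cardinality': lambda param: param in ('user_id',
                                                            'resource_id')},
    }

    # With these maps,
    # _get_aggregate_functions([Aggregate('avg', None),
    #                           Aggregate('cardinality', 'user_id')])
    # returns ['AVG', 'CARDINALITY(user_id)'].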
Example #2
    def _aggregate_param(self, fragment_key, aggregate):
        fragment_map = self.STANDARD_AGGREGATES[fragment_key]

        if not aggregate:
            return ''.join([f for f in fragment_map.values()])

        fragments = ''

        for a in aggregate:
            if a.func in self.STANDARD_AGGREGATES[fragment_key]:
                fragment_map = self.STANDARD_AGGREGATES[fragment_key]
                fragments += fragment_map[a.func]
            elif a.func in self.UNPARAMETERIZED_AGGREGATES[fragment_key]:
                fragment_map = self.UNPARAMETERIZED_AGGREGATES[fragment_key]
                fragments += fragment_map[a.func]
            elif a.func in self.PARAMETERIZED_AGGREGATES[fragment_key]:
                fragment_map = self.PARAMETERIZED_AGGREGATES[fragment_key]
                v = self.PARAMETERIZED_AGGREGATES['validate'].get(a.func)
                if not (v and v(a.param)):
                    raise storage.StorageBadAggregate('Bad aggregate: %s.%s'
                                                      % (a.func, a.param))
                params = dict(aggregate_param=a.param)
                fragments += (fragment_map[a.func] % params)
            else:
                raise NotImplementedError('Selectable aggregate function %s'
                                          ' is not supported' % a.func)

        return fragments
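
The STANDARD_AGGREGATES, UNPARAMETERIZED_AGGREGATES and PARAMETERIZED_AGGREGATES attributes referenced here are expected to be nested maps: the outer key is the fragment slot passed in as fragment_key, the inner key the aggregate name, and the value a SQL fragment, with a separate 'validate' table for parameterized functions. The concrete layout and fragment strings below are assumptions for illustration, not the driver's real tables.

    # Assumed layout (illustration only): fragment slot -> aggregate name ->
    # SQL fragment appended to the query for that slot.
    STANDARD_AGGREGATES = {
        'select': {'avg': ', avg(volume) as avg',
                   'sum': ', sum(volume) as sum'},
    }
    UNPARAMETERIZED_AGGREGATES = {
        'select': {'stddev': ', stddev_pop(volume) as stddev'},
    }
    PARAMETERIZED_AGGREGATES = {
        'select': {'cardinality':
                   ', count(distinct %(aggregate_param)s) as cardinality'},
        'validate': {'cardinality':
                     lambda p: p in ('user_id', 'project_id', 'resource_id')},
    }

    # _aggregate_param('select', aggregate) then returns the concatenated
    # fragments, with %(aggregate_param)s filled in from each aggregate's
    # param for the parameterized functions.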
Example #3
    def _compile_aggregate_stages(self, aggregate, group_stage,
                                  project_stage):
        if not aggregate:
            for aggregation in Connection.STANDARD_AGGREGATES.values():
                group_stage.update(
                    aggregation.group(version_array=self.version))
                project_stage.update(
                    aggregation.project(version_array=self.version))
        else:
            for description in aggregate:
                aggregation = Connection.AGGREGATES.get(description.func)
                if aggregation:
                    if not aggregation.validate(description.param):
                        raise storage.StorageBadAggregate(
                            'Bad aggregate: %s.%s' %
                            (description.func, description.param))
                    group_stage.update(
                        aggregation.group(description.param,
                                          version_array=self.version))
                    project_stage.update(
                        aggregation.project(description.param,
                                            version_array=self.version))
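
Connection.STANDARD_AGGREGATES and Connection.AGGREGATES are expected to map aggregate names to small helper objects exposing validate(), group() and project(), whose return values are merged into the $group and $project stages. A minimal illustrative stand-in for one such helper, assuming the stored samples expose a counter_volume field, might look like this; it is a sketch, not the driver's real implementation.

    class AvgAggregation(object):
        """Illustrative stand-in for one entry in Connection.AGGREGATES."""

        @staticmethod
        def validate(param):
            # avg takes no parameter, so only an empty param is accepted.
            return not param

        @staticmethod
        def group(param=None, version_array=None):
            # Fragment merged into the $group stage.
            return {'avg': {'$avg': '$counter_volume'}}

        @staticmethod
        def project(param=None, version_array=None):
            # Fragment merged into the $project stage.
            return {'avg': '$avg'}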
Example #4
    def _get_aggregate_functions(aggregate):
        if not aggregate:
            return [f for f in STANDARD_AGGREGATES.values()]

        functions = []

        for a in aggregate:
            if a.func in STANDARD_AGGREGATES:
                functions.append(STANDARD_AGGREGATES[a.func])
            elif a.func in UNPARAMETERIZED_AGGREGATES:
                functions.append(UNPARAMETERIZED_AGGREGATES[a.func])
            elif a.func in PARAMETERIZED_AGGREGATES['compute']:
                validate = PARAMETERIZED_AGGREGATES['validate'].get(a.func)
                if not (validate and validate(a.param)):
                    raise storage.StorageBadAggregate('Bad aggregate: %s.%s' %
                                                      (a.func, a.param))
                compute = PARAMETERIZED_AGGREGATES['compute'][a.func]
                functions.append(compute(a.param))
            else:
                raise NotImplementedError('Selectable aggregate function %s'
                                          ' is not supported' % a.func)

        return functions
Example #5
    def get_meter_statistics(self,
                             sample_filter,
                             period=None,
                             groupby=None,
                             aggregate=None):
        """Return an iterable of models.Statistics instance.

        Items are containing meter statistics described by the query
        parameters. The filter must have a meter value set.
        """
        # NOTE(zqfan): We already have checked at API level, but
        # still leave it here in case of directly storage calls.
        if aggregate:
            for a in aggregate:
                if a.func not in self.AGGREGATES:
                    msg = _('Invalid aggregation function: %s') % a.func
                    raise storage.StorageBadAggregate(msg)

        if (groupby and set(groupby) - set([
                'user_id', 'project_id', 'resource_id', 'source',
                'resource_metadata.instance_type'
        ])):
            raise ceilometer.NotImplementedError(
                "Unable to group by these fields")
        q = pymongo_utils.make_query_from_filter(sample_filter)

        group_stage = {}
        project_stage = {
            "unit": "$_id.unit",
            "name": "$_id.name",
            "first_timestamp": "$first_timestamp",
            "last_timestamp": "$last_timestamp",
            "period_start": "$_id.period_start",
        }

        # Add timestamps to $group stage
        group_stage.update({
            "first_timestamp": {
                "$min": "$timestamp"
            },
            "last_timestamp": {
                "$max": "$timestamp"
            }
        })

        # Define an _id field for grouped documents
        unique_group_field = {"name": "$counter_name", "unit": "$counter_unit"}

        # Define a first timestamp for periods
        if sample_filter.start_timestamp:
            first_timestamp = sample_filter.start_timestamp
        else:
            first_timestamp_cursor = self.db.meter.find(limit=1,
                                                        sort=[
                                                            ('timestamp',
                                                             pymongo.ASCENDING)
                                                        ])
            if first_timestamp_cursor.count():
                first_timestamp = first_timestamp_cursor[0]['timestamp']
            else:
                first_timestamp = utils.EPOCH_TIME

        # Add a period_start field to the unique identifier of grouped
        # documents
        if period:
            period_dict = self._make_period_dict(period, first_timestamp)
            unique_group_field.update(period_dict)

        # Add the groupby fields to the unique identifier of grouped documents
        if groupby:
            unique_group_field.update(
                dict((field.replace(".", "/"), "$%s" % field)
                     for field in groupby))

        group_stage.update({"_id": unique_group_field})

        self._compile_aggregate_stages(aggregate, group_stage, project_stage)

        # Aggregation pipeline stages. They run in order, each stage
        # consuming the documents produced by the previous one.
        aggregation_query = [{
            '$match': q
        }, {
            "$sort": {
                "timestamp": 1
            }
        }, {
            "$group": group_stage
        }, {
            "$sort": {
                "_id.period_start": 1
            }
        }, {
            "$project": project_stage
        }]

        # results is a dict in pymongo<=2.6.3 and a CommandCursor in >=3.0
        results = self.db.meter.aggregate(aggregation_query,
                                          **self._make_aggregation_params())
        return [
            self._stats_result_to_model(point, groupby, aggregate, period,
                                        first_timestamp)
            for point in self._get_results(results)
        ]
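
For reference, the pipeline assembled by this method always has the same five-stage skeleton; only the bodies of the $group and $project stages vary with the requested aggregates, period and groupby fields. The sketch below shows the resulting shape, where the avg entries stand in for whatever the aggregate helpers contribute and the match filter is only an example value.

    # Example filter as make_query_from_filter might produce it (assumed).
    q = {'counter_name': 'cpu_util'}

    aggregation_query = [
        {'$match': q},                                # samples matching the filter
        {'$sort': {'timestamp': 1}},                  # oldest samples first
        {'$group': {
            '_id': {'name': '$counter_name',
                    'unit': '$counter_unit'},         # plus period/groupby keys
            'first_timestamp': {'$min': '$timestamp'},
            'last_timestamp': {'$max': '$timestamp'},
            'avg': {'$avg': '$counter_volume'},       # added by aggregate helpers
        }},
        {'$sort': {'_id.period_start': 1}},
        {'$project': {
            'name': '$_id.name',
            'unit': '$_id.unit',
            'first_timestamp': '$first_timestamp',
            'last_timestamp': '$last_timestamp',
            'period_start': '$_id.period_start',
            'avg': '$avg',                            # added by aggregate helpers
        }},
    ]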