Example #1
    def get_volume(self, start, end=None, project_id=None, q_filter=None):
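        # Collect per-resource volume.size statistics, cache the resource
        # metadata, and rate each item with the unit from the YAML metrics
        # configuration (falling back to the deprecated oslo config mapping).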
        active_volume_stats = self.resources_stats('volume.size', start, end,
                                                   project_id, q_filter)
        volume_data = []
        for volume_stats in active_volume_stats:
            volume_id = volume_stats.groupby['resource_id']
            if not self._cacher.has_resource_detail('volume', volume_id):
                raw_resource = self._conn.resources.get(volume_id)
                volume = self.t_ceilometer.strip_resource_data(
                    'volume', raw_resource)
                self._cacher.add_resource_detail('volume', volume_id, volume)
            volume = self._cacher.get_resource_detail('volume', volume_id)

            try:
                volume_data.append(
                    self.t_cloudkitty.format_item(
                        volume,
                        METRICS_CONF['services_units']['volume'],
                        volume_stats.max,
                    ))
            # NOTE(mc): deprecated except part kept for backward compatibility.
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated oslo config method.')
                volume_data.append(
                    self.t_cloudkitty.format_item(
                        volume,
                        self.units_mappings['volume'],
                        volume_stats.max,
                    ))

        if not volume_data:
            raise collector.NoDataCollected(self.collector_name, 'volume')
        return self.t_cloudkitty.format_service('volume', volume_data)
Example #2
 def get_volume(self, start, end=None, project_id=None, q_filter=None):
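     # Same collection flow as above, but the rating unit is hardcoded
     # to 'GB' instead of being taken from configuration.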
     active_volume_stats = self.resources_stats('volume.size',
                                                start,
                                                end,
                                                project_id,
                                                q_filter)
     volume_data = []
     for volume_stats in active_volume_stats:
         volume_id = volume_stats.groupby['resource_id']
         if not self._cacher.has_resource_detail('volume',
                                                 volume_id):
             raw_resource = self._conn.resources.get(volume_id)
             volume = self.t_ceilometer.strip_resource_data('volume',
                                                            raw_resource)
             self._cacher.add_resource_detail('volume',
                                              volume_id,
                                              volume)
         volume = self._cacher.get_resource_detail('volume',
                                                   volume_id)
         volume_data.append(self.t_cloudkitty.format_item(volume,
                                                          'GB',
                                                          volume_stats.max))
     if not volume_data:
         raise collector.NoDataCollected(self.collector_name, 'volume')
     return self.t_cloudkitty.format_service('volume', volume_data)
Example #3
    def run(self):
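        # One iteration per collect period: gather data for every configured
        # service, record a no-data marker when collection fails, run the
        # rating processors on the rest, then commit the period.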
        while True:
            timestamp = self.check_state()
            if not timestamp:
                break

            for service in CONF.collect.services:
                try:
                    try:
                        data = self._collect(service, timestamp)
                    except collector.NoDataCollected:
                        raise
                    except Exception as e:
                        LOG.warn('Error while collecting service {service}:'
                                 ' {error}'.format(service=service,
                                                   error=six.text_type(e)))
                        raise collector.NoDataCollected('', service)
                except collector.NoDataCollected:
                    begin = timestamp
                    end = begin + self._period
                    for processor in self._processors:
                        processor.obj.nodata(begin, end)
                    self._storage.nodata(begin, end, self._tenant_id)
                else:
                    # Rating
                    for processor in self._processors:
                        processor.obj.process(data)
                    # Writing
                    self._storage.append(data, self._tenant_id)

            # We're getting a full period so we directly commit
            self._storage.commit(self._tenant_id)
Example #4
    def run(self):
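        # Same collect loop, but the service list comes from the YAML metrics
        # configuration (METRICS_CONF) instead of oslo CONF.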
        while True:
            timestamp = self.check_state()
            if not timestamp:
                break

            for service in METRICS_CONF['services']:
                try:
                    try:
                        data = self._collect(service, timestamp)
                    except collector.NoDataCollected:
                        raise
                    except Exception as e:
                        LOG.warning(
                            'Error while collecting service '
                            '%(service)s: %(error)s',
                            {'service': service, 'error': e})
                        raise collector.NoDataCollected('', service)
                except collector.NoDataCollected:
                    begin = timestamp
                    end = begin + self._period
                    for processor in self._processors:
                        processor.obj.nodata(begin, end)
                    self._storage.nodata(begin, end, self._tenant_id)
                else:
                    # Rating
                    for processor in self._processors:
                        processor.obj.process(data)
                    # Writing
                    self._storage.append(data, self._tenant_id)

            # We're getting a full period so we directly commit
            self._storage.commit(self._tenant_id)
Example #5
 def get_network_floating(self,
                          start,
                          end=None,
                          project_id=None,
                          q_filter=None):
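     # Each active floating IP is billed as a single 'ip' item per period.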
     active_floating_ids = self.active_resources('ip.floating', start, end,
                                                 project_id, q_filter)
     floating_data = []
     for floating_id in active_floating_ids:
         if not self._cacher.has_resource_detail('network.floating',
                                                 floating_id):
             raw_resource = self._conn.resources.get(floating_id)
             floating = self.t_ceilometer.strip_resource_data(
                 'network.floating', raw_resource)
             self._cacher.add_resource_detail('network.floating',
                                              floating_id, floating)
         floating = self._cacher.get_resource_detail(
             'network.floating', floating_id)
         floating_data.append(
             self.t_cloudkitty.format_item(floating, 'ip', 1))
     if not floating_data:
         raise collector.NoDataCollected(self.collector_name,
                                         'network.floating')
     return self.t_cloudkitty.format_service('network.floating',
                                             floating_data)
Example #6
    def get_compute(self, start, end=None, project_id=None, q_filter=None):
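        # One item per active instance; the unit comes from the YAML metrics
        # configuration, with a fallback to the deprecated oslo config mapping.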
        active_instance_ids = self.active_resources('cpu', start, end,
                                                    project_id, q_filter)
        compute_data = []
        for instance_id in active_instance_ids:
            if not self._cacher.has_resource_detail('compute', instance_id):
                raw_resource = self._conn.resources.get(instance_id)
                instance = self.t_ceilometer.strip_resource_data(
                    'compute', raw_resource)
                self._cacher.add_resource_detail('compute', instance_id,
                                                 instance)
            instance = self._cacher.get_resource_detail('compute', instance_id)

            try:
                compute_data.append(
                    self.t_cloudkitty.format_item(
                        instance,
                        METRICS_CONF['services_units']['compute'],
                        1,
                    ))
            # NOTE(mc): deprecated except part kept for backward compatibility.
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated oslo config method.')
                compute_data.append(
                    self.t_cloudkitty.format_item(
                        instance,
                        self.units_mappings['compute'],
                        1,
                    ))
        if not compute_data:
            raise collector.NoDataCollected(self.collector_name, 'compute')
        return self.t_cloudkitty.format_service('compute', compute_data)
Example #7
 def _get_network_bw(self,
                     direction,
                     start,
                     end=None,
                     project_id=None,
                     q_filter=None):
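     # Any direction other than 'in' is normalized to 'out'. The per-tap
     # bandwidth maximum is converted from bytes to MB before formatting.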
     if direction == 'in':
         resource_type = 'network.incoming.bytes'
     else:
         direction = 'out'
         resource_type = 'network.outgoing.bytes'
     active_tap_stats = self.resources_stats(resource_type, start, end,
                                             project_id, q_filter)
     bw_data = []
     for tap_stat in active_tap_stats:
         tap_id = tap_stat.groupby['resource_id']
         if not self._cacher.has_resource_detail('network.tap', tap_id):
             raw_resource = self._conn.resources.get(tap_id)
             tap = self.t_ceilometer.strip_resource_data(
                 'network.tap', raw_resource)
             self._cacher.add_resource_detail('network.tap', tap_id, tap)
         tap = self._cacher.get_resource_detail('network.tap', tap_id)
         tap_bw_mb = tap_stat.max / 1048576.0
         bw_data.append(self.t_cloudkitty.format_item(tap, 'MB', tap_bw_mb))
     ck_res_name = 'network.bw.{}'.format(direction)
     if not bw_data:
         raise collector.NoDataCollected(self.collector_name, ck_res_name)
     return self.t_cloudkitty.format_service(ck_res_name, bw_data)
Example #8
    def get_instance_addon(self,
                           start,
                           end=None,
                           project_id=None,
                           q_filter=None):
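        # Bill a flat 'instance.addon' item for every active instance.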
        active_instance_ids = self.active_resources('instance', start, end,
                                                    project_id, q_filter)

        instance_addon_data = []
        for instance_id in active_instance_ids:
            if not self._cacher.has_resource_detail('instance.addon',
                                                    instance_id):
                raw_resource = self._conn.resources.get(instance_id)

                instance_addon = self.t_ceilometer.strip_resource_data(
                    'instance.addon', raw_resource)
                self._cacher.add_resource_detail('instance.addon', instance_id,
                                                 instance_addon)
            instance_addon = self._cacher.get_resource_detail(
                'instance.addon', instance_id)

            instance_addon_data.append(
                self.t_cloudkitty.format_item(instance_addon, 'instance.addon',
                                              1))

        if not instance_addon_data:
            raise collector.NoDataCollected(self.collector_name,
                                            'instance.addon')
        return self.t_cloudkitty.format_service('instance.addon',
                                                instance_addon_data)
Example #9
 def retrieve(self, resource_name, start, end, project_id, q_filter=None):
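     # Generic retrieval: wrap the collected resources in the CloudKitty
     # service format, or signal that nothing was collected.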
     resources = self.resource_info(resource_name, start, end,
                                    project_id=project_id,
                                    q_filter=q_filter)
     if not resources:
         raise collector.NoDataCollected(self.collector_name, resource_name)
     return self.t_cloudkitty.format_service(resource_name, resources)
Example #10
    def run(self):
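        # Variant of the collect loop that iterates over the metric names
        # declared in the collector configuration (self.conf['metrics']).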
        while True:
            timestamp = self.check_state()
            if not timestamp:
                break

            metrics = list(self.conf['metrics'].keys())

            for metric in metrics:
                try:
                    try:
                        data = self._collect(metric, timestamp)
                    except collector.NoDataCollected:
                        raise
                    except Exception as e:
                        LOG.warning(
                            'Error while collecting metric '
                            '%(metric)s: %(error)s',
                            {'metric': metric, 'error': e})
                        raise collector.NoDataCollected('', metric)
                except collector.NoDataCollected:
                    begin = timestamp
                    end = begin + self._period
                    for processor in self._processors:
                        processor.obj.nodata(begin, end)
                    self._storage.nodata(begin, end, self._tenant_id)
                else:
                    # Rating
                    for processor in self._processors:
                        processor.obj.process(data)
                    # Writing
                    self._storage.append(data, self._tenant_id)

            # We're getting a full period so we directly commit
            self._storage.commit(self._tenant_id)
Example #11
    def _get_network_bw(self,
                        direction,
                        start,
                        end=None,
                        project_id=None,
                        q_filter=None):
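        # Same bandwidth collection as the simpler variant, but the byte
        # value is converted with the factor/offset from the YAML metrics
        # configuration, falling back to a hardcoded division by units.M.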
        if direction == 'in':
            resource_type = 'network.incoming.bytes'
        else:
            direction = 'out'
            resource_type = 'network.outgoing.bytes'
        active_tap_stats = self.resources_stats(resource_type, start, end,
                                                project_id, q_filter)
        bw_data = []
        for tap_stat in active_tap_stats:
            tap_id = tap_stat.groupby['resource_id']
            if not self._cacher.has_resource_detail('network.tap', tap_id):
                raw_resource = self._conn.resources.get(tap_id)
                tap = self.t_ceilometer.strip_resource_data(
                    'network.tap', raw_resource)
                self._cacher.add_resource_detail('network.tap', tap_id, tap)
            tap = self._cacher.get_resource_detail('network.tap', tap_id)

            # Unit conversion
            try:
                conv = METRICS_CONF['metrics_units']['network.bw.' + direction]
                tap_bw_mb = ck_utils.convert_unit(
                    decimal.Decimal(tap_stat.max),
                    conv[resource_type].get('factor', 1),
                    conv[resource_type].get('offset', 0),
                )
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated hardcoded method.')
                tap_bw_mb = decimal.Decimal(tap_stat.max) / units.M

            try:
                met = METRICS_CONF['metrics_units']['network.bw.' + direction]
                bw_data.append(
                    self.t_cloudkitty.format_item(
                        tap,
                        list(met.values())[0]['unit'],
                        tap_bw_mb,
                    ))
            # NOTE(mc): deprecated except part kept for backward compatibility.
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated oslo config method.')
                bw_data.append(
                    self.t_cloudkitty.format_item(
                        tap,
                        self.units_mappings['network.bw.' + direction],
                        tap_bw_mb,
                    ))

        ck_res_name = 'network.bw.{}'.format(direction)
        if not bw_data:
            raise collector.NoDataCollected(self.collector_name, ck_res_name)
        return self.t_cloudkitty.format_service(ck_res_name, bw_data)
Example #12
 def test_do_collection_some_empty(self):
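     # The mocked collector raises NoDataCollected for two of the metrics;
     # those entries must be absent from the _do_collection() output.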
     metrics = ['metric{}'.format(i) for i in range(7)]
     side_effect = [(
         metrics[i],
         {'period': {'begin': 0,
                     'end': 3600},
          'usage': i},
     ) for i in range(5)]
     side_effect.insert(2, collector.NoDataCollected('a', 'b'))
     side_effect.insert(4, collector.NoDataCollected('a', 'b'))
     self.worker._collect.side_effect = side_effect
     output = sorted(self.worker._do_collection(metrics, 0).items(),
                     key=lambda x: x[1]['usage'])
     self.assertEqual([
         i for i in side_effect
         if not isinstance(i, collector.NoDataCollected)
     ], output)
Example #13
    def _collect(self, metric, start_timestamp):
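        # Collect a single metric for one period; an empty result is turned
        # into a NoDataCollected exception.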
        next_timestamp = tzutils.add_delta(
            start_timestamp, timedelta(seconds=self._period))

        name, data = self._collector.retrieve(
            metric,
            start_timestamp,
            next_timestamp,
            self._tenant_id
        )
        if not data:
            raise collector.NoDataCollected(self._collector, metric)

        return name, data
Example #14
 def generic_retrieve(self,
                      resource_name,
                      start,
                      end=None,
                      project_id=None,
                      q_filter=None):
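     # Generic gnocchi retrieval: expose the backend resource_id so that the
     # storage layer can reference the original resource.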
     resources = self.resource_info(resource_name, start, end, project_id,
                                    q_filter)
     if not resources:
         raise collector.NoDataCollected(self.collector_name, resource_name)
     for resource in resources:
         # NOTE(sheeprine): Reference to gnocchi resource used by storage
         resource['resource_id'] = resource['desc']['resource_id']
     return self.t_cloudkitty.format_service(resource_name, resources)
Example #15
    def get_image(self, start, end=None, project_id=None, q_filter=None):
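        # Image sizes are converted from bytes using the factor/offset defined
        # in the YAML metrics configuration, with a hardcoded MiB fallback.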
        active_image_stats = self.resources_stats('image.size', start, end,
                                                  project_id, q_filter)
        image_data = []
        for image_stats in active_image_stats:
            image_id = image_stats.groupby['resource_id']
            if not self._cacher.has_resource_detail('image', image_id):
                raw_resource = self._conn.resources.get(image_id)
                image = self.t_ceilometer.strip_resource_data(
                    'image', raw_resource)
                self._cacher.add_resource_detail('image', image_id, image)
            image = self._cacher.get_resource_detail('image', image_id)

            # Unit conversion
            try:
                conv_data = METRICS_CONF['metrics_units']['image']
                image_size_mb = ck_utils.convert_unit(
                    decimal.Decimal(image_stats.max),
                    conv_data['image.size'].get('factor', 1),
                    conv_data['image.size'].get('offset', 0),
                )
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated hardcoded method.')
                image_size_mb = decimal.Decimal(image_stats.max) / units.Mi

            try:
                met = list(METRICS_CONF['metrics_units']['image'].values())
                image_data.append(
                    self.t_cloudkitty.format_item(
                        image,
                        met[0]['unit'],
                        image_size_mb,
                    ))
            # NOTE(mc): deprecated except part kept for backward compatibility.
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated oslo config method.')
                image_data.append(
                    self.t_cloudkitty.format_item(
                        image,
                        self.units_mappings['image'],
                        image_size_mb,
                    ))

        if not image_data:
            raise collector.NoDataCollected(self.collector_name, 'image')
        return self.t_cloudkitty.format_service('image', image_data)
Example #16
    def get_radosgw_usage(self,
                          start,
                          end=None,
                          project_id=None,
                          q_filter=None):
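        # Object storage usage is converted with the YAML metrics
        # configuration when available; otherwise fall back to a hardcoded
        # GiB conversion and the deprecated unit mapping.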
        active_rgw_stats = self.resources_stats('radosgw.objects.size', start,
                                                end, project_id, q_filter)
        rgw_data = []
        for rgw_stats in active_rgw_stats:
            rgw_id = rgw_stats.groupby['resource_id']
            if not self._cacher.has_resource_detail('radosgw.usage', rgw_id):
                raw_resource = self._conn.resources.get(rgw_id)
                rgw = self.t_ceilometer.strip_resource_data(
                    'radosgw.usage', raw_resource)
                self._cacher.add_resource_detail('radosgw.usage', rgw_id, rgw)
            rgw = self._cacher.get_resource_detail('radosgw.usage', rgw_id)

            # Unit conversion
            try:
                conv_data = METRICS_CONF['metrics_units']['radosgw.usage']
                rgw_size = ck_utils.convert_unit(
                    decimal.Decimal(rgw_stats.max),
                    conv_data['radosgw.object.size'].get('factor', 1),
                    conv_data['radosgw.object.size'].get('offset', 0),
                )

                rgw_data.append(
                    self.t_cloudkitty.format_item(
                        rgw,
                        conv_data['rados.objects.size']['unit'],
                        rgw_size,
                    ))
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated hardcoded method.')
                rgw_size = decimal.Decimal(rgw_stats.max) / units.Gi
                rgw_data.append(
                    self.t_cloudkitty.format_item(
                        rgw,
                        self.units_mappings['radosgw.usage'],
                        rgw_size,
                    ))

        if not rgw_data:
            raise collector.NoDataCollected(self.collector_name,
                                            'radosgw.usage')
        return self.t_cloudkitty.format_service('radosgw.usage', rgw_data)
Example #17
 def _get_data(self,
               res_type,
               start,
               end=None,
               project_id=None,
               q_filter=None):
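     # Read pre-recorded usage from the CSV backend: each matching row holds
     # JSON-encoded 'desc' and 'vol' fields.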
     self._open_csv()
     rows = self.filter_rows(start, end, project_id, res_type=res_type)
     data = []
     for row in rows:
         data.append({
             'desc': json.loads(row['desc']),
             'vol': json.loads(row['vol'])
         })
     if not data:
         raise collector.NoDataCollected(self.collector_name, res_type)
     return self.t_cloudkitty.format_service(res_type, data)
Example #18
 def get_image(self, start, end=None, project_id=None, q_filter=None):
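     # Simpler image collection: sizes are converted to MB with a hardcoded
     # divisor.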
     active_image_stats = self.resources_stats('image.size', start, end,
                                               project_id, q_filter)
     image_data = []
     for image_stats in active_image_stats:
         image_id = image_stats.groupby['resource_id']
         if not self._cacher.has_resource_detail('image', image_id):
             raw_resource = self._conn.resources.get(image_id)
             image = self.t_ceilometer.strip_resource_data(
                 'image', raw_resource)
             self._cacher.add_resource_detail('image', image_id, image)
         image = self._cacher.get_resource_detail('image', image_id)
         image_mb = image_stats.max / 1048576.0
         image_data.append(
             self.t_cloudkitty.format_item(image, 'image', image_mb))
     if not image_data:
         raise collector.NoDataCollected(self.collector_name, 'image')
     return self.t_cloudkitty.format_service('image', image_data)
Example #19
    def run(self):
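        # Collect every configured metric for the period, log and skip the
        # metrics without data, rate the rest, then push the whole batch to
        # storage and advance the scope state.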
        while True:
            timestamp = self.check_state()
            if not timestamp:
                break

            metrics = list(self._conf['metrics'].keys())

            storage_data = []
            for metric in metrics:
                try:
                    try:
                        data = self._collect(metric, timestamp)
                    except collector.NoDataCollected:
                        raise
                    except Exception as e:
                        LOG.warning(
                            '[%(scope_id)s] Error while collecting metric '
                            '%(metric)s: %(error)s',
                            {
                                'scope_id': self._tenant_id,
                                'metric': metric,
                                'error': e,
                            },
                        )
                        raise collector.NoDataCollected('', metric)
                except collector.NoDataCollected:
                    LOG.info('[{}] No data collected for metric {} '
                             'at timestamp {}'.format(
                                 self._tenant_id, metric,
                                 ck_utils.ts2dt(timestamp)))
                else:
                    # Rating
                    for processor in self._processors:
                        processor.obj.process(data)
                    # Writing
                    if isinstance(data, list):
                        storage_data += data
                    else:
                        storage_data.append(data)

            # We're getting a full period so we directly commit
            self._storage.push(storage_data, self._tenant_id)
            self._state.set_state(self._tenant_id, timestamp)
Example #20
    def fetch_all(self, metric_name, start, end, project_id, q_filter=None):
        """Returns metrics to be valorized."""
        # NOTE(mc): Remove potential trailing '/' to avoid
        # url building problems
        url = CONF.collector_prometheus.prometheus_url
        if url.endswith('/'):
            url = url[:-1]

        res = PrometheusClient.get_data(
            url,
            self.conf[metric_name]['extra_args']['query'],
            start,
            end,
            self.period,
            metric_name,
        )

        # If the query returns an empty dataset,
        # raise a NoDataCollected exception.
        if not res['data']['result']:
            raise collector.NoDataCollected(self.collector_name, metric_name)

        formatted_resources = []

        for item in res['data']['result']:
            metadata, groupby, qty = self._format_data(
                metric_name,
                project_id,
                start,
                end,
                item,
            )

            item = self.t_cloudkitty.format_item(
                groupby,
                metadata,
                self.conf[metric_name]['unit'],
                qty=qty,
            )

            formatted_resources.append(item)

        return formatted_resources
Example #21
    def get_network_floating(self,
                             start,
                             end=None,
                             project_id=None,
                             q_filter=None):
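        # One item per active floating IP; the unit is read from the YAML
        # metrics configuration, falling back to the deprecated oslo config
        # mapping.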
        active_floating_ids = self.active_resources('ip.floating', start, end,
                                                    project_id, q_filter)
        floating_data = []
        for floating_id in active_floating_ids:
            if not self._cacher.has_resource_detail('network.floating',
                                                    floating_id):
                raw_resource = self._conn.resources.get(floating_id)
                floating = self.t_ceilometer.strip_resource_data(
                    'network.floating', raw_resource)
                self._cacher.add_resource_detail('network.floating',
                                                 floating_id, floating)
            floating = self._cacher.get_resource_detail(
                'network.floating', floating_id)

            try:
                metric = METRICS_CONF['metrics_units']['network.floating']
                floating_data.append(
                    self.t_cloudkitty.format_item(
                        floating,
                        list(metric.values())[0]['unit'],
                        1,
                    ))
            # NOTE(mc): deprecated except part kept for backward compatibility.
            except KeyError:
                LOG.warning('Error when trying to use yaml metrology conf.')
                LOG.warning('Fallback on the deprecated oslo config method.')
                floating_data.append(
                    self.t_cloudkitty.format_item(
                        floating,
                        self.units_mappings['network.floating'],
                        1,
                    ))

        if not floating_data:
            raise collector.NoDataCollected(self.collector_name,
                                            'network.floating')
        return self.t_cloudkitty.format_service('network.floating',
                                                floating_data)
Example #22
 def get_compute(self, start, end=None, project_id=None, q_filter=None):
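     # One item per active instance, rated with the configured compute unit
     # mapping.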
     active_instance_ids = self.active_resources('cpu', start, end,
                                                 project_id, q_filter)
     compute_data = []
     for instance_id in active_instance_ids:
         if not self._cacher.has_resource_detail('compute', instance_id):
             raw_resource = self._conn.resources.get(instance_id)
             instance = self.t_ceilometer.strip_resource_data(
                 'compute', raw_resource)
             self._cacher.add_resource_detail('compute', instance_id,
                                              instance)
         instance = self._cacher.get_resource_detail('compute', instance_id)
         compute_data.append(
             self.t_cloudkitty.format_item(instance,
                                           self.units_mappings["compute"],
                                           1))
     if not compute_data:
         raise collector.NoDataCollected(self.collector_name, 'compute')
     return self.t_cloudkitty.format_service('compute', compute_data)
Example #23
 def get_traffic(self, start, end=None, project_id=None, q_filter=None):
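     # Sum the bandwidth samples per label, convert to GB, and expose the
     # size in the resource metadata as well as in the rated quantity.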
     active_traffic_stats = self.resources_stats('bandwidth', start, end,
                                                 project_id, q_filter)
     traffic_data = []
     for traffic_stat in active_traffic_stats:
         label_id = traffic_stat.groupby['resource_id']
         if not self._cacher.has_resource_detail('traffic', label_id):
             raw_resource = self._conn.resources.get(label_id)
             traffic = self.t_ceilometer.strip_resource_data(
                 'traffic', raw_resource)
             self._cacher.add_resource_detail('traffic', label_id, traffic)
         traffic = self._cacher.get_resource_detail('traffic', label_id)
         traffic_gb = traffic_stat.sum / 1073741824.0
         traffic["traffic_size"] = traffic_gb
         traffic_data.append(
             self.t_cloudkitty.format_item(traffic, 'GB', traffic_gb))
     if not traffic_data:
         raise collector.NoDataCollected(self.collector_name, 'traffic')
     return self.t_cloudkitty.format_service('traffic', traffic_data)
Example #24
    def build_query(cls, source, query, start, end, period, metric_name):
        """Build PromQL instant queries."""
        start = ck_utils.iso8601_from_timestamp(start)
        end = ck_utils.iso8601_from_timestamp(end)

        if '$period' in query:
            try:
                query = ck_utils.template_str_substitute(
                    query, {'period': str(period) + 's'},
                )
            except (KeyError, ValueError):
                raise collector.NoDataCollected(
                    collector.collector_name,
                    metric_name
                )

        # Due to the design of Cloudkitty, only instant queries are supported.
        # In that case 'time' equals 'end' and
        # the window time is represented by the period.
        return source + '/query?query=' + query + '&time=' + end
Example #25
    def _fetch_measures(self,
                        metric_name,
                        start,
                        end,
                        project_id=None,
                        q_filter=None):
        """Get measures for given metric during the timeframe.

        :param metric_name: metric name to filter on.
        :type metric_name: str
        :param start: Start of the timeframe.
        :param end: End of the timeframe if needed.
        :param project_id: Filter on a specific tenant/project.
        :type project_id: str
        :param q_filter: Append a custom filter.
        :type q_filter: list
        """

        dimensions = self._get_dimensions(metric_name, project_id, q_filter)
        group_by = self.conf[metric_name]['groupby']
        resource_key = self.conf[metric_name]['extra_args']['resource_key']
        if resource_key not in group_by:
            LOG.error('Resource key "{}" is not in group_by keys: "{}". '
                      'Please adapt your configuration.'.format(
                          resource_key, group_by))
            raise collector.NoDataCollected(self.collector_name, metric_name)

        # NOTE(lpeschke): One aggregated measure per collect period
        period = end - start

        extra_args = self.conf[metric_name]['extra_args']
        return self._conn.metrics.list_statistics(
            name=metric_name,
            merge_metrics=True,
            dimensions=dimensions,
            start_time=ck_utils.ts2dt(start),
            end_time=ck_utils.ts2dt(end),
            period=period,
            statistics=extra_args['aggregation_method'],
            group_by=group_by)
Example #26
    def get_cloudstorage(self,
                         start,
                         end=None,
                         project_id=None,
                         q_filter=None):

        # To bill in each and every period
        active_cloud_volume_stats = self.resources_stats(
            'storage.objects.size', start, end, project_id, q_filter)
        cloud_volume_data = []

        for cloud_volume_stats in active_cloud_volume_stats:

            cloud_volume_id = cloud_volume_stats.groupby['resource_id']
            if not self._cacher.has_resource_detail('cloudstorage',
                                                    cloud_volume_id):
                raw_resource = self._conn.resources.get(cloud_volume_id)

                cloud_volume = self.t_ceilometer.strip_resource_data(
                    'cloudstorage', raw_resource)

                self._cacher.add_resource_detail('cloudstorage',
                                                 cloud_volume_id, cloud_volume)

            cloud_volume = self._cacher.get_resource_detail(
                'cloudstorage', cloud_volume_id)

            # Convert bytes to GB
            cloud_volume_gb = cloud_volume_stats.max / 1073741824.0

            cloud_volume_data.append(
                self.t_cloudkitty.format_item(cloud_volume, 'GB',
                                              cloud_volume_gb))

        if not cloud_volume_data:
            raise collector.NoDataCollected(self.collector_name,
                                            'cloudstorage')
        return self.t_cloudkitty.format_service('cloudstorage',
                                                cloud_volume_data)
Example #27
 def get_radosgw_usage(self,
                       start,
                       end=None,
                       project_id=None,
                       q_filter=None):
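     # Convert the object store usage maximum from bytes to GB and expose it
     # in the resource metadata.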
     active_rgw_stats = self.resources_stats('radosgw.objects.size', start,
                                             end, project_id, q_filter)
     rgw_data = []
     for rgw_stats in active_rgw_stats:
         rgw_id = rgw_stats.groupby['resource_id']
         if not self._cacher.has_resource_detail('radosgw.usage', rgw_id):
             raw_resource = self._conn.resources.get(rgw_id)
             rgw = self.t_ceilometer.strip_resource_data(
                 'radosgw.usage', raw_resource)
             self._cacher.add_resource_detail('radosgw.usage', rgw_id, rgw)
         rgw = self._cacher.get_resource_detail('radosgw.usage', rgw_id)
         rgw_size = rgw_stats.max / 1073741824.0
         rgw["radosgw_size"] = rgw_size
         rgw_data.append(self.t_cloudkitty.format_item(rgw, 'GB', rgw_size))
     if not rgw_data:
         raise collector.NoDataCollected(self.collector_name,
                                         'radosgw.usage')
     return self.t_cloudkitty.format_service('radosgw.usage', rgw_data)