Code example #1
    def __init__(self, parsed_url):
        super(MonascaPublisher, self).__init__(parsed_url)

        # list to hold metrics to be published in batch (behaves like queue)
        self.metric_queue = []
        self.time_of_last_batch_run = time.time()

        self.mon_client = mon_client.Client(parsed_url)
        self.mon_filter = MonascaDataFilter()

        batch_timer = loopingcall.FixedIntervalLoopingCall(self.flush_batch)
        batch_timer.start(interval=cfg.CONF.monasca.batch_polling_interval)

        if cfg.CONF.monasca.retry_on_failure:
            # list to hold metrics to be re-tried (behaves like queue)
            self.retry_queue = []
            # list to store retry attempts for metrics in retry_queue
            self.retry_counter = []
            retry_timer = loopingcall.FixedIntervalLoopingCall(
                self.retry_batch)
            retry_timer.start(
                interval=cfg.CONF.monasca.retry_interval,
                initial_delay=cfg.CONF.monasca.batch_polling_interval)

        if cfg.CONF.monasca.archive_on_failure:
            archive_path = cfg.CONF.monasca.archive_path
            if not os.path.exists(archive_path):
                archive_path = cfg.CONF.find_file(archive_path)

            self.archive_handler = publisher.get_publisher('file://' +
                                                           str(archive_path))
Code example #2
    def __init__(self, conf, parsed_url):
        super(MonascaPublisher, self).__init__(conf, parsed_url)

        # list to hold metrics to be published in batch (behaves like queue)
        self.metric_queue = []
        self.time_of_last_batch_run = time.time()

        self.mon_client = mon_client.Client(self.conf, parsed_url)
        self.mon_filter = MonascaDataFilter(self.conf)

        # add flush_batch function to periodic callables
        periodic_callables = [
            # The function to run + any automatically provided
            # positional and keyword arguments to provide to it
            # every time it is activated.
            (self.flush_batch, (), {}),
        ]

        if self.conf.monasca.retry_on_failure:
            # list to hold metrics to be re-tried (behaves like queue)
            self.retry_queue = []
            # list to store retry attempts for metrics in retry_queue
            self.retry_counter = []

            # add retry_batch function to periodic callables
            periodic_callables.append((self.retry_batch, (), {}))

        if self.conf.monasca.archive_on_failure:
            archive_path = self.conf.monasca.archive_path
            if not os.path.exists(archive_path):
                archive_path = self.conf.find_file(archive_path)

            self.archive_handler = publisher.get_publisher(
                self.conf,
                'file://' +
                str(archive_path),
                'ceilometer.sample.publisher')

        # start periodic worker
        self.periodic_worker = periodics.PeriodicWorker(periodic_callables)
        self.periodic_thread = threading.Thread(
            target=self.periodic_worker.start)
        self.periodic_thread.daemon = True
        self.periodic_thread.start()
Code example #3
class MonascaPublisher(publisher.PublisherBase):
    """Publisher to publish samples to monasca using monasca-client.

    Example URL to place in pipeline.yaml:
        - monclient://http://192.168.10.4:8070/v2.0
    """
    def __init__(self, parsed_url):
        super(MonascaPublisher, self).__init__(parsed_url)

        # list to hold metrics to be published in batch (behaves like queue)
        self.metric_queue = []
        self.time_of_last_batch_run = time.time()

        self.mon_client = mon_client.Client(parsed_url)
        self.mon_filter = MonascaDataFilter()

        batch_timer = loopingcall.FixedIntervalLoopingCall(self.flush_batch)
        batch_timer.start(interval=cfg.CONF.monasca.batch_polling_interval)

        if cfg.CONF.monasca.retry_on_failure:
            # list to hold metrics to be re-tried (behaves like queue)
            self.retry_queue = []
            # list to store retry attempts for metrics in retry_queue
            self.retry_counter = []
            retry_timer = loopingcall.FixedIntervalLoopingCall(
                self.retry_batch)
            retry_timer.start(
                interval=cfg.CONF.monasca.retry_interval,
                initial_delay=cfg.CONF.monasca.batch_polling_interval)

        if cfg.CONF.monasca.archive_on_failure:
            archive_path = cfg.CONF.monasca.archive_path
            if not os.path.exists(archive_path):
                archive_path = cfg.CONF.find_file(archive_path)

            self.archive_handler = publisher.get_publisher('file://' +
                                                           str(archive_path))

    def _publish_handler(self, func, metrics, batch=False):
        """Handles publishing and exceptions that arise."""

        try:
            metric_count = len(metrics)
            if batch:
                func(**{'jsonbody': metrics})
            else:
                func(**metrics[0])
            LOG.debug(_('Successfully published %d metric(s)') % metric_count)
        except mon_client.MonascaServiceException:
            # Assuming atomicity of create or failure - meaning
            # either all succeed or all fail in a batch
            LOG.error(_('Metric create failed for %(count)d metric(s) with'
                        ' name(s) %(names)s ') %
                      ({'count': len(metrics),
                        'names': ','.join([metric['name']
                                           for metric in metrics])}))
            if cfg.CONF.monasca.retry_on_failure:
                # retry payload in case of internal server error (500),
                # service unavailable error (503), bad gateway (502) or
                # Communication Error

                # append failed metrics to retry_queue
                LOG.debug(_('Adding metrics to retry queue.'))
                self.retry_queue.extend(metrics)
                # initialize the retry attempt count for each failed
                # metric in retry_counter
                self.retry_counter.extend([0] * metric_count)
            else:
                if hasattr(self, 'archive_handler'):
                    self.archive_handler.publish_samples(None, metrics)
        except Exception:
            if hasattr(self, 'archive_handler'):
                self.archive_handler.publish_samples(None, metrics)

    def publish_samples(self, context, samples):
        """Main method called to publish samples."""

        for sample in samples:
            metric = self.mon_filter.process_sample_for_monasca(sample)
            # In batch mode, push metric to queue,
            # else publish the metric
            if cfg.CONF.monasca.batch_mode:
                LOG.debug(_('Adding metric to queue.'))
                self.metric_queue.append(metric)
            else:
                LOG.debug(_('Publishing metric with name %(name)s and'
                            ' timestamp %(ts)s to endpoint.') %
                          ({'name': metric['name'],
                            'ts': metric['timestamp']}))

                self._publish_handler(self.mon_client.metrics_create, [metric])

    def is_batch_ready(self):
        """Method to check if batch is ready to trigger."""

        previous_time = self.time_of_last_batch_run
        current_time = time.time()
        elapsed_time = current_time - previous_time

        if elapsed_time >= cfg.CONF.monasca.batch_timeout and len(
                self.metric_queue) > 0:
            LOG.debug(_('Batch timeout exceeded, triggering batch publish.'))
            return True
        else:
            if len(self.metric_queue) >= cfg.CONF.monasca.batch_count:
                LOG.debug(_('Batch queue full, triggering batch publish.'))
                return True
            else:
                return False

    def flush_batch(self):
        """Method to flush the queued metrics."""

        if self.is_batch_ready():
            # publish all metrics in queue at this point
            batch_count = len(self.metric_queue)

            self._publish_handler(self.mon_client.metrics_create,
                                  self.metric_queue[:batch_count],
                                  batch=True)

            self.time_of_last_batch_run = time.time()
            # slice the queue to drop metrics that either published
            # successfully or failed and moved to the retry queue
            self.metric_queue = self.metric_queue[batch_count:]

    def is_retry_ready(self):
        """Method to check if retry batch is ready to trigger."""

        if len(self.retry_queue) > 0:
            LOG.debug(_('Retry queue has items, triggering retry.'))
            return True
        else:
            return False

    def retry_batch(self):
        """Method to retry the failed metrics."""

        if self.is_retry_ready():
            retry_count = len(self.retry_queue)

            # Iterate over the retry_queue in reverse so deleting an
            # entry does not shift the indices still to be visited,
            # dropping metrics that have maxed out their retry attempts
            for ctr in xrange(retry_count - 1, -1, -1):
                if self.retry_counter[ctr] > cfg.CONF.monasca.max_retries:
                    if hasattr(self, 'archive_handler'):
                        self.archive_handler.publish_samples(
                            None,
                            [self.retry_queue[ctr]])
                    LOG.debug(_('Removing metric %s from retry queue.'
                                ' Metric maxed out its retry attempts') %
                              self.retry_queue[ctr]['name'])
                    del self.retry_queue[ctr]
                    del self.retry_counter[ctr]

            # Iterate over the retry_queue to retry the
            # publish for each metric.
            # If an exception occurs, the retry count for
            # the failed metric is incremented.
            # If the retry succeeds, remove the metric and
            # the retry count from the retry_queue and retry_counter resp.
            ctr = 0
            while ctr < len(self.retry_queue):
                try:
                    LOG.debug(_('Retrying metric publish from retry queue.'))
                    self.mon_client.metrics_create(**self.retry_queue[ctr])
                    # remove from retry queue if publish was success
                    LOG.debug(_('Retrying metric %s successful,'
                                ' removing metric from retry queue.') %
                              self.retry_queue[ctr]['name'])
                    del self.retry_queue[ctr]
                    del self.retry_counter[ctr]
                except exc.BaseException:
                    LOG.error(_('Exception encountered in retry. '
                                'Batch will be retried in next attempt.'))
                    # if retry failed, increment the retry counter
                    self.retry_counter[ctr] += 1
                    ctr += 1

    def publish_events(self, context, events):
        """Send an event message for publishing

        :param context: Execution context from the service or RPC call
        :param events: events from pipeline after transformation
        """
        raise ceilometer.NotImplementedError
Code example #4
class MonascaPublisher(publisher.ConfigPublisherBase):
    """Publisher to publish samples to monasca using monasca-client.

    Example URL to place in pipeline.yaml:
        - monasca://http://192.168.10.4:8070/v2.0
    """
    def __init__(self, conf, parsed_url):
        super(MonascaPublisher, self).__init__(conf, parsed_url)

        # list to hold metrics to be published in batch (behaves like queue)
        self.metric_queue = []
        self.time_of_last_batch_run = time.time()

        self.mon_client = mon_client.Client(self.conf, parsed_url)
        self.mon_filter = MonascaDataFilter(self.conf)

        # add flush_batch function to periodic callables
        periodic_callables = [
            # The function to run + any automatically provided
            # positional and keyword arguments to provide to it
            # every time it is activated.
            (self.flush_batch, (), {}),
        ]

        if self.conf.monasca.retry_on_failure:
            # list to hold metrics to be re-tried (behaves like queue)
            self.retry_queue = []
            # list to store retry attempts for metrics in retry_queue
            self.retry_counter = []

            # add retry_batch function to periodic callables
            periodic_callables.append((self.retry_batch, (), {}))

        if self.conf.monasca.archive_on_failure:
            archive_path = self.conf.monasca.archive_path
            if not os.path.exists(archive_path):
                archive_path = self.conf.find_file(archive_path)

            self.archive_handler = publisher.get_publisher(
                self.conf, 'file://' + str(archive_path),
                'ceilometer.sample.publisher')

        # start periodic worker
        self.periodic_worker = periodics.PeriodicWorker(periodic_callables)
        self.periodic_thread = threading.Thread(
            target=self.periodic_worker.start)
        self.periodic_thread.daemon = True
        self.periodic_thread.start()

    def _publish_handler(self, func, metrics, batch=False):
        """Handles publishing and exceptions that arise."""

        try:
            metric_count = len(metrics)
            if batch:
                func(**{'jsonbody': metrics})
            else:
                func(**metrics[0])
            LOG.info('Successfully published %d metric(s)' % metric_count)
        except mon_client.MonascaServiceException:
            # Assuming atomicity of create or failure - meaning
            # either all succeed or all fail in a batch
            LOG.error(
                'Metric create failed for %(count)d metric(s) with'
                ' name(s) %(names)s ' %
                ({
                    'count': len(metrics),
                    'names': ','.join([metric['name'] for metric in metrics])
                }))
            if self.conf.monasca.retry_on_failure:
                # retry payload in case of internal server error (500),
                # service unavailable error (503), bad gateway (502) or
                # Communication Error

                # append failed metrics to retry_queue
                LOG.debug('Adding metrics to retry queue.')
                self.retry_queue.extend(metrics)
                # initialize the retry attempt count for each failed
                # metric in retry_counter
                self.retry_counter.extend([0] * metric_count)
            else:
                if hasattr(self, 'archive_handler'):
                    self.archive_handler.publish_samples(None, metrics)
        except Exception:
            LOG.info(traceback.format_exc())
            if hasattr(self, 'archive_handler'):
                self.archive_handler.publish_samples(None, metrics)

    def publish_samples(self, samples):
        """Main method called to publish samples."""

        for sample in samples:
            metric = self.mon_filter.process_sample_for_monasca(sample)
            # In batch mode, push metric to queue,
            # else publish the metric
            if self.conf.monasca.batch_mode:
                LOG.debug('Adding metric to queue.')
                self.metric_queue.append(metric)
            else:
                LOG.info('Publishing metric with name %(name)s and'
                         ' timestamp %(ts)s to endpoint.' %
                         ({
                             'name': metric['name'],
                             'ts': metric['timestamp']
                         }))

                self._publish_handler(self.mon_client.metrics_create, [metric])

    def is_batch_ready(self):
        """Method to check if batch is ready to trigger."""

        previous_time = self.time_of_last_batch_run
        current_time = time.time()
        elapsed_time = current_time - previous_time

        if elapsed_time >= self.conf.monasca.batch_timeout and len(
                self.metric_queue) > 0:
            LOG.info('Batch timeout exceeded, triggering batch publish.')
            return True
        else:
            if len(self.metric_queue) >= self.conf.monasca.batch_count:
                LOG.info('Batch queue full, triggering batch publish.')
                return True
            else:
                return False

    @periodics.periodic(BATCH_POLLING_INTERVAL)
    def flush_batch(self):
        """Method to flush the queued metrics."""
        # print "flush batch... %s" % str(time.time())
        if self.is_batch_ready():
            # publish all metrics in queue at this point
            batch_count = len(self.metric_queue)

            LOG.info("batch is ready: batch_count %s" % str(batch_count))

            self._publish_handler(self.mon_client.metrics_create,
                                  self.metric_queue[:batch_count],
                                  batch=True)

            self.time_of_last_batch_run = time.time()
            # slice the queue to drop metrics that either published
            # successfully or failed and moved to the retry queue
            self.metric_queue = self.metric_queue[batch_count:]

    def is_retry_ready(self):
        """Method to check if retry batch is ready to trigger."""

        if len(self.retry_queue) > 0:
            LOG.info('Retry queue has items, triggering retry.')
            return True
        else:
            return False

    @periodics.periodic(BATCH_RETRY_INTERVAL)
    def retry_batch(self):
        """Method to retry the failed metrics."""
        # print "retry batch...%s" % str(time.time())
        if self.is_retry_ready():
            retry_count = len(self.retry_queue)

            # Iterate over the retry_queue in reverse so deleting an
            # entry does not shift the indices still to be visited,
            # dropping metrics that have maxed out their retry attempts
            for ctr in range(retry_count - 1, -1, -1):
                if (self.retry_counter[ctr] >
                        self.conf.monasca.batch_max_retries):
                    if hasattr(self, 'archive_handler'):
                        self.archive_handler.publish_samples(
                            None, [self.retry_queue[ctr]])
                    LOG.info('Removing metric %s from retry queue.'
                             ' Metric maxed out its retry attempts' %
                             self.retry_queue[ctr]['name'])
                    del self.retry_queue[ctr]
                    del self.retry_counter[ctr]

            # Iterate over the retry_queue to retry the
            # publish for each metric.
            # If an exception occurs, the retry count for
            # the failed metric is incremented.
            # If the retry succeeds, remove the metric and
            # the retry count from the retry_queue and retry_counter resp.
            ctr = 0
            while ctr < len(self.retry_queue):
                try:
                    LOG.info('Retrying metric publish from retry queue.')
                    self.mon_client.metrics_create(**self.retry_queue[ctr])
                    # remove from retry queue if publish was success
                    LOG.info('Retrying metric %s successful,'
                             ' removing metric from retry queue.' %
                             self.retry_queue[ctr]['name'])
                    del self.retry_queue[ctr]
                    del self.retry_counter[ctr]
                except exc.ClientException:
                    LOG.error('Exception encountered in retry. '
                              'Batch will be retried in next attempt.')
                    # if retry failed, increment the retry counter
                    self.retry_counter[ctr] += 1
                    ctr += 1

    def flush_to_file(self):
        # TODO(persist maxed-out metrics to file)
        pass

    def publish_events(self, events):
        """Send an event message for publishing

        :param events: events from pipeline after transformation
        """
        raise ceilometer.NotImplementedError
Code example #5
class MonascaPublisher(publisher.PublisherBase):
    """Publisher to publish samples to monasca using monasca-client.

    Example URL to place in pipeline.yaml:
        - monclient://http://192.168.10.4:8070/v2.0
    """
    def __init__(self, parsed_url):
        super(MonascaPublisher, self).__init__(parsed_url)

        # list to hold metrics to be published in batch (behaves like queue)
        self.metric_queue = []
        self.time_of_last_batch_run = time.time()

        self.mon_client = mon_client.Client(parsed_url)
        self.mon_filter = MonascaDataFilter()

        batch_timer = loopingcall.FixedIntervalLoopingCall(self.flush_batch)
        batch_timer.start(interval=cfg.CONF.monasca.batch_polling_interval)

        if cfg.CONF.monasca.retry_on_failure:
            # list to hold metrics to be re-tried (behaves like queue)
            self.retry_queue = []
            # list to store retry attempts for metrics in retry_queue
            self.retry_counter = []
            retry_timer = loopingcall.FixedIntervalLoopingCall(
                self.retry_batch)
            retry_timer.start(
                interval=cfg.CONF.monasca.retry_interval,
                initial_delay=cfg.CONF.monasca.batch_polling_interval)

        if cfg.CONF.monasca.archive_on_failure:
            archive_path = cfg.CONF.monasca.archive_path
            if not os.path.exists(archive_path):
                archive_path = cfg.CONF.find_file(archive_path)

            self.archive_handler = publisher.get_publisher('file://' +
                                                           str(archive_path))

    def _publish_handler(self, func, metrics, batch=False):
        """Handles publishing and exceptions that arise."""

        try:
            metric_count = len(metrics)
            if batch:
                func(**{'jsonbody': metrics})
            else:
                func(**metrics[0])
            LOG.debug(_('Successfully published %d metric(s)') % metric_count)
        except mon_client.MonascaServiceException:
            # Assuming atomicity of create or failure - meaning
            # either all succeed or all fail in a batch
            LOG.error(
                _('Metric create failed for %(count)d metric(s) with'
                  ' name(s) %(names)s ') %
                ({
                    'count': len(metrics),
                    'names': ','.join([metric['name'] for metric in metrics])
                }))
            if cfg.CONF.monasca.retry_on_failure:
                # retry payload in case of internal server error (500),
                # service unavailable error (503), bad gateway (502) or
                # Communication Error

                # append failed metrics to retry_queue
                LOG.debug(_('Adding metrics to retry queue.'))
                self.retry_queue.extend(metrics)
                # initialize the retry attempt count for each failed
                # metric in retry_counter
                self.retry_counter.extend([0] * metric_count)
            else:
                if hasattr(self, 'archive_handler'):
                    self.archive_handler.publish_samples(None, metrics)
        except Exception:
            if hasattr(self, 'archive_handler'):
                self.archive_handler.publish_samples(None, metrics)

    def publish_samples(self, context, samples):
        """Main method called to publish samples."""

        for sample in samples:
            metric = self.mon_filter.process_sample_for_monasca(sample)
            # In batch mode, push metric to queue,
            # else publish the metric
            if cfg.CONF.monasca.batch_mode:
                LOG.debug(_('Adding metric to queue.'))
                self.metric_queue.append(metric)
            else:
                LOG.debug(
                    _('Publishing metric with name %(name)s and'
                      ' timestamp %(ts)s to endpoint.') %
                    ({
                        'name': metric['name'],
                        'ts': metric['timestamp']
                    }))

                self._publish_handler(self.mon_client.metrics_create, [metric])

    def is_batch_ready(self):
        """Method to check if batch is ready to trigger."""

        previous_time = self.time_of_last_batch_run
        current_time = time.time()
        elapsed_time = current_time - previous_time

        if elapsed_time >= cfg.CONF.monasca.batch_timeout and len(
                self.metric_queue) > 0:
            LOG.debug(_('Batch timeout exceeded, triggering batch publish.'))
            return True
        else:
            if len(self.metric_queue) >= cfg.CONF.monasca.batch_count:
                LOG.debug(_('Batch queue full, triggering batch publish.'))
                return True
            else:
                return False

    def flush_batch(self):
        """Method to flush the queued metrics."""

        if self.is_batch_ready():
            # publish all metrics in queue at this point
            batch_count = len(self.metric_queue)

            self._publish_handler(self.mon_client.metrics_create,
                                  self.metric_queue[:batch_count],
                                  batch=True)

            self.time_of_last_batch_run = time.time()
            # slice the queue to drop metrics that either published
            # successfully or failed and moved to the retry queue
            self.metric_queue = self.metric_queue[batch_count:]

    def is_retry_ready(self):
        """Method to check if retry batch is ready to trigger."""

        if len(self.retry_queue) > 0:
            LOG.debug(_('Retry queue has items, triggering retry.'))
            return True
        else:
            return False

    def retry_batch(self):
        """Method to retry the failed metrics."""

        if self.is_retry_ready():
            retry_count = len(self.retry_queue)

            # Iterate over the retry_queue in reverse so deleting an
            # entry does not shift the indices still to be visited,
            # dropping metrics that have maxed out their retry attempts
            for ctr in xrange(retry_count - 1, -1, -1):
                if self.retry_counter[ctr] > cfg.CONF.monasca.max_retries:
                    if hasattr(self, 'archive_handler'):
                        self.archive_handler.publish_samples(
                            None, [self.retry_queue[ctr]])
                    LOG.debug(
                        _('Removing metric %s from retry queue.'
                          ' Metric maxed out its retry attempts') %
                        self.retry_queue[ctr]['name'])
                    del self.retry_queue[ctr]
                    del self.retry_counter[ctr]

            # Iterate over the retry_queue to retry the
            # publish for each metric.
            # If an exception occurs, the retry count for
            # the failed metric is incremented.
            # If the retry succeeds, remove the metric and
            # the retry count from the retry_queue and retry_counter resp.
            ctr = 0
            while ctr < len(self.retry_queue):
                try:
                    LOG.debug(_('Retrying metric publish from retry queue.'))
                    self.mon_client.metrics_create(**self.retry_queue[ctr])
                    # remove from retry queue if publish was success
                    LOG.debug(
                        _('Retrying metric %s successful,'
                          ' removing metric from retry queue.') %
                        self.retry_queue[ctr]['name'])
                    del self.retry_queue[ctr]
                    del self.retry_counter[ctr]
                except exc.BaseException:
                    LOG.error(
                        _('Exception encountered in retry. '
                          'Batch will be retried in next attempt.'))
                    # if retry failed, increment the retry counter
                    self.retry_counter[ctr] += 1
                    ctr += 1

    def publish_events(self, context, events):
        """Send an event message for publishing

        :param context: Execution context from the service or RPC call
        :param events: events from pipeline after transformation
        """
        raise ceilometer.NotImplementedError
Code example #6
    def __init__(self, url):
        self.mc = monasca_client.Client(netutils.urlsplit(url))
        self.mon_filter = MonascaDataFilter()
Code example #7
class Connection(base.Connection):
    CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
                                       AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = utils.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )

    def __init__(self, url):
        self.mc = monasca_client.Client(netutils.urlsplit(url))
        self.mon_filter = MonascaDataFilter()

    @staticmethod
    def _convert_to_dict(stats, cols):
        return {c: stats[i] for i, c in enumerate(cols)}

    def _convert_metaquery(self, metaquery):
        """Strip "metadata." from key and convert value to string

        :param metaquery:  { 'metadata.KEY': VALUE, ... }
        :returns: converted metaquery
        """
        query = {}
        for k, v in metaquery.items():
            # maxsplit keeps dotted metadata keys intact
            key = k.split('.', 1)[1]
            if isinstance(v, basestring):
                query[key] = v
            else:
                query[key] = str(int(v))
        return query

    def _match_metaquery_to_value_meta(self, query, value_meta):
        """Check if metaquery matches value_meta

        :param query: metaquery with converted format
        :param value_meta: metadata from monasca
        :returns: True for matched, False for not matched
        """
        if (query and (len(value_meta) == 0 or
           not set(query.items()).issubset(set(value_meta.items())))):
            return False
        else:
            return True

    def upgrade(self):
        pass

    def clear(self):
        pass

    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter.
        """
        LOG.info(_('metering data %(counter_name)s for %(resource_id)s: '
                   '%(counter_volume)s')
                 % ({'counter_name': data['counter_name'],
                     'resource_id': data['resource_id'],
                     'counter_volume': data['counter_volume']}))

        metric = self.mon_filter.process_sample_for_monasca(data)
        self.mc.metrics_create(**metric)

    def clear_expired_metering_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.
        :param ttl: Number of seconds to keep records for.
        """
        LOG.info(_("Dropping data with TTL %d"), ttl)

    def get_resources(self, user=None, project=None, source=None,
                      start_timestamp=None, start_timestamp_op=None,
                      end_timestamp=None, end_timestamp_op=None,
                      metaquery=None, resource=None, limit=None):
        """Return an iterable of dictionaries containing resource information.

        { 'resource_id': UUID of the resource,
          'project_id': UUID of project owning the resource,
          'user_id': UUID of user owning the resource,
          'timestamp': UTC datetime of last update to the resource,
          'metadata': most current metadata for the resource,
          'meter': list of the meters reporting data for the resource,
          }

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param source: Optional source filter.
        :param start_timestamp: Optional modified timestamp start range.
        :param start_timestamp_op: Optional start time operator, like gt, ge.
        :param end_timestamp: Optional modified timestamp end range.
        :param end_timestamp_op: Optional end time operator, like lt, le.
        :param metaquery: Optional dict with metadata to match on.
        :param resource: Optional resource filter.
        :param limit: Maximum number of results to return.
        """
        if limit == 0:
            return

        q = {}
        if metaquery:
            q = self._convert_metaquery(metaquery)

        if start_timestamp_op and start_timestamp_op != 'ge':
            raise ceilometer.NotImplementedError(('Start time op %s '
                                                  'not implemented') %
                                                 start_timestamp_op)

        if end_timestamp_op and end_timestamp_op != 'le':
            raise ceilometer.NotImplementedError(('End time op %s '
                                                  'not implemented') %
                                                 end_timestamp_op)

        if not start_timestamp:
            start_timestamp = timeutils.isotime(datetime.datetime(1970, 1, 1))
        else:
            start_timestamp = timeutils.isotime(start_timestamp)

        if end_timestamp:
            end_timestamp = timeutils.isotime(end_timestamp)

        dims_filter = dict(user_id=user,
                           project_id=project,
                           source=source,
                           resource_id=resource
                           )
        dims_filter = {k: v for k, v in dims_filter.items() if v is not None}

        _search_args = dict(
            start_time=start_timestamp,
            end_time=end_timestamp,
            limit=1)

        _search_args = {k: v for k, v in _search_args.items()
                        if v is not None}

        result_count = 0
        for metric in self.mc.metrics_list(dimensions=dims_filter):
            _search_args['name'] = metric['name']
            _search_args['dimensions'] = metric['dimensions']
            try:
                for sample in self.mc.measurements_list(**_search_args):
                    d = sample['dimensions']
                    m = self._convert_to_dict(
                        sample['measurements'][0], sample['columns'])
                    vm = m['value_meta']
                    if not self._match_metaquery_to_value_meta(q, vm):
                        continue
                    if d.get('resource_id'):
                        result_count += 1

                        yield api_models.Resource(
                            resource_id=d.get('resource_id'),
                            first_sample_timestamp=(
                                timeutils.parse_isotime(m['timestamp'])),
                            last_sample_timestamp=timeutils.utcnow(),
                            project_id=d.get('project_id'),
                            source=d.get('source'),
                            user_id=d.get('user_id'),
                            metadata=m['value_meta']
                        )

                        if result_count == limit:
                            return

            except monasca_exc.HTTPConflict:
                pass

    def get_meters(self, user=None, project=None, resource=None, source=None,
                   metaquery=None, limit=None):
        """Return an iterable of dictionaries containing meter information.

        { 'name': name of the meter,
          'type': type of the meter (gauge, delta, cumulative),
          'resource_id': UUID of the resource,
          'project_id': UUID of project owning the resource,
          'user_id': UUID of user owning the resource,
          }

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param resource: Optional resource filter.
        :param source: Optional source filter.
        :param metaquery: Optional dict with metadata to match on.
        :param limit: Maximum number of results to return.
        """
        if limit == 0:
            return

        if metaquery:
            raise ceilometer.NotImplementedError('Metaquery not implemented')

        _dimensions = dict(
            user_id=user,
            project_id=project,
            resource_id=resource,
            source=source
        )

        _dimensions = {k: v for k, v in _dimensions.items() if v is not None}

        _search_kwargs = {'dimensions': _dimensions}

        if limit:
            _search_kwargs['limit'] = limit

        for metric in self.mc.metrics_list(**_search_kwargs):
            yield api_models.Meter(
                name=metric['name'],
                type=metric['dimensions'].get('type') or 'cumulative',
                unit=metric['dimensions'].get('unit'),
                resource_id=metric['dimensions'].get('resource_id'),
                project_id=metric['dimensions'].get('project_id'),
                source=metric['dimensions'].get('source'),
                user_id=metric['dimensions'].get('user_id'))

    def get_samples(self, sample_filter, limit=None):
        """Return an iterable of dictionaries containing sample information.

        {
          'source': source of the resource,
          'counter_name': name of the resource,
          'counter_type': type of the sample (gauge, delta, cumulative),
          'counter_unit': unit of the sample,
          'counter_volume': volume of the sample,
          'user_id': UUID of user owning the resource,
          'project_id': UUID of project owning the resource,
          'resource_id': UUID of the resource,
          'timestamp': timestamp of the sample,
          'resource_metadata': metadata of the sample,
          'message_id': message ID of the sample,
          'message_signature': message signature of the sample,
          'recorded_at': time the sample was recorded
          }

        :param sample_filter: constraints for the sample search.
        :param limit: Maximum number of results to return.
        """

        if limit == 0:
            return

        if not sample_filter or not sample_filter.meter:
            raise ceilometer.NotImplementedError(
                "Supply meter name at the least")

        if (sample_filter.start_timestamp_op and
                sample_filter.start_timestamp_op != 'ge'):
            raise ceilometer.NotImplementedError(
                'Start time op %s not implemented' %
                sample_filter.start_timestamp_op)

        if (sample_filter.end_timestamp_op and
                sample_filter.end_timestamp_op != 'le'):
            raise ceilometer.NotImplementedError(
                'End time op %s not implemented' %
                sample_filter.end_timestamp_op)

        q = {}
        if sample_filter.metaquery:
            q = self._convert_metaquery(sample_filter.metaquery)

        if sample_filter.message_id:
            raise ceilometer.NotImplementedError('message_id not '
                                                 'implemented '
                                                 'in get_samples')

        if not sample_filter.start_timestamp:
            sample_filter.start_timestamp = datetime.datetime(1970, 1, 1)

        if not sample_filter.end_timestamp:
            sample_filter.end_timestamp = datetime.datetime.utcnow()

        _dimensions = dict(
            user_id=sample_filter.user,
            project_id=sample_filter.project,
            resource_id=sample_filter.resource,
            source=sample_filter.source,
            # Dynamic sample filter attributes; these fields are useful
            # for filtering results.
            unit=getattr(sample_filter, 'unit', None),
            type=getattr(sample_filter, 'type', None),
        )

        _dimensions = {k: v for k, v in _dimensions.items() if v is not None}

        _metric_args = dict(name=sample_filter.meter,
                            dimensions=_dimensions)

        start_ts = timeutils.isotime(sample_filter.start_timestamp)
        end_ts = timeutils.isotime(sample_filter.end_timestamp)

        _search_args = dict(
            start_time=start_ts,
            start_timestamp_op=sample_filter.start_timestamp_op,
            end_time=end_ts,
            end_timestamp_op=sample_filter.end_timestamp_op,
            merge_metrics=False
        )

        result_count = 0
        for metric in self.mc.metrics_list(**_metric_args):
            _search_args['name'] = metric['name']
            _search_args['dimensions'] = metric['dimensions']
            _search_args = {k: v for k, v in _search_args.items()
                            if v is not None}

            for sample in self.mc.measurements_list(**_search_args):
                d = sample['dimensions']
                for meas in sample['measurements']:
                    m = self._convert_to_dict(
                        meas, sample['columns'])
                    vm = m['value_meta']
                    if not self._match_metaquery_to_value_meta(q, vm):
                        continue
                    result_count += 1
                    yield api_models.Sample(
                        source=d.get('source'),
                        counter_name=sample['name'],
                        counter_type=d.get('type'),
                        counter_unit=d.get('unit'),
                        counter_volume=m['value'],
                        user_id=d.get('user_id'),
                        project_id=d.get('project_id'),
                        resource_id=d.get('resource_id'),
                        timestamp=timeutils.parse_isotime(m['timestamp']),
                        resource_metadata=m['value_meta'],
                        message_id=sample['id'],
                        message_signature='',
                        recorded_at=(timeutils.parse_isotime(m['timestamp'])))

                    if result_count == limit:
                        return

    def get_meter_statistics(self, filter, period=None, groupby=None,
                             aggregate=None):
        """Return a dictionary containing meter statistics.

        Meter statistics is described by the query parameters.
        The filter must have a meter value set.

        { 'min':
          'max':
          'avg':
          'sum':
          'count':
          'period':
          'period_start':
          'period_end':
          'duration':
          'duration_start':
          'duration_end':
          }
        """
        if filter:
            if not filter.meter:
                raise ceilometer.NotImplementedError('Query without meter '
                                                     'not implemented')
        else:
            raise ceilometer.NotImplementedError('Query without filter '
                                                 'not implemented')

        allowed_groupby = ['user_id', 'project_id', 'resource_id', 'source']

        if groupby:
            if len(groupby) > 1:
                raise ceilometer.NotImplementedError('Only one groupby '
                                                     'supported')

            groupby = groupby[0]
            if groupby not in allowed_groupby:
                raise ceilometer.NotImplementedError('Groupby %s not'
                                                     ' implemented' % groupby)

        if filter.metaquery:
            raise ceilometer.NotImplementedError('Metaquery not implemented')

        if filter.message_id:
            raise ceilometer.NotImplementedError('Message_id query '
                                                 'not implemented')

        if filter.start_timestamp_op and filter.start_timestamp_op != 'ge':
            raise ceilometer.NotImplementedError(('Start time op %s '
                                                  'not implemented') %
                                                 filter.start_timestamp_op)

        if filter.end_timestamp_op and filter.end_timestamp_op != 'le':
            raise ceilometer.NotImplementedError(('End time op %s '
                                                  'not implemented') %
                                                 filter.end_timestamp_op)
        if not filter.start_timestamp:
            filter.start_timestamp = timeutils.isotime(
                datetime.datetime(1970, 1, 1))
        else:
            filter.start_timestamp = timeutils.isotime(filter.start_timestamp)

        if filter.end_timestamp:
            filter.end_timestamp = timeutils.isotime(filter.end_timestamp)

        # TODO(monasca): Add this as a config parameter
        allowed_stats = ['avg', 'min', 'max', 'sum', 'count']
        if aggregate:
            not_allowed_stats = [a.func for a in aggregate
                                 if a.func not in allowed_stats]
            if not_allowed_stats:
                raise ceilometer.NotImplementedError(('Aggregate function(s) '
                                                      '%s not implemented') %
                                                     not_allowed_stats)

            statistics = [a.func for a in aggregate
                          if a.func in allowed_stats]
        else:
            statistics = allowed_stats

        dims_filter = dict(user_id=filter.user,
                           project_id=filter.project,
                           source=filter.source,
                           resource_id=filter.resource
                           )
        dims_filter = {k: v for k, v in dims_filter.items() if v is not None}

        period = period or cfg.CONF.monasca.default_stats_period

        if groupby:
            _metric_args = dict(name=filter.meter,
                                dimensions=dims_filter)
            group_stats_list = []

            for metric in self.mc.metrics_list(**_metric_args):
                _search_args = dict(
                    name=metric['name'],
                    dimensions=metric['dimensions'],
                    start_time=filter.start_timestamp,
                    end_time=filter.end_timestamp,
                    period=period,
                    statistics=','.join(statistics),
                    merge_metrics=False)

                _search_args = {k: v for k, v in _search_args.items()
                                if v is not None}
                stats_list = self.mc.statistics_list(**_search_args)
                group_stats_list.extend(stats_list)

            group_stats_dict = {}

            for stats in group_stats_list:
                groupby_val = stats['dimensions'].get(groupby)
                stats_list = group_stats_dict.get(groupby_val)
                if stats_list:
                    stats_list.append(stats)
                else:
                    group_stats_dict[groupby_val] = [stats]

            def get_max(items):
                return max(items)

            def get_min(items):
                return min(items)

            def get_avg(items):
                return sum(items)/len(items)

            def get_sum(items):
                return sum(items)

            def get_count(items):
                count = 0
                for item in items:
                    count = count + item
                return count

            for group_key, stats_group in group_stats_dict.iteritems():
                max_list = []
                min_list = []
                avg_list = []
                sum_list = []
                count_list = []
                ts_list = []
                group_statistics = {}
                for stats in stats_group:
                    for s in stats['statistics']:
                        stats_dict = self._convert_to_dict(s, stats['columns'])

                        if 'max' in stats['columns']:
                            max_list.append(stats_dict['max'])
                        if 'min' in stats['columns']:
                            min_list.append(stats_dict['min'])
                        if 'avg' in stats['columns']:
                            avg_list.append(stats_dict['avg'])
                        if 'sum' in stats['columns']:
                            sum_list.append(stats_dict['sum'])
                        if 'count' in stats['columns']:
                            count_list.append(stats_dict['count'])

                        ts_list.append(stats_dict['timestamp'])

                        group_statistics['unit'] = (stats['dimensions'].
                                                    get('unit'))

                if len(max_list):
                    group_statistics['max'] = get_max(max_list)
                if len(min_list):
                    group_statistics['min'] = get_min(min_list)
                if len(avg_list):
                    group_statistics['avg'] = get_avg(avg_list)
                if len(sum_list):
                    group_statistics['sum'] = get_sum(sum_list)
                if len(count_list):
                    group_statistics['count'] = get_count(count_list)

                group_statistics['end_timestamp'] = get_max(ts_list)
                group_statistics['timestamp'] = get_min(ts_list)

                ts_start = timeutils.parse_isotime(
                    group_statistics['timestamp']).replace(tzinfo=None)

                ts_end = timeutils.parse_isotime(
                    group_statistics['end_timestamp']).replace(tzinfo=None)

                del group_statistics['end_timestamp']

                if 'count' in group_statistics:
                    group_statistics['count'] = int(group_statistics['count'])
                unit = group_statistics['unit']
                del group_statistics['unit']
                if aggregate:
                    group_statistics['aggregate'] = {}
                    for a in aggregate:
                        key = '%s%s' % (a.func, '/%s' % a.param if a.param
                                        else '')
                        group_statistics['aggregate'][key] = (
                            group_statistics.get(key))
                yield api_models.Statistics(
                    unit=unit,
                    period=period,
                    period_start=ts_start,
                    period_end=ts_end,
                    duration=period,
                    duration_start=ts_start,
                    duration_end=ts_end,
                    groupby={groupby: group_key},
                    **group_statistics
                )
        else:
            _search_args = dict(
                name=filter.meter,
                dimensions=dims_filter,
                start_time=filter.start_timestamp,
                end_time=filter.end_timestamp,
                period=period,
                statistics=','.join(statistics),
                merge_metrics=True)

            _search_args = {k: v for k, v in _search_args.items()
                            if v is not None}
            stats_list = self.mc.statistics_list(**_search_args)
            for stats in stats_list:
                for s in stats['statistics']:
                    stats_dict = self._convert_to_dict(s, stats['columns'])
                    ts_start = timeutils.parse_isotime(
                        stats_dict['timestamp']).replace(tzinfo=None)
                    ts_end = (ts_start + datetime.timedelta(
                        0, period)).replace(tzinfo=None)
                    del stats_dict['timestamp']
                    if 'count' in stats_dict:
                        stats_dict['count'] = int(stats_dict['count'])

                    if aggregate:
                        stats_dict['aggregate'] = {}
                        for a in aggregate:
                            key = '%s%s' % (a.func, '/%s' % a.param if a.param
                                            else '')
                            stats_dict['aggregate'][key] = stats_dict.get(key)

                    yield api_models.Statistics(
                        unit=stats['dimensions'].get('unit'),
                        period=period,
                        period_start=ts_start,
                        period_end=ts_end,
                        duration=period,
                        duration_start=ts_start,
                        duration_end=ts_end,
                        groupby={u'': u''},
                        **stats_dict
                    )

    def _parse_to_filter_list(self, filter_expr):
        """Parse complex query expression to simple filter list.

        For example, parse:
            {"or":[{"=":{"meter":"cpu"}},{"=":{"meter":"memory"}}]}
        to
            [[{"=":{"counter_name":"cpu"}}],
             [{"=":{"counter_name":"memory"}}]]
        """
        op, nodes = filter_expr.items()[0]
        msg = "%s operand is not supported" % op

        if op == 'or':
            filter_list = []
            for node in nodes:
                filter_list.extend(self._parse_to_filter_list(node))
            return filter_list
        elif op == 'and':
            filter_list_subtree = []
            for node in nodes:
                filter_list_subtree.append(self._parse_to_filter_list(node))
            filter_list = [[]]
            for filters in filter_list_subtree:
                tmp = []
                for filter in filters:
                    for f in filter_list:
                        tmp.append(f + filter)
                filter_list = tmp
            return filter_list
        elif op == 'not':
            raise ceilometer.NotImplementedError(msg)
        elif op in ("<", "<=", "=", ">=", ">", '!='):
            return [[filter_expr]]
        else:
            raise ceilometer.NotImplementedError(msg)

    def _parse_to_sample_filter(self, simple_filters):
        """Parse to simple filters to sample filter.

        For i.e.: parse
            [{"=":{"counter_name":"cpu"}},{"=":{"counter_volume": 1}}]
        to
            SampleFilter(counter_name="cpu", counter_volume=1)
        """
        equal_only_fields = (
            'counter_name',
            'counter_unit',
            'counter_type',
            'project_id',
            'user_id',
            'source',
            'resource_id',
            # These fields are supported by Ceilometer but are not
            # supported by Monasca.
            # 'message_id',
            # 'message_signature',
            # 'recorded_at',
        )
        field_map = {
            "project_id": "project",
            "user_id": "user",
            "resource_id": "resource",
            "counter_name": "meter",
            "counter_type": "type",
            "counter_unit": "unit",
        }
        msg = "operand %s cannot be applied to field %s"
        kwargs = {'metaquery': {}}
        for sf in simple_filters:
            op = sf.keys()[0]
            field, value = sf.values()[0].items()[0]
            if field in equal_only_fields:
                if op != '=':
                    raise ceilometer.NotImplementedError(msg % (op, field))
                field = field_map.get(field, field)
                kwargs[field] = value
            elif field == 'timestamp':
                if op == '>=':
                    kwargs['start_timestamp'] = value
                    kwargs['start_timestamp_op'] = 'ge'
                elif op == '<=':
                    kwargs['end_timestamp'] = value
                    kwargs['end_timestamp_op'] = 'le'
                else:
                    raise ceilometer.NotImplementedError(msg % (op, field))
            elif field == 'counter_volume':
                kwargs['volume'] = value
                kwargs['volume_op'] = op
            elif (field.startswith('resource_metadata.') or
                  field.startswith('metadata.')):
                kwargs['metaquery'][field] = value
            else:
                ra_msg = "field %s is not supported" % field
                raise ceilometer.NotImplementedError(ra_msg)
        sample_type = kwargs.pop('type', None)
        sample_unit = kwargs.pop('unit', None)
        sample_volume = kwargs.pop('volume', None)
        sample_volume_op = kwargs.pop('volume_op', None)
        sample_filter = storage.SampleFilter(**kwargs)
        # Add some dynamic attributes: the type and unit attributes can be
        # used when querying the Monasca API, and the volume and volume_op
        # attributes can be used for volume comparison.
        sample_filter.type = sample_type
        sample_filter.unit = sample_unit
        sample_filter.volume = sample_volume
        sample_filter.volume_op = sample_volume_op
        return sample_filter

    def _parse_to_sample_filters(self, filter_expr):
        """Parse complex query expression to sample filter list."""
        filter_list = self._parse_to_filter_list(filter_expr)
        sample_filters = []
        for filters in filter_list:
            sf = self._parse_to_sample_filter(filters)
            if sf:
                sample_filters.append(sf)
        return sample_filters

    def _validate_samples_by_volume(self, samples, sf):
        if not sf.volume:
            return samples

        op_func_map = {
            '<': operator.lt,
            '<=': operator.le,
            '=': operator.eq,
            '>=': operator.ge,
            '>': operator.gt,
            '!=': operator.ne,
        }

        ret = []
        for s in samples:
            op_func = op_func_map[sf.volume_op]
            volume = getattr(s, 'volume', getattr(s, 'counter_volume', None))
            if op_func(volume, sf.volume):
                ret.append(s)
        return ret

    def query_samples(self, filter_expr=None, orderby=None, limit=None):
        if not filter_expr:
            msg = "fitler must be specified"
            raise ceilometer.NotImplementedError(msg)
        if orderby:
            msg = "orderby is not supported"
            raise ceilometer.NotImplementedError(msg)
        if not limit:
            msg = "limit must be specified"
            raise ceilometer.NotImplementedError(msg)

        LOG.debug("filter_expr = %s", filter_expr)
        sample_filters = self._parse_to_sample_filters(filter_expr)
        LOG.debug("sample_filters = %s", sample_filters)

        ret = []
        for sf in sample_filters:
            if not sf.volume:
                samples = list(self.get_samples(sf, limit))
            else:
                samples = self.get_samples(sf)
                samples = list(self._validate_samples_by_volume(samples, sf))

            if limit <= len(samples):
                ret.extend(samples[0:limit])
                break
            else:
                ret.extend(samples)
                limit -= len(samples)

        return ret
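
The and/or flattening above is easiest to see in isolation. Below is a minimal standalone sketch of what _parse_to_filter_list does: "or" concatenates the filter groups of its branches, while "and" builds the cross product of its subtrees' groups. The simplified function name and the plain NotImplementedError are assumptions for illustration only.

def parse_to_filter_list(expr):
    # Each expression dict carries exactly one operator key.
    op, nodes = list(expr.items())[0]
    if op == 'or':
        # Union: collect the filter groups of every branch.
        result = []
        for node in nodes:
            result.extend(parse_to_filter_list(node))
        return result
    elif op == 'and':
        # Cross product: each resulting group combines one group
        # from every subtree.
        result = [[]]
        for node in nodes:
            result = [group + sub
                      for sub in parse_to_filter_list(node)
                      for group in result]
        return result
    elif op in ('<', '<=', '=', '>=', '>', '!='):
        # A leaf comparison is a single one-filter group.
        return [[expr]]
    raise NotImplementedError('%s operator is not supported' % op)

expr = {'or': [{'=': {'meter': 'cpu'}}, {'=': {'meter': 'memory'}}]}
print(parse_to_filter_list(expr))
# [[{'=': {'meter': 'cpu'}}], [{'=': {'meter': 'memory'}}]]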
Code example #10
class Connection(base.Connection):
    CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
                                       AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = utils.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )

    def __init__(self, url):
        self.mc = monasca_client.Client(netutils.urlsplit(url))
        self.mon_filter = MonascaDataFilter()

    @staticmethod
    def _convert_to_dict(stats, cols):
        return {c: stats[i] for i, c in enumerate(cols)}

    def _convert_metaquery(self, metaquery):
        """Strip "metadata." from key and convert value to string

        :param metaquery:  { 'metadata.KEY': VALUE, ... }
        :returns: converted metaquery
        """
        query = {}
        for k, v in metaquery.items():
            key = k.split('.')[1]
            if isinstance(v, basestring):
                query[key] = v
            else:
                query[key] = str(int(v))
        return query

    def _match_metaquery_to_value_meta(self, query, value_meta):
        """Check if metaquery matches value_meta

        :param query: metaquery with converted format
        :param value_meta: metadata from monasca
        :returns: True for matched, False for not matched
        """
        if (len(query) > 0 and
            (len(value_meta) == 0
             or not set(query.items()).issubset(set(value_meta.items())))):
            return False
        else:
            return True

    def upgrade(self):
        pass

    def clear(self):
        pass

    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter.
        """
        LOG.info(
            _('metering data %(counter_name)s for %(resource_id)s: '
              '%(counter_volume)s') % ({
                  'counter_name': data['counter_name'],
                  'resource_id': data['resource_id'],
                  'counter_volume': data['counter_volume']
              }))

        metric = self.mon_filter.process_sample_for_monasca(data)
        self.mc.metrics_create(**metric)

    def clear_expired_metering_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.
        :param ttl: Number of seconds to keep records for.
        """
        LOG.info(_("Dropping data with TTL %d"), ttl)

    def get_resources(self,
                      user=None,
                      project=None,
                      source=None,
                      start_timestamp=None,
                      start_timestamp_op=None,
                      end_timestamp=None,
                      end_timestamp_op=None,
                      metaquery=None,
                      resource=None,
                      pagination=None):
        """Return an iterable of dictionaries containing resource information.

        { 'resource_id': UUID of the resource,
          'project_id': UUID of project owning the resource,
          'user_id': UUID of user owning the resource,
          'timestamp': UTC datetime of last update to the resource,
          'metadata': most current metadata for the resource,
          'meter': list of the meters reporting data for the resource,
          }

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param source: Optional source filter.
        :param start_timestamp: Optional modified timestamp start range.
        :param start_timestamp_op: Optional start time operator, like gt, ge.
        :param end_timestamp: Optional modified timestamp end range.
        :param end_timestamp_op: Optional end time operator, like lt, le.
        :param metaquery: Optional dict with metadata to match on.
        :param resource: Optional resource filter.
        :param pagination: Optional pagination query.
        """
        if pagination:
            raise ceilometer.NotImplementedError('Pagination not implemented')

        q = {}
        if metaquery:
            q = self._convert_metaquery(metaquery)

        if start_timestamp_op and start_timestamp_op != 'ge':
            raise ceilometer.NotImplementedError(
                ('Start time op %s '
                 'not implemented') % start_timestamp_op)

        if end_timestamp_op and end_timestamp_op != 'le':
            raise ceilometer.NotImplementedError(
                ('End time op %s '
                 'not implemented') % end_timestamp_op)

        if not start_timestamp:
            start_timestamp = timeutils.isotime(datetime.datetime(1970, 1, 1))
        else:
            start_timestamp = timeutils.isotime(start_timestamp)

        if end_timestamp:
            end_timestamp = timeutils.isotime(end_timestamp)

        dims_filter = dict(user_id=user,
                           project_id=project,
                           source=source,
                           resource_id=resource)
        dims_filter = {k: v for k, v in dims_filter.items() if v is not None}

        _search_args = dict(start_time=start_timestamp,
                            end_time=end_timestamp,
                            limit=1)

        _search_args = {k: v for k, v in _search_args.items() if v is not None}

        for metric in self.mc.metrics_list(**dict(dimensions=dims_filter)):
            _search_args['name'] = metric['name']
            _search_args['dimensions'] = metric['dimensions']
            try:
                for sample in self.mc.measurements_list(**_search_args):
                    d = sample['dimensions']
                    m = self._convert_to_dict(sample['measurements'][0],
                                              sample['columns'])
                    vm = m['value_meta']
                    if not self._match_metaquery_to_value_meta(q, vm):
                        continue
                    if d.get('resource_id'):
                        yield api_models.Resource(
                            resource_id=d.get('resource_id'),
                            first_sample_timestamp=(timeutils.parse_isotime(
                                m['timestamp'])),
                            last_sample_timestamp=timeutils.utcnow(),
                            project_id=d.get('project_id'),
                            source=d.get('source'),
                            user_id=d.get('user_id'),
                            metadata=m['value_meta'],
                        )
            except monasca_exc.HTTPConflict:
                pass

    def get_meters(self,
                   user=None,
                   project=None,
                   resource=None,
                   source=None,
                   limit=None,
                   metaquery=None,
                   pagination=None):
        """Return an iterable of dictionaries containing meter information.

        { 'name': name of the meter,
          'type': type of the meter (gauge, delta, cumulative),
          'resource_id': UUID of the resource,
          'project_id': UUID of project owning the resource,
          'user_id': UUID of user owning the resource,
          }

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param resource: Optional resource filter.
        :param source: Optional source filter.
        :param limit: Maximum number of results to return.
        :param metaquery: Optional dict with metadata to match on.
        :param pagination: Optional pagination query.
        """
        if pagination:
            raise ceilometer.NotImplementedError('Pagination not implemented')

        if metaquery:
            raise ceilometer.NotImplementedError('Metaquery not implemented')

        _dimensions = dict(user_id=user,
                           project_id=project,
                           resource_id=resource,
                           source=source)

        _dimensions = {k: v for k, v in _dimensions.items() if v is not None}

        _search_kwargs = {'dimensions': _dimensions}

        if limit:
            _search_kwargs['limit'] = limit

        for metric in self.mc.metrics_list(**_search_kwargs):
            yield api_models.Meter(
                name=metric['name'],
                type=metric['dimensions'].get('type') or 'cumulative',
                unit=metric['dimensions'].get('unit'),
                resource_id=metric['dimensions'].get('resource_id'),
                project_id=metric['dimensions'].get('project_id'),
                source=metric['dimensions'].get('source'),
                user_id=metric['dimensions'].get('user_id'))

    def get_samples(self, sample_filter, limit=None):
        """Return an iterable of dictionaries containing sample information.

        {
          'source': source of the resource,
          'counter_name': name of the resource,
          'counter_type': type of the sample (gauge, delta, cumulative),
          'counter_unit': unit of the sample,
          'counter_volume': volume of the sample,
          'user_id': UUID of user owning the resource,
          'project_id': UUID of project owning the resource,
          'resource_id': UUID of the resource,
          'timestamp': timestamp of the sample,
          'resource_metadata': metadata of the sample,
          'message_id': message ID of the sample,
          'message_signature': message signature of the sample,
          'recorded_at': time the sample was recorded
          }

        :param sample_filter: constraints for the sample search.
        :param limit: Maximum number of results to return.
        """

        if not sample_filter or not sample_filter.meter:
            raise ceilometer.NotImplementedError(
                "Supply meter name at the least")

        if (sample_filter.start_timestamp_op
                and sample_filter.start_timestamp_op != 'ge'):
            raise ceilometer.NotImplementedError(
                ('Start time op %s '
                 'not implemented') % sample_filter.start_timestamp_op)

        if (sample_filter.end_timestamp_op
                and sample_filter.end_timestamp_op != 'le'):
            raise ceilometer.NotImplementedError(
                ('End time op %s '
                 'not implemented') % sample_filter.end_timestamp_op)

        q = {}
        if sample_filter.metaquery:
            q = self._convert_metaquery(sample_filter.metaquery)

        if sample_filter.message_id:
            raise ceilometer.NotImplementedError('message_id not '
                                                 'implemented '
                                                 'in get_samples')

        if not sample_filter.start_timestamp:
            sample_filter.start_timestamp = \
                timeutils.isotime(datetime.datetime(1970, 1, 1))
        else:
            sample_filter.start_timestamp = \
                timeutils.isotime(sample_filter.start_timestamp)

        if sample_filter.end_timestamp:
            sample_filter.end_timestamp = \
                timeutils.isotime(sample_filter.end_timestamp)

        _dimensions = dict(user_id=sample_filter.user,
                           project_id=sample_filter.project,
                           resource_id=sample_filter.resource,
                           source=sample_filter.source)

        _dimensions = {k: v for k, v in _dimensions.items() if v is not None}

        _search_args = dict(
            name=sample_filter.meter,
            start_time=sample_filter.start_timestamp,
            start_timestamp_op=(sample_filter.start_timestamp_op),
            end_time=sample_filter.end_timestamp,
            end_timestamp_op=sample_filter.end_timestamp_op,
            limit=limit,
            merge_metrics=True,
            dimensions=_dimensions)

        _search_args = {k: v for k, v in _search_args.items() if v is not None}

        for sample in self.mc.measurements_list(**_search_args):
            LOG.debug(_('Retrieved sample: %s'), sample)

            d = sample['dimensions']
            for measurement in sample['measurements']:
                meas_dict = self._convert_to_dict(measurement,
                                                  sample['columns'])
                vm = meas_dict['value_meta']
                if not self._match_metaquery_to_value_meta(q, vm):
                    continue
                yield api_models.Sample(
                    source=d.get('source'),
                    counter_name=sample['name'],
                    counter_type=d.get('type'),
                    counter_unit=d.get('unit'),
                    counter_volume=meas_dict['value'],
                    user_id=d.get('user_id'),
                    project_id=d.get('project_id'),
                    resource_id=d.get('resource_id'),
                    timestamp=timeutils.parse_isotime(meas_dict['timestamp']),
                    resource_metadata=meas_dict['value_meta'],
                    message_id=sample['id'],
                    message_signature='',
                    recorded_at=(timeutils.parse_isotime(
                        meas_dict['timestamp'])))

    def get_meter_statistics(self,
                             filter,
                             period=None,
                             groupby=None,
                             aggregate=None):
        """Return a dictionary containing meter statistics.

        Meter statistics is described by the query parameters.
        The filter must have a meter value set.

        { 'min':
          'max':
          'avg':
          'sum':
          'count':
          'period':
          'period_start':
          'period_end':
          'duration':
          'duration_start':
          'duration_end':
          }
        """
        if filter:
            if not filter.meter:
                raise ceilometer.NotImplementedError('Query without meter '
                                                     'not implemented')
        else:
            raise ceilometer.NotImplementedError('Query without filter '
                                                 'not implemented')

        if groupby:
            raise ceilometer.NotImplementedError('Groupby not implemented')

        if filter.metaquery:
            raise ceilometer.NotImplementedError('Metaquery not implemented')

        if filter.message_id:
            raise ceilometer.NotImplementedError('Message_id query '
                                                 'not implemented')

        if filter.start_timestamp_op and filter.start_timestamp_op != 'ge':
            raise ceilometer.NotImplementedError(
                ('Start time op %s '
                 'not implemented') % filter.start_timestamp_op)

        if filter.end_timestamp_op and filter.end_timestamp_op != 'le':
            raise ceilometer.NotImplementedError(
                ('End time op %s '
                 'not implemented') % filter.end_timestamp_op)

        if not filter.start_timestamp:
            filter.start_timestamp = timeutils.isotime(
                datetime.datetime(1970, 1, 1))

        # TODO(monasca): Add this as a config parameter
        allowed_stats = ['avg', 'min', 'max', 'sum', 'count']
        if aggregate:
            not_allowed_stats = [
                a.func for a in aggregate if a.func not in allowed_stats
            ]
            if not_allowed_stats:
                raise ceilometer.NotImplementedError(
                    ('Aggregate function(s) '
                     '%s not implemented') % not_allowed_stats)

            statistics = [a.func for a in aggregate if a.func in allowed_stats]
        else:
            statistics = allowed_stats

        dims_filter = dict(user_id=filter.user,
                           project_id=filter.project,
                           source=filter.source,
                           resource_id=filter.resource)
        dims_filter = {k: v for k, v in dims_filter.items() if v is not None}

        period = period if period \
            else cfg.CONF.monasca.default_stats_period

        _search_args = dict(name=filter.meter,
                            dimensions=dims_filter,
                            start_time=filter.start_timestamp,
                            end_time=filter.end_timestamp,
                            period=period,
                            statistics=','.join(statistics),
                            merge_metrics=True)

        _search_args = {k: v for k, v in _search_args.items() if v is not None}

        stats_list = self.mc.statistics_list(**_search_args)
        for stats in stats_list:
            for s in stats['statistics']:
                stats_dict = self._convert_to_dict(s, stats['columns'])
                ts_start = timeutils.parse_isotime(stats_dict['timestamp'])
                ts_end = ts_start + datetime.timedelta(0, period)
                del stats_dict['timestamp']
                if 'count' in stats_dict:
                    stats_dict['count'] = int(stats_dict['count'])
                yield api_models.Statistics(
                    unit=stats['dimensions'].get('unit'),
                    period=period,
                    period_start=ts_start,
                    period_end=ts_end,
                    duration=period,
                    duration_start=ts_start,
                    duration_end=ts_end,
                    groupby={u'': u''},
                    **stats_dict)
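
For reference, here is a tiny runnable sketch of the column zipping that _convert_to_dict applies to Monasca statistics and measurement rows; the column names and values below are illustrative only.

def convert_to_dict(stats, cols):
    # Pair each column name with the row value at the same index.
    return {c: stats[i] for i, c in enumerate(cols)}

cols = ['timestamp', 'avg', 'count']
row = ['2015-04-14T16:00:00Z', 0.25, 4.0]
print(convert_to_dict(row, cols))
# {'timestamp': '2015-04-14T16:00:00Z', 'avg': 0.25, 'count': 4.0}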
Code example #11
class Connection(base.Connection):
    CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
                                       AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = utils.update_nested(
        base.Connection.STORAGE_CAPABILITIES,
        AVAILABLE_STORAGE_CAPABILITIES,
    )

    def __init__(self, url):
        self.mc = monasca_client.Client(netutils.urlsplit(url))
        self.mon_filter = MonascaDataFilter()

    @staticmethod
    def _convert_to_dict(stats, cols):
        return {c: stats[i] for i, c in enumerate(cols)}

    def _convert_metaquery(self, metaquery):
        """Strip "metadata." from key and convert value to string

        :param metaquery:  { 'metadata.KEY': VALUE, ... }
        :returns: converted metaquery
        """
        query = {}
        for k, v in metaquery.items():
            key = k.split('.')[1]
            if isinstance(v, basestring):
                query[key] = v
            else:
                query[key] = str(int(v))
        return query

    def _match_metaquery_to_value_meta(self, query, value_meta):
        """Check if metaquery matches value_meta

        :param query: metaquery with converted format
        :param value_meta: metadata from monasca
        :returns: True for matched, False for not matched
        """
        if (len(query) > 0 and
           (len(value_meta) == 0 or
           not set(query.items()).issubset(set(value_meta.items())))):
            return False
        else:
            return True

    def upgrade(self):
        pass

    def clear(self):
        pass

    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter.
        """
        LOG.info(_('metering data %(counter_name)s for %(resource_id)s: '
                   '%(counter_volume)s')
                 % ({'counter_name': data['counter_name'],
                     'resource_id': data['resource_id'],
                     'counter_volume': data['counter_volume']}))

        metric = self.mon_filter.process_sample_for_monasca(data)
        self.mc.metrics_create(**metric)

    def clear_expired_metering_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.
        :param ttl: Number of seconds to keep records for.
        """
        LOG.info(_("Dropping data with TTL %d"), ttl)

    def get_resources(self, user=None, project=None, source=None,
                      start_timestamp=None, start_timestamp_op=None,
                      end_timestamp=None, end_timestamp_op=None,
                      metaquery=None, resource=None, pagination=None):
        """Return an iterable of dictionaries containing resource information.

        { 'resource_id': UUID of the resource,
          'project_id': UUID of project owning the resource,
          'user_id': UUID of user owning the resource,
          'timestamp': UTC datetime of last update to the resource,
          'metadata': most current metadata for the resource,
          'meter': list of the meters reporting data for the resource,
          }

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param source: Optional source filter.
        :param start_timestamp: Optional modified timestamp start range.
        :param start_timestamp_op: Optional start time operator, like gt, ge.
        :param end_timestamp: Optional modified timestamp end range.
        :param end_timestamp_op: Optional end time operator, like lt, le.
        :param metaquery: Optional dict with metadata to match on.
        :param resource: Optional resource filter.
        :param pagination: Optional pagination query.
        """
        if pagination:
            raise ceilometer.NotImplementedError('Pagination not implemented')

        q = {}
        if metaquery:
            q = self._convert_metaquery(metaquery)

        if start_timestamp_op and start_timestamp_op != 'ge':
            raise ceilometer.NotImplementedError(('Start time op %s '
                                                  'not implemented') %
                                                 start_timestamp_op)

        if end_timestamp_op and end_timestamp_op != 'le':
            raise ceilometer.NotImplementedError(('End time op %s '
                                                  'not implemented') %
                                                 end_timestamp_op)

        if not start_timestamp:
            start_timestamp = timeutils.isotime(datetime.datetime(1970, 1, 1))
        else:
            start_timestamp = timeutils.isotime(start_timestamp)

        if end_timestamp:
            end_timestamp = timeutils.isotime(end_timestamp)

        dims_filter = dict(user_id=user,
                           project_id=project,
                           source=source,
                           resource_id=resource
                           )
        dims_filter = {k: v for k, v in dims_filter.items() if v is not None}

        _search_args = dict(
            start_time=start_timestamp,
            end_time=end_timestamp,
            limit=1)

        _search_args = {k: v for k, v in _search_args.items()
                        if v is not None}

        for metric in self.mc.metrics_list(
                **dict(dimensions=dims_filter)):
            _search_args['name'] = metric['name']
            _search_args['dimensions'] = metric['dimensions']
            try:
                for sample in self.mc.measurements_list(**_search_args):
                    d = sample['dimensions']
                    m = self._convert_to_dict(
                        sample['measurements'][0], sample['columns'])
                    vm = m['value_meta']
                    if not self._match_metaquery_to_value_meta(q, vm):
                        continue
                    if d.get('resource_id'):
                        yield api_models.Resource(
                            resource_id=d.get('resource_id'),
                            first_sample_timestamp=(
                                timeutils.parse_isotime(m['timestamp'])),
                            last_sample_timestamp=timeutils.utcnow(),
                            project_id=d.get('project_id'),
                            source=d.get('source'),
                            user_id=d.get('user_id'),
                            metadata=m['value_meta'],
                        )
            except monasca_exc.HTTPConflict:
                pass

    def get_meters(self, user=None, project=None, resource=None, source=None,
                   limit=None, metaquery=None, pagination=None):
        """Return an iterable of dictionaries containing meter information.

        { 'name': name of the meter,
          'type': type of the meter (gauge, delta, cumulative),
          'resource_id': UUID of the resource,
          'project_id': UUID of project owning the resource,
          'user_id': UUID of user owning the resource,
          }

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param resource: Optional resource filter.
        :param source: Optional source filter.
        :param limit: Maximum number of results to return.
        :param metaquery: Optional dict with metadata to match on.
        :param pagination: Optional pagination query.
        """
        if pagination:
            raise ceilometer.NotImplementedError('Pagination not implemented')

        if metaquery:
            raise ceilometer.NotImplementedError('Metaquery not implemented')

        _dimensions = dict(
            user_id=user,
            project_id=project,
            resource_id=resource,
            source=source
        )

        _dimensions = {k: v for k, v in _dimensions.items() if v is not None}

        _search_kwargs = {'dimensions': _dimensions}

        if limit:
            _search_kwargs['limit'] = limit

        for metric in self.mc.metrics_list(**_search_kwargs):
            yield api_models.Meter(
                name=metric['name'],
                type=metric['dimensions'].get('type') or 'cumulative',
                unit=metric['dimensions'].get('unit'),
                resource_id=metric['dimensions'].get('resource_id'),
                project_id=metric['dimensions'].get('project_id'),
                source=metric['dimensions'].get('source'),
                user_id=metric['dimensions'].get('user_id'))

    def get_measurements(self, result_queue, metric_name, metric_dimensions,
                         meta_q, start_ts, end_ts, start_op, end_op, limit):

        start_ts = timeutils.isotime(start_ts)
        end_ts = timeutils.isotime(end_ts)

        _search_args = dict(name=metric_name,
                            start_time=start_ts,
                            start_timestamp_op=start_op,
                            end_time=end_ts,
                            end_timestamp_op=end_op,
                            merge_metrics=False,
                            limit=limit,
                            dimensions=metric_dimensions)

        _search_args = {k: v for k, v in _search_args.items()
                        if v is not None}

        for sample in self.mc.measurements_list(**_search_args):
            LOG.debug(_('Retrieved sample: %s'), sample)

            d = sample['dimensions']
            for measurement in sample['measurements']:
                meas_dict = self._convert_to_dict(measurement,
                                                  sample['columns'])
                vm = meas_dict['value_meta']
                if not self._match_metaquery_to_value_meta(meta_q, vm):
                    continue
                result_queue.put(api_models.Sample(
                    source=d.get('source'),
                    counter_name=sample['name'],
                    counter_type=d.get('type'),
                    counter_unit=d.get('unit'),
                    counter_volume=meas_dict['value'],
                    user_id=d.get('user_id'),
                    project_id=d.get('project_id'),
                    resource_id=d.get('resource_id'),
                    timestamp=timeutils.parse_isotime(meas_dict['timestamp']),
                    resource_metadata=meas_dict['value_meta'],
                    message_id=sample['id'],
                    message_signature='',
                    recorded_at=(
                        timeutils.parse_isotime(meas_dict['timestamp']))))

    def get_next_time_delta(self, start, end, delta):
        # Gets next time window
        curr = start
        while curr < end:
            next = curr + delta
            yield curr, next
            curr = next

    def get_next_task_args(self, sample_filter, delta, **kwargs):
        # Yields next set of measurement related args
        metrics = self.mc.metrics_list(**kwargs)
        for start, end in self.get_next_time_delta(
                sample_filter.start_timestamp,
                sample_filter.end_timestamp,
                delta):
            for metric in metrics:
                task = {'metric': metric['name'],
                        'dimension': metric['dimensions'],
                        'start_ts': start,
                        'end_ts': end}
                LOG.debug(_('next task is: %s'), task)
                yield task

    def has_more_results(self, result_queue, t_pool):
        if result_queue.empty() and t_pool.pool.running() == 0:
            return False
        return True

    def fetch_from_queue(self, result_queue, t_pool):
        # Fetches result from queue in non-blocking way
        try:
            result = result_queue.get_nowait()
            LOG.debug(_('Retrieved result : %s'), result)
            return result
        except Empty:
            # if no data in queue, yield to work threads
            # to give them a chance
            if t_pool.pool.running() > 0:
                eventlet.sleep(0)

    def get_results(self, result_queue, t_pool, limit=None, result_count=None):
        # Inspect and yield results
        if limit:
            while result_count < limit:
                if not self.has_more_results(result_queue, t_pool):
                    break
                result = self.fetch_from_queue(result_queue, t_pool)
                if result:
                    yield result
                    result_count += 1
        else:
            while True:
                if not self.has_more_results(result_queue, t_pool):
                    break
                result = self.fetch_from_queue(result_queue, t_pool)
                if result:
                    yield result

    def get_samples(self, sample_filter, limit=None):
        """Return an iterable of dictionaries containing sample information.

        {
          'source': source of the resource,
          'counter_name': name of the resource,
          'counter_type': type of the sample (gauge, delta, cumulative),
          'counter_unit': unit of the sample,
          'counter_volume': volume of the sample,
          'user_id': UUID of user owning the resource,
          'project_id': UUID of project owning the resource,
          'resource_id': UUID of the resource,
          'timestamp': timestamp of the sample,
          'resource_metadata': metadata of the sample,
          'message_id': message ID of the sample,
          'message_signature': message signature of the sample,
          'recorded_at': time the sample was recorded
          }

        :param sample_filter: constraints for the sample search.
        :param limit: Maximum number of results to return.
        """
        # Initialize pool of green work threads and queue to handle results
        thread_pool = threadgroup.ThreadGroup(
            thread_pool_size=cfg.CONF.monasca.query_concurrency_limit)
        result_queue = eventlet.queue.Queue()

        if not sample_filter or not sample_filter.meter:
            raise ceilometer.NotImplementedError(
                "Supply meter name at the least")

        if (sample_filter.start_timestamp_op and
                sample_filter.start_timestamp_op != 'ge'):
            raise ceilometer.NotImplementedError(('Start time op %s '
                                                  'not implemented') %
                                                 sample_filter.
                                                 start_timestamp_op)

        if (sample_filter.end_timestamp_op and
                sample_filter.end_timestamp_op != 'le'):
            raise ceilometer.NotImplementedError(('End time op %s '
                                                  'not implemented') %
                                                 sample_filter.
                                                 end_timestamp_op)

        q = {}
        if sample_filter.metaquery:
            q = self._convert_metaquery(sample_filter.metaquery)

        if sample_filter.message_id:
            raise ceilometer.NotImplementedError('message_id not '
                                                 'implemented '
                                                 'in get_samples')

        if not sample_filter.start_timestamp:
            sample_filter.start_timestamp = datetime.datetime(1970, 1, 1)

        if not sample_filter.end_timestamp:
            sample_filter.end_timestamp = datetime.datetime.utcnow()

        # Split the query window into query_concurrency_limit equal slices,
        # one per worker green thread.
        delta = sample_filter.end_timestamp - sample_filter.start_timestamp
        delta = delta / cfg.CONF.monasca.query_concurrency_limit

        _dimensions = dict(
            user_id=sample_filter.user,
            project_id=sample_filter.project,
            resource_id=sample_filter.resource,
            source=sample_filter.source
        )

        _dimensions = {k: v for k, v in _dimensions.items() if v is not None}

        _metric_args = dict(name=sample_filter.meter,
                            dimensions=_dimensions)

        if limit:
            result_count = 0

        for task_cnt, task in enumerate(self.get_next_task_args(
                sample_filter, delta, **_metric_args)):
            # Spawn query_concurrency_limit number of green threads
            # simultaneously to fetch measurements
            thread_pool.add_thread(self.get_measurements,
                                   result_queue,
                                   task['metric'],
                                   task['dimension'],
                                   q,
                                   task['start_ts'],
                                   task['end_ts'],
                                   sample_filter.start_timestamp_op,
                                   sample_filter.end_timestamp_op,
                                   limit)
            # For every query_concurrency_limit set of tasks,
            # consume data from the queue and yield before moving on to
            # the next set of tasks.
            if (task_cnt + 1) % cfg.CONF.monasca.query_concurrency_limit == 0:
                for result in self.get_results(result_queue, thread_pool,
                                               limit,
                                               result_count=result_count if
                                               limit else None):
                    yield result

        # Shutdown threadpool
        thread_pool.stop()

    def get_meter_statistics(self, filter, period=None, groupby=None,
                             aggregate=None):
        """Return a dictionary containing meter statistics.

        Meter statistics is described by the query parameters.
        The filter must have a meter value set.

        { 'min':
          'max':
          'avg':
          'sum':
          'count':
          'period':
          'period_start':
          'period_end':
          'duration':
          'duration_start':
          'duration_end':
          }
        """
        if filter:
            if not filter.meter:
                raise ceilometer.NotImplementedError('Query without meter '
                                                     'not implemented')
        else:
            raise ceilometer.NotImplementedError('Query without filter '
                                                 'not implemented')

        if groupby:
            raise ceilometer.NotImplementedError('Groupby not implemented')

        if filter.metaquery:
            raise ceilometer.NotImplementedError('Metaquery not implemented')

        if filter.message_id:
            raise ceilometer.NotImplementedError('Message_id query '
                                                 'not implemented')

        if filter.start_timestamp_op and filter.start_timestamp_op != 'ge':
            raise ceilometer.NotImplementedError(('Start time op %s '
                                                  'not implemented') %
                                                 filter.start_timestamp_op)

        if filter.end_timestamp_op and filter.end_timestamp_op != 'le':
            raise ceilometer.NotImplementedError(('End time op %s '
                                                  'not implemented') %
                                                 filter.end_timestamp_op)

        if not filter.start_timestamp:
            filter.start_timestamp = timeutils.isotime(
                datetime.datetime(1970, 1, 1))

        # TODO(monasca): Add this as a config parameter
        allowed_stats = ['avg', 'min', 'max', 'sum', 'count']
        if aggregate:
            not_allowed_stats = [a.func for a in aggregate
                                 if a.func not in allowed_stats]
            if not_allowed_stats:
                raise ceilometer.NotImplementedError(('Aggregate function(s) '
                                                      '%s not implemented') %
                                                     not_allowed_stats)

            statistics = [a.func for a in aggregate
                          if a.func in allowed_stats]
        else:
            statistics = allowed_stats

        dims_filter = dict(user_id=filter.user,
                           project_id=filter.project,
                           source=filter.source,
                           resource_id=filter.resource
                           )
        dims_filter = {k: v for k, v in dims_filter.items() if v is not None}

        period = period if period \
            else cfg.CONF.monasca.default_stats_period

        _search_args = dict(
            name=filter.meter,
            dimensions=dims_filter,
            start_time=filter.start_timestamp,
            end_time=filter.end_timestamp,
            period=period,
            statistics=','.join(statistics),
            merge_metrics=True)

        _search_args = {k: v for k, v in _search_args.items()
                        if v is not None}

        stats_list = self.mc.statistics_list(**_search_args)
        for stats in stats_list:
            for s in stats['statistics']:
                stats_dict = self._convert_to_dict(s, stats['columns'])
                ts_start = timeutils.parse_isotime(stats_dict['timestamp'])
                ts_end = ts_start + datetime.timedelta(0, period)
                del stats_dict['timestamp']
                if 'count' in stats_dict:
                    stats_dict['count'] = int(stats_dict['count'])
                yield api_models.Statistics(
                    unit=stats['dimensions'].get('unit'),
                    period=period,
                    period_start=ts_start,
                    period_end=ts_end,
                    duration=period,
                    duration_start=ts_start,
                    duration_end=ts_end,
                    groupby={u'': u''},
                    **stats_dict
                )
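
The concurrent get_samples above hinges on the windowing done by get_next_time_delta: the query range is cut into equal slices, one per worker. The sketch below reproduces that generator standalone; the slice count 4 stands in for cfg.CONF.monasca.query_concurrency_limit and the dates are illustrative.

import datetime

def next_time_delta(start, end, delta):
    # Yield consecutive (start, end) windows covering the range.
    curr = start
    while curr < end:
        yield curr, curr + delta
        curr = curr + delta

start = datetime.datetime(2016, 1, 1, 0, 0)
end = datetime.datetime(2016, 1, 1, 8, 0)
delta = (end - start) / 4  # one slice per worker thread
for win_start, win_end in next_time_delta(start, end, delta):
    print(win_start, '->', win_end)
# 2016-01-01 00:00:00 -> 2016-01-01 02:00:00
# ... followed by three more two-hour windows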
Code example #12
class Connection(base.Connection):
    CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES)
    STORAGE_CAPABILITIES = utils.update_nested(base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES)

    def __init__(self, url):
        self.mc = monasca_client.Client(netutils.urlsplit(url))
        self.mon_filter = MonascaDataFilter()

    @staticmethod
    def _convert_to_dict(stats, cols):
        return {c: stats[i] for i, c in enumerate(cols)}

    def upgrade(self):
        pass

    def clear(self):
        pass

    def record_metering_data(self, data):
        """Write the data to the backend storage system.

        :param data: a dictionary such as returned by
                     ceilometer.meter.meter_message_from_counter.
        """
        LOG.info(
            _("metering data %(counter_name)s for %(resource_id)s: " "%(counter_volume)s")
            % (
                {
                    "counter_name": data["counter_name"],
                    "resource_id": data["resource_id"],
                    "counter_volume": data["counter_volume"],
                }
            )
        )

        metric = self.mon_filter.process_sample_for_monasca(data)
        self.mc.metrics_create(**metric)

    def clear_expired_metering_data(self, ttl):
        """Clear expired data from the backend storage system.

        Clearing occurs according to the time-to-live.
        :param ttl: Number of seconds to keep records for.
        """
        LOG.info(_("Dropping data with TTL %d"), ttl)

    def get_resources(
        self,
        user=None,
        project=None,
        source=None,
        start_timestamp=None,
        start_timestamp_op=None,
        end_timestamp=None,
        end_timestamp_op=None,
        metaquery=None,
        resource=None,
        pagination=None,
        limit=None,
    ):
        """Return an iterable of dictionaries containing resource information.

        { 'resource_id': UUID of the resource,
          'project_id': UUID of project owning the resource,
          'user_id': UUID of user owning the resource,
          'timestamp': UTC datetime of last update to the resource,
          'metadata': most current metadata for the resource,
          'meter': list of the meters reporting data for the resource,
          }

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param source: Optional source filter.
        :param start_timestamp: Optional modified timestamp start range.
        :param start_timestamp_op: Optional start time operator, like gt, ge.
        :param end_timestamp: Optional modified timestamp end range.
        :param end_timestamp_op: Optional end time operator, like lt, le.
        :param metaquery: Optional dict with metadata to match on.
        :param resource: Optional resource filter.
        :param pagination: Optional pagination query.
        """
        if pagination:
            raise ceilometer.NotImplementedError("Pagination not implemented")

        if metaquery:
            raise ceilometer.NotImplementedError("Metaquery not implemented")

        if start_timestamp_op and start_timestamp_op != "ge":
            raise ceilometer.NotImplementedError(("Start time op %s " "not implemented") % start_timestamp_op)

        if end_timestamp_op and end_timestamp_op != "le":
            raise ceilometer.NotImplementedError(("End time op %s " "not implemented") % end_timestamp_op)

        if not start_timestamp:
            start_timestamp = timeutils.isotime(datetime.datetime(1970, 1, 1))
        else:
            start_timestamp = timeutils.isotime(start_timestamp)

        if end_timestamp:
            end_timestamp = timeutils.isotime(end_timestamp)

        dims_filter = dict(user_id=user, project_id=project, source=source, resource_id=resource)
        dims_filter = {k: v for k, v in dims_filter.items() if v is not None}

        _search_args = dict(start_time=start_timestamp, end_time=end_timestamp, limit=1)

        _search_args = {k: v for k, v in _search_args.items() if v is not None}

        for metric in self.mc.metrics_list(**dict(dimensions=dims_filter)):
            _search_args["name"] = metric["name"]
            _search_args["dimensions"] = metric["dimensions"]
            try:
                for sample in self.mc.measurements_list(**_search_args):
                    d = sample["dimensions"]
                    m = self._convert_to_dict(sample["measurements"][0], sample["columns"])
                    if d.get("resource_id"):
                        yield api_models.Resource(
                            resource_id=d.get("resource_id"),
                            first_sample_timestamp=(timeutils.parse_isotime(m["timestamp"])),
                            last_sample_timestamp=timeutils.utcnow(),
                            project_id=d.get("project_id"),
                            source=d.get("source"),
                            user_id=d.get("user_id"),
                            metadata=m["value_meta"],
                        )
            except monasca_exc.HTTPConflict:
                pass

    def get_meters(
        self, user=None, project=None, resource=None, source=None, limit=None, metaquery=None, pagination=None
    ):
        """Return an iterable of dictionaries containing meter information.

        { 'name': name of the meter,
          'type': type of the meter (gauge, delta, cumulative),
          'resource_id': UUID of the resource,
          'project_id': UUID of project owning the resource,
          'user_id': UUID of user owning the resource,
          }

        :param user: Optional ID for user that owns the resource.
        :param project: Optional ID for project that owns the resource.
        :param resource: Optional resource filter.
        :param source: Optional source filter.
        :param limit: Maximum number of results to return.
        :param metaquery: Optional dict with metadata to match on.
        :param pagination: Optional pagination query.
        """
        if pagination:
            raise ceilometer.NotImplementedError("Pagination not implemented")

        if metaquery:
            raise ceilometer.NotImplementedError("Metaquery not implemented")

        _dimensions = dict(user_id=user, project_id=project, resource_id=resource, source=source)

        _dimensions = {k: v for k, v in _dimensions.items() if v is not None}

        _search_kwargs = {"dimensions": _dimensions}

        if limit:
            _search_kwargs["limit"] = limit

        for metric in self.mc.metrics_list(**_search_kwargs):
            yield api_models.Meter(
                name=metric["name"],
                type=metric["dimensions"].get("type") or "cumulative",
                unit=metric["dimensions"].get("unit"),
                resource_id=metric["dimensions"].get("resource_id"),
                project_id=metric["dimensions"].get("project_id"),
                source=metric["dimensions"].get("source"),
                user_id=metric["dimensions"].get("user_id"),
            )

    def get_samples(self, sample_filter, limit=None):
        """Return an iterable of dictionaries containing sample information.

        {
          'source': source of the resource,
          'counter_name': name of the resource,
          'counter_type': type of the sample (gauge, delta, cumulative),
          'counter_unit': unit of the sample,
          'counter_volume': volume of the sample,
          'user_id': UUID of user owning the resource,
          'project_id': UUID of project owning the resource,
          'resource_id': UUID of the resource,
          'timestamp': timestamp of the sample,
          'resource_metadata': metadata of the sample,
          'message_id': message ID of the sample,
          'message_signature': message signature of the sample,
          'recorded_at': time the sample was recorded
          }

        :param sample_filter: constraints for the sample search.
        :param limit: Maximum number of results to return.
        """

        if not sample_filter or not sample_filter.meter:
            raise ceilometer.NotImplementedError("Supply meter name at the least")

        if sample_filter.start_timestamp_op and sample_filter.start_timestamp_op != "ge":
            raise ceilometer.NotImplementedError(
                ("Start time op %s " "not implemented") % sample_filter.start_timestamp_op
            )

        if sample_filter.end_timestamp_op and sample_filter.end_timestamp_op != "le":
            raise ceilometer.NotImplementedError(("End time op %s " "not implemented") % sample_filter.end_timestamp_op)

        if sample_filter.metaquery:
            raise ceilometer.NotImplementedError("metaquery not " "implemented " "in get_samples")

        if sample_filter.message_id:
            raise ceilometer.NotImplementedError("message_id not " "implemented " "in get_samples")

        if not sample_filter.start_timestamp:
            sample_filter.start_timestamp = timeutils.isotime(datetime.datetime(1970, 1, 1))
        else:
            sample_filter.start_timestamp = timeutils.isotime(sample_filter.start_timestamp)

        if sample_filter.end_timestamp:
            sample_filter.end_timestamp = timeutils.isotime(sample_filter.end_timestamp)

        _dimensions = dict(
            user_id=sample_filter.user,
            project_id=sample_filter.project,
            resource_id=sample_filter.resource,
            source=sample_filter.source,
        )

        _dimensions = {k: v for k, v in _dimensions.items() if v is not None}

        _search_args = dict(
            name=sample_filter.meter,
            start_time=sample_filter.start_timestamp,
            start_timestamp_op=(sample_filter.start_timestamp_op),
            end_time=sample_filter.end_timestamp,
            end_timestamp_op=sample_filter.end_timestamp_op,
            limit=limit,
            merge_metrics=True,
            dimensions=_dimensions,
        )

        _search_args = {k: v for k, v in _search_args.items() if v is not None}

        for sample in self.mc.measurements_list(**_search_args):
            LOG.debug(_("Retrieved sample: %s"), sample)

            d = sample["dimensions"]
            for measurement in sample["measurements"]:
                meas_dict = self._convert_to_dict(measurement, sample["columns"])

                yield api_models.Sample(
                    source=d.get("source"),
                    counter_name=sample["name"],
                    counter_type=d.get("type"),
                    counter_unit=d.get("unit"),
                    counter_volume=meas_dict["value"],
                    user_id=d.get("user_id"),
                    project_id=d.get("project_id"),
                    resource_id=d.get("resource_id"),
                    timestamp=timeutils.parse_isotime(meas_dict["timestamp"]),
                    resource_metadata=meas_dict["value_meta"],
                    message_id=sample["id"],
                    message_signature="",
                    recorded_at=(timeutils.parse_isotime(meas_dict["timestamp"])),
                )

    def get_meter_statistics(self, filter, period=None, groupby=None, aggregate=None):
        """Return a dictionary containing meter statistics.

        Meter statistics is described by the query parameters.
        The filter must have a meter value set.

        { 'min':
          'max':
          'avg':
          'sum':
          'count':
          'period':
          'period_start':
          'period_end':
          'duration':
          'duration_start':
          'duration_end':
          }
        """
        if not filter:
            raise ceilometer.NotImplementedError("Query without filter not implemented")

        if not filter.meter:
            raise ceilometer.NotImplementedError("Query without meter not implemented")

        if groupby:
            raise ceilometer.NotImplementedError("Groupby not implemented")

        if filter.metaquery:
            raise ceilometer.NotImplementedError("Metaquery not implemented")

        if filter.message_id:
            raise ceilometer.NotImplementedError("Message_id query not implemented")

        if filter.start_timestamp_op and filter.start_timestamp_op != "ge":
            raise ceilometer.NotImplementedError(
                "Start time op %s not implemented" % filter.start_timestamp_op
            )

        if filter.end_timestamp_op and filter.end_timestamp_op != "le":
            raise ceilometer.NotImplementedError(
                "End time op %s not implemented" % filter.end_timestamp_op
            )

        if not filter.start_timestamp:
            filter.start_timestamp = timeutils.isotime(datetime.datetime(1970, 1, 1))

        # TODO(monasca): Make this a config parameter
        allowed_stats = ["avg", "min", "max", "sum", "count"]
        if aggregate:
            not_allowed_stats = [a.func for a in aggregate if a.func not in allowed_stats]
            if not_allowed_stats:
                raise ceilometer.NotImplementedError(
                    "Aggregate function(s) %s not implemented" % not_allowed_stats
                )

            statistics = [a.func for a in aggregate if a.func in allowed_stats]
        else:
            statistics = allowed_stats

        dims_filter = dict(
            user_id=filter.user, project_id=filter.project, source=filter.source, resource_id=filter.resource
        )
        dims_filter = {k: v for k, v in dims_filter.items() if v is not None}

        period = period or cfg.CONF.monasca.default_stats_period

        _search_args = dict(
            name=filter.meter,
            dimensions=dims_filter,
            start_time=filter.start_timestamp,
            end_time=filter.end_timestamp,
            period=period,
            statistics=",".join(statistics),
            merge_metrics=True,
        )

        _search_args = {k: v for k, v in _search_args.items() if v is not None}

        stats_list = self.mc.statistics_list(**_search_args)
        for stats in stats_list:
            for s in stats["statistics"]:
                stats_dict = self._convert_to_dict(s, stats["columns"])
                ts_start = timeutils.parse_isotime(stats_dict["timestamp"])
                ts_end = ts_start + datetime.timedelta(seconds=period)
                del stats_dict["timestamp"]
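                # Monasca appears to return every statistic as a float;
                # 'count' is cast back to an int for the Ceilometer API model.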
                if "count" in stats_dict:
                    stats_dict["count"] = int(stats_dict["count"])
                yield api_models.Statistics(
                    unit=stats["dimensions"].get("unit"),
                    period=period,
                    period_start=ts_start,
                    period_end=ts_end,
                    duration=period,
                    duration_start=ts_start,
                    duration_end=ts_end,
                    groupby={u"": u""},
                    **stats_dict
                )
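
    # Hedged usage sketch (not part of the driver): the filter class, the
    # import path, and the 'conn' connection object below are illustrative
    # assumptions, shown only to make the two query paths above concrete.
    #
    #     from ceilometer.storage import SampleFilter  # assumed import path
    #
    #     f = SampleFilter(meter='cpu.utilization',
    #                      resource='instance-uuid',
    #                      start_timestamp_op='ge')
    #     for s in conn.get_samples(f, limit=10):
    #         print(s.counter_name, s.counter_volume, s.timestamp)
    #     for st in conn.get_meter_statistics(f, period=3600):
    #         print(st.period_start, st.avg, st.count)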