Example 1
def _get_query_timestamps(args={}):
    # Determine the desired range, if any, from the
    # GET arguments. Set up the query range using
    # the specified offset.
    # [query_start ... start_timestamp ... end_timestamp ... query_end]
    search_offset = int(args.get('search_offset', 0))

    start_timestamp = args.get('start_timestamp')
    if start_timestamp:
        start_timestamp = timeutils.parse_isotime(start_timestamp)
        start_timestamp = start_timestamp.replace(tzinfo=None)
        query_start = (start_timestamp -
                       datetime.timedelta(minutes=search_offset))
    else:
        query_start = None

    end_timestamp = args.get('end_timestamp')
    if end_timestamp:
        end_timestamp = timeutils.parse_isotime(end_timestamp)
        end_timestamp = end_timestamp.replace(tzinfo=None)
        query_end = end_timestamp + datetime.timedelta(minutes=search_offset)
    else:
        query_end = None

    return dict(query_start=query_start,
                query_end=query_end,
                start_timestamp=start_timestamp,
                end_timestamp=end_timestamp,
                search_offset=search_offset,
                )
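
A minimal usage sketch of the window arithmetic above, assuming parse_isotime accepts a plain ISO 8601 string (datetime.fromisoformat is a hypothetical stand-in for the oslo helper):

import datetime

def parse_isotime(value):
    # Hypothetical stand-in for timeutils.parse_isotime; assumes a
    # plain ISO 8601 string with no exotic timezone suffixes.
    return datetime.datetime.fromisoformat(value)

args = {'start_timestamp': '2012-08-27T10:00:00',
        'end_timestamp': '2012-08-27T12:00:00',
        'search_offset': '5'}

offset = datetime.timedelta(minutes=int(args.get('search_offset', 0)))
query_start = parse_isotime(args['start_timestamp']) - offset
query_end = parse_isotime(args['end_timestamp']) + offset

# [query_start ... start_timestamp ... end_timestamp ... query_end]
print(query_start)  # 2012-08-27 09:55:00
print(query_end)    # 2012-08-27 12:05:00
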
Example 2
    def statistics(self, q=[], period=None):
        """Computes the statistics of the samples in the time range given.

        :param q: Filter rules for the data to be returned.
        :param period: Returned result will be an array of statistics,
                       one for each period of that many seconds.

        """
        kwargs = _query_to_kwargs(q, storage.EventFilter.__init__)
        kwargs['meter'] = self._id
        f = storage.EventFilter(**kwargs)
        computed = pecan.request.storage_conn.get_meter_statistics(f, period)
        LOG.debug('computed value coming from %r', pecan.request.storage_conn)
        # Find the original timestamp in the query to use for clamping
        # the duration returned in the statistics.
        start = end = None
        for i in q:
            if i.field == 'timestamp' and i.op in ('lt', 'le'):
                end = timeutils.parse_isotime(i.value).replace(tzinfo=None)
            elif i.field == 'timestamp' and i.op in ('gt', 'ge'):
                start = timeutils.parse_isotime(i.value).replace(tzinfo=None)

        return [
            Statistics(start_timestamp=start, end_timestamp=end, **c.as_dict())
            for c in computed
        ]
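
A sketch of the clamping loop in isolation; the namedtuple is a hypothetical stand-in for the API's Query type, assumed to expose field, op and value:

import collections
import datetime

Query = collections.namedtuple('Query', 'field op value')

q = [Query('timestamp', 'ge', '2012-07-02T10:41:00'),
     Query('timestamp', 'lt', '2012-07-02T12:00:00')]

start = end = None
for i in q:
    # 'lt'/'le' bound the range from above, 'gt'/'ge' from below.
    if i.field == 'timestamp' and i.op in ('lt', 'le'):
        end = datetime.datetime.fromisoformat(i.value)
    elif i.field == 'timestamp' and i.op in ('gt', 'ge'):
        start = datetime.datetime.fromisoformat(i.value)

print(start, end)  # 2012-07-02 10:41:00 2012-07-02 12:00:00
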
Example 3
def _get_query_timestamps(args={}):
    # Determine the desired range, if any, from the
    # GET arguments. Set up the query range using
    # the specified offset.
    # [query_start ... start_timestamp ... end_timestamp ... query_end]
    search_offset = int(args.get('search_offset', 0))

    start_timestamp = args.get('start_timestamp')
    if start_timestamp:
        start_timestamp = timeutils.parse_isotime(start_timestamp)
        start_timestamp = start_timestamp.replace(tzinfo=None)
        query_start = (start_timestamp -
                       datetime.timedelta(minutes=search_offset))
    else:
        query_start = None

    end_timestamp = args.get('end_timestamp')
    if end_timestamp:
        end_timestamp = timeutils.parse_isotime(end_timestamp)
        end_timestamp = end_timestamp.replace(tzinfo=None)
        query_end = end_timestamp + datetime.timedelta(minutes=search_offset)
    else:
        query_end = None

    return dict(query_start=query_start,
                query_end=query_end,
                start_timestamp=start_timestamp,
                end_timestamp=end_timestamp,
                search_offset=search_offset,
                )
Example 4
    def statistics(self, q=[], period=None):
        """Computes the statistics of the samples in the time range given.

        :param q: Filter rules for the data to be returned.
        :param period: Returned result will be an array of statistics,
                       one for each period of that many seconds.

        """
        kwargs = _query_to_kwargs(q, storage.SampleFilter.__init__)
        kwargs['meter'] = self._id
        f = storage.SampleFilter(**kwargs)
        computed = pecan.request.storage_conn.get_meter_statistics(f, period)
        LOG.debug('computed value coming from %r', pecan.request.storage_conn)
        # Find the original timestamp in the query to use for clamping
        # the duration returned in the statistics.
        start = end = None
        for i in q:
            if i.field == 'timestamp' and i.op in ('lt', 'le'):
                end = timeutils.parse_isotime(i.value).replace(tzinfo=None)
            elif i.field == 'timestamp' and i.op in ('gt', 'ge'):
                start = timeutils.parse_isotime(i.value).replace(tzinfo=None)

        return [Statistics(start_timestamp=start,
                           end_timestamp=end,
                           **c.as_dict())
                for c in computed]
Example 5
    def statistics(self, q=[], groupby=[], period=None):
        """Computes the statistics of the samples in the time range given.

        :param q: Filter rules for the data to be returned.
        :param groupby: Fields for group by aggregation
        :param period: Returned result will be an array of statistics,
                       one for each period of that many seconds.
        """
        if period and period < 0:
            error = _("Period must be positive.")
            pecan.response.translatable_error = error
            raise wsme.exc.ClientSideError(error)

        kwargs = _query_to_kwargs(q, storage.SampleFilter.__init__)
        kwargs['meter'] = self._id
        f = storage.SampleFilter(**kwargs)
        g = _validate_groupby_fields(groupby)
        computed = pecan.request.storage_conn.get_meter_statistics(f,
                                                                   period,
                                                                   g)
        LOG.debug('computed value coming from %r', pecan.request.storage_conn)
        # Find the original timestamp in the query to use for clamping
        # the duration returned in the statistics.
        start = end = None
        for i in q:
            if i.field == 'timestamp' and i.op in ('lt', 'le'):
                end = timeutils.parse_isotime(i.value).replace(tzinfo=None)
            elif i.field == 'timestamp' and i.op in ('gt', 'ge'):
                start = timeutils.parse_isotime(i.value).replace(tzinfo=None)

        return [Statistics(start_timestamp=start,
                           end_timestamp=end,
                           **c.as_dict())
                for c in computed]
Example 6
def sanitize_timestamp(timestamp):
    """Return a naive utc datetime object."""
    if not timestamp:
        return timestamp
    if not isinstance(timestamp, datetime.datetime):
        timestamp = timeutils.parse_isotime(timestamp)
    return timeutils.normalize_time(timestamp)
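
What "naive UTC" means here, using a rough stand-in for timeutils.normalize_time (an assumption about its behavior, not the oslo source):

import datetime

def normalize_time(ts):
    # Convert an offset-aware datetime to naive UTC; pass naive
    # values through unchanged.
    if ts.utcoffset() is None:
        return ts
    return ts.replace(tzinfo=None) - ts.utcoffset()

aware = datetime.datetime.fromisoformat('2012-08-27T10:00:00+02:00')
print(normalize_time(aware))  # 2012-08-27 08:00:00
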
Example 7
    def record_metering_data(self, data):
        # We may have received only one counter on the wire
        if not isinstance(data, list):
            data = [data]

        for meter in data:
            LOG.debug(
                _('metering data %(counter_name)s '
                  'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s') %
                ({
                    'counter_name': meter['counter_name'],
                    'resource_id': meter['resource_id'],
                    'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
                    'counter_volume': meter['counter_volume']
                }))
            if publisher_utils.verify_signature(
                    meter, self.conf.publisher.metering_secret):
                try:
                    # Convert the timestamp to a datetime instance.
                    # Storage engines are responsible for converting
                    # that value to something they can store.
                    if meter.get('timestamp'):
                        ts = timeutils.parse_isotime(meter['timestamp'])
                        meter['timestamp'] = timeutils.normalize_time(ts)
                    self.storage_conn.record_metering_data(meter)
                except Exception as err:
                    LOG.exception(_('Failed to record metering data: %s'), err)
            else:
                LOG.warning(
                    _('message signature invalid, discarding message: %r'),
                    meter)
Example 8
    def record_metering_data(self, context, data):
        """This method is triggered when metering data is
        cast from an agent.
        """
        #LOG.info('metering data: %r', data)
        LOG.info('metering data %s for %s @ %s: %s',
                 data['counter_name'],
                 data['resource_id'],
                 data.get('timestamp', 'NO TIMESTAMP'),
                 data['counter_volume'])
        if not meter.verify_signature(data, cfg.CONF.metering_secret):
            LOG.warning('message signature invalid, discarding message: %r',
                        data)
        else:
            try:
                # Convert the timestamp to a datetime instance.
                # Storage engines are responsible for converting
                # that value to something they can store.
                if data.get('timestamp'):
                    data['timestamp'] = timeutils.parse_isotime(
                        data['timestamp'],
                        )
                self.storage_conn.record_metering_data(data)
            except Exception as err:
                LOG.error('Failed to record metering data: %s', err)
                LOG.exception(err)
Example 9
def _list_resources(source=None, user=None, project=None,
                    start_timestamp=None, end_timestamp=None):
    """Return a list of resource identifiers.
    """
    if start_timestamp:
        start_timestamp = timeutils.parse_isotime(start_timestamp)
    if end_timestamp:
        end_timestamp = timeutils.parse_isotime(end_timestamp)
    resources = flask.request.storage_conn.get_resources(
        source=source,
        user=user,
        project=project,
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
        )
    return flask.jsonify(resources=list(resources))
Example 10
def sanitize_timestamp(timestamp):
    """Return a naive utc datetime object."""
    if not timestamp:
        return timestamp
    if not isinstance(timestamp, datetime.datetime):
        timestamp = timeutils.parse_isotime(timestamp)
    return timeutils.normalize_time(timestamp)
Example 11
    def handle_sample(self, context, counter, source):
        """Handle a sample, converting if necessary."""
        LOG.debug('handling counter %s', (counter,))
        key = counter.name + counter.resource_id
        prev = self.cache.get(key)
        timestamp = timeutils.parse_isotime(counter.timestamp)
        self.cache[key] = (counter.volume, timestamp)

        if prev:
            prev_volume = prev[0]
            prev_timestamp = prev[1]
            time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
            # we only allow negative deltas for noncumulative counters, whereas
            # for cumulative we assume that a reset has occurred in the interim
            # so that the current volume gives a lower bound on growth
            volume_delta = (counter.volume - prev_volume
                            if (prev_volume <= counter.volume or
                                counter.type != ceilocounter.TYPE_CUMULATIVE)
                            else counter.volume)
            rate_of_change = ((1.0 * volume_delta / time_delta)
                              if time_delta else 0.0)

            transformed = self._convert(counter, rate_of_change)
            LOG.debug(_('converted to: %s') % (transformed,))
            counter = self._keep(counter, transformed)
        elif self.replace:
            LOG.warn(_('dropping counter with no predecessor: %s') % counter)
            counter = None
        return counter
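
A worked example of the delta logic above; TYPE_CUMULATIVE is a stand-in string for the library constant:

import datetime

TYPE_CUMULATIVE = 'cumulative'  # stand-in for the ceilometer constant

def rate_of_change(prev_volume, prev_ts, volume, ts, counter_type):
    time_delta = (ts - prev_ts).total_seconds()
    # A drop in a cumulative counter implies a reset, so the current
    # volume is taken as a lower bound on growth; other counter types
    # may legitimately go down.
    volume_delta = (volume - prev_volume
                    if (prev_volume <= volume or
                        counter_type != TYPE_CUMULATIVE)
                    else volume)
    return (1.0 * volume_delta / time_delta) if time_delta else 0.0

t0 = datetime.datetime(2012, 8, 27, 10, 0)
t1 = t0 + datetime.timedelta(minutes=1)
print(rate_of_change(100.0, t0, 160.0, t1, TYPE_CUMULATIVE))  # 1.0 per second
print(rate_of_change(100.0, t0, 30.0, t1, TYPE_CUMULATIVE))   # reset: 0.5
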
Example 12
    def record_metering_data(self, data):
        # We may have received only one counter on the wire
        if not isinstance(data, list):
            data = [data]

        for meter in data:
            LOG.debug(_(
                'metering data %(counter_name)s '
                'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s')
                % ({'counter_name': meter['counter_name'],
                    'resource_id': meter['resource_id'],
                    'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
                    'counter_volume': meter['counter_volume']}))
            if publisher_utils.verify_signature(
                    meter,
                    self.conf.publisher.metering_secret):
                try:
                    # Convert the timestamp to a datetime instance.
                    # Storage engines are responsible for converting
                    # that value to something they can store.
                    if meter.get('timestamp'):
                        ts = timeutils.parse_isotime(meter['timestamp'])
                        meter['timestamp'] = timeutils.normalize_time(ts)
                    self.storage_conn.record_metering_data(meter)
                except Exception as err:
                    LOG.exception(_('Failed to record metering data: %s'),
                                  err)
            else:
                LOG.warning(_(
                    'message signature invalid, discarding message: %r'),
                    meter)
Example 13
    def handle_sample(self, context, counter):
        """Handle a sample, converting if necessary."""
        LOG.debug('handling counter %s', (counter, ))
        key = counter.name + counter.resource_id
        prev = self.cache.get(key)
        timestamp = timeutils.parse_isotime(counter.timestamp)
        self.cache[key] = (counter.volume, timestamp)

        if prev:
            prev_volume = prev[0]
            prev_timestamp = prev[1]
            time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
            # we only allow negative deltas for noncumulative counters, whereas
            # for cumulative we assume that a reset has occurred in the interim
            # so that the current volume gives a lower bound on growth
            volume_delta = (counter.volume - prev_volume if
                            (prev_volume <= counter.volume
                             or counter.type != sample.TYPE_CUMULATIVE) else
                            counter.volume)
            rate_of_change = ((1.0 * volume_delta /
                               time_delta) if time_delta else 0.0)

            counter = self._convert(counter, rate_of_change)
            LOG.debug(_('converted to: %s') % (counter, ))
        else:
            LOG.warn(
                _('dropping counter with no predecessor: %s') % (counter, ))
            counter = None
        return counter
Example 14
    def handle_sample(self, context, s):
        """Handle a sample, converting if necessary."""
        LOG.debug(_('handling sample %s'), (s,))
        key = s.name + s.resource_id
        prev = self.cache.get(key)
        timestamp = timeutils.parse_isotime(s.timestamp)
        self.cache[key] = (s.volume, timestamp)

        if prev:
            prev_volume = prev[0]
            prev_timestamp = prev[1]
            time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
            # we only allow negative deltas for noncumulative samples, whereas
            # for cumulative we assume that a reset has occurred in the interim
            # so that the current volume gives a lower bound on growth
            volume_delta = (s.volume - prev_volume
                            if (prev_volume <= s.volume or
                                s.type != sample.TYPE_CUMULATIVE)
                            else s.volume)
            rate_of_change = ((1.0 * volume_delta / time_delta)
                              if time_delta else 0.0)

            s = self._convert(s, rate_of_change)
            LOG.debug(_('converted to: %s'), (s,))
        else:
            LOG.warn(_('dropping sample with no predecessor: %s'),
                     (s,))
            s = None
        return s
Example 15
    def record_metering_data(self, context, data):
        """This method is triggered when metering data is
        cast from an agent.
        """
        # We may have received only one counter on the wire
        if not isinstance(data, list):
            data = [data]

        for meter in data:
            LOG.info('metering data %s for %s @ %s: %s',
                     meter['counter_name'],
                     meter['resource_id'],
                     meter.get('timestamp', 'NO TIMESTAMP'),
                     meter['counter_volume'])
            if meter_api.verify_signature(meter, cfg.CONF.metering_secret):
                try:
                    # Convert the timestamp to a datetime instance.
                    # Storage engines are responsible for converting
                    # that value to something they can store.
                    if meter.get('timestamp'):
                        ts = timeutils.parse_isotime(meter['timestamp'])
                        meter['timestamp'] = timeutils.normalize_time(ts)
                    self.storage_conn.record_metering_data(meter)
                except Exception as err:
                    LOG.error('Failed to record metering data: %s', err)
                    LOG.exception(err)
            else:
                LOG.warning(
                    'message signature invalid, discarding message: %r',
                    meter)
Example 16
    def record_metering_data(self, context, data):
        """This method is triggered when metering data is
        cast from an agent.
        """
        # We may have received only one counter on the wire
        if not isinstance(data, list):
            data = [data]

        for meter in data:
            LOG.info('metering data %s for %s @ %s: %s',
                     meter['counter_name'],
                     meter['resource_id'],
                     meter.get('timestamp', 'NO TIMESTAMP'),
                     meter['counter_volume'])
            if meter_api.verify_signature(meter, cfg.CONF.metering_secret):
                try:
                    # Convert the timestamp to a datetime instance.
                    # Storage engines are responsible for converting
                    # that value to something they can store.
                    if meter.get('timestamp'):
                        ts = timeutils.parse_isotime(meter['timestamp'])
                        meter['timestamp'] = timeutils.normalize_time(ts)
                    self.storage_conn.record_metering_data(meter)
                except Exception as err:
                    LOG.error('Failed to record metering data: %s', err)
                    LOG.exception(err)
            else:
                LOG.warning(
                    'message signature invalid, discarding message: %r',
                    meter)
Example 17
    def test_multiple_samples(self):
        """Send multiple samples.
        The usecase here is to reduce the chatter and send the counters
        at a slower cadence.
        """
        samples = []
        for x in range(6):
            dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None)
            s = {
                'counter_name': 'apples',
                'counter_type': 'gauge',
                'counter_unit': 'instance',
                'counter_volume': float(x * 3),
                'source': 'evil',
                'timestamp': dt.isoformat(),
                'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
                'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
                'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
                'resource_metadata': {
                    'name1': str(x),
                    'name2': str(x + 4)
                }
            }
            samples.append(s)

        data = self.post_json('/meters/apples/', samples)

        for x, s in enumerate(samples):
            # source is modified to include the project_id.
            s['source'] = '%s:%s' % (s['project_id'], s['source'])
            # Ignore message id that is randomly generated
            s['message_id'] = data.json[x]['message_id']

            # remove tzinfo to compare generated timestamp
            # with the provided one
            c = data.json[x]
            timestamp = timeutils.parse_isotime(c['timestamp'])
            c['timestamp'] = timestamp.replace(tzinfo=None).isoformat()

            # do the same on the pipeline
            msg = self.published[0][x]
            timestamp = timeutils.parse_isotime(msg['timestamp'])
            msg['timestamp'] = timestamp.replace(tzinfo=None).isoformat()

            self.assertEqual(s, c)
            self.assertEqual(s, self.published[0][x])
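
The round trip the test relies on, in isolation: the API returns an offset-aware ISO timestamp, and stripping tzinfo makes it comparable to the naive string that was posted (standard-library sketch):

import datetime

returned = '2012-08-27T03:00:00+00:00'   # aware, as the API renders it
posted = '2012-08-27T03:00:00'           # naive, as the test created it

ts = datetime.datetime.fromisoformat(returned)
assert ts.replace(tzinfo=None).isoformat() == posted
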
Example 18
    def _extract_when(body):
        """Extract the generated datetime from the notification.
        """
        when = body.get('timestamp', body.get('_context_timestamp'))
        if when:
            return timeutils.normalize_time(timeutils.parse_isotime(when))

        return timeutils.utcnow()
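
The fallback chain of _extract_when, sketched with datetime.fromisoformat standing in for parse_isotime:

import datetime

def extract_when(body):
    # Envelope timestamp first, context timestamp second, "now" last.
    when = body.get('timestamp', body.get('_context_timestamp'))
    if when:
        return datetime.datetime.fromisoformat(when)
    return datetime.datetime.utcnow()

print(extract_when({'timestamp': '2013-01-01T00:00:00'}))
print(extract_when({'_context_timestamp': '2013-01-01T01:00:00'}))
print(extract_when({}))  # falls back to the current time
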
Example 19
    def convert_value(cls, trait_type, value):
        if trait_type is cls.INT_TYPE:
            return int(value)
        if trait_type is cls.FLOAT_TYPE:
            return float(value)
        if trait_type is cls.DATETIME_TYPE:
            return timeutils.normalize_time(timeutils.parse_isotime(value))
        return str(value)
Example 20
    def _extract_when(body):
        """Extract the generated datetime from the notification.
        """
        when = body.get('timestamp', body.get('_context_timestamp'))
        if when:
            return timeutils.normalize_time(timeutils.parse_isotime(when))

        return timeutils.utcnow()
Example 21
    def convert_value(cls, trait_type, value):
        if trait_type is cls.INT_TYPE:
            return int(value)
        if trait_type is cls.FLOAT_TYPE:
            return float(value)
        if trait_type is cls.DATETIME_TYPE:
            return timeutils.normalize_time(timeutils.parse_isotime(value))
        return str(value)
Example 22
    def test_multiple_samples(self):
        """Send multiple samples.

        The use case here is to reduce the chatter and send the counters
        at a slower cadence.
        """
        samples = []
        for x in range(6):
            dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None)
            s = {'counter_name': 'apples',
                 'counter_type': 'gauge',
                 'counter_unit': 'instance',
                 'counter_volume': float(x * 3),
                 'source': 'evil',
                 'timestamp': dt.isoformat(),
                 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
                 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
                 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
                 'resource_metadata': {'name1': str(x),
                                       'name2': str(x + 4)}}
            samples.append(s)

        data = self.post_json('/meters/apples/', samples)

        for x, s in enumerate(samples):
            # source is modified to include the project_id.
            s['source'] = '%s:%s' % (s['project_id'],
                                     s['source'])
            # Ignore message id that is randomly generated
            s['message_id'] = data.json[x]['message_id']

            # remove tzinfo to compare generated timestamp
            # with the provided one
            c = data.json[x]
            timestamp = timeutils.parse_isotime(c['timestamp'])
            c['timestamp'] = timestamp.replace(tzinfo=None).isoformat()

            # do the same on the pipeline
            msg = self.published[0][x]
            timestamp = timeutils.parse_isotime(msg['timestamp'])
            msg['timestamp'] = timestamp.replace(tzinfo=None).isoformat()

            self.assertEqual(s, c)
            self.assertEqual(s, self.published[0][x])
Example 23
    def statistics(self, q=[]):
        """Computes the statistics of the meter events in the time range given.
        """
        kwargs = _query_to_kwargs(q, storage.EventFilter.__init__)
        kwargs['meter'] = self._id
        f = storage.EventFilter(**kwargs)
        computed = request.storage_conn.get_meter_statistics(f)
        # Find the original timestamp in the query to use for clamping
        # the duration returned in the statistics.
        start = end = None
        for i in q:
            if i.field == 'timestamp' and i.op in ('lt', 'le'):
                end = timeutils.parse_isotime(i.value).replace(tzinfo=None)
            elif i.field == 'timestamp' and i.op in ('gt', 'ge'):
                start = timeutils.parse_isotime(i.value).replace(tzinfo=None)
        stat = Statistics(start_timestamp=start,
                          end_timestamp=end,
                          **computed)
        return stat
Example 24
def _list_resources(source=None,
                    user=None,
                    project=None,
                    start_timestamp=None,
                    end_timestamp=None):
    """Return a list of resource identifiers.
    """
    if start_timestamp:
        start_timestamp = timeutils.parse_isotime(start_timestamp)
    if end_timestamp:
        end_timestamp = timeutils.parse_isotime(end_timestamp)
    resources = flask.request.storage_conn.get_resources(
        source=source,
        user=user,
        project=project,
        start_timestamp=start_timestamp,
        end_timestamp=end_timestamp,
    )
    return flask.jsonify(resources=list(resources))
Example 25
def _get_query_timestamps(args={}):
    """Return any optional timestamp information in the request.

    Determine the desired range, if any, from the GET arguments. Set
    up the query range using the specified offset.

    [query_start ... start_timestamp ... end_timestamp ... query_end]

    Returns a dictionary containing:

    query_start: First timestamp to use for query
    start_timestamp: start_timestamp parameter from request
    query_end: Final timestamp to use for query
    end_timestamp: end_timestamp parameter from request
    search_offset: search_offset parameter from request

    """
    search_offset = int(args.get('search_offset', 0))

    start_timestamp = args.get('start_timestamp')
    if start_timestamp:
        start_timestamp = timeutils.parse_isotime(start_timestamp)
        start_timestamp = start_timestamp.replace(tzinfo=None)
        query_start = (start_timestamp -
                       datetime.timedelta(minutes=search_offset))
    else:
        query_start = None

    end_timestamp = args.get('end_timestamp')
    if end_timestamp:
        end_timestamp = timeutils.parse_isotime(end_timestamp)
        end_timestamp = end_timestamp.replace(tzinfo=None)
        query_end = end_timestamp + datetime.timedelta(minutes=search_offset)
    else:
        query_end = None

    return {
        'query_start': query_start,
        'query_end': query_end,
        'start_timestamp': start_timestamp,
        'end_timestamp': end_timestamp,
        'search_offset': search_offset,
    }
Example 26
def _get_query_timestamps(args={}):
    """Return any optional timestamp information in the request.

    Determine the desired range, if any, from the GET arguments. Set
    up the query range using the specified offset.

    [query_start ... start_timestamp ... end_timestamp ... query_end]

    Returns a dictionary containing:

    query_start: First timestamp to use for query
    start_timestamp: start_timestamp parameter from request
    query_end: Final timestamp to use for query
    end_timestamp: end_timestamp parameter from request
    search_offset: search_offset parameter from request

    """
    search_offset = int(args.get('search_offset', 0))

    start_timestamp = args.get('start_timestamp')
    if start_timestamp:
        start_timestamp = timeutils.parse_isotime(start_timestamp)
        start_timestamp = start_timestamp.replace(tzinfo=None)
        query_start = (start_timestamp -
                       datetime.timedelta(minutes=search_offset))
    else:
        query_start = None

    end_timestamp = args.get('end_timestamp')
    if end_timestamp:
        end_timestamp = timeutils.parse_isotime(end_timestamp)
        end_timestamp = end_timestamp.replace(tzinfo=None)
        query_end = end_timestamp + datetime.timedelta(minutes=search_offset)
    else:
        query_end = None

    return {'query_start': query_start,
            'query_end': query_end,
            'start_timestamp': start_timestamp,
            'end_timestamp': end_timestamp,
            'search_offset': search_offset,
            }
Example 27
    def test_query_with_isotime(self):
        date_time = datetime.datetime(2012, 7, 2, 10, 41)
        isotime = date_time.isoformat()

        data = self.post_json(
            self.url,
            params={"filter": '{">=": {"timestamp": "' + isotime + '"}}'})

        self.assertEqual(1, len(data.json))
        for sample in data.json:
            result_time = timeutils.parse_isotime(sample["timestamp"])
            result_time = result_time.replace(tzinfo=None)
            self.assertTrue(result_time >= date_time)
Example 28
    def test_filter_with_isotime(self):
        date_time = datetime.datetime(2013, 1, 1)
        isotime = date_time.isoformat()

        data = self.post_json(
            self.url,
            params={"filter": '{">": {"timestamp":"' + isotime + '"}}'})

        self.assertEqual(4, len(data.json))
        for history in data.json:
            result_time = timeutils.parse_isotime(history["timestamp"])
            result_time = result_time.replace(tzinfo=None)
            self.assertTrue(result_time > date_time)
Example 29
    def _extract_when(body):
        """Extract the generated datetime from the notification."""
        # NOTE: I am keeping the logic the same as it was in the collector,
        # However, *ALL* notifications should have a 'timestamp' field; it's
        # part of the notification envelope spec. If this was put here because
        # some openstack project is generating notifications without a
        # timestamp, then that needs to be filed as a bug with the offending
        # project (mdragon)
        when = body.get("timestamp", body.get("_context_timestamp"))
        if when:
            return timeutils.normalize_time(timeutils.parse_isotime(when))

        return timeutils.utcnow()
Example 30
    def _extract_when(body):
        """Extract the generated datetime from the notification."""
        # NOTE: I am keeping the logic the same as it was in the collector,
        # However, *ALL* notifications should have a 'timestamp' field; it's
        # part of the notification envelope spec. If this was put here because
        # some openstack project is generating notifications without a
        # timestamp, then that needs to be filed as a bug with the offending
        # project (mdragon)
        when = body.get('timestamp', body.get('_context_timestamp'))
        if when:
            return timeutils.normalize_time(timeutils.parse_isotime(when))

        return timeutils.utcnow()
Example 31
    def test_filter_with_isotime(self):
        date_time = datetime.datetime(2013, 1, 1)
        isotime = date_time.isoformat()

        data = self.post_json(
            self.url,
            params={"filter": '{">": {"timestamp":"' + isotime + '"}}'})

        self.assertEqual(4, len(data.json))
        for history in data.json:
            result_time = timeutils.parse_isotime(history['timestamp'])
            result_time = result_time.replace(tzinfo=None)
            self.assertTrue(result_time > date_time)
Example 32
    def test_query_with_isotime(self):
        date_time = datetime.datetime(2012, 7, 2, 10, 41)
        isotime = date_time.isoformat()

        data = self.post_json(
            self.url,
            params={"filter": '{">=": {"timestamp": "' + isotime + '"}}'})

        self.assertEqual(2, len(data.json))
        for sample_item in data.json:
            result_time = timeutils.parse_isotime(sample_item['timestamp'])
            result_time = result_time.replace(tzinfo=None)
            self.assertTrue(result_time >= date_time)
Example 33
    def test_filter_with_isotime_state_timestamp(self):
        date_time = datetime.datetime(2013, 1, 1)
        isotime = date_time.isoformat()

        data = self.post_json(self.alarm_url,
                              params={"filter":
                                      '{">": {"state_timestamp": "'
                                      + isotime + '"}}'})

        self.assertEqual(6, len(data.json))
        for alarm in data.json:
            result_time = timeutils.parse_isotime(alarm['state_timestamp'])
            result_time = result_time.replace(tzinfo=None)
            self.assertTrue(result_time > date_time)
Example 34
    def __init__(self, counter_volume=None, resource_metadata={}, timestamp=None, **kwds):
        if counter_volume is not None:
            counter_volume = float(counter_volume)
        resource_metadata = _flatten_metadata(resource_metadata)
        # this is to make it easier for clients to pass a timestamp in
        if timestamp and isinstance(timestamp, basestring):
            timestamp = timeutils.parse_isotime(timestamp)

        super(Sample, self).__init__(
            counter_volume=counter_volume, resource_metadata=resource_metadata, timestamp=timestamp, **kwds
        )
        # Seems the mandatory option doesn't work so do it manually
        for m in ("counter_volume", "counter_unit", "counter_name", "counter_type", "resource_id"):
            if getattr(self, m) in (wsme.Unset, None):
                raise wsme.exc.MissingArgument(m)

        if self.resource_metadata in (wtypes.Unset, None):
            self.resource_metadata = {}
Example 35
    def test_multiple_samples(self):
        '''
        Send multiple samples.
        The use case here is to reduce the chatter and send the counters
        at a slower cadence.
        '''
        samples = []
        stamps = []
        for x in range(6):
            dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None)
            stamps.append(dt)
            s = {
                'counter_name': 'apples',
                'counter_type': 'gauge',
                'counter_unit': 'instance',
                'counter_volume': float(x * 3),
                'source': 'evil',
                'timestamp': dt.isoformat(),
                'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
                'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
                'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
                'resource_metadata': {
                    'name1': str(x),
                    'name2': str(x + 4)
                }
            }
            samples.append(s)

        data = self.post_json('/meters/apples/', samples)

        # source is modified to include the project_id.
        for x in range(6):
            for (k, v) in samples[x].iteritems():
                if k == 'timestamp':
                    timestamp = timeutils.parse_isotime(data.json[x][k])
                    self.assertEquals(stamps[x].replace(tzinfo=None),
                                      timestamp.replace(tzinfo=None))
                elif k == 'source':
                    self.assertEquals(
                        data.json[x][k], '%s:%s' %
                        (samples[x]['project_id'], samples[x]['source']))
                else:
                    self.assertEquals(v, data.json[x][k])
Example 36
    def __init__(self, counter_volume=None, resource_metadata={},
                 timestamp=None, **kwds):
        if counter_volume is not None:
            counter_volume = float(counter_volume)
        resource_metadata = _flatten_metadata(resource_metadata)
        # this is to make it easier for clients to pass a timestamp in
        if timestamp and isinstance(timestamp, basestring):
            timestamp = timeutils.parse_isotime(timestamp)

        super(Sample, self).__init__(counter_volume=counter_volume,
                                     resource_metadata=resource_metadata,
                                     timestamp=timestamp, **kwds)
        # Seems the mandatory option doesn't work so do it manually
        for m in ('counter_volume', 'counter_unit',
                  'counter_name', 'counter_type', 'resource_id'):
            if getattr(self, m) in (wsme.Unset, None):
                raise wsme.exc.MissingArgument(m)

        if self.resource_metadata in (wtypes.Unset, None):
            self.resource_metadata = {}
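
The timestamp convenience from the constructor, isolated; str replaces the Python 2 basestring above, and fromisoformat stands in for parse_isotime:

import datetime

def coerce_timestamp(timestamp):
    # Accept an ISO 8601 string or a datetime, return a datetime.
    if timestamp and isinstance(timestamp, str):
        timestamp = datetime.datetime.fromisoformat(timestamp)
    return timestamp

print(coerce_timestamp('2012-08-27T10:00:00'))
print(coerce_timestamp(datetime.datetime(2012, 8, 27, 10, 0)))
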
Example 37
    def handle_sample(self, context, sample_):
        if not self.initial_timestamp:
            self.initial_timestamp = timeutils.parse_isotime(sample_.timestamp)

        self.aggregated_samples += 1
        key = self._get_unique_key(sample_)
        self.counts[key] += 1
        if key not in self.samples:
            self.samples[key] = self._convert(sample_)
            if self.merged_attribute_policy[
                    'resource_metadata'] == 'drop':
                self.samples[key].resource_metadata = {}
        else:
            if sample_.type == sample.TYPE_CUMULATIVE:
                self.samples[key].volume = self._scale(sample_)
            else:
                self.samples[key].volume += self._scale(sample_)
            for field in self.merged_attribute_policy:
                if self.merged_attribute_policy[field] == 'last':
                    setattr(self.samples[key], field,
                            getattr(sample_, field))
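
The merge rule applied when a key is already cached, reduced to its core; TYPE_CUMULATIVE is a stand-in constant and the _scale step is assumed to be the identity:

TYPE_CUMULATIVE = 'cumulative'  # stand-in for sample.TYPE_CUMULATIVE

def merge_volume(cached_volume, incoming_volume, counter_type):
    # Cumulative meters already carry a running total, so the newest
    # reading replaces the cached one; other types are summed.
    if counter_type == TYPE_CUMULATIVE:
        return incoming_volume
    return cached_volume + incoming_volume

print(merge_volume(10.0, 12.0, TYPE_CUMULATIVE))  # 12.0
print(merge_volume(10.0, 12.0, 'gauge'))          # 22.0
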
Example 38
    def test_multiple_samples(self):
        '''
        Send multiple samples.
        The use case here is to reduce the chatter and send the counters
        at a slower cadence.
        '''
        samples = []
        stamps = []
        for x in range(6):
            dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None)
            stamps.append(dt)
            s = {'counter_name': 'apples',
                 'counter_type': 'gauge',
                 'counter_unit': 'instance',
                 'counter_volume': float(x * 3),
                 'source': 'evil',
                 'timestamp': dt.isoformat(),
                 'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
                 'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
                 'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
                 'resource_metadata': {'name1': str(x),
                                       'name2': str(x + 4)}}
            samples.append(s)

        data = self.post_json('/meters/apples/', samples)

        # source is modified to include the project_id.
        for x in range(6):
            for (k, v) in samples[x].iteritems():
                if k == 'timestamp':
                    timestamp = timeutils.parse_isotime(data.json[x][k])
                    self.assertEquals(stamps[x].replace(tzinfo=None),
                                      timestamp.replace(tzinfo=None))
                elif k == 'source':
                    self.assertEquals(data.json[x][k],
                                      '%s:%s' % (samples[x]['project_id'],
                                                 samples[x]['source']))
                else:
                    self.assertEquals(v, data.json[x][k])
Example 39
    def record_metering_data(self, context, data):
        """This method is triggered when metering data is
        cast from an agent.
        """
        #LOG.info('metering data: %r', data)
        LOG.info('metering data %s for %s @ %s: %s',
                 data['counter_name'], data['resource_id'],
                 data.get('timestamp', 'NO TIMESTAMP'), data['counter_volume'])
        if not meter.verify_signature(data):
            LOG.warning('message signature invalid, discarding message: %r',
                        data)
        else:
            try:
                # Convert the timestamp to a datetime instance.
                # Storage engines are responsible for converting
                # that value to something they can store.
                if data.get('timestamp'):
                    data['timestamp'] = timeutils.parse_isotime(
                        data['timestamp'], )
                self.storage_conn.record_metering_data(data)
            except Exception as err:
                LOG.error('Failed to record metering data: %s', err)
                LOG.exception(err)
Example 40
def main():
    cfg.CONF([], project='ceilometer')

    parser = argparse.ArgumentParser(description='generate metering data', )
    parser.add_argument(
        '--interval',
        default=10,
        type=int,
        help='the period between events, in minutes',
    )
    parser.add_argument(
        '--start',
        default=31,
        help='the number of days in the past to start timestamps',
    )
    parser.add_argument(
        '--end',
        default=2,
        help='the number of days into the future to continue timestamps',
    )
    parser.add_argument(
        '--type',
        choices=('gauge', 'cumulative'),
        default='gauge',
        help='counter type',
    )
    parser.add_argument(
        '--unit',
        default=None,
        help='counter unit',
    )
    parser.add_argument(
        '--project',
        help='project id of owner',
    )
    parser.add_argument(
        '--user',
        help='user id of owner',
    )
    parser.add_argument(
        'resource',
        help='the resource id for the meter data',
    )
    parser.add_argument(
        'counter',
        help='the counter name for the meter data',
    )
    parser.add_argument(
        'volume',
        help='the amount to attach to the meter',
        type=int,
        default=1,
    )
    args = parser.parse_args()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    # Connect to the metering database
    conn = storage.get_connection(cfg.CONF)

    # Find the user and/or project for a real resource
    if not (args.user or args.project):
        for r in conn.get_resources():
            if r['resource_id'] == args.resource:
                args.user = r['user_id']
                args.project = r['project_id']
                break

    # Compute start and end timestamps for the
    # new data.
    timestamp = timeutils.parse_isotime(args.start)
    end = timeutils.parse_isotime(args.end)
    increment = datetime.timedelta(minutes=args.interval)

    # Generate events
    n = 0
    while timestamp <= end:
        c = sample.Sample(
            name=args.counter,
            type=args.type,
            unit=args.unit,
            volume=args.volume,
            user_id=args.user,
            project_id=args.project,
            resource_id=args.resource,
            timestamp=timestamp,
            resource_metadata={},
            source='artificial',
        )
        data = rpc.meter_message_from_counter(
            c, cfg.CONF.publisher_rpc.metering_secret)
        conn.record_metering_data(data)
        n += 1
        timestamp = timestamp + increment

    print 'Added %d new events' % n

    return 0
Example 41
    def _assert_times_match(self, actual, expected):
        actual = timeutils.parse_isotime(actual).replace(tzinfo=None)
        self.assertEqual(actual, expected)
Example 42
def compute_duration_by_resource(resource, meter):
    """Return the earliest timestamp, last timestamp,
    and duration for the resource and meter.

    :param resource: The ID of the resource.
    :param meter: The name of the meter.
    :param start_timestamp: ISO-formatted string of the
        earliest timestamp to return.
    :param end_timestamp: ISO-formatted string of the
        latest timestamp to return.
    :param search_offset: Number of minutes before
        and after start and end timestamps to query.
    """
    # Determine the desired range, if any, from the
    # GET arguments. Set up the query range using
    # the specified offset.
    # [query_start ... start_timestamp ... end_timestamp ... query_end]
    search_offset = int(flask.request.args.get('search_offset', 0))

    start_timestamp = flask.request.args.get('start_timestamp')
    if start_timestamp:
        start_timestamp = timeutils.parse_isotime(start_timestamp)
        start_timestamp = start_timestamp.replace(tzinfo=None)
        query_start = (start_timestamp -
                       datetime.timedelta(minutes=search_offset))
    else:
        query_start = None

    end_timestamp = flask.request.args.get('end_timestamp')
    if end_timestamp:
        end_timestamp = timeutils.parse_isotime(end_timestamp)
        end_timestamp = end_timestamp.replace(tzinfo=None)
        query_end = end_timestamp + datetime.timedelta(minutes=search_offset)
    else:
        query_end = None

    # Query the database for the interval of timestamps
    # within the desired range.
    f = storage.EventFilter(meter=meter,
                            resource=resource,
                            start=query_start,
                            end=query_end,
                            )
    min_ts, max_ts = flask.request.storage_conn.get_event_interval(f)

    # "Clamp" the timestamps we return to the original time
    # range, excluding the offset.
    LOG.debug('start_timestamp %s, end_timestamp %s, min_ts %s, max_ts %s',
              start_timestamp, end_timestamp, min_ts, max_ts)
    if start_timestamp and min_ts and min_ts < start_timestamp:
        min_ts = start_timestamp
        LOG.debug('clamping min timestamp to range')
    if end_timestamp and max_ts and max_ts > end_timestamp:
        max_ts = end_timestamp
        LOG.debug('clamping max timestamp to range')

    # If we got valid timestamps back, compute a duration in minutes.
    #
    # If the min > max after clamping then we know the
    # timestamps on the events fell outside of the time
    # range we care about for the query, so treat them as
    # "invalid."
    #
    # If the timestamps are invalid, return None as a
    # sentinel indicating that there is something "funny"
    # about the range.
    if min_ts and max_ts and (min_ts <= max_ts):
        # Can't use timedelta.total_seconds() because
        # it is not available in Python 2.6.
        diff = max_ts - min_ts
        duration = (diff.seconds + (diff.days * 24 * 60 ** 2)) / 60
    else:
        min_ts = max_ts = duration = None

    return flask.jsonify(start_timestamp=min_ts,
                         end_timestamp=max_ts,
                         duration=duration,
                         )
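
The Python 2.6-safe duration arithmetic above, checked against timedelta.total_seconds(), which became available in 2.7:

import datetime

diff = datetime.timedelta(days=1, hours=2, minutes=30)

# Manual form used above, safe on Python 2.6:
minutes = (diff.seconds + diff.days * 24 * 60 ** 2) / 60
print(minutes)                     # 1590.0

# Modern equivalent:
print(diff.total_seconds() / 60)   # 1590.0
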
Example 43
def main():
    cfg.CONF([], project="ceilometer")

    parser = argparse.ArgumentParser(description="generate metering data")
    parser.add_argument("--interval", default=10, type=int, help="the period between events, in minutes")
    parser.add_argument("--start", default=31, help="the number of days in the past to start timestamps")
    parser.add_argument("--end", default=2, help="the number of days into the future to continue timestamps")
    parser.add_argument("--type", choices=("gauge", "cumulative"), default="gauge", help="counter type")
    parser.add_argument("--unit", default=None, help="counter unit")
    parser.add_argument("--project", help="project id of owner")
    parser.add_argument("--user", help="user id of owner")
    parser.add_argument("resource", help="the resource id for the meter data")
    parser.add_argument("counter", help="the counter name for the meter data")
    parser.add_argument("volume", help="the amount to attach to the meter", type=int, default=1)
    args = parser.parse_args()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(message)s")
    console.setFormatter(formatter)
    root_logger = logging.getLogger("")
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    # Connect to the metering database
    conn = storage.get_connection(cfg.CONF)

    # Find the user and/or project for a real resource
    if not (args.user or args.project):
        for r in conn.get_resources():
            if r["resource_id"] == args.resource:
                args.user = r["user_id"]
                args.project = r["project_id"]
                break

    # Compute start and end timestamps for the
    # new data.
    timestamp = timeutils.parse_isotime(args.start)
    end = timeutils.parse_isotime(args.end)
    increment = datetime.timedelta(minutes=args.interval)

    # Generate events
    n = 0
    while timestamp <= end:
        c = counter.Counter(
            name=args.counter,
            type=args.type,
            unit=args.unit,
            volume=args.volume,
            user_id=args.user,
            project_id=args.project,
            resource_id=args.resource,
            timestamp=timestamp,
            resource_metadata={},
        )
        data = meter.meter_message_from_counter(c, cfg.CONF.metering_secret, "artificial")
        conn.record_metering_data(data)
        n += 1
        timestamp = timestamp + increment

    print "Added %d new events" % n

    return 0
Example 44
    def _assert_times_match(self, actual, expected):
        actual = timeutils.parse_isotime(actual).replace(tzinfo=None)
        assert actual == expected
Example 45
def compute_duration_by_resource(resource, meter):
    """Return the earliest timestamp, last timestamp,
    and duration for the resource and meter.

    :param resource: The ID of the resource.
    :param meter: The name of the meter.
    :param start_timestamp: ISO-formatted string of the
        earliest timestamp to return.
    :param end_timestamp: ISO-formatted string of the
        latest timestamp to return.
    :param search_offset: Number of minutes before
        and after start and end timestamps to query.
    """
    # Determine the desired range, if any, from the
    # GET arguments. Set up the query range using
    # the specified offset.
    # [query_start ... start_timestamp ... end_timestamp ... query_end]
    search_offset = int(flask.request.args.get('search_offset', 0))

    start_timestamp = flask.request.args.get('start_timestamp')
    if start_timestamp:
        start_timestamp = timeutils.parse_isotime(start_timestamp)
        start_timestamp = start_timestamp.replace(tzinfo=None)
        query_start = (start_timestamp -
                       datetime.timedelta(minutes=search_offset))
    else:
        query_start = None

    end_timestamp = flask.request.args.get('end_timestamp')
    if end_timestamp:
        end_timestamp = timeutils.parse_isotime(end_timestamp)
        end_timestamp = end_timestamp.replace(tzinfo=None)
        query_end = end_timestamp + datetime.timedelta(minutes=search_offset)
    else:
        query_end = None

    # Query the database for the interval of timestamps
    # within the desired range.
    f = storage.EventFilter(
        meter=meter,
        resource=resource,
        start=query_start,
        end=query_end,
    )
    min_ts, max_ts = flask.request.storage_conn.get_event_interval(f)

    # "Clamp" the timestamps we return to the original time
    # range, excluding the offset.
    LOG.debug('start_timestamp %s, end_timestamp %s, min_ts %s, max_ts %s',
              start_timestamp, end_timestamp, min_ts, max_ts)
    if start_timestamp and min_ts and min_ts < start_timestamp:
        min_ts = start_timestamp
        LOG.debug('clamping min timestamp to range')
    if end_timestamp and max_ts and max_ts > end_timestamp:
        max_ts = end_timestamp
        LOG.debug('clamping max timestamp to range')

    # If we got valid timestamps back, compute a duration in minutes.
    #
    # If the min > max after clamping then we know the
    # timestamps on the events fell outside of the time
    # range we care about for the query, so treat them as
    # "invalid."
    #
    # If the timestamps are invalid, return None as a
    # sentinel indicating that there is something "funny"
    # about the range.
    if min_ts and max_ts and (min_ts <= max_ts):
        # Can't use timedelta.total_seconds() because
        # it is not available in Python 2.6.
        diff = max_ts - min_ts
        duration = (diff.seconds + (diff.days * 24 * 60**2)) / 60
    else:
        min_ts = max_ts = duration = None

    return flask.jsonify(
        start_timestamp=min_ts,
        end_timestamp=max_ts,
        duration=duration,
    )
Example 46
    def _assert_times_match(self, actual, expected):
        if actual:
            actual = timeutils.parse_isotime(actual)
        actual = actual.replace(tzinfo=None)
        assert actual == expected
Example 47
    def _assert_times_match(self, actual, expected):
        if actual:
            actual = timeutils.parse_isotime(actual)
        actual = actual.replace(tzinfo=None)
        self.assertEqual(expected, actual)
Example 48
def main():
    cfg.CONF([], project='ceilometer')

    parser = argparse.ArgumentParser(
        description='generate metering data',
    )
    parser.add_argument(
        '--interval',
        default=10,
        type=int,
        help='the period between events, in minutes',
    )
    parser.add_argument(
        '--start',
        default=31,
        help='the number of days in the past to start timestamps',
    )
    parser.add_argument(
        '--end',
        default=2,
        help='the number of days into the future to continue timestamps',
    )
    parser.add_argument(
        '--type',
        choices=('gauge', 'cumulative'),
        default='gauge',
        help='counter type',
    )
    parser.add_argument(
        '--unit',
        default=None,
        help='counter unit',
    )
    parser.add_argument(
        '--project',
        help='project id of owner',
    )
    parser.add_argument(
        '--user',
        help='user id of owner',
    )
    parser.add_argument(
        'resource',
        help='the resource id for the meter data',
    )
    parser.add_argument(
        'counter',
        help='the counter name for the meter data',
    )
    parser.add_argument(
        'volume',
        help='the amount to attach to the meter',
        type=int,
        default=1,
    )
    args = parser.parse_args()

    # Set up logging to use the console
    console = logging.StreamHandler(sys.stderr)
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(message)s')
    console.setFormatter(formatter)
    root_logger = logging.getLogger('')
    root_logger.addHandler(console)
    root_logger.setLevel(logging.DEBUG)

    # Connect to the metering database
    conn = storage.get_connection(cfg.CONF)

    # Find the user and/or project for a real resource
    if not (args.user or args.project):
        for r in conn.get_resources():
            if r['resource_id'] == args.resource:
                args.user = r['user_id']
                args.project = r['project_id']
                break

    # Compute start and end timestamps for the
    # new data.
    timestamp = timeutils.parse_isotime(args.start)
    end = timeutils.parse_isotime(args.end)
    increment = datetime.timedelta(minutes=args.interval)

    # Generate events
    n = 0
    while timestamp <= end:
        c = sample.Sample(name=args.counter,
                          type=args.type,
                          unit=args.unit,
                          volume=args.volume,
                          user_id=args.user,
                          project_id=args.project,
                          resource_id=args.resource,
                          timestamp=timestamp,
                          resource_metadata={},
                          source='artificial',
                          )
        data = utils.meter_message_from_counter(
            c,
            cfg.CONF.publisher.metering_secret)
        conn.record_metering_data(data)
        n += 1
        timestamp = timestamp + increment

    print('Added %d new events' % n)

    return 0
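
The generator's main loop, reduced to the timestamp stepping it performs (illustrative values; the real script derives its bounds from the --start/--end arguments):

import datetime

# Step from start to end by a fixed increment, counting events.
timestamp = datetime.datetime(2012, 8, 1)
end = datetime.datetime(2012, 8, 2)
increment = datetime.timedelta(minutes=10)

n = 0
while timestamp <= end:
    n += 1
    timestamp += increment

print('Added %d new events' % n)  # 145 events for one day at 10-minute steps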