Esempio n. 1
0
def show_resources(db, args):
    # Print a per-user report of resources, their metadata, and an
    # aggregate value for each meter attached to the resource.
    # db   : storage connection exposing get_users/get_resources/
    #        get_volume_max/get_volume_sum
    # args : optional explicit list of user ids; falls back to every
    #        known user, sorted.
    if args:
        users = args
    else:
        users = sorted(db.get_users())
    for u in users:
        print u
        for resource in db.get_resources(user=u):
            print '  %(resource_id)s %(timestamp)s' % resource
            for k, v in sorted(resource['metadata'].iteritems()):
                print '      %-10s : %s' % (k, v)
            for meter in resource['meter']:
                # FIXME(dhellmann): Need a way to tell whether to use
                # max() or sum() by meter name without hard-coding.
                # 'cpu' and 'disk' are aggregated with max(); everything
                # else is summed (presumably because those two report a
                # running total -- see FIXME above).
                if meter['counter_name'] in ['cpu', 'disk']:
                    totals = db.get_volume_max(storage.EventFilter(
                            user=u,
                            meter=meter['counter_name'],
                            resource=resource['resource_id'],
                            ))
                else:
                    totals = db.get_volume_sum(storage.EventFilter(
                            user=u,
                            meter=meter['counter_name'],
                            resource=resource['resource_id'],
                            ))
                # The query result is an iterator; only its first row is
                # shown (Python 2 iterator .next()).
                print '    %s (%s): %s' % \
                    (meter['counter_name'], meter['counter_type'],
                     totals.next()['value'])
Esempio n. 2
0
    def test_storage_can_handle_large_values(self):
        """Counter volumes near the int64 range survive a round trip."""
        cases = (
            ('dummyBigCounter', 3372036854775807),
            ('dummySmallCounter', -3372036854775807),
        )
        for meter_name, expected_volume in cases:
            samples = list(self.conn.get_samples(
                storage.EventFilter(meter=meter_name)))
            self.assertEqual(samples[0].counter_volume, expected_volume)
Esempio n. 3
0
    def get_one(self, message_id):
        """Return a single event with the given message id.

        :param message_id: Message ID of the Event to be returned
        """
        rbac.enforce("events:show", pecan.request)
        filters = _build_rbac_query_filters()
        event_filter = storage.EventFilter(
            traits_filter=filters['t_filter'],
            admin_proj=filters['admin_proj'],
            message_id=message_id)
        events = list(
            pecan.request.event_storage_conn.get_events(event_filter))
        if not events:
            raise base.EntityNotFound(_("Event"), message_id)

        if len(events) > 1:
            # A message id should match at most one stored event; log the
            # anomaly but still answer with the first match.
            LOG.error(
                _("More than one event with "
                  "id %s returned from storage driver") % message_id)

        matched = events[0]
        return Event(message_id=matched.message_id,
                     event_type=matched.event_type,
                     generated=matched.generated,
                     traits=matched.traits,
                     raw=matched.raw)
Esempio n. 4
0
    def statistics(self, q=None, period=None):
        """Computes the statistics of the samples in the time range given.

        :param q: Filter rules for the data to be returned.
        :param period: Returned result will be an array of statistics for a
                       period long of that number of seconds.

        """
        # NOTE(review): the default was previously a mutable [] -- replaced
        # with the None sentinel to avoid the shared-mutable-default
        # pitfall. q is only iterated here, so callers see no change;
        # confirm any framework introspection of the default still works.
        q = q or []
        kwargs = _query_to_kwargs(q, storage.EventFilter.__init__)
        kwargs['meter'] = self._id
        f = storage.EventFilter(**kwargs)
        computed = pecan.request.storage_conn.get_meter_statistics(f, period)
        LOG.debug('computed value coming from %r', pecan.request.storage_conn)
        # Find the original timestamp in the query to use for clamping
        # the duration returned in the statistics.
        start = end = None
        for i in q:
            if i.field == 'timestamp' and i.op in ('lt', 'le'):
                end = timeutils.parse_isotime(i.value).replace(tzinfo=None)
            elif i.field == 'timestamp' and i.op in ('gt', 'ge'):
                start = timeutils.parse_isotime(i.value).replace(tzinfo=None)

        return [
            Statistics(start_timestamp=start, end_timestamp=end, **c.as_dict())
            for c in computed
        ]
Esempio n. 5
0
 def test_by_user_period_start_end(self):
     """Statistics for one user/meter limited to a one-hour window."""
     event_filter = storage.EventFilter(
         user='******',
         meter='volume.size',
         start='2012-09-25T10:28:00',
         end='2012-09-25T11:28:00',
     )
     periods = list(self.conn.get_meter_statistics(event_filter,
                                                   period=1800))
     self.assertEqual(len(periods), 1)
     stats = periods[0]
     window_start = datetime.datetime(2012, 9, 25, 10, 28)
     self.assertEqual(stats.period_start, window_start)
     self.assertEqual(stats.count, 1)
     # With a single sample every aggregate equals that sample's value.
     for attr in ('avg', 'min', 'max', 'sum'):
         self.assertEqual(getattr(stats, attr), 8)
     self.assertEqual(stats.period, 1800)
     self.assertEqual(stats.period_end,
                      window_start + datetime.timedelta(seconds=1800))
     self.assertEqual(stats.duration, 0)
     sample_time = datetime.datetime(2012, 9, 25, 10, 30)
     self.assertEqual(stats.duration_start, sample_time)
     self.assertEqual(stats.duration_end, sample_time)
Esempio n. 6
0
 def test_no_period_in_query(self):
     """Statistics computed without a period report period == 0."""
     event_filter = storage.EventFilter(
         user='******',
         meter='volume.size',
     )
     stats = list(self.conn.get_meter_statistics(event_filter))
     assert stats[0].period == 0
Esempio n. 7
0
def _event_query_to_event_filter(q):
    """Translate API query rules into a storage.EventFilter.

    Rules whose field matches a first-class event attribute populate the
    model filter; everything else becomes a trait filter entry.
    """
    evt_model_filter = dict.fromkeys(
        ('event_type', 'message_id', 'start_timestamp', 'end_timestamp'))
    traits_filter = []

    for rule in q:
        if not rule.op:
            rule.op = 'eq'
        elif rule.op not in base.operation_kind:
            raise base.ClientSideError(
                _("operator {} is incorrect").format(rule.op))
        if rule.field in evt_model_filter:
            evt_model_filter[rule.field] = rule.value
        else:
            traits_filter.append({
                "key": rule.field,
                rule.type or 'string': rule._get_value_as_type(),
                "op": rule.op,
            })
    return storage.EventFilter(traits_filter=traits_filter, **evt_model_filter)
Esempio n. 8
0
 def test_get_raw_events_by_resource(self):
     """Filtering by user + resource returns the stored message."""
     event_filter = storage.EventFilter(user='******',
                                        resource='resource-id')
     results = list(self.conn.get_raw_events(event_filter))
     assert results
     first = results[0]
     assert first is not None
     assert first == self.msg
Esempio n. 9
0
    def get_one(self, message_id):
        """Return a single event with the given message id.

        :param message_id: Message ID of the Event to be returned
        """
        event_filter = storage.EventFilter(message_id=message_id)
        events = list(
            pecan.request.event_storage_conn.get_events(event_filter))
        if not events:
            raise base.EntityNotFound(_("Event"), message_id)

        if len(events) > 1:
            # A message id should be unique in storage; log the anomaly
            # and answer with the first match anyway.
            LOG.error(
                _("More than one event with "
                  "id %s returned from storage driver") % message_id)

        matched = events[0]
        return Event(message_id=matched.message_id,
                     event_type=matched.event_type,
                     generated=matched.generated,
                     traits=matched.traits,
                     raw=matched.raw)
Esempio n. 10
0
 def test_get_raw_events_by_meter2(self):
     """A user + 'instance' meter filter matches at least one event."""
     event_filter = storage.EventFilter(user='******', meter='instance')
     assert list(self.conn.get_raw_events(event_filter))
Esempio n. 11
0
    def setUp(self):
        """Build the timestamps and EventFilter shared by interval tests."""
        super(TestGetEventInterval, self).setUp()

        # NOTE(dhellmann): mim requires spidermonkey to implement the
        # map-reduce functions, so if we can't import it then just
        # skip these tests unless we aren't using mim.
        try:
            import spidermonkey
        except ImportError:
            # Only the missing-module case should trigger the skip; a bare
            # "except:" here also swallowed SystemExit/KeyboardInterrupt.
            if isinstance(self.conn.conn, mim.Connection):
                raise skip.SkipTest('requires spidermonkey')

        # Create events relative to the range and pretend
        # that the intervening events exist.

        self.start = datetime.datetime(2012, 8, 28, 0, 0)
        self.end = datetime.datetime(2012, 8, 29, 0, 0)

        # Two timestamps before the range ...
        self.early1 = self.start - datetime.timedelta(minutes=20)
        self.early2 = self.start - datetime.timedelta(minutes=10)

        # ... two inside it ...
        self.middle1 = self.start + datetime.timedelta(minutes=10)
        self.middle2 = self.end - datetime.timedelta(minutes=10)

        # ... and two after it.
        self.late1 = self.end + datetime.timedelta(minutes=10)
        self.late2 = self.end + datetime.timedelta(minutes=20)

        self._filter = storage.EventFilter(
            resource='resource-id',
            meter='instance',
            start=self.start,
            end=self.end,
        )
Esempio n. 12
0
def _list_samples(meter,
                  project=None,
                  resource=None,
                  source=None,
                  user=None):
    """Return a list of raw samples.

    Note: the API talks about "events"; these are equivalent to samples,
    but we still need to return the samples within the "events" dict
    to maintain API compatibility.
    """
    q_ts = _get_query_timestamps(flask.request.args)
    event_filter = storage.EventFilter(
        user=user,
        project=project,
        source=source,
        meter=meter,
        resource=resource,
        start=q_ts['start_timestamp'],
        end=q_ts['end_timestamp'],
        metaquery=_get_metaquery(flask.request.args),
    )
    samples = flask.request.storage_conn.get_samples(event_filter)
    jsonified = flask.jsonify(events=[s.as_dict() for s in samples])
    # JSON is the default; HTML rendering only on explicit request.
    if not request_wants_html():
        return jsonified
    return flask.templating.render_template('list_event.html',
                                            user=user,
                                            project=project,
                                            source=source,
                                            meter=meter,
                                            resource=resource,
                                            events=jsonified)
Esempio n. 13
0
 def test_get_raw_events_by_meter(self):
     """An unknown meter name yields no events."""
     event_filter = storage.EventFilter(user='******',
                                        meter='no-such-meter')
     assert not list(self.conn.get_raw_events(event_filter))
Esempio n. 14
0
def _event_query_to_event_filter(q):
    """Build a storage.EventFilter from query rules plus RBAC filters.

    First-class event attributes only support equality; every other
    field becomes a typed trait filter entry.
    """
    evt_model_filter = dict.fromkeys(
        ('event_type', 'message_id', 'start_timestamp', 'end_timestamp'))
    filters = _build_rbac_query_filters()
    traits_filter = filters['t_filter']
    admin_proj = filters['admin_proj']

    for rule in q:
        if not rule.op:
            rule.op = 'eq'
        elif rule.op not in base.operation_kind:
            raise base.ClientSideError(
                _('Operator %(operator)s is not supported. The supported'
                  ' operators are: %(supported)s') %
                {'operator': rule.op, 'supported': base.operation_kind})
        if rule.field not in evt_model_filter:
            traits_filter.append({"key": rule.field,
                                  rule.type or 'string':
                                      rule._get_value_as_type(),
                                  "op": rule.op})
        else:
            if rule.op != 'eq':
                raise base.ClientSideError(
                    _('Operator %(operator)s is not supported. Only'
                      ' equality operator is available for field'
                      ' %(field)s') %
                    {'operator': rule.op, 'field': rule.field})
            evt_model_filter[rule.field] = rule.value
    return storage.EventFilter(traits_filter=traits_filter,
                               admin_proj=admin_proj, **evt_model_filter)
Esempio n. 15
0
def _event_query_to_event_filter(q):
    """Translate a list of query rules into a storage.EventFilter.

    Rules on known event attributes populate the model filter directly;
    any other field is treated as a typed trait filter.
    """
    evt_model_filter = {
        'event_type': None,
        'message_id': None,
        'start_timestamp': None,
        'end_timestamp': None,
    }
    traits_filter = []

    for rule in q:
        if not rule.op:
            rule.op = 'eq'
        elif rule.op not in base.operation_kind:
            msg = (_('operator %(operator)s is not supported. the supported'
                     ' operators are: %(supported)s') %
                   {'operator': rule.op, 'supported': base.operation_kind})
            raise base.ClientSideError(msg)
        if rule.field in evt_model_filter:
            evt_model_filter[rule.field] = rule.value
        else:
            traits_filter.append({"key": rule.field,
                                  rule.type or 'string':
                                      rule._get_value_as_type(),
                                  "op": rule.op})
    return storage.EventFilter(traits_filter=traits_filter, **evt_model_filter)
Esempio n. 16
0
def compute_duration_by_resource(resource, meter):
    """Return the earliest timestamp, last timestamp,
    and duration for the resource and meter.

    :param resource: The ID of the resource.
    :param meter: The name of the meter.

    The following are read from the request query string (via
    ``_get_query_timestamps``), not passed as function arguments:

    :param start_timestamp: ISO-formatted string of the
        earliest timestamp to return.
    :param end_timestamp: ISO-formatted string of the
        latest timestamp to return.
    :param search_offset: Number of minutes before
        and after start and end timestamps to query.
    """
    q_ts = _get_query_timestamps(flask.request.args)
    start_timestamp = q_ts['start_timestamp']
    end_timestamp = q_ts['end_timestamp']

    # Query the database for the interval of timestamps
    # within the desired range.
    f = storage.EventFilter(
        meter=meter,
        project=acl.get_limited_to_project(flask.request.headers),
        resource=resource,
        start=q_ts['query_start'],
        end=q_ts['query_end'],
    )
    stats = flask.request.storage_conn.get_meter_statistics(f)
    min_ts, max_ts = stats.duration_start, stats.duration_end

    # "Clamp" the timestamps we return to the original time
    # range, excluding the offset.
    LOG.debug('start_timestamp %s, end_timestamp %s, min_ts %s, max_ts %s',
              start_timestamp, end_timestamp, min_ts, max_ts)
    if start_timestamp and min_ts and min_ts < start_timestamp:
        min_ts = start_timestamp
        LOG.debug('clamping min timestamp to range')
    if end_timestamp and max_ts and max_ts > end_timestamp:
        max_ts = end_timestamp
        LOG.debug('clamping max timestamp to range')

    # If we got valid timestamps back, compute a duration in minutes.
    #
    # If the min > max after clamping then we know the
    # timestamps on the samples fell outside of the time
    # range we care about for the query, so treat them as
    # "invalid."
    #
    # If the timestamps are invalid, return None as a
    # sentinal indicating that there is something "funny"
    # about the range.
    if min_ts and max_ts and (min_ts <= max_ts):
        duration = timeutils.delta_seconds(min_ts, max_ts)
    else:
        min_ts = max_ts = duration = None

    return flask.jsonify(start_timestamp=min_ts,
                         end_timestamp=max_ts,
                         duration=duration,
                         )
Esempio n. 17
0
 def test_get_samples_by_start_time(self):
     """The start bound is inclusive: a sample at exactly t matches."""
     boundary = datetime.datetime(2012, 7, 2, 10, 41)
     event_filter = storage.EventFilter(user='******', start=boundary)
     results = list(self.conn.get_samples(event_filter))
     assert len(results) == 1
     assert results[0].timestamp == boundary
Esempio n. 18
0
 def test_get_raw_events_by_end_time(self):
     """The end bound is exclusive: only the earlier event matches."""
     event_filter = storage.EventFilter(
         user='******',
         end=datetime.datetime(2012, 7, 2, 10, 41),
     )
     results = list(self.conn.get_raw_events(event_filter))
     assert len(results) == 1
     assert results[0]['timestamp'] == datetime.datetime(2012, 7, 2, 10, 40)
Esempio n. 19
0
 def test_get_raw_events_by_both_times(self):
     """A one-minute [start, end) window captures exactly one event."""
     window_start = datetime.datetime(2012, 7, 2, 10, 42)
     window_end = datetime.datetime(2012, 7, 2, 10, 43)
     event_filter = storage.EventFilter(start=window_start, end=window_end)
     results = list(self.conn.get_raw_events(event_filter))
     assert len(results) == 1
     assert results[0]['timestamp'] == window_start
Esempio n. 20
0
 def test_get_event_trait_filter(self):
     """Filtering on the integer trait trait_B == 101 matches only "Bar"."""
     trait_filters = {'key': 'trait_B', 't_int': 101}
     event_filter = storage.EventFilter(self.start,
                                        self.end,
                                        traits=trait_filters)
     events = self.conn.get_events(event_filter)
     # assertEquals is a deprecated alias of assertEqual; use the
     # canonical name so the suite stays warning-free.
     self.assertEqual(1, len(events))
     self.assertEqual(events[0].event_name, "Bar")
     self.assertEqual(4, len(events[0].traits))
Esempio n. 21
0
def show_total_resources(db, args):
    # Print, per user, a total for each of the disk/cpu/instance meters.
    # args : optional explicit list of user ids; defaults to every known
    #        user, sorted.
    if args:
        users = args
    else:
        users = sorted(db.get_users())
    for u in users:
        print u
        for meter in ['disk', 'cpu', 'instance']:
            # 'cpu' and 'disk' are aggregated with max(), the rest with
            # sum() -- same hard-coded meter split used elsewhere in
            # this file (presumably those meters report running totals).
            if meter in ['cpu', 'disk']:
                total = db.get_volume_max(storage.EventFilter(
                        user=u,
                        meter=meter,
                        ))
            else:
                total = db.get_volume_sum(storage.EventFilter(
                        user=u,
                        meter=meter,
                        ))
            # One row per resource: (resource_id, aggregated value).
            for t in total:
                print '  ', meter, t['resource_id'], t['value']
Esempio n. 22
0
def show_raw(db, args):
    # Dump every raw event for every user's resources.
    # NOTE(review): args is accepted but never used here -- presumably
    # kept so all show_* helpers share a (db, args) signature; confirm.
    fmt = '    %(timestamp)s %(counter_name)10s %(counter_volume)s'
    for u in sorted(db.get_users()):
        print u
        for resource in db.get_resources(user=u):
            print '  ', resource['resource_id']
            for event in db.get_raw_events(storage.EventFilter(
                    user=u,
                    resource=resource['resource_id'],
                    )):
                print fmt % event
Esempio n. 23
0
 def test_one_resource(self):
     """Volume sum restricted to one resource yields only that resource."""
     event_filter = storage.EventFilter(
         user='******',
         meter='instance',
         resource='resource-id',
     )
     results = list(self.conn.get_volume_sum(event_filter))
     assert results
     counts = {r['resource_id']: r['value'] for r in results}
     assert counts['resource-id'] == 1
     assert set(counts.keys()) == set(['resource-id'])
Esempio n. 24
0
    def get_all(self, q=None):
        """Return samples for the meter.

        :param q: Filter rules for the data to be returned.
        """
        # NOTE(review): default changed from a mutable [] to the None
        # sentinel (shared-mutable-default pitfall); q is only read, so
        # callers are unaffected -- confirm framework introspection of
        # the default is still happy.
        kwargs = _query_to_kwargs(q or [], storage.EventFilter.__init__)
        kwargs['meter'] = self._id
        sample_filter = storage.EventFilter(**kwargs)
        return [
            Sample.from_db_model(e)
            for e in pecan.request.storage_conn.get_samples(sample_filter)
        ]
Esempio n. 25
0
 def test_get_samples_by_metaquery(self):
     """Metadata-based filtering; drivers may not implement metaquery."""
     q = {'metadata.display_name': 'test-server'}
     f = storage.EventFilter(metaquery=q)
     got_not_imp = False
     try:
         results = list(self.conn.get_samples(f))
         assert results
         for meter in results:
             assert meter.as_dict() in self.msgs
     except NotImplementedError:
         got_not_imp = True
         # NOTE(review): this assert runs immediately after setting the
         # flag, so it can never fail -- the test effectively accepts
         # NotImplementedError as a valid outcome. Consider removing the
         # flag or asserting outside the handler.
         self.assertTrue(got_not_imp)
Esempio n. 26
0
 def test_by_project(self):
     """Volume sums for a project cover both of its resources."""
     event_filter = storage.EventFilter(
         project='project-id',
         meter='instance',
     )
     results = list(self.conn.get_volume_sum(event_filter))
     assert results
     counts = {r['resource_id']: r['value'] for r in results}
     assert counts['resource-id'] == 1
     assert counts['resource-id-alternate'] == 2
     assert set(counts.keys()) == set(
         ['resource-id', 'resource-id-alternate'])
Esempio n. 27
0
 def test_simple_get(self):
     """Events in [start, end) come back complete and time-ordered."""
     event_filter = storage.EventFilter(self.start, self.end)
     events = self.conn.get_events(event_filter)
     # assertEquals is a deprecated alias of assertEqual; use the
     # canonical name so the suite stays warning-free.
     self.assertEqual(3, len(events))
     start_time = None
     for i, name in enumerate(["Foo", "Bar", "Zoo"]):
         self.assertEqual(events[i].event_name, name)
         self.assertEqual(4, len(events[i].traits))
         # Ensure sorted results ...
         if start_time is not None:
             # Python 2.6 has no assertLess :(
             self.assertTrue(start_time < events[i].generated)
         start_time = events[i].generated
Esempio n. 28
0
 def test_by_project(self):
     """Statistics over a two-minute window with a single sample."""
     event_filter = storage.EventFilter(
         meter='volume.size',
         resource='resource-id',
         start='2012-09-25T11:30:00',
         end='2012-09-25T11:32:00',
     )
     stats = list(self.conn.get_meter_statistics(event_filter))[0]
     self.assertEqual(stats.duration, 0)
     assert stats.count == 1
     # A single sample makes every aggregate equal that sample's value.
     for attr in ('min', 'max', 'sum', 'avg'):
         assert getattr(stats, attr) == 6
Esempio n. 29
0
 def test_one_resource(self):
     """Aggregate statistics over all of one user's volume.size samples."""
     event_filter = storage.EventFilter(
         user='******',
         meter='volume.size',
     )
     stats = list(self.conn.get_meter_statistics(event_filter))[0]
     expected_span = (datetime.datetime(2012, 9, 25, 12, 32) -
                      datetime.datetime(2012, 9, 25, 10, 30)).seconds
     self.assertEqual(stats.duration, expected_span)
     assert stats.count == 3
     assert stats.min == 5
     assert stats.max == 7
     assert stats.sum == 18
     assert stats.avg == 6
Esempio n. 30
0
def show_total_resources(db, args):
    # Print, per user, one total per meter using the combined
    # get_statistics() call (this variant fetches both aggregates at
    # once and picks the relevant one afterwards).
    # args : optional explicit list of user ids; defaults to every known
    #        user, sorted.
    if args:
        users = args
    else:
        users = sorted(db.get_users())
    for u in users:
        print u
        for meter in ['disk', 'cpu', 'instance']:
            stats = db.get_statistics(storage.EventFilter(
                    user=u,
                    meter=meter,
                ))
            # 'cpu' and 'disk' report via max, the rest via sum -- same
            # hard-coded meter split used elsewhere in this file.
            if meter in ['cpu', 'disk']:
                total = stats['max']
            else:
                total = stats['sum']
            print '  ', meter, total