def get_meter_statistics(self, sample_filter, period=None, groupby=None,
                         aggregate=None):
    """Return an iterable of models.Statistics instances.

    Items contain meter statistics described by the query parameters.
    The filter must have a meter value set.
    """
    if (groupby and set(groupby) -
            set(['user_id', 'project_id', 'resource_id', 'source'])):
        raise ceilometer.NotImplementedError(
            "Unable to group by these fields")

    if aggregate:
        raise ceilometer.NotImplementedError(
            'Selectable aggregates not implemented')

    q = pymongo_utils.make_query_from_filter(sample_filter)

    if period:
        if sample_filter.start:
            period_start = sample_filter.start
        else:
            period_start = self.db.meter.find(
                limit=1,
                sort=[('timestamp', pymongo.ASCENDING)])[0]['timestamp']

    if groupby:
        sort_keys = ['counter_name'] + groupby + ['timestamp']
    else:
        sort_keys = ['counter_name', 'timestamp']

    sort_instructions = self._build_sort_instructions(sort_keys=sort_keys,
                                                      sort_dir='asc')
    meters = self.db.meter.find(q, sort=sort_instructions)

    def _group_key(meter):
        # the method to define a key for groupby call
        key = {}
        for y in sort_keys:
            if y == 'timestamp' and period:
                key[y] = (timeutils.delta_seconds(period_start,
                                                  meter[y]) // period)
            elif y != 'timestamp':
                key[y] = meter[y]
        return key

    def _to_offset(periods):
        return {'days': (periods * period) // self.SECONDS_IN_A_DAY,
                'seconds': (periods * period) % self.SECONDS_IN_A_DAY}

    for key, grouped_meters in itertools.groupby(meters, key=_group_key):
        stat = models.Statistics(unit=None,
                                 min=sys.maxint, max=-sys.maxint,
                                 avg=0, sum=0, count=0,
                                 period=0, period_start=0, period_end=0,
                                 duration=0, duration_start=0,
                                 duration_end=0, groupby=None)

        for meter in grouped_meters:
            stat.unit = meter.get('counter_unit', '')
            m_volume = meter.get('counter_volume')
            if stat.min > m_volume:
                stat.min = m_volume
            if stat.max < m_volume:
                stat.max = m_volume
            stat.sum += m_volume
            stat.count += 1
            if stat.duration_start == 0:
                stat.duration_start = meter['timestamp']
            stat.duration_end = meter['timestamp']
            if groupby and not stat.groupby:
                stat.groupby = {}
                for group_key in groupby:
                    stat.groupby[group_key] = meter[group_key]

        stat.duration = timeutils.delta_seconds(stat.duration_start,
                                                stat.duration_end)
        stat.avg = stat.sum / stat.count
        if period:
            stat.period = period
            periods = key.get('timestamp')
            stat.period_start = (
                period_start + datetime.timedelta(**(_to_offset(periods))))
            stat.period_end = (
                period_start +
                datetime.timedelta(**(_to_offset(periods + 1))))
        else:
            stat.period_start = stat.duration_start
            stat.period_end = stat.duration_end
        yield stat
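# Illustrative sketch (standalone, not part of the driver) of the period
# bucketing used above: _group_key maps each timestamp to an integer bucket
# index relative to period_start, and _to_offset turns an index back into a
# timedelta. SECONDS_IN_A_DAY is assumed to be 86400, and delta_seconds(a, b)
# is assumed to return (b - a) in seconds, as in oslo timeutils.
import datetime

SECONDS_IN_A_DAY = 86400


def bucket_index(period_start, timestamp, period):
    # same arithmetic as timeutils.delta_seconds(period_start, ts) // period
    return int((timestamp - period_start).total_seconds() // period)


def bucket_offset(periods, period):
    return {'days': (periods * period) // SECONDS_IN_A_DAY,
            'seconds': (periods * period) % SECONDS_IN_A_DAY}


_start = datetime.datetime(2015, 1, 1)
_ts = datetime.datetime(2015, 1, 1, 0, 25)
_idx = bucket_index(_start, _ts, period=600)  # 1500s // 600s -> bucket 2
assert _start + datetime.timedelta(**bucket_offset(_idx, 600)) == \
    datetime.datetime(2015, 1, 1, 0, 20)
assert _start + datetime.timedelta(**bucket_offset(_idx + 1, 600)) == \
    datetime.datetime(2015, 1, 1, 0, 30)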
def get_meter_statistics(self, sample_filter, period=None, groupby=None,
                         aggregate=None):
    """Return an iterable of models.Statistics instances.

    Items contain meter statistics described by the query parameters.
    The filter must have a meter value set.
    """
    if (groupby and set(groupby) -
            set(['user_id', 'project_id', 'resource_id', 'source',
                 'resource_metadata.instance_type'])):
        raise ceilometer.NotImplementedError(
            "Unable to group by these fields")

    q = pymongo_utils.make_query_from_filter(sample_filter)

    if period:
        if sample_filter.start_timestamp:
            period_start = sample_filter.start_timestamp
        else:
            period_start = self.db.meter.find(
                limit=1,
                sort=[('timestamp', pymongo.ASCENDING)])[0]['timestamp']
        period_start = int(calendar.timegm(period_start.utctimetuple()))
        map_params = {'period': period,
                      'period_first': period_start,
                      'groupby_fields': json.dumps(groupby)}
        if groupby:
            map_fragment = self.MAP_STATS_PERIOD_GROUPBY
        else:
            map_fragment = self.MAP_STATS_PERIOD
    else:
        if groupby:
            map_params = {'groupby_fields': json.dumps(groupby)}
            map_fragment = self.MAP_STATS_GROUPBY
        else:
            map_params = dict()
            map_fragment = self.MAP_STATS

    sub = self._aggregate_param

    map_params['aggregate_initial_val'] = sub('emit_initial', aggregate)
    map_params['aggregate_body_val'] = sub('emit_body', aggregate)

    map_stats = map_fragment % map_params

    reduce_params = dict(
        aggregate_initial_val=sub('reduce_initial', aggregate),
        aggregate_body_val=sub('reduce_body', aggregate),
        aggregate_computation_val=sub('reduce_computation', aggregate))
    reduce_stats = self.REDUCE_STATS % reduce_params

    finalize_params = dict(aggregate_val=sub('finalize', aggregate))
    finalize_stats = self.FINALIZE_STATS % finalize_params

    results = self.db.meter.map_reduce(map_stats, reduce_stats,
                                       {'inline': 1},
                                       finalize=finalize_stats,
                                       query=q)

    # FIXME(terriyu) Fix get_meter_statistics() so we don't use sorted()
    # to return the results
    return sorted(
        (self._stats_result_to_model(r['value'], groupby, aggregate)
         for r in results['results']),
        key=operator.attrgetter('period_start'))
def get_resources(self, user=None, project=None, source=None,
                  start_timestamp=None, start_timestamp_op=None,
                  end_timestamp=None, end_timestamp_op=None,
                  metaquery=None, resource=None, pagination=None):
    """Return an iterable of models.Resource instances

    :param user: Optional ID for user that owns the resource.
    :param project: Optional ID for project that owns the resource.
    :param source: Optional source filter.
    :param start_timestamp: Optional modified timestamp start range.
    :param start_timestamp_op: Optional start time operator, like gt, ge.
    :param end_timestamp: Optional modified timestamp end range.
    :param end_timestamp_op: Optional end time operator, like lt, le.
    :param metaquery: Optional dict with metadata to match on.
    :param resource: Optional resource filter.
    :param pagination: Optional pagination query.
    """
    if pagination:
        raise ceilometer.NotImplementedError('Pagination not implemented')

    metaquery = metaquery or {}

    q = {}
    if user is not None:
        q['user_id'] = user
    if project is not None:
        q['project_id'] = project
    if source is not None:
        q['source'] = source
    if resource is not None:
        q['resource_id'] = resource
    # Add resource_ prefix so it matches the field in the db
    q.update(dict(('resource_' + k, v)
                  for (k, v) in six.iteritems(metaquery)))

    if start_timestamp or end_timestamp:
        # Look for resources matching the above criteria and with
        # samples in the time range we care about, then change the
        # resource query to return just those resources by id.
        ts_range = pymongo_utils.make_timestamp_range(
            start_timestamp, end_timestamp,
            start_timestamp_op, end_timestamp_op)
        if ts_range:
            q['timestamp'] = ts_range

    sort_keys = base._handle_sort_key('resource', 'timestamp')
    sort_keys.insert(0, 'resource_id')
    sort_instructions = self._build_sort_instructions(sort_keys=sort_keys,
                                                      sort_dir='desc')

    # Grouping key for itertools.groupby; note this rebinds the filter
    # argument `resource`, which is no longer needed at this point.
    resource = lambda x: x['resource_id']
    meters = self.db.meter.find(q, sort=sort_instructions)
    for resource_id, r_meters in itertools.groupby(meters, key=resource):
        # Because we have to know first/last timestamp, and we need a
        # full list of references to the resource's meters, we need a
        # tuple here.
        r_meters = tuple(r_meters)
        latest_meter = r_meters[0]
        last_ts = latest_meter['timestamp']
        first_ts = r_meters[-1]['timestamp']

        yield models.Resource(resource_id=latest_meter['resource_id'],
                              project_id=latest_meter['project_id'],
                              first_sample_timestamp=first_ts,
                              last_sample_timestamp=last_ts,
                              source=latest_meter['source'],
                              user_id=latest_meter['user_id'],
                              metadata=latest_meter['resource_metadata'])
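# Minimal standalone sketch of the grouping above: the cursor is sorted by
# resource_id and then timestamp descending, so within each itertools.groupby
# run the first element is the newest sample and the last element the oldest.
# The sample dicts below are made up for illustration.
import itertools

_samples = [{'resource_id': 'a', 'timestamp': 30},
            {'resource_id': 'a', 'timestamp': 10},
            {'resource_id': 'b', 'timestamp': 20}]
for _rid, _group in itertools.groupby(_samples,
                                      key=lambda s: s['resource_id']):
    _group = tuple(_group)
    print(_rid, _group[0]['timestamp'], _group[-1]['timestamp'])
# prints: a 30 10  then  b 20 20
# i.e. (resource, last_sample_timestamp, first_sample_timestamp)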
def get_events(event_filter):
    """Return an iterable of model.Event objects."""
    raise ceilometer.NotImplementedError('Events not implemented.')
def get_event_types():
    """Return all event types as an iterable of strings."""
    raise ceilometer.NotImplementedError('Events not implemented.')
def get_resources(self, user=None, project=None, source=None,
                  start_timestamp=None, start_timestamp_op=None,
                  end_timestamp=None, end_timestamp_op=None,
                  metaquery=None, resource=None, pagination=None):
    """Return an iterable of api_models.Resource instances

    :param user: Optional ID for user that owns the resource.
    :param project: Optional ID for project that owns the resource.
    :param source: Optional source filter.
    :param start_timestamp: Optional modified timestamp start range.
    :param start_timestamp_op: Optional start time operator, like gt, ge.
    :param end_timestamp: Optional modified timestamp end range.
    :param end_timestamp_op: Optional end time operator, like lt, le.
    :param metaquery: Optional dict with metadata to match on.
    :param resource: Optional resource filter.
    :param pagination: Optional pagination query.
    """
    if pagination:
        raise ceilometer.NotImplementedError('Pagination not implemented')

    s_filter = storage.SampleFilter(user=user,
                                    project=project,
                                    source=source,
                                    start=start_timestamp,
                                    start_timestamp_op=start_timestamp_op,
                                    end=end_timestamp,
                                    end_timestamp_op=end_timestamp_op,
                                    metaquery=metaquery,
                                    resource=resource)

    session = self._engine_facade.get_session()
    # get list of resource_ids
    res_q = session.query(distinct(models.Resource.resource_id)).join(
        models.Sample,
        models.Sample.resource_id == models.Resource.internal_id)
    res_q = make_query_from_filter(session, res_q, s_filter,
                                   require_meter=False)

    for res_id in res_q.all():
        # get latest Sample
        max_q = (session.query(models.Sample)
                 .join(models.Resource,
                       models.Resource.internal_id ==
                       models.Sample.resource_id)
                 .filter(models.Resource.resource_id == res_id[0]))
        max_q = make_query_from_filter(session, max_q, s_filter,
                                       require_meter=False)
        max_q = max_q.order_by(models.Sample.timestamp.desc(),
                               models.Sample.id.desc()).limit(1)

        # get the min timestamp value.
        min_q = (session.query(models.Sample.timestamp)
                 .join(models.Resource,
                       models.Resource.internal_id ==
                       models.Sample.resource_id)
                 .filter(models.Resource.resource_id == res_id[0]))
        min_q = make_query_from_filter(session, min_q, s_filter,
                                       require_meter=False)
        min_q = min_q.order_by(models.Sample.timestamp.asc()).limit(1)

        sample = max_q.first()
        if sample:
            yield api_models.Resource(
                resource_id=sample.resource.resource_id,
                project_id=sample.resource.project_id,
                first_sample_timestamp=min_q.first().timestamp,
                last_sample_timestamp=sample.timestamp,
                source=sample.resource.source_id,
                user_id=sample.resource.user_id,
                metadata=sample.resource.resource_metadata)
def get_meter_statistics(self, filter, period=None, groupby=None,
                         aggregate=None):
    """Return a dictionary containing meter statistics.

    Meter statistics are described by the query parameters.
    The filter must have a meter value set.

    {
        'min':
        'max':
        'avg':
        'sum':
        'count':
        'period':
        'period_start':
        'period_end':
        'duration':
        'duration_start':
        'duration_end':
    }
    """
    if filter:
        if not filter.meter:
            raise ceilometer.NotImplementedError('Query without meter '
                                                 'not implemented')
    else:
        raise ceilometer.NotImplementedError('Query without filter '
                                             'not implemented')

    if groupby:
        raise ceilometer.NotImplementedError('Groupby not implemented')

    if filter.metaquery:
        raise ceilometer.NotImplementedError('Metaquery not implemented')

    if filter.message_id:
        raise ceilometer.NotImplementedError('Message_id query '
                                             'not implemented')

    if filter.start_timestamp_op and filter.start_timestamp_op != 'ge':
        raise ceilometer.NotImplementedError(('Start time op %s '
                                              'not implemented') %
                                             filter.start_timestamp_op)

    if filter.end_timestamp_op and filter.end_timestamp_op != 'le':
        raise ceilometer.NotImplementedError(('End time op %s '
                                              'not implemented') %
                                             filter.end_timestamp_op)

    if not filter.start_timestamp:
        filter.start_timestamp = timeutils.isotime(
            datetime.datetime(1970, 1, 1))

    # TODO(monasca): Add this as a config parameter
    allowed_stats = ['avg', 'min', 'max', 'sum', 'count']

    if aggregate:
        not_allowed_stats = [a.func for a in aggregate
                             if a.func not in allowed_stats]
        if not_allowed_stats:
            raise ceilometer.NotImplementedError(('Aggregate function(s) '
                                                  '%s not implemented') %
                                                 not_allowed_stats)
        statistics = [a.func for a in aggregate
                      if a.func in allowed_stats]
    else:
        statistics = allowed_stats

    dims_filter = dict(user_id=filter.user,
                       project_id=filter.project,
                       source=filter.source,
                       resource_id=filter.resource)
    dims_filter = {k: v for k, v in dims_filter.items() if v is not None}

    period = period if period else cfg.CONF.monasca.default_stats_period

    _search_args = dict(name=filter.meter,
                        dimensions=dims_filter,
                        start_time=filter.start_timestamp,
                        end_time=filter.end_timestamp,
                        period=period,
                        statistics=','.join(statistics),
                        merge_metrics=True)
    _search_args = {k: v for k, v in _search_args.items()
                    if v is not None}

    stats_list = self.mc.statistics_list(**_search_args)
    for stats in stats_list:
        for s in stats['statistics']:
            stats_dict = self._convert_to_dict(s, stats['columns'])
            ts_start = timeutils.parse_isotime(stats_dict['timestamp'])
            ts_end = ts_start + datetime.timedelta(0, period)
            del stats_dict['timestamp']
            if 'count' in stats_dict:
                stats_dict['count'] = int(stats_dict['count'])
            yield api_models.Statistics(
                unit=stats['dimensions'].get('unit'),
                period=period,
                period_start=ts_start,
                period_end=ts_end,
                duration=period,
                duration_start=ts_start,
                duration_end=ts_end,
                groupby={u'': u''},
                **stats_dict)
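# Hedged sketch of the _convert_to_dict helper used above (an assumption
# about its behavior, not the driver's actual code): the Monasca API returns
# a 'columns' list plus rows of values, and the helper pairs them into a
# dict keyed by column name.
def _convert_to_dict(row, columns):
    return dict(zip(columns, row))


assert _convert_to_dict(['2015-03-01T00:00:00Z', 0.25, 42.0],
                        ['timestamp', 'avg', 'count']) == {
    'timestamp': '2015-03-01T00:00:00Z', 'avg': 0.25, 'count': 42.0}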
def create_alarm(alarm):
    """Create an alarm. Returns the alarm as created.

    :param alarm: The alarm to create.
    """
    raise ceilometer.NotImplementedError('Alarms not implemented')
def update_alarm(alarm):
    """Update alarm."""
    raise ceilometer.NotImplementedError('Alarms not implemented')
def _parse_to_sample_filter(self, simple_filters):
    """Parse simple filters into a sample filter.

    For example, parse
    [{"=": {"counter_name": "cpu"}}, {"=": {"counter_volume": 1}}]
    to SampleFilter(counter_name="cpu", counter_volume=1).
    """
    equal_only_fields = (
        'counter_name', 'counter_unit', 'counter_type',
        'project_id', 'user_id', 'source', 'resource_id',
        # These fields are supported by Ceilometer but cannot be
        # supported by Monasca.
        # 'message_id',
        # 'message_signature',
        # 'recorded_at',
    )
    field_map = {
        "project_id": "project",
        "user_id": "user",
        "resource_id": "resource",
        "counter_name": "meter",
        "counter_type": "type",
        "counter_unit": "unit",
    }
    msg = "operand %s cannot be applied to field %s"
    kwargs = {'metaquery': {}}
    for sf in simple_filters:
        op = sf.keys()[0]
        field, value = sf.values()[0].items()[0]
        if field in equal_only_fields:
            if op != '=':
                raise ceilometer.NotImplementedError(msg % (op, field))
            field = field_map.get(field, field)
            kwargs[field] = value
        elif field == 'timestamp':
            if op == '>=':
                kwargs['start_timestamp'] = value
                kwargs['start_timestamp_op'] = 'ge'
            elif op == '<=':
                kwargs['end_timestamp'] = value
                kwargs['end_timestamp_op'] = 'le'
            else:
                raise ceilometer.NotImplementedError(msg % (op, field))
        elif field == 'counter_volume':
            kwargs['volume'] = value
            kwargs['volume_op'] = op
        elif (field.startswith('resource_metadata.') or
                field.startswith('metadata.')):
            kwargs['metaquery'][field] = value
        else:
            ra_msg = "field %s is not supported" % field
            raise ceilometer.NotImplementedError(ra_msg)

    sample_type = kwargs.pop('type', None)
    sample_unit = kwargs.pop('unit', None)
    sample_volume = kwargs.pop('volume', None)
    sample_volume_op = kwargs.pop('volume_op', None)
    sample_filter = storage.SampleFilter(**kwargs)
    # Add some dynamic attributes: the type and unit attributes can be
    # used when querying the Monasca API, while the volume and volume_op
    # attributes can be used for volume comparison.
    sample_filter.type = sample_type
    sample_filter.unit = sample_unit
    sample_filter.volume = sample_volume
    sample_filter.volume_op = sample_volume_op
    return sample_filter
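# Hedged usage sketch, assuming `conn` is an instance of this driver and
# using made-up filter literals: a complex-query filter list folds into a
# single SampleFilter plus the dynamic attributes set above.
simple_filters = [
    {'=': {'counter_name': 'cpu'}},
    {'>=': {'timestamp': '2015-03-01T00:00:00Z'}},
    {'>': {'counter_volume': 1}},
    {'=': {'metadata.state': 'active'}},
]
sample_filter = conn._parse_to_sample_filter(simple_filters)
# sample_filter.meter == 'cpu'
# sample_filter.start_timestamp == '2015-03-01T00:00:00Z'
# sample_filter.start_timestamp_op == 'ge'
# sample_filter.volume == 1 and sample_filter.volume_op == '>'
# sample_filter.metaquery == {'metadata.state': 'active'}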
def record_alarm_change(alarm_change):
    """Record alarm change event."""
    raise ceilometer.NotImplementedError('Alarm history not implemented')
def get_meter_statistics(self, filter, period=None, groupby=None,
                         aggregate=None):
    """Return a dictionary containing meter statistics.

    Meter statistics are described by the query parameters.
    The filter must have a meter value set.

    {
        'min':
        'max':
        'avg':
        'sum':
        'count':
        'period':
        'period_start':
        'period_end':
        'duration':
        'duration_start':
        'duration_end':
    }
    """
    if filter:
        if not filter.meter:
            raise ceilometer.NotImplementedError('Query without meter '
                                                 'not implemented')
    else:
        raise ceilometer.NotImplementedError('Query without filter '
                                             'not implemented')

    allowed_groupby = ['user_id', 'project_id', 'resource_id', 'source']
    if groupby:
        if len(groupby) > 1:
            raise ceilometer.NotImplementedError('Only one groupby '
                                                 'supported')
        groupby = groupby[0]
        if groupby not in allowed_groupby:
            raise ceilometer.NotImplementedError('Groupby %s not'
                                                 ' implemented' % groupby)

    if filter.metaquery:
        raise ceilometer.NotImplementedError('Metaquery not implemented')

    if filter.message_id:
        raise ceilometer.NotImplementedError('Message_id query '
                                             'not implemented')

    if filter.start_timestamp_op and filter.start_timestamp_op != 'ge':
        raise ceilometer.NotImplementedError(('Start time op %s '
                                              'not implemented') %
                                             filter.start_timestamp_op)

    if filter.end_timestamp_op and filter.end_timestamp_op != 'le':
        raise ceilometer.NotImplementedError(('End time op %s '
                                              'not implemented') %
                                             filter.end_timestamp_op)

    if not filter.start_timestamp:
        filter.start_timestamp = timeutils.isotime(
            datetime.datetime(1970, 1, 1))
    else:
        filter.start_timestamp = timeutils.isotime(filter.start_timestamp)

    if filter.end_timestamp:
        filter.end_timestamp = timeutils.isotime(filter.end_timestamp)

    # TODO(monasca): Add this as a config parameter
    allowed_stats = ['avg', 'min', 'max', 'sum', 'count']

    if aggregate:
        not_allowed_stats = [a.func for a in aggregate
                             if a.func not in allowed_stats]
        if not_allowed_stats:
            raise ceilometer.NotImplementedError(('Aggregate function(s) '
                                                  '%s not implemented') %
                                                 not_allowed_stats)
        statistics = [a.func for a in aggregate
                      if a.func in allowed_stats]
    else:
        statistics = allowed_stats

    dims_filter = dict(user_id=filter.user,
                       project_id=filter.project,
                       source=filter.source,
                       resource_id=filter.resource)
    dims_filter = {k: v for k, v in dims_filter.items() if v is not None}

    period = period if period else cfg.CONF.monasca.default_stats_period

    if groupby:
        _metric_args = dict(name=filter.meter,
                            dimensions=dims_filter)
        group_stats_list = []

        for metric in self.mc.metrics_list(**_metric_args):
            _search_args = dict(
                name=metric['name'],
                dimensions=metric['dimensions'],
                start_time=filter.start_timestamp,
                end_time=filter.end_timestamp,
                period=period,
                statistics=','.join(statistics),
                merge_metrics=False)
            _search_args = {k: v for k, v in _search_args.items()
                            if v is not None}
            stats_list = self.mc.statistics_list(**_search_args)
            group_stats_list.extend(stats_list)

        group_stats_dict = {}
        for stats in group_stats_list:
            groupby_val = stats['dimensions'].get(groupby)
            stats_list = group_stats_dict.get(groupby_val)
            if stats_list:
                stats_list.append(stats)
            else:
                group_stats_dict[groupby_val] = [stats]

        def get_max(items):
            return max(items)

        def get_min(items):
            return min(items)

        def get_avg(items):
            return sum(items) / len(items)

        def get_sum(items):
            return sum(items)

        def get_count(items):
            count = 0
            for item in items:
                count = count + item
            return count

        for group_key, stats_group in group_stats_dict.iteritems():
            max_list = []
            min_list = []
            avg_list = []
            sum_list = []
            count_list = []
            ts_list = []
            group_statistics = {}
            for stats in stats_group:
                for s in stats['statistics']:
                    stats_dict = self._convert_to_dict(s,
                                                       stats['columns'])
                    if 'max' in stats['columns']:
                        max_list.append(stats_dict['max'])
                    if 'min' in stats['columns']:
                        min_list.append(stats_dict['min'])
                    if 'avg' in stats['columns']:
                        avg_list.append(stats_dict['avg'])
                    if 'sum' in stats['columns']:
                        sum_list.append(stats_dict['sum'])
                    if 'count' in stats['columns']:
                        count_list.append(stats_dict['count'])

                    ts_list.append(stats_dict['timestamp'])

                    group_statistics['unit'] = (stats['dimensions'].
                                                get('unit'))

            if len(max_list):
                group_statistics['max'] = get_max(max_list)
            if len(min_list):
                group_statistics['min'] = get_min(min_list)
            if len(avg_list):
                group_statistics['avg'] = get_avg(avg_list)
            if len(sum_list):
                group_statistics['sum'] = get_sum(sum_list)
            if len(count_list):
                group_statistics['count'] = get_count(count_list)

            group_statistics['end_timestamp'] = get_max(ts_list)
            group_statistics['timestamp'] = get_min(ts_list)

            ts_start = timeutils.parse_isotime(
                group_statistics['timestamp']).replace(tzinfo=None)
            ts_end = timeutils.parse_isotime(
                group_statistics['end_timestamp']).replace(tzinfo=None)

            del group_statistics['end_timestamp']

            if 'count' in group_statistics:
                group_statistics['count'] = int(group_statistics['count'])
            unit = group_statistics['unit']
            del group_statistics['unit']
            if aggregate:
                group_statistics['aggregate'] = {}
                for a in aggregate:
                    key = '%s%s' % (a.func, '/%s' % a.param
                                    if a.param else '')
                    group_statistics['aggregate'][key] = (
                        group_statistics.get(key))
            yield api_models.Statistics(
                unit=unit,
                period=period,
                period_start=ts_start,
                period_end=ts_end,
                duration=period,
                duration_start=ts_start,
                duration_end=ts_end,
                groupby={groupby: group_key},
                **group_statistics)
    else:
        _search_args = dict(
            name=filter.meter,
            dimensions=dims_filter,
            start_time=filter.start_timestamp,
            end_time=filter.end_timestamp,
            period=period,
            statistics=','.join(statistics),
            merge_metrics=True)
        _search_args = {k: v for k, v in _search_args.items()
                        if v is not None}
        stats_list = self.mc.statistics_list(**_search_args)
        for stats in stats_list:
            for s in stats['statistics']:
                stats_dict = self._convert_to_dict(s, stats['columns'])
                ts_start = timeutils.parse_isotime(
                    stats_dict['timestamp']).replace(tzinfo=None)
                ts_end = (ts_start + datetime.timedelta(
                    0, period)).replace(tzinfo=None)
                del stats_dict['timestamp']
                if 'count' in stats_dict:
                    stats_dict['count'] = int(stats_dict['count'])
                if aggregate:
                    stats_dict['aggregate'] = {}
                    for a in aggregate:
                        key = '%s%s' % (a.func, '/%s' % a.param
                                        if a.param else '')
                        stats_dict['aggregate'][key] = stats_dict.get(key)
                yield api_models.Statistics(
                    unit=stats['dimensions'].get('unit'),
                    period=period,
                    period_start=ts_start,
                    period_end=ts_end,
                    duration=period,
                    duration_start=ts_start,
                    duration_end=ts_end,
                    groupby={u'': u''},
                    **stats_dict)
def get_samples(self, sample_filter, limit=None):
    """Return an iterable of dictionaries containing sample information.

    {
        'source': source of the resource,
        'counter_name': name of the resource,
        'counter_type': type of the sample (gauge, delta, cumulative),
        'counter_unit': unit of the sample,
        'counter_volume': volume of the sample,
        'user_id': UUID of user owning the resource,
        'project_id': UUID of project owning the resource,
        'resource_id': UUID of the resource,
        'timestamp': timestamp of the sample,
        'resource_metadata': metadata of the sample,
        'message_id': message ID of the sample,
        'message_signature': message signature of the sample,
        'recorded_at': time the sample was recorded
    }

    :param sample_filter: constraints for the sample search.
    :param limit: Maximum number of results to return.
    """
    if limit == 0:
        return

    if not sample_filter or not sample_filter.meter:
        raise ceilometer.NotImplementedError(
            "Supply meter name at the least")

    if (sample_filter.start_timestamp_op and
            sample_filter.start_timestamp_op != 'ge'):
        raise ceilometer.NotImplementedError(
            ('Start time op %s not implemented') %
            sample_filter.start_timestamp_op)

    if (sample_filter.end_timestamp_op and
            sample_filter.end_timestamp_op != 'le'):
        raise ceilometer.NotImplementedError(
            ('End time op %s not implemented') %
            sample_filter.end_timestamp_op)

    q = {}
    if sample_filter.metaquery:
        q = self._convert_metaquery(sample_filter.metaquery)

    if sample_filter.message_id:
        raise ceilometer.NotImplementedError('message_id not '
                                             'implemented '
                                             'in get_samples')

    if not sample_filter.start_timestamp:
        sample_filter.start_timestamp = datetime.datetime(1970, 1, 1)

    if not sample_filter.end_timestamp:
        sample_filter.end_timestamp = datetime.datetime.utcnow()

    _dimensions = dict(
        user_id=sample_filter.user,
        project_id=sample_filter.project,
        resource_id=sample_filter.resource,
        source=sample_filter.source,
        # Dynamic sample filter attributes; these fields are useful
        # for filtering the result.
        unit=getattr(sample_filter, 'unit', None),
        type=getattr(sample_filter, 'type', None),
    )
    _dimensions = {k: v for k, v in _dimensions.items() if v is not None}

    _metric_args = dict(name=sample_filter.meter,
                        dimensions=_dimensions)

    start_ts = timeutils.isotime(sample_filter.start_timestamp)
    end_ts = timeutils.isotime(sample_filter.end_timestamp)

    _search_args = dict(
        start_time=start_ts,
        start_timestamp_op=sample_filter.start_timestamp_op,
        end_time=end_ts,
        end_timestamp_op=sample_filter.end_timestamp_op,
        merge_metrics=False)

    result_count = 0
    for metric in self.mc.metrics_list(**_metric_args):
        _search_args['name'] = metric['name']
        _search_args['dimensions'] = metric['dimensions']
        _search_args = {k: v for k, v in _search_args.items()
                        if v is not None}

        for sample in self.mc.measurements_list(**_search_args):
            d = sample['dimensions']
            for meas in sample['measurements']:
                m = self._convert_to_dict(meas, sample['columns'])
                vm = m['value_meta']
                if not self._match_metaquery_to_value_meta(q, vm):
                    continue
                result_count += 1
                yield api_models.Sample(
                    source=d.get('source'),
                    counter_name=sample['name'],
                    counter_type=d.get('type'),
                    counter_unit=d.get('unit'),
                    counter_volume=m['value'],
                    user_id=d.get('user_id'),
                    project_id=d.get('project_id'),
                    resource_id=d.get('resource_id'),
                    timestamp=timeutils.parse_isotime(m['timestamp']),
                    resource_metadata=m['value_meta'],
                    message_id=sample['id'],
                    message_signature='',
                    recorded_at=(timeutils.parse_isotime(m['timestamp'])))

                if result_count == limit:
                    return
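# Hedged sketch of _match_metaquery_to_value_meta (an assumption, not the
# driver's actual helper): q is the converted metaquery, and the match is
# assumed to succeed only when every requested key/value pair appears in
# the sample's value_meta.
def _match_metaquery_to_value_meta(q, value_meta):
    if not q:
        return True
    if not value_meta:
        return False
    return all(value_meta.get(k) == v for k, v in q.items())


assert _match_metaquery_to_value_meta({}, None)
assert _match_metaquery_to_value_meta({'state': 'active'},
                                      {'state': 'active', 'image': 'f20'})
assert not _match_metaquery_to_value_meta({'state': 'active'},
                                          {'state': 'error'})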
def get_resources(self, user=None, project=None, source=None,
                  start_timestamp=None, start_timestamp_op=None,
                  end_timestamp=None, end_timestamp_op=None,
                  metaquery=None, resource=None, limit=None):
    """Return an iterable of dictionaries containing resource information.

    {
        'resource_id': UUID of the resource,
        'project_id': UUID of project owning the resource,
        'user_id': UUID of user owning the resource,
        'timestamp': UTC datetime of last update to the resource,
        'metadata': most current metadata for the resource,
        'meter': list of the meters reporting data for the resource,
    }

    :param user: Optional ID for user that owns the resource.
    :param project: Optional ID for project that owns the resource.
    :param source: Optional source filter.
    :param start_timestamp: Optional modified timestamp start range.
    :param start_timestamp_op: Optional start time operator, like gt, ge.
    :param end_timestamp: Optional modified timestamp end range.
    :param end_timestamp_op: Optional end time operator, like lt, le.
    :param metaquery: Optional dict with metadata to match on.
    :param resource: Optional resource filter.
    :param limit: Maximum number of results to return.
    """
    if limit == 0:
        return

    q = {}
    if metaquery:
        q = self._convert_metaquery(metaquery)

    if start_timestamp_op and start_timestamp_op != 'ge':
        raise ceilometer.NotImplementedError(('Start time op %s '
                                              'not implemented') %
                                             start_timestamp_op)

    if end_timestamp_op and end_timestamp_op != 'le':
        raise ceilometer.NotImplementedError(('End time op %s '
                                              'not implemented') %
                                             end_timestamp_op)

    if not start_timestamp:
        start_timestamp = timeutils.isotime(datetime.datetime(1970, 1, 1))
    else:
        start_timestamp = timeutils.isotime(start_timestamp)

    if end_timestamp:
        end_timestamp = timeutils.isotime(end_timestamp)

    dims_filter = dict(user_id=user,
                       project_id=project,
                       source=source,
                       resource_id=resource)
    dims_filter = {k: v for k, v in dims_filter.items() if v is not None}

    _search_args = dict(start_time=start_timestamp,
                        end_time=end_timestamp,
                        limit=1)
    _search_args = {k: v for k, v in _search_args.items()
                    if v is not None}

    result_count = 0
    for metric in self.mc.metrics_list(**dict(dimensions=dims_filter)):
        _search_args['name'] = metric['name']
        _search_args['dimensions'] = metric['dimensions']
        try:
            for sample in self.mc.measurements_list(**_search_args):
                d = sample['dimensions']
                m = self._convert_to_dict(sample['measurements'][0],
                                          sample['columns'])
                vm = m['value_meta']
                if not self._match_metaquery_to_value_meta(q, vm):
                    continue
                if d.get('resource_id'):
                    result_count += 1
                    yield api_models.Resource(
                        resource_id=d.get('resource_id'),
                        first_sample_timestamp=(
                            timeutils.parse_isotime(m['timestamp'])),
                        last_sample_timestamp=timeutils.utcnow(),
                        project_id=d.get('project_id'),
                        source=d.get('source'),
                        user_id=d.get('user_id'),
                        metadata=m['value_meta'])

                    if result_count == limit:
                        return
        except monasca_exc.HTTPConflict:
            pass
def get_meter_statistics(self, sample_filter, period=None, groupby=None,
                         aggregate=None):
    """Return an iterable of models.Statistics instances.

    Items contain meter statistics described by the query parameters.
    The filter must have a meter value set.
    """
    # NOTE(zqfan): We already have checked at API level, but
    # still leave it here in case of directly storage calls.
    if aggregate:
        for a in aggregate:
            if a.func not in self.AGGREGATES:
                msg = _('Invalid aggregation function: %s') % a.func
                raise storage.StorageBadAggregate(msg)

    if (groupby and set(groupby) -
            set(['user_id', 'project_id', 'resource_id', 'source',
                 'resource_metadata.instance_type'])):
        raise ceilometer.NotImplementedError(
            "Unable to group by these fields")
    q = pymongo_utils.make_query_from_filter(sample_filter)

    group_stage = {}
    project_stage = {
        "unit": "$_id.unit",
        "name": "$_id.name",
        "first_timestamp": "$first_timestamp",
        "last_timestamp": "$last_timestamp",
        "period_start": "$_id.period_start",
    }

    # Add timestamps to $group stage
    group_stage.update({"first_timestamp": {"$min": "$timestamp"},
                        "last_timestamp": {"$max": "$timestamp"}})

    # Define a _id field for grouped documents
    unique_group_field = {"name": "$counter_name",
                          "unit": "$counter_unit"}

    # Define a first timestamp for periods
    if sample_filter.start_timestamp:
        first_timestamp = sample_filter.start_timestamp
    else:
        first_timestamp_cursor = self.db.meter.find(
            limit=1, sort=[('timestamp', pymongo.ASCENDING)])
        if first_timestamp_cursor.count():
            first_timestamp = first_timestamp_cursor[0]['timestamp']
        else:
            first_timestamp = utils.EPOCH_TIME

    # Add a start_period field to the unique identifier of grouped
    # documents
    if period:
        period_dict = self._make_period_dict(period, first_timestamp)
        unique_group_field.update(period_dict)

    # Add groupby fields to the unique identifier of grouped documents
    if groupby:
        unique_group_field.update(
            dict((field.replace(".", "/"), "$%s" % field)
                 for field in groupby))

    group_stage.update({"_id": unique_group_field})

    self._compile_aggregate_stages(aggregate, group_stage, project_stage)

    # Aggregation stages list. The stages run one by one, each consuming
    # the documents produced by the previous stage.
    aggregation_query = [{'$match': q},
                         {"$sort": {"timestamp": 1}},
                         {"$group": group_stage},
                         {"$sort": {"_id.period_start": 1}},
                         {"$project": project_stage}]

    # results is a dict in pymongo<=2.6.3 and a CommandCursor in >=3.0
    results = self.db.meter.aggregate(aggregation_query,
                                      **self._make_aggregation_params())
    return [self._stats_result_to_model(point, groupby, aggregate,
                                        period, first_timestamp)
            for point in self._get_results(results)]
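# Illustrative shape (values made up) of the pipeline built above for
# period=None, groupby=['resource_id'] and no selectable aggregates; the
# accumulators contributed by _compile_aggregate_stages are assumed here to
# be the standard min/max/sum/count set.
example_pipeline = [
    {'$match': {'counter_name': 'cpu'}},
    {'$sort': {'timestamp': 1}},
    {'$group': {'_id': {'name': '$counter_name',
                        'unit': '$counter_unit',
                        'resource_id': '$resource_id'},
                'first_timestamp': {'$min': '$timestamp'},
                'last_timestamp': {'$max': '$timestamp'},
                # assumed aggregate accumulators:
                'min': {'$min': '$counter_volume'},
                'max': {'$max': '$counter_volume'},
                'sum': {'$sum': '$counter_volume'},
                'count': {'$sum': 1}}},
    {'$sort': {'_id.period_start': 1}},
    {'$project': {'unit': '$_id.unit',
                  'name': '$_id.name',
                  'first_timestamp': '$first_timestamp',
                  'last_timestamp': '$last_timestamp',
                  'period_start': '$_id.period_start'}},
]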
def delete_alarm(alarm_id):
    """Delete an alarm."""
    raise ceilometer.NotImplementedError('Alarms not implemented')
def get_meter_statistics(self, sample_filter, period=None, groupby=None,
                         aggregate=None):
    """Return an iterable of models.Statistics instances.

    Items contain meter statistics described by the query parameters.
    The filter must have a meter value set.

    .. note::

        Due to HBase limitations the aggregations are implemented
        in the driver itself, therefore this method will be quite slow
        because of all the Thrift traffic it is going to create.
    """
    if groupby:
        raise ceilometer.NotImplementedError("Group by not implemented.")

    if aggregate:
        raise ceilometer.NotImplementedError(
            'Selectable aggregates not implemented')

    with self.conn_pool.connection() as conn:
        meter_table = conn.table(self.METER_TABLE)
        q, start, stop, columns = (
            hbase_utils.make_sample_query_from_filter(sample_filter))
        # These fields are used in statistics' calculating
        columns.extend(['f:timestamp', 'f:counter_volume',
                        'f:counter_unit'])
        meters = map(hbase_utils.deserialize_entry,
                     list(meter for (ignored, meter) in
                          meter_table.scan(filter=q,
                                           row_start=start,
                                           row_stop=stop,
                                           columns=columns)))

    if sample_filter.start_timestamp:
        start_time = sample_filter.start_timestamp
    elif meters:
        start_time = meters[-1][0]['timestamp']
    else:
        start_time = None

    if sample_filter.end_timestamp:
        end_time = sample_filter.end_timestamp
    elif meters:
        end_time = meters[0][0]['timestamp']
    else:
        end_time = None

    results = []

    if not period:
        period = 0
        period_start = start_time
        period_end = end_time

    # As our HBase meters are stored as newest-first, we need to iterate
    # in the reverse order
    for meter in meters[::-1]:
        ts = meter[0]['timestamp']
        if period:
            offset = int(timeutils.delta_seconds(
                start_time, ts) / period) * period
            period_start = start_time + datetime.timedelta(0, offset)

        if not results or not results[-1].period_start == period_start:
            if period:
                period_end = period_start + datetime.timedelta(0, period)
            results.append(models.Statistics(unit='',
                                             count=0,
                                             min=0,
                                             max=0,
                                             avg=0,
                                             sum=0,
                                             period=period,
                                             period_start=period_start,
                                             period_end=period_end,
                                             duration=None,
                                             duration_start=None,
                                             duration_end=None,
                                             groupby=None))
        self._update_meter_stats(results[-1], meter[0])
    return results
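# Hedged sketch of _update_meter_stats (an assumption, not the actual driver
# helper): it folds one deserialized sample into the current Statistics
# bucket, keeping running min/max/sum/count/avg values and stretching the
# duration window to cover the sample's timestamp.
def _update_meter_stats(stat, meter):
    volume = meter['counter_volume']
    stat.unit = meter['counter_unit']
    stat.min = volume if stat.count == 0 else min(stat.min, volume)
    stat.max = volume if stat.count == 0 else max(stat.max, volume)
    stat.sum += volume
    stat.count += 1
    stat.avg = stat.sum / float(stat.count)
    if stat.duration_start is None:
        stat.duration_start = meter['timestamp']
    stat.duration_end = meter['timestamp']
    stat.duration = (stat.duration_end -
                     stat.duration_start).total_seconds()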
def record_events(events):
    """Write the events to the backend storage system.

    :param events: a list of model.Event objects.
    """
    raise ceilometer.NotImplementedError('Events not implemented.')
def get_meters(self, user=None, project=None, resource=None, source=None,
               metaquery=None, pagination=None):
    """Return an iterable of api_models.Meter instances

    :param user: Optional ID for user that owns the resource.
    :param project: Optional ID for project that owns the resource.
    :param resource: Optional ID of the resource.
    :param source: Optional source filter.
    :param metaquery: Optional dict with metadata to match on.
    :param pagination: Optional pagination query.
    """
    if pagination:
        raise ceilometer.NotImplementedError('Pagination not implemented')

    s_filter = storage.SampleFilter(user=user,
                                    project=project,
                                    source=source,
                                    metaquery=metaquery,
                                    resource=resource)

    # NOTE(gordc): get latest sample of each meter/resource. we do not
    # filter here as we want to filter only on latest record.
    session = self._engine_facade.get_session()
    subq = session.query(func.max(models.Sample.id).label('id')).join(
        models.Resource,
        models.Resource.internal_id ==
        models.Sample.resource_id).group_by(models.Sample.meter_id,
                                            models.Resource.resource_id)
    if resource:
        subq = subq.filter(models.Resource.resource_id == resource)
    subq = subq.subquery()

    # get meter details for samples.
    query_sample = (session.query(models.Sample.meter_id,
                                  models.Meter.name,
                                  models.Meter.type,
                                  models.Meter.unit,
                                  models.Resource.resource_id,
                                  models.Resource.project_id,
                                  models.Resource.source_id,
                                  models.Resource.user_id)
                    .join(subq, subq.c.id == models.Sample.id)
                    .join(models.Meter,
                          models.Meter.id == models.Sample.meter_id)
                    .join(models.Resource,
                          models.Resource.internal_id ==
                          models.Sample.resource_id))
    query_sample = make_query_from_filter(session, query_sample, s_filter,
                                          require_meter=False)

    for row in query_sample.all():
        yield api_models.Meter(
            name=row.name,
            type=row.type,
            unit=row.unit,
            resource_id=row.resource_id,
            project_id=row.project_id,
            source=row.source_id,
            user_id=row.user_id)
def get_samples(self, sample_filter, limit=None):
    """Return an iterable of dictionaries containing sample information.

    {
        'source': source of the resource,
        'counter_name': name of the resource,
        'counter_type': type of the sample (gauge, delta, cumulative),
        'counter_unit': unit of the sample,
        'counter_volume': volume of the sample,
        'user_id': UUID of user owning the resource,
        'project_id': UUID of project owning the resource,
        'resource_id': UUID of the resource,
        'timestamp': timestamp of the sample,
        'resource_metadata': metadata of the sample,
        'message_id': message ID of the sample,
        'message_signature': message signature of the sample,
        'recorded_at': time the sample was recorded
    }

    :param sample_filter: constraints for the sample search.
    :param limit: Maximum number of results to return.
    """
    if not sample_filter or not sample_filter.meter:
        raise ceilometer.NotImplementedError(
            "Supply meter name at the least")

    if (sample_filter.start_timestamp_op and
            sample_filter.start_timestamp_op != 'ge'):
        raise ceilometer.NotImplementedError(
            ('Start time op %s not implemented') %
            sample_filter.start_timestamp_op)

    if (sample_filter.end_timestamp_op and
            sample_filter.end_timestamp_op != 'le'):
        raise ceilometer.NotImplementedError(
            ('End time op %s not implemented') %
            sample_filter.end_timestamp_op)

    q = {}
    if sample_filter.metaquery:
        q = self._convert_metaquery(sample_filter.metaquery)

    if sample_filter.message_id:
        raise ceilometer.NotImplementedError('message_id not '
                                             'implemented '
                                             'in get_samples')

    if not sample_filter.start_timestamp:
        sample_filter.start_timestamp = timeutils.isotime(
            datetime.datetime(1970, 1, 1))
    else:
        sample_filter.start_timestamp = timeutils.isotime(
            sample_filter.start_timestamp)

    if sample_filter.end_timestamp:
        sample_filter.end_timestamp = timeutils.isotime(
            sample_filter.end_timestamp)

    _dimensions = dict(user_id=sample_filter.user,
                       project_id=sample_filter.project,
                       resource_id=sample_filter.resource,
                       source=sample_filter.source)
    _dimensions = {k: v for k, v in _dimensions.items() if v is not None}

    _search_args = dict(
        name=sample_filter.meter,
        start_time=sample_filter.start_timestamp,
        start_timestamp_op=sample_filter.start_timestamp_op,
        end_time=sample_filter.end_timestamp,
        end_timestamp_op=sample_filter.end_timestamp_op,
        limit=limit,
        merge_metrics=True,
        dimensions=_dimensions)
    _search_args = {k: v for k, v in _search_args.items()
                    if v is not None}

    for sample in self.mc.measurements_list(**_search_args):
        LOG.debug(_('Retrieved sample: %s'), sample)
        d = sample['dimensions']
        for measurement in sample['measurements']:
            meas_dict = self._convert_to_dict(measurement,
                                              sample['columns'])
            vm = meas_dict['value_meta']
            if not self._match_metaquery_to_value_meta(q, vm):
                continue
            yield api_models.Sample(
                source=d.get('source'),
                counter_name=sample['name'],
                counter_type=d.get('type'),
                counter_unit=d.get('unit'),
                counter_volume=meas_dict['value'],
                user_id=d.get('user_id'),
                project_id=d.get('project_id'),
                resource_id=d.get('resource_id'),
                timestamp=timeutils.parse_isotime(meas_dict['timestamp']),
                resource_metadata=meas_dict['value_meta'],
                message_id=sample['id'],
                message_signature='',
                recorded_at=(timeutils.parse_isotime(
                    meas_dict['timestamp'])))