def prepare_alarms(self):
    """Populate self.alarms with one 'or' and one 'and' combination alarm."""
    def _combination(name, description, alarm_ids, op):
        # All fixtures share everything except name/description/rule.
        return models.Alarm(name=name,
                            description=description,
                            type='combination',
                            enabled=True,
                            user_id='foobar',
                            project_id='snafu',
                            alarm_id=str(uuid.uuid4()),
                            state='insufficient data',
                            state_timestamp=constants.MIN_DATETIME,
                            timestamp=constants.MIN_DATETIME,
                            insufficient_data_actions=[],
                            ok_actions=[],
                            alarm_actions=[],
                            repeat_actions=False,
                            time_constraints=[],
                            rule=dict(alarm_ids=alarm_ids, operator=op),
                            severity='critical')

    self.alarms = [
        _combination('or-alarm', 'the or alarm',
                     ['9cfc3e51-2ff1-4b1d-ac01-c1bd4c6d0d1e',
                      '1d441595-d069-4e05-95ab-8693ba6a8302'],
                     'or'),
        _combination('and-alarm', 'the and alarm',
                     ['b82734f4-9d06-48f3-8a86-fa59a0c99dc8',
                      '15a700e5-2fe8-4b3d-8c55-9e92831f6a2b'],
                     'and'),
    ]
def get_alarms(self, name=None, user=None, state=None, meter=None,
               project=None, enabled=None, alarm_id=None, pagination=None):
    """Yield alarms from the HBase alarm table matching the given filters.

    Pagination and meter filtering are not supported by this backend.
    """
    # Reject unsupported filters up front.
    if pagination:
        raise NotImplementedError('Pagination not implemented')
    if meter:
        raise NotImplementedError('Filter by meter not implemented')

    scan_filter = hbase_utils.make_query(alarm_id=alarm_id, name=name,
                                         enabled=enabled, user_id=user,
                                         project_id=project, state=state)
    with self.conn_pool.connection() as conn:
        table = conn.table(self.ALARM_TABLE)
        # Row keys are irrelevant here; only the serialized payload matters.
        for _key, row_data in table.scan(filter=scan_filter):
            yield models.Alarm(**hbase_utils.deserialize_entry(row_data)[0])
def get_alarms(self, name=None, user=None, state=None, meter=None,
               project=None, enabled=None, alarm_id=None, pagination=None,
               alarm_type=None):
    """Yield matching alarms from HBase, newest first by timestamp.

    Pagination and meter filtering are not supported by this backend.
    """
    if pagination:
        raise ceilometer.NotImplementedError('Pagination not implemented')
    if meter:
        raise ceilometer.NotImplementedError(
            'Filter by meter not implemented')

    scan_filter = hbase_utils.make_query(alarm_id=alarm_id, name=name,
                                         enabled=enabled, user_id=user,
                                         project_id=project, state=state,
                                         type=alarm_type)
    with self.conn_pool.connection() as conn:
        table = conn.table(self.ALARM_TABLE)
        # Materialize all rows so they can be ordered before yielding.
        entries = [hbase_utils.deserialize_entry(row_data)[0]
                   for _key, row_data in table.scan(filter=scan_filter)]
        entries.sort(key=operator.itemgetter('timestamp'), reverse=True)
        for entry in entries:
            yield models.Alarm(**entry)
def _make_alarm(uuid):
    """Build a canonical threshold-alarm fixture carrying the given id."""
    # Keep the rule separate so the Alarm call below stays readable.
    threshold_rule = dict(statistic='avg',
                          comparison_operator='gt',
                          threshold=80.0,
                          evaluation_periods=5,
                          period=60,
                          query=[])
    return models.Alarm(name='instance_running_hot',
                        type='threshold',
                        user_id='foobar',
                        project_id='snafu',
                        enabled=True,
                        description='',
                        repeat_actions=False,
                        state='insufficient data',
                        state_timestamp=constants.MIN_DATETIME,
                        timestamp=constants.MIN_DATETIME,
                        ok_actions=[],
                        alarm_actions=[],
                        insufficient_data_actions=[],
                        alarm_id=uuid,
                        severity='critical',
                        time_constraints=[],
                        rule=threshold_rule)
def put(self, data):
    """Modify this alarm.

    :param data: an alarm within the request body.
    """
    rbac.enforce('change_alarm', pecan.request)

    # Ensure alarm exists; raises through _alarm() if it does not.
    alarm_in = self._alarm()

    now = timeutils.utcnow()

    # The id in the URL wins over any id in the body.
    data.alarm_id = self._id

    # RBAC may pin the caller to a specific user/project; otherwise keep
    # the stored owner when the request leaves the field unset.
    user, project = rbac.get_limited_to(pecan.request.headers)
    if user:
        data.user_id = user
    elif data.user_id == wtypes.Unset:
        data.user_id = alarm_in.user_id
    if project:
        data.project_id = project
    elif data.project_id == wtypes.Unset:
        data.project_id = alarm_in.project_id
    data.timestamp = now
    # Only bump state_timestamp when the state actually changes.
    if alarm_in.state != data.state:
        data.state_timestamp = now
    else:
        data.state_timestamp = alarm_in.state_timestamp
    # NOTE(review): severity is copied onto the stored alarm before any
    # validation succeeds; presumably intentional so old_alarm below
    # reflects the new severity — confirm against change-recording tests.
    alarm_in.severity = data.severity
    # make sure alarms are unique by name per project.
    if alarm_in.name != data.name:
        alarms = list(self.conn.get_alarms(name=data.name,
                                           project=data.project_id))
        if alarms:
            raise base.ClientSideError(
                _("Alarm with name=%s exists") % data.name,
                status_code=409)

    # Give the type-specific rule plugin a chance to adjust the payload.
    ALARMS_RULES[data.type].plugin.update_hook(data)

    old_alarm = Alarm.from_db_model(alarm_in).as_dict(alarm_models.Alarm)
    updated_alarm = data.as_dict(alarm_models.Alarm)
    try:
        alarm_in = alarm_models.Alarm(**updated_alarm)
    except Exception:
        LOG.exception(_("Error while putting alarm: %s") % updated_alarm)
        raise base.ClientSideError(_("Alarm incorrect"))

    alarm = self.conn.update_alarm(alarm_in)

    # Record only the fields that actually changed, excluding the
    # timestamps this method always rewrites.
    change = dict((k, v) for k, v in updated_alarm.items()
                  if v != old_alarm[k] and k not in
                  ['timestamp', 'state_timestamp'])
    self._record_change(change, now, on_behalf_of=alarm.project_id)
    return Alarm.from_db_model(alarm)
def post(self, data):
    """Create a new alarm.

    :param data: an alarm within the request body.
    """
    rbac.enforce('create_alarm', pecan.request)

    conn = pecan.request.alarm_storage_conn
    now = timeutils.utcnow()

    data.alarm_id = str(uuid.uuid4())
    user_limit, project_limit = rbac.get_limited_to(pecan.request.headers)

    def _set_ownership(aspect, owner_limitation, header):
        """Resolve the user/project owner of the new alarm on ``data``."""
        attr = '%s_id' % aspect
        requested_owner = getattr(data, attr)
        explicit_owner = requested_owner != wtypes.Unset
        caller = pecan.request.headers.get(header)
        # A limited caller may not claim ownership for someone else.
        if (owner_limitation and explicit_owner
                and requested_owner != caller):
            raise base.ProjectNotAuthorized(requested_owner, aspect)

        # NOTE(review): by Python precedence this parses as
        # owner_limitation or (requested_owner if explicit_owner else
        # caller) — i.e. the RBAC limit wins, then the explicit owner,
        # then the caller's own id. Presumably intentional; confirm.
        actual_owner = (owner_limitation or
                        requested_owner if explicit_owner else caller)
        setattr(data, attr, actual_owner)

    _set_ownership('user', user_limit, 'X-User-Id')
    _set_ownership('project', project_limit, 'X-Project-Id')

    # Check if there's room for one more alarm
    if is_over_quota(conn, data.project_id, data.user_id):
        raise OverQuota(data)

    data.timestamp = now
    data.state_timestamp = now

    # Give the type-specific rule plugin a chance to adjust the payload.
    ALARMS_RULES[data.type].plugin.create_hook(data)

    data.update_actions()
    change = data.as_dict(alarm_models.Alarm)

    # make sure alarms are unique by name per project.
    alarms = list(conn.get_alarms(name=data.name,
                                  project=data.project_id))
    if alarms:
        raise base.ClientSideError(
            _("Alarm with name='%s' exists") % data.name,
            status_code=409)

    try:
        alarm_in = alarm_models.Alarm(**change)
    except Exception:
        LOG.exception(_("Error while posting alarm: %s") % change)
        raise base.ClientSideError(_("Alarm incorrect"))

    alarm = conn.create_alarm(alarm_in)
    self._record_creation(conn, change, alarm.alarm_id, now)
    return Alarm.from_db_model(alarm)
def update_alarm(self, alarm):
    """Persist *alarm* in MongoDB and return the stored copy as a model."""
    serialized = alarm.as_dict()
    # upsert=True creates the document when the alarm does not exist yet.
    self.db.alarm.update({'alarm_id': alarm.alarm_id},
                         {'$set': serialized},
                         upsert=True)
    # Read back what was written so the caller sees the stored form.
    stored = self.db.alarm.find({'alarm_id': alarm.alarm_id})[0]
    del stored['_id']
    self._ensure_encapsulated_rule_format(stored)
    self._ensure_time_constraints(stored)
    return models.Alarm(**stored)
def update_alarm(self, alarm):
    """Create or update an alarm in HBase.

    HBase ``put`` is an upsert, so this method serves both creation and
    update of an alarm row keyed by its alarm_id.

    :param alarm: the Alarm to store. It is an Alarm object, so we need
                  to call as_dict() before serializing it.
    :return: the alarm re-read from HBase, rebuilt as a model object.
    """
    _id = alarm.alarm_id
    alarm_to_store = hbase_utils.serialize_entry(alarm.as_dict())
    with self.conn_pool.connection() as conn:
        alarm_table = conn.table(self.ALARM_TABLE)
        alarm_table.put(_id, alarm_to_store)
        # Read back what was written so the caller sees the stored form.
        stored_alarm = hbase_utils.deserialize_entry(
            alarm_table.row(_id))[0]
        return models.Alarm(**stored_alarm)
def _retrieve_alarms(self, query_filter, orderby, limit):
    """Yield alarm model objects matching *query_filter*, ordered and
    optionally limited."""
    # Only pass limit= when the caller actually set one; pymongo treats
    # the two calls differently.
    if limit is not None:
        cursor = self.db.alarm.find(query_filter, limit=limit,
                                    sort=orderby)
    else:
        cursor = self.db.alarm.find(query_filter, sort=orderby)
    for doc in cursor:
        record = dict(doc)
        # Mongo's internal key is not part of the alarm model.
        del record['_id']
        self._ensure_encapsulated_rule_format(record)
        self._ensure_time_constraints(record)
        yield models.Alarm(**record)
def _row_to_alarm_model(row):
    """Convert a storage row into an API alarm model.

    Every field is copied one-to-one from the row attributes.
    """
    copied_fields = ('alarm_id', 'enabled', 'type', 'name', 'description',
                     'timestamp', 'user_id', 'project_id', 'state',
                     'state_timestamp', 'ok_actions', 'alarm_actions',
                     'insufficient_data_actions', 'rule',
                     'time_constraints', 'repeat_actions')
    return alarm_api_models.Alarm(
        **{field: getattr(row, field) for field in copied_fields})
def setUp(self):
    """Seed storage with twelve alarms: 3 states x 2 dates x 2 projects.

    Each alarm's id encodes its state, date and project index so tests
    can assert on query results by name/id alone.
    """
    super(TestQueryAlarmsController, self).setUp()
    self.alarm_url = '/query/alarms'

    for state in ['ok', 'alarm', 'insufficient data']:
        for date in [datetime.datetime(2013, 1, 1),
                     datetime.datetime(2013, 2, 2)]:
            # Renamed loop variable from `id`: don't shadow the builtin.
            for index in [1, 2]:
                alarm_id = "-".join([state, date.isoformat(), str(index)])
                project_id = "project-id%d" % index
                alarm = models.Alarm(
                    name=alarm_id,
                    type='threshold',
                    enabled=True,
                    alarm_id=alarm_id,
                    description='a',
                    state=state,
                    state_timestamp=date,
                    timestamp=date,
                    ok_actions=[],
                    insufficient_data_actions=[],
                    alarm_actions=[],
                    repeat_actions=True,
                    user_id="user-id%d" % index,
                    project_id=project_id,
                    time_constraints=[],
                    rule=dict(comparison_operator='gt',
                              threshold=2.0,
                              statistic='avg',
                              evaluation_periods=60,
                              period=1,
                              meter_name='meter.test',
                              query=[{'field': 'project_id',
                                      'op': 'eq',
                                      'value': project_id}]),
                    severity='critical')
                # update_alarm upserts, so it also creates the fixture.
                self.alarm_conn.update_alarm(alarm)
def prepare_alarms(self):
    """Create the three Gnocchi-rule alarm fixtures."""
    def _shared_kwargs():
        # Fresh dict (and fresh list objects) per alarm so fixtures
        # never share mutable state.
        return dict(enabled=True,
                    user_id='foobar',
                    project_id='snafu',
                    alarm_id=str(uuid.uuid4()),
                    state='insufficient data',
                    state_timestamp=constants.MIN_DATETIME,
                    timestamp=constants.MIN_DATETIME,
                    insufficient_data_actions=[],
                    ok_actions=[],
                    alarm_actions=[],
                    repeat_actions=False,
                    time_constraints=[])

    self.alarms = [
        models.Alarm(name='instance_running_hot',
                     description='instance_running_hot',
                     type='gnocchi_resources_threshold',
                     rule=dict(comparison_operator='gt',
                               threshold=80.0,
                               evaluation_periods=5,
                               aggregation_method='mean',
                               granularity=60,
                               metric='cpu_util',
                               resource_type='instance',
                               resource_id='my_instance'),
                     **_shared_kwargs()),
        models.Alarm(name='group_running_idle',
                     description='group_running_idle',
                     type='gnocchi_aggregation_by_metrics_threshold',
                     rule=dict(comparison_operator='le',
                               threshold=10.0,
                               evaluation_periods=4,
                               aggregation_method='max',
                               granularity=300,
                               metrics=[
                                   '0bb1604d-1193-4c0a-b4b8-74b170e35e83',
                                   '9ddc209f-42f8-41e1-b8f1-8804f59c4053']),
                     **_shared_kwargs()),
        models.Alarm(name='instance_not_running',
                     description='instance_running_hot',
                     type='gnocchi_aggregation_by_resources_threshold',
                     rule=dict(comparison_operator='gt',
                               threshold=80.0,
                               evaluation_periods=6,
                               aggregation_method='mean',
                               granularity=50,
                               metric='cpu_util',
                               resource_type='instance',
                               query='{"=": {"server_group": '
                                     '"my_autoscaling_group"}}'),
                     **_shared_kwargs()),
    ]
def prepare_alarms(self):
    """Create the two threshold alarm fixtures used by the tests."""
    def _shared_kwargs():
        # Fresh dict (and fresh list objects) per alarm so fixtures
        # never share mutable state.
        return dict(type='threshold',
                    enabled=True,
                    user_id='foobar',
                    project_id='snafu',
                    alarm_id=str(uuid.uuid4()),
                    state='insufficient data',
                    state_timestamp=constants.MIN_DATETIME,
                    timestamp=constants.MIN_DATETIME,
                    insufficient_data_actions=[],
                    ok_actions=[],
                    alarm_actions=[],
                    repeat_actions=False,
                    time_constraints=[])

    self.alarms = [
        models.Alarm(name='instance_running_hot',
                     description='instance_running_hot',
                     rule=dict(comparison_operator='gt',
                               threshold=80.0,
                               evaluation_periods=5,
                               statistic='avg',
                               period=60,
                               meter_name='cpu_util',
                               query=[{'field': 'meter',
                                       'op': 'eq',
                                       'value': 'cpu_util'},
                                      {'field': 'resource_id',
                                       'op': 'eq',
                                       'value': 'my_instance'}]),
                     **_shared_kwargs()),
        models.Alarm(name='group_running_idle',
                     description='group_running_idle',
                     rule=dict(comparison_operator='le',
                               threshold=10.0,
                               evaluation_periods=4,
                               statistic='max',
                               period=300,
                               meter_name='cpu_util',
                               query=[{'field': 'meter',
                                       'op': 'eq',
                                       'value': 'cpu_util'},
                                      {'field':
                                       'metadata.user_metadata.AS',
                                       'op': 'eq',
                                       'value': 'my_group'}]),
                     **_shared_kwargs()),
    ]