def test_show_watch_metric(self):
    """Verify show_watch_metric returns stored datapoints with all API keys."""
    # Insert dummy watch rule into the DB
    values = {'stack_id': self.stack.id,
              'state': 'NORMAL',
              'name': u'HttpFailureAlarm',
              'rule': {u'EvaluationPeriods': u'1',
                       u'AlarmActions': [u'WebServerRestartPolicy'],
                       u'AlarmDescription': u'Restart the WikiDatabase',
                       u'Namespace': u'system/linux',
                       u'Period': u'300',
                       u'ComparisonOperator': u'GreaterThanThreshold',
                       u'Statistic': u'SampleCount',
                       u'Threshold': u'2',
                       u'MetricName': u'ServiceFailure'}}
    db_ret = db_api.watch_rule_create(self.ctx, values)
    # assertIsNotNone is the idiomatic check and gives a clearer
    # failure message than assertNotEqual(x, None)
    self.assertIsNotNone(db_ret)

    # And add a metric datapoint
    watch = db_api.watch_rule_get_by_name(self.ctx, "HttpFailureAlarm")
    self.assertIsNotNone(watch)
    values = {'watch_rule_id': watch.id,
              'data': {u'Namespace': u'system/linux',
                       u'ServiceFailure': {u'Units': u'Counter',
                                           u'Value': 1}}}
    watch = db_api.watch_data_create(self.ctx, values)

    # Check there is one result returned
    result = self.eng.show_watch_metric(self.ctx,
                                        metric_namespace=None,
                                        metric_name=None)
    self.assertEqual(1, len(result))

    # Create another metric datapoint and check we get two
    watch = db_api.watch_data_create(self.ctx, values)
    result = self.eng.show_watch_metric(self.ctx,
                                        metric_namespace=None,
                                        metric_name=None)
    self.assertEqual(2, len(result))

    # Cleanup, delete the dummy rule
    db_api.watch_rule_delete(self.ctx, "HttpFailureAlarm")

    # Check the response has all keys defined in the engine API;
    # assertIn reports the missing key on failure, assertTrue does not
    for key in engine_api.WATCH_DATA_KEYS:
        self.assertIn(key, result[0])
def create_watch_data(self, data):
    """Record a new metric datapoint for this watch rule.

    Data is forwarded to Ceilometer when the rule is ceilometer-controlled,
    dropped while the rule is suspended, and ignored unless it contains the
    metric this rule alarms on; otherwise it is persisted via db_api.
    """
    if self.state == self.CEILOMETER_CONTROLLED:
        # this is a short term measure for those that have cfn-push-stats
        # within their templates, but want to use Ceilometer alarms.
        self._to_ceilometer(data)
        return

    if self.state == self.SUSPENDED:
        LOG.debug('Ignoring metric data for %s, SUSPENDED state'
                  % self.name)
        return []

    if self.rule['MetricName'] not in data:
        # Our simplified cloudwatch implementation only expects a single
        # Metric associated with each alarm, but some cfn-push-stats
        # options, e.g --haproxy try to push multiple metrics when we
        # actually only care about one (the one we're alarming on)
        # so just ignore any data which doesn't contain MetricName
        LOG.debug('Ignoring metric data (only accept %(metric)s) '
                  ': %(data)s' % {'metric': self.rule['MetricName'],
                                  'data': data})
        return

    new_record = {'data': data, 'watch_rule_id': self.id}
    stored = db_api.watch_data_create(None, new_record)
    LOG.debug('new watch:%(name)s data:%(data)s' %
              {'name': self.name, 'data': str(stored.data)})
def create_watch_data(self, data):
    """Store a metric datapoint for this rule, routing or filtering it first."""
    # Ceilometer-managed rules: forward the sample instead of storing it.
    if self.state == self.CEILOMETER_CONTROLLED:
        # this is a short term measure for those that have cfn-push-stats
        # within their templates, but want to use Ceilometer alarms.
        self._to_ceilometer(data)
        return

    # Suspended rules drop incoming data entirely.
    if self.state == self.SUSPENDED:
        LOG.debug('Ignoring metric data for %s, SUSPENDED state' %
                  self.name)
        return []

    metric = self.rule['MetricName']
    if metric not in data:
        # Our simplified cloudwatch implementation only expects a single
        # Metric associated with each alarm, but some cfn-push-stats
        # options, e.g --haproxy try to push multiple metrics when we
        # actually only care about one (the one we're alarming on)
        # so just ignore any data which doesn't contain MetricName
        LOG.debug('Ignoring metric data (only accept %(metric)s) '
                  ': %(data)s' % {'metric': metric, 'data': data})
        return

    wd = db_api.watch_data_create(None, {'data': data,
                                         'watch_rule_id': self.id})
    LOG.debug('new watch:%(name)s data:%(data)s' %
              {'name': self.name, 'data': str(wd.data)})
def create_watch_data(self, context, watch_name, stats_data):
    '''
    This could be used by CloudWatch and WaitConditions
    and treat HA service events like any other CloudWatch.

    Returns a two-element list: [error_message_or_None, data_or_None].
    '''
    # NOTE(review): the db_api calls below pass None rather than the
    # supplied context — presumably deliberate here, but worth confirming.
    wr = db_api.watch_rule_get(None, watch_name)
    if wr is None:
        logger.warn('NoSuch watch:%s' % (watch_name))
        return ['NoSuch Watch Rule', None]

    metric = wr.rule['MetricName']
    if metric not in stats_data:
        logger.warn('new data has incorrect metric:%s' % (metric))
        return ['MetricName %s missing' % metric, None]

    wd = db_api.watch_data_create(None, {'data': stats_data,
                                         'watch_rule_id': wr.id})
    logger.debug('new watch:%s data:%s' % (watch_name, str(wd.data)))

    # SampleCount alarms are re-evaluated immediately on each datapoint.
    if wr.rule['Statistic'] == 'SampleCount':
        self.run_rule(None, wr)

    return [None, wd.data]
def test_show_watch_metric(self):
    """Check show_watch_metric reflects stored datapoints and API keys."""
    # Insert dummy watch rule into the DB
    values = {'stack_id': self.stack.id,
              'state': 'NORMAL',
              'name': u'HttpFailureAlarm',
              'rule': {u'EvaluationPeriods': u'1',
                       u'AlarmActions': [u'WebServerRestartPolicy'],
                       u'AlarmDescription': u'Restart the WikiDatabase',
                       u'Namespace': u'system/linux',
                       u'Period': u'300',
                       u'ComparisonOperator': u'GreaterThanThreshold',
                       u'Statistic': u'SampleCount',
                       u'Threshold': u'2',
                       u'MetricName': u'ServiceFailure'}}
    db_ret = db_api.watch_rule_create(self.ctx, values)
    # assertIsNotNone replaces assertNotEqual(x, None): same check,
    # clearer intent and failure output
    self.assertIsNotNone(db_ret)

    # And add a metric datapoint
    watch = db_api.watch_rule_get_by_name(self.ctx, "HttpFailureAlarm")
    self.assertIsNotNone(watch)
    values = {'watch_rule_id': watch.id,
              'data': {u'Namespace': u'system/linux',
                       u'ServiceFailure': {
                           u'Units': u'Counter',
                           u'Value': 1}}}
    watch = db_api.watch_data_create(self.ctx, values)

    # Check there is one result returned
    result = self.eng.show_watch_metric(self.ctx,
                                        metric_namespace=None,
                                        metric_name=None)
    self.assertEqual(1, len(result))

    # Create another metric datapoint and check we get two
    watch = db_api.watch_data_create(self.ctx, values)
    result = self.eng.show_watch_metric(self.ctx,
                                        metric_namespace=None,
                                        metric_name=None)
    self.assertEqual(2, len(result))

    # Cleanup, delete the dummy rule
    db_api.watch_rule_delete(self.ctx, "HttpFailureAlarm")

    # Check the response has all keys defined in the engine API;
    # assertIn names the missing key on failure, unlike assertTrue
    for key in engine_api.WATCH_DATA_KEYS:
        self.assertIn(key, result[0])
def create_watch_data(self, data):
    """Validate and store a new datapoint for this watch rule.

    Raises ValueError if *data* does not contain this rule's metric.
    SampleCount rules are re-evaluated immediately after storing.
    """
    if not self.rule['MetricName'] in data:
        logger.warn('new data has incorrect metric:%s' %
                    (self.rule['MetricName']))
        # A missing dict key is a bad-value condition, not an attribute
        # access failure: raise ValueError (matching the sibling
        # implementation of this method) rather than AttributeError,
        # which would look like an internal attribute-access bug.
        raise ValueError('MetricName %s missing' %
                         self.rule['MetricName'])

    watch_data = {'data': data, 'watch_rule_id': self.id}
    wd = db_api.watch_data_create(None, watch_data)
    logger.debug('new watch:%s data:%s' % (self.name, str(wd.data)))

    # SampleCount alarms are evaluated on every datapoint, not on a timer.
    if self.rule['Statistic'] == 'SampleCount':
        self.run_rule()
def create_watch_data(self, data):
    """Persist a datapoint for this rule; reject data lacking our metric.

    Raises ValueError when *data* does not contain the rule's MetricName.
    """
    metric = self.rule['MetricName']
    if metric not in data:
        logger.warn('new data has incorrect metric:%s' % (metric))
        raise ValueError('MetricName %s missing' % metric)

    wd = db_api.watch_data_create(None, {'data': data,
                                         'watch_rule_id': self.id})
    logger.debug('new watch:%s data:%s' % (self.name, str(wd.data)))
def create_watch_data(self, data):
    """Store a datapoint, silently ignoring data for other metrics."""
    metric = self.rule['MetricName']
    if metric not in data:
        # Our simplified cloudwatch implementation only expects a single
        # Metric associated with each alarm, but some cfn-push-stats
        # options, e.g --haproxy try to push multiple metrics when we
        # actually only care about one (the one we're alarming on)
        # so just ignore any data which doesn't contain MetricName
        logger.debug('Ignoring metric data (only accept %s) : %s' %
                     (metric, data))
        return

    record = {'data': data, 'watch_rule_id': self.id}
    wd = db_api.watch_data_create(None, record)
    logger.debug('new watch:%s data:%s' % (self.name, str(wd.data)))
def create_watch_data(self, data):
    """Record a metric datapoint if it carries the metric we alarm on."""
    if self.rule['MetricName'] in data:
        stored = db_api.watch_data_create(
            None, {'data': data, 'watch_rule_id': self.id})
        logger.debug('new watch:%s data:%s' %
                     (self.name, str(stored.data)))
        return

    # Our simplified cloudwatch implementation only expects a single
    # Metric associated with each alarm, but some cfn-push-stats
    # options, e.g --haproxy try to push multiple metrics when we
    # actually only care about one (the one we're alarming on)
    # so just ignore any data which doesn't contain MetricName
    logger.debug('Ignoring metric data (only accept %s) : %s' %
                 (self.rule['MetricName'], data))
def create(cls, context, values):
    """Create a watch-data DB record and return it wrapped as a new object."""
    db_data = db_api.watch_data_create(context, values)
    obj = cls()
    return cls._from_db_object(context, obj, db_data)