class NotificationEngine(object):
    """Consume alarms from Kafka and dispatch notifications for them.

    Reads alarm messages from the alarm topic, converts each into zero or
    more notifications, sends them, and publishes the sent/failed results to
    the notification and retry topics respectively.
    """

    def __init__(self, config):
        """Wire up Kafka consumer/producer, statsd and processors from *config*."""
        self._topics = {
            'notification_topic': config['kafka']['notification_topic'],
            'retry_topic': config['kafka']['notification_retry_topic'],
        }
        self._statsd = monascastatsd.Client(name='monasca',
                                            dimensions=BaseProcessor.dimensions)
        self._consumer = KafkaConsumer(config['kafka']['url'],
                                       config['zookeeper']['url'],
                                       config['zookeeper']['notification_path'],
                                       config['kafka']['group'],
                                       config['kafka']['alarm_topic'])
        self._producer = KafkaProducer(config['kafka']['url'])
        self._alarm_ttl = config['processors']['alarm']['ttl']
        self._alarms = AlarmProcessor(self._alarm_ttl, config)
        self._notifier = NotificationProcessor(config['notification_types'])

    def run(self):
        """Main loop: consume alarms and dispatch notifications forever."""
        finished_count = self._statsd.get_counter(name='alarms_finished_count')
        for alarm in self._consumer:
            log.debug('Received alarm >|%s|<', str(alarm))
            # Partition/offset are tracked by the consumer itself; they are
            # not needed here, so mark them as intentionally unused.
            notifications, _partition, _offset = \
                self._alarms.to_notification(alarm)
            if notifications:
                sent, failed = self._notifier.send(notifications)
                self._producer.publish(self._topics['notification_topic'],
                                       [i.to_json() for i in sent])
                self._producer.publish(self._topics['retry_topic'],
                                       [i.to_json() for i in failed])
            # Commit after every alarm so progress survives a restart.
            self._consumer.commit()
            finished_count.increment()
class NotificationEngine(object):
    """Consume alarms from Kafka and dispatch notifications for them.

    Alarms arrive on the configured alarm topic; each is converted into
    notifications which are sent immediately. Successfully sent and failed
    notifications are published to the notification and retry topics.
    """

    def __init__(self, config):
        """Build Kafka consumer/producer, statsd client and processors."""
        self._topics = {
            'notification_topic': config['kafka']['notification_topic'],
            'retry_topic': config['kafka']['notification_retry_topic'],
        }
        self._statsd = monascastatsd.Client(
            name='monasca', dimensions=BaseProcessor.dimensions)
        self._consumer = KafkaConsumer(
            config['kafka']['url'],
            config['zookeeper']['url'],
            config['zookeeper']['notification_path'],
            config['kafka']['group'],
            config['kafka']['alarm_topic'])
        self._producer = KafkaProducer(config['kafka']['url'])
        self._alarm_ttl = config['processors']['alarm']['ttl']
        self._alarms = AlarmProcessor(self._alarm_ttl, config)
        self._notifier = NotificationProcessor(config['notification_types'])

    def run(self):
        """Main loop: consume alarms and dispatch notifications forever."""
        finished_count = self._statsd.get_counter(name='alarms_finished_count')
        for alarm in self._consumer:
            log.debug('Received alarm >|%s|<', str(alarm))
            # The partition/offset returned here are unused; the consumer
            # tracks its own position.
            notifications, _partition, _offset = self._alarms.to_notification(
                alarm)
            if notifications:
                sent, failed = self._notifier.send(notifications)
                self._producer.publish(self._topics['notification_topic'],
                                       [i.to_json() for i in sent])
                self._producer.publish(self._topics['retry_topic'],
                                       [i.to_json() for i in failed])
            # Commit per alarm so work is not redone after a restart.
            self._consumer.commit()
            finished_count.increment()
def setUp(self):
    """Patch the kafka client/producer modules and build the unit under test.

    No real broker is contacted: both ``kafka.client`` and ``kafka.producer``
    are replaced with mocks before the producer is constructed.
    """
    self.kafka_client_patcher = mock.patch('kafka.client')
    self.kafka_producer_patcher = mock.patch('kafka.producer')
    self.mock_kafka_client = self.kafka_client_patcher.start()
    self.mock_kafka_producer = self.kafka_producer_patcher.start()
    # Handles on the mock instances the production code will receive.
    self.client = self.mock_kafka_client.KafkaClient.return_value
    self.producer = self.mock_kafka_producer.KeyedProducer.return_value
    # Construct the object under test against the patched modules.
    self.monasca_kafka_producer = KafkaProducer(FAKE_KAFKA_URL)
class TestKafkaProducer(unittest.TestCase):
    """Unit tests for KafkaProducer, run entirely against mocked kafka modules."""

    def setUp(self):
        """Patch kafka client/producer modules and build the unit under test."""
        self.kafka_client_patcher = mock.patch('kafka.client')
        self.kafka_producer_patcher = mock.patch('kafka.producer')
        self.mock_kafka_client = self.kafka_client_patcher.start()
        self.mock_kafka_producer = self.kafka_producer_patcher.start()
        self.producer = self.mock_kafka_producer.KeyedProducer.return_value
        self.client = self.mock_kafka_client.KafkaClient.return_value
        self.monasca_kafka_producer = KafkaProducer(FAKE_KAFKA_URL)

    def tearDown(self):
        """Undo the module patches in reverse order of application."""
        self.kafka_producer_patcher.stop()
        self.kafka_client_patcher.stop()

    def test_kafka_producer_init(self):
        """Constructing the producer must build both a client and a KeyedProducer."""
        self.assertTrue(self.mock_kafka_client.KafkaClient.called)
        self.assertTrue(self.mock_kafka_producer.KeyedProducer.called)

    def test_kafka_producer_publish(self):
        """publish() with an explicit key forwards messages unchanged."""
        topic = FAKE_KAFKA_TOPIC
        messages = ['message']
        key = 'key'

        self.monasca_kafka_producer.publish(topic, messages, key)

        self.producer.send_messages.assert_called_once_with(
            topic, key, *messages)

    @mock.patch('monasca_common.kafka.producer.time')
    def test_kafka_producer_publish_one_message_without_key(self, mock_time):
        """A bare (non-list) message without a key gets a time-derived key."""
        topic = FAKE_KAFKA_TOPIC
        message = 'not_a_list'
        mock_time.time.return_value = 1
        expected_key = '1000'

        self.monasca_kafka_producer.publish(topic, message)

        self.assertTrue(mock_time.time.called)
        self.producer.send_messages.assert_called_once_with(
            topic, expected_key, message)

    @mock.patch('monasca_common.kafka.producer.log')
    def test_kafka_producer_publish_exception(self, mock_logger):
        """Send failures are logged and the exception propagates to the caller."""
        class MockException(Exception):
            pass

        topic = FAKE_KAFKA_TOPIC
        messages = ['message']
        key = 'key'
        self.producer.send_messages.side_effect = MockException

        with self.assertRaises(MockException):
            self.monasca_kafka_producer.publish(topic, messages, key)

        mock_logger.exception.assert_called_once_with(
            'Error publishing to {} topic.'.format(topic))
def __init__(self, config):
    """Wire up Kafka consumer/producer, statsd and alarm/notification processors."""
    kafka_conf = config['kafka']
    zk_conf = config['zookeeper']
    # Destination topics for processed notifications.
    self._topics = {
        'notification_topic': kafka_conf['notification_topic'],
        'retry_topic': kafka_conf['notification_retry_topic'],
    }
    self._statsd = monascastatsd.Client(name='monasca',
                                        dimensions=BaseProcessor.dimensions)
    self._consumer = KafkaConsumer(kafka_conf['url'],
                                   zk_conf['url'],
                                   zk_conf['notification_path'],
                                   kafka_conf['group'],
                                   kafka_conf['alarm_topic'])
    self._producer = KafkaProducer(kafka_conf['url'])
    self._alarm_ttl = config['processors']['alarm']['ttl']
    self._alarms = AlarmProcessor(self._alarm_ttl, config)
    self._notifier = NotificationProcessor(config['notification_types'])
def __init__(self, config, interval):
    """Set up Kafka plumbing and DB access for the periodic topic of *interval*."""
    self._topic_name = config['kafka']['periodic'][interval]
    self._statsd = monascastatsd.Client(name='monasca',
                                        dimensions=BaseProcessor.dimensions)
    # Each interval gets its own zookeeper path and Kafka topic.
    self._consumer = KafkaConsumer(config['kafka']['url'],
                                   config['zookeeper']['url'],
                                   config['zookeeper']['periodic_path'][interval],
                                   config['kafka']['group'],
                                   self._topic_name)
    self._producer = KafkaProducer(config['kafka']['url'])
    self._notifier = NotificationProcessor(config['notification_types'])
    self._db_repo = get_db_repo(config)
def __init__(self, config):
    """Configure retry bookkeeping and Kafka plumbing for the retry topic."""
    retry_conf = config['retry']
    self._retry_interval = retry_conf['interval']
    self._retry_max = retry_conf['max_attempts']
    # Where re-sent and still-failing notifications get published.
    self._topics = {
        'notification_topic': config['kafka']['notification_topic'],
        'retry_topic': config['kafka']['notification_retry_topic'],
    }
    self._statsd = monascastatsd.Client(name='monasca',
                                        dimensions=BaseProcessor.dimensions)
    self._consumer = KafkaConsumer(config['kafka']['url'],
                                   config['zookeeper']['url'],
                                   config['zookeeper']['notification_retry_path'],
                                   config['kafka']['group'],
                                   config['kafka']['notification_retry_topic'])
    self._producer = KafkaProducer(config['kafka']['url'])
    self._notifier = NotificationProcessor(config['notification_types'])
def __init__(self, config):
    """Build consumer/producer, statsd client and processors from *config*."""
    # Output topics keyed by role.
    self._topics = {
        'notification_topic': config['kafka']['notification_topic'],
        'retry_topic': config['kafka']['notification_retry_topic'],
    }
    self._statsd = monascastatsd.Client(
        name='monasca', dimensions=BaseProcessor.dimensions)
    self._consumer = KafkaConsumer(
        config['kafka']['url'],
        config['zookeeper']['url'],
        config['zookeeper']['notification_path'],
        config['kafka']['group'],
        config['kafka']['alarm_topic'])
    self._producer = KafkaProducer(config['kafka']['url'])
    self._alarm_ttl = config['processors']['alarm']['ttl']
    self._alarms = AlarmProcessor(self._alarm_ttl, config)
    self._notifier = NotificationProcessor(config['notification_types'])
def __init__(self, config, interval):
    """Set up consumer/producer and DB repo for the *interval* periodic topic."""
    self._topic_name = config['kafka']['periodic'][interval]
    self._statsd = monascastatsd.Client(
        name='monasca', dimensions=BaseProcessor.dimensions)
    # Interval-specific zookeeper offset path.
    periodic_path = config['zookeeper']['periodic_path'][interval]
    self._consumer = KafkaConsumer(config['kafka']['url'],
                                   config['zookeeper']['url'],
                                   periodic_path,
                                   config['kafka']['group'],
                                   self._topic_name)
    self._producer = KafkaProducer(config['kafka']['url'])
    self._notifier = NotificationProcessor(config['notification_types'])
    self._db_repo = get_db_repo(config)
def __init__(self, config):
    """Read retry limits and wire up Kafka plumbing for the retry topic."""
    self._retry_interval = config['retry']['interval']
    self._retry_max = config['retry']['max_attempts']
    # Output topics keyed by role.
    self._topics = {
        'notification_topic': config['kafka']['notification_topic'],
        'retry_topic': config['kafka']['notification_retry_topic'],
    }
    self._statsd = monascastatsd.Client(
        name='monasca', dimensions=BaseProcessor.dimensions)
    self._consumer = KafkaConsumer(
        config['kafka']['url'],
        config['zookeeper']['url'],
        config['zookeeper']['notification_retry_path'],
        config['kafka']['group'],
        config['kafka']['notification_retry_topic'])
    self._producer = KafkaProducer(config['kafka']['url'])
    self._notifier = NotificationProcessor(config['notification_types'])
class PeriodicEngine(object):
    """Re-sends periodic notifications at their configured period.

    Consumes previously published periodic notifications, checks against the
    DB that they should still be sent, waits out the remaining period, then
    re-sends and re-publishes them.
    """

    def __init__(self, config, period):
        """Set up Kafka consumer/producer and DB access for *period*'s topic."""
        self._topic_name = config['kafka']['periodic'][period]

        self._statsd = monascastatsd.Client(
            name='monasca', dimensions=BaseProcessor.dimensions)

        zookeeper_path = config['zookeeper']['periodic_path'][period]
        self._consumer = KafkaConsumer(config['kafka']['url'],
                                       config['zookeeper']['url'],
                                       zookeeper_path,
                                       config['kafka']['group'],
                                       self._topic_name)
        self._producer = KafkaProducer(config['kafka']['url'])

        self._notifier = NotificationProcessor(config)
        self._db_repo = get_db_repo(config)
        self._period = period

    def _keep_sending(self, alarm_id, original_state, notification_type,
                      period):
        """Return True while the periodic notification should keep repeating.

        Stops when the alarm was deleted, its state changed, its period now
        belongs to a different engine, or it is not a webhook notification.
        (Renamed ``type`` parameter to avoid shadowing the builtin; it is
        only called positionally from run().)
        """
        try:
            current_state = self._db_repo.get_alarm_current_state(alarm_id)
        except exceptions.DatabaseException:
            # One reconnect attempt; the repo re-establishes its connection.
            log.debug('Database Error.  Attempting reconnect')
            current_state = self._db_repo.get_alarm_current_state(alarm_id)
        # Alarm was deleted
        if current_state is None:
            return False
        # Alarm state changed
        if current_state != original_state:
            return False
        # Period changed
        if period != self._period:
            return False
        # Only webhook notifications repeat
        if notification_type != "webhook":
            return False
        return True

    def run(self):
        """Main loop: consume, filter, wait out the period, re-send, re-publish."""
        for raw_notification in self._consumer:
            message = raw_notification[1].message.value
            notification_data = json.loads(message)
            notification = construct_notification_object(
                self._db_repo, notification_data)

            if notification is None:
                # Unusable payload; skip it but still advance the offset.
                self._consumer.commit()
                continue

            if self._keep_sending(notification.alarm_id,
                                  notification.state,
                                  notification.type,
                                  notification.period):
                # Sleep off whatever remains of the period since the
                # notification was originally stamped.
                wait_duration = notification.period - (
                    time.time() - notification_data['notification_timestamp'])
                if wait_duration > 0:
                    time.sleep(wait_duration)

                notification.notification_timestamp = time.time()

                self._notifier.send([notification])
                self._producer.publish(self._topic_name,
                                       [notification.to_json()])
            self._consumer.commit()
class RetryEngine(object):
    """Re-sends failed notifications from the retry topic with backoff.

    Each consumed notification is retried once per pass; successes go back to
    the notification topic, failures are re-queued to the retry topic until
    ``retry.max_attempts`` is exceeded.
    """

    def __init__(self, config):
        """Read retry limits and wire up Kafka consumer/producer and DB repo."""
        self._retry_interval = config['retry']['interval']
        self._retry_max = config['retry']['max_attempts']

        self._topics = {}
        self._topics['notification_topic'] = config['kafka'][
            'notification_topic']
        self._topics['retry_topic'] = config['kafka'][
            'notification_retry_topic']

        self._statsd = monascastatsd.Client(
            name='monasca', dimensions=BaseProcessor.dimensions)
        self._consumer = KafkaConsumer(
            config['kafka']['url'],
            config['zookeeper']['url'],
            config['zookeeper']['notification_retry_path'],
            config['kafka']['group'],
            config['kafka']['notification_retry_topic'])
        self._producer = KafkaProducer(config['kafka']['url'])
        self._notifier = NotificationProcessor(config)
        self._db_repo = get_db_repo(config)

    def run(self):
        """Main loop: consume retry messages, wait out the interval, resend."""
        for raw_notification in self._consumer:
            message = raw_notification[1].message.value
            notification_data = json.loads(message)
            notification = construct_notification_object(
                self._db_repo, notification_data)

            # Unusable payload; skip but still advance the consumer offset.
            if notification is None:
                self._consumer.commit()
                continue

            # Sleep off whatever remains of the retry interval since the
            # notification was last stamped.
            wait_duration = self._retry_interval - (
                time.time() - notification_data['notification_timestamp'])

            if wait_duration > 0:
                time.sleep(wait_duration)

            sent, failed = self._notifier.send([notification])

            if sent:
                self._producer.publish(self._topics['notification_topic'],
                                       [notification.to_json()])

            if failed:
                notification.retry_count += 1
                notification.notification_timestamp = time.time()
                if notification.retry_count < self._retry_max:
                    # Still under the retry limit: re-queue for another pass.
                    log.error(u"retry failed for {} with name {} "
                              u"at {}. "
                              u"Saving for later retry.".format(
                                  notification.type,
                                  notification.name,
                                  notification.address))
                    self._producer.publish(self._topics['retry_topic'],
                                           [notification.to_json()])
                else:
                    # Retry budget exhausted: drop the notification.
                    log.error(u"retry failed for {} with name {} "
                              u"at {} after {} retries. "
                              u"Giving up on retry.".format(
                                  notification.type,
                                  notification.name,
                                  notification.address,
                                  self._retry_max))
            self._consumer.commit()
class RetryEngine(object):
    """Re-sends failed notifications from the retry topic with backoff.

    Rebuilds a Notification from each consumed retry message, retries the
    send, and either forwards the result or re-queues it until
    ``retry.max_attempts`` is exceeded.
    """

    def __init__(self, config):
        """Read retry limits and wire up Kafka consumer/producer."""
        self._retry_interval = config['retry']['interval']
        self._retry_max = config['retry']['max_attempts']

        self._topics = {}
        self._topics['notification_topic'] = config['kafka']['notification_topic']
        self._topics['retry_topic'] = config['kafka']['notification_retry_topic']

        self._statsd = monascastatsd.Client(name='monasca',
                                            dimensions=BaseProcessor.dimensions)
        self._consumer = KafkaConsumer(config['kafka']['url'],
                                       config['zookeeper']['url'],
                                       config['zookeeper']['notification_retry_path'],
                                       config['kafka']['group'],
                                       config['kafka']['notification_retry_topic'])
        self._producer = KafkaProducer(config['kafka']['url'])
        self._notifier = NotificationProcessor(config['notification_types'])

    def run(self):
        """Main loop: rebuild each retry notification, wait, and resend."""
        for raw_notification in self._consumer:
            partition = raw_notification[0]
            offset = raw_notification[1].offset
            message = raw_notification[1].message.value

            notification_data = json.loads(message)

            ntype = notification_data['type']
            name = notification_data['name']
            addr = notification_data['address']

            notification = Notification(ntype,
                                        partition,
                                        offset,
                                        name,
                                        addr,
                                        notification_data['retry_count'],
                                        notification_data['raw_alarm'])

            # Sleep off whatever remains of the retry interval since the
            # notification was last stamped.
            wait_duration = self._retry_interval - (
                time.time() - notification_data['notification_timestamp'])

            if wait_duration > 0:
                time.sleep(wait_duration)

            sent, failed = self._notifier.send([notification])

            if sent:
                # NOTE(review): publishes `sent` directly while the retry
                # path below serializes with to_json() — confirm that
                # NotificationProcessor.send() returns already-serialized
                # messages here; otherwise this should serialize too.
                self._producer.publish(self._topics['notification_topic'], sent)

            if failed:
                notification.retry_count += 1
                notification.notification_timestamp = time.time()
                if notification.retry_count < self._retry_max:
                    # Still under the retry limit: re-queue for another pass.
                    log.error(u"retry failed for {} with name {} "
                              u"at {}. "
                              u"Saving for later retry.".format(ntype, name, addr))
                    self._producer.publish(self._topics['retry_topic'],
                                           [notification.to_json()])
                else:
                    # Retry budget exhausted: drop the notification.
                    log.error(u"retry failed for {} with name {} "
                              u"at {} after {} retries. "
                              u"Giving up on retry."
                              .format(ntype, name, addr, self._retry_max))

            self._consumer.commit()
class PeriodicEngine(object):
    """Re-sends periodic notifications while their alarm stays in state.

    Consumes previously published periodic notifications, verifies against
    the DB that the alarm still exists in its original state, waits out the
    remaining period, then re-sends and re-publishes.
    """

    def __init__(self, config, interval):
        """Set up Kafka consumer/producer and DB access for *interval*'s topic."""
        self._topic_name = config['kafka']['periodic'][interval]

        self._statsd = monascastatsd.Client(
            name='monasca', dimensions=BaseProcessor.dimensions)

        zookeeper_path = config['zookeeper']['periodic_path'][interval]
        self._consumer = KafkaConsumer(config['kafka']['url'],
                                       config['zookeeper']['url'],
                                       zookeeper_path,
                                       config['kafka']['group'],
                                       self._topic_name)
        self._producer = KafkaProducer(config['kafka']['url'])

        self._notifier = NotificationProcessor(config['notification_types'])
        self._db_repo = get_db_repo(config)

    def _keep_sending(self, alarm_id, original_state):
        """Return True while the alarm still exists in its original state."""
        # Go to DB and check alarm state
        try:
            current_state = self._db_repo.get_alarm_current_state(alarm_id)
        except exceptions.DatabaseException:
            # One reconnect attempt; the repo re-establishes its connection.
            log.debug('Database Error.  Attempting reconnect')
            current_state = self._db_repo.get_alarm_current_state(alarm_id)
        # Alarm was deleted
        if current_state is None:
            return False
        # Alarm state changed
        if current_state != original_state:
            return False
        return True

    def run(self):
        """Main loop: consume, check state, wait out the period, re-send."""
        for raw_notification in self._consumer:
            partition = raw_notification[0]
            offset = raw_notification[1].offset
            message = raw_notification[1].message.value

            notification_data = json.loads(message)

            ntype = notification_data['type']
            name = notification_data['name']
            addr = notification_data['address']
            period = notification_data['period']

            notification = Notification(ntype,
                                        partition,
                                        offset,
                                        name,
                                        addr,
                                        period,
                                        notification_data['retry_count'],
                                        notification_data['raw_alarm'])

            if self._keep_sending(notification.alarm_id, notification.state):
                # Sleep off whatever remains of the period since the
                # notification was originally stamped.
                wait_duration = period - (
                    time.time() - notification_data['notification_timestamp'])

                if wait_duration > 0:
                    time.sleep(wait_duration)

                notification.notification_timestamp = time.time()
                self._notifier.send([notification])
                self._producer.publish(self._topic_name,
                                       [notification.to_json()])

            self._consumer.commit()
class PeriodicEngine(object):
    """Re-sends periodic notifications while their alarm stays in state."""

    def __init__(self, config, interval):
        """Build Kafka consumer/producer and DB repo for *interval*'s topic."""
        self._topic_name = config['kafka']['periodic'][interval]

        self._statsd = monascastatsd.Client(name='monasca',
                                            dimensions=BaseProcessor.dimensions)

        self._consumer = KafkaConsumer(config['kafka']['url'],
                                       config['zookeeper']['url'],
                                       config['zookeeper']['periodic_path'][interval],
                                       config['kafka']['group'],
                                       self._topic_name)
        self._producer = KafkaProducer(config['kafka']['url'])

        self._notifier = NotificationProcessor(config['notification_types'])
        self._db_repo = get_db_repo(config)

    def _keep_sending(self, alarm_id, original_state):
        """Return True only while the alarm still exists in *original_state*."""
        try:
            current_state = self._db_repo.get_alarm_current_state(alarm_id)
        except exceptions.DatabaseException:
            # One reconnect attempt; the repo re-establishes its connection.
            log.debug('Database Error.  Attempting reconnect')
            current_state = self._db_repo.get_alarm_current_state(alarm_id)

        if current_state is None:
            return False  # alarm was deleted
        if current_state != original_state:
            return False  # alarm state changed
        return True

    def run(self):
        """Main loop: rebuild each notification, wait out its period, re-send."""
        for raw_notification in self._consumer:
            partition = raw_notification[0]
            meta = raw_notification[1]
            payload = json.loads(meta.message.value)

            notification = Notification(payload['type'],
                                        partition,
                                        meta.offset,
                                        payload['name'],
                                        payload['address'],
                                        payload['period'],
                                        payload['retry_count'],
                                        payload['raw_alarm'])

            if self._keep_sending(notification.alarm_id, notification.state):
                # Sleep off whatever remains of the period since the
                # notification was originally stamped.
                remaining = payload['period'] - (
                    time.time() - payload['notification_timestamp'])
                if remaining > 0:
                    time.sleep(remaining)

                notification.notification_timestamp = time.time()
                self._notifier.send([notification])
                self._producer.publish(self._topic_name,
                                       [notification.to_json()])

            self._consumer.commit()