# NOTE(review): this module-level setUp() appears to be a stray duplicate of
# GuardianTest.setUp below (it is byte-identical and takes `self` despite not
# being inside a class) — presumably a paste artifact; confirm and remove.
def setUp(self):
    # Build the per-test fixtures: management API client, guardian under
    # test, a clean database, and the consumer user/config.
    global pulse_cfg
    self.proc = None
    self.publisher = None
    self.management_api = PulseManagementAPI(
        management_url='http://{}:{}/api'.format(
            pulse_cfg['host'], pulse_cfg['management_port']),
        user=pulse_cfg['user'],
        password=pulse_cfg['password']
    )
    self.guardian = PulseGuardian(self.management_api,
                                  warn_queue_size=TEST_WARN_SIZE,
                                  del_queue_size=TEST_DELETE_SIZE,
                                  emails=False)

    # Hack in a test config.
    dbinit.pulse_management = self.management_api
    dbinit.init_and_clear_db()

    self.consumer_cfg = pulse_cfg.copy()
    self.consumer_cfg['applabel'] = str(uuid.uuid1())

    # Configure/create the test user to be used for message consumption.
    self.consumer_cfg['user'] = CONSUMER_USER
    self.consumer_cfg['password'] = CONSUMER_PASSWORD

    self.user = User.new_user(email=CONSUMER_EMAIL, admin=False)
    db_session.add(self.user)
    db_session.commit()

    self.pulse_user = PulseUser.new_user(
        username=CONSUMER_USER,
        password=CONSUMER_PASSWORD,
        owner=self.user,
        management_api=self.management_api)
    db_session.add(self.pulse_user)
    db_session.commit()
class GuardianTest(unittest.TestCase):
    """Integration tests for PulseGuardian queue monitoring.

    Launches a consumer process that creates a queue then disconnects,
    and then floods the exchange with messages and checks that
    PulseGuardian warns the queue's owner and deletes the queue if it
    gets over the maximum size.
    """

    # Defaults; can be overridden for particular tests.
    consumer_class = consumers.PulseTestConsumer
    publisher_class = publishers.PulseTestPublisher

    # Handle on the consumer subprocess, if one is running.
    proc = None

    # Polling periods (seconds) and retry budgets for the wait helpers.
    QUEUE_CHECK_PERIOD = 0.05
    QUEUE_CHECK_ATTEMPTS = 4000
    QUEUE_RECORD_CHECK_PERIOD = 0.1
    QUEUE_RECORD_CHECK_ATTEMPTS = 50
    PUBLISHER_CONNECT_ATTEMPTS = 50

    def setUp(self):
        """Build per-test fixtures: API client, guardian, clean DB, users."""
        global pulse_cfg
        self.proc = None
        self.publisher = None
        self.management_api = PulseManagementAPI(
            management_url='http://{}:{}/api'.format(
                pulse_cfg['host'], pulse_cfg['management_port']),
            user=pulse_cfg['user'],
            password=pulse_cfg['password']
        )
        self.guardian = PulseGuardian(self.management_api,
                                      warn_queue_size=TEST_WARN_SIZE,
                                      del_queue_size=TEST_DELETE_SIZE,
                                      emails=False)

        # Hack in a test config.
        dbinit.pulse_management = self.management_api
        dbinit.init_and_clear_db()

        self.consumer_cfg = pulse_cfg.copy()
        self.consumer_cfg['applabel'] = str(uuid.uuid1())

        # Configure/create the test user to be used for message consumption.
        self.consumer_cfg['user'] = CONSUMER_USER
        self.consumer_cfg['password'] = CONSUMER_PASSWORD

        self.user = User.new_user(email=CONSUMER_EMAIL, admin=False)
        db_session.add(self.user)
        db_session.commit()

        self.pulse_user = PulseUser.new_user(
            username=CONSUMER_USER,
            password=CONSUMER_PASSWORD,
            owner=self.user,
            management_api=self.management_api)
        db_session.add(self.pulse_user)
        db_session.commit()

    def tearDown(self):
        """Kill any leftover consumer process and delete all test queues."""
        self._terminate_consumer_proc()  # Just in case.
        for queue in Queue.query.all():
            self.management_api.delete_queue(vhost=DEFAULT_RABBIT_VHOST,
                                             queue=queue.name)

    def _build_message(self, msg_id):
        """Return a TestMessage carrying ``msg_id`` in its data payload."""
        msg = TestMessage()
        msg.set_data('id', msg_id)
        return msg

    def _create_publisher(self, create_exchange=True):
        """Create ``self.publisher``; optionally publish an initial message.

        Publishing the first message forces exchange creation.  The
        publish is retried on socket errors since the broker may not be
        reachable immediately; the last error is re-raised if all
        attempts fail.
        """
        self.publisher = self.publisher_class(**pulse_cfg)

        if create_exchange:
            attempts = 0
            exc = None
            while attempts < self.PUBLISHER_CONNECT_ATTEMPTS:
                attempts += 1
                if attempts > 1:
                    time.sleep(0.1)
                try:
                    self.publisher.publish(self._build_message(0))
                except socket.error as e:
                    exc = e
                else:
                    exc = None
                    break
            if exc:
                raise exc

    def _create_consumer_proc(self, durable=False):
        """Start a consumer subprocess that creates (and holds) a queue."""
        self.proc = ConsumerSubprocess(self.consumer_class, self.consumer_cfg,
                                       durable)
        self.proc.start()

    def _terminate_consumer_proc(self):
        """Terminate and join the consumer subprocess, if one exists."""
        if self.proc:
            self.proc.terminate()
            self.proc.join()
            self.proc = None

    def _create_passive_consumer(self):
        """Return a consumer that does not connect to the broker.

        Used only to compute the queue name and query queue existence.
        """
        cfg = self.consumer_cfg.copy()
        cfg['connect'] = False
        # Bug fix: the original constructed the consumer from
        # self.consumer_cfg, silently discarding the connect=False
        # override prepared just above.
        consumer = self.consumer_class(**cfg)
        consumer.configure(topic='#', callback=lambda x, y: None)
        return consumer

    def _wait_for_queue(self, queue_should_exist=True):
        """Wait until the queue has been created (or deleted, if
        ``queue_should_exist`` is False) by the consumer process, then
        assert the final state."""
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_CHECK_PERIOD)
            if consumer.queue_exists() == queue_should_exist:
                break
        self.assertEqual(consumer.queue_exists(), queue_should_exist)

    def _wait_for_queue_record(self):
        """Wait until one or more queues have been added to the database.

        Repeatedly runs the guardian's monitor pass, which is what
        records queues in the database.
        """
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_RECORD_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
            self.guardian.monitor_queues(self.management_api.queues())
            if Queue.query.filter(Queue.name == consumer.queue_name).first():
                break

    def test_abnormal_queue_name(self):
        """A queue whose name doesn't follow convention gets no owner."""
        self.consumer_class = AbnormalQueueConsumer
        # Use account with full permissions.
        self.consumer_cfg['user'] = pulse_cfg['user']
        self.consumer_cfg['password'] = pulse_cfg['password']
        self._create_publisher()
        self._create_consumer_proc()
        self._wait_for_queue()
        self._wait_for_queue_record()
        queue = Queue.query.filter(
            Queue.name == AbnormalQueueConsumer.QUEUE_NAME).first()
        owner = queue.owner
        # Queue is not durable and will be cleaned up when consumer process
        # exits; delete it from the queue to avoid assertion failure in
        # tearDown().
        self._terminate_consumer_proc()
        self._wait_for_queue(False)
        db_session.delete(queue)
        db_session.commit()
        self.assertEqual(owner, None)

    def test_warning(self):
        """Queues over the warning threshold are warned, not deleted."""
        self._create_publisher()
        self._create_consumer_proc(durable=True)
        self._wait_for_queue()
        self._wait_for_queue_record()
        self._terminate_consumer_proc()

        # Queue should still exist.
        self._wait_for_queue()

        # Get the queue's object.
        db_session.refresh(self.pulse_user)

        # Queue multiple messages while no consumer exists.
        for i in xrange(self.guardian.warn_queue_size + 1):
            msg = self._build_message(i)
            self.publisher.publish(msg)

        # Wait for messages to be taken into account and get the warned
        # messages, if any.
        for i in xrange(10):
            time.sleep(0.3)
            queues_to_warn = set(
                q_data['name'] for q_data in self.management_api.queues()
                if self.guardian.warn_queue_size
                < q_data['messages_ready'] <= self.guardian.del_queue_size)
            if queues_to_warn:
                break

        # Test that no queue has been warned at the beginning of the process.
        self.assertTrue(not any(q.warned for q in self.pulse_user.queues))
        # ... but some queues should be now.
        self.assertTrue(len(queues_to_warn) > 0)

        # Monitor the queues; this should detect queues that should be warned.
        self.guardian.monitor_queues(self.management_api.queues())

        # Refresh the user's queues state.
        db_session.refresh(self.pulse_user)

        # Test that the queues that had to be "warned" were.
        # Bug fix: queues_to_warn holds name strings, so compare q.name
        # rather than the Queue ORM object (the original comparison never
        # matched, leaving all() vacuously true).
        self.assertTrue(all(q.warned for q in self.pulse_user.queues
                            if q.name in queues_to_warn))

        # The queues that needed to be warned haven't been deleted.
        queues_to_warn_bis = set(
            q_data['name'] for q_data in self.management_api.queues()
            if self.guardian.warn_queue_size
            < q_data['messages_ready'] <= self.guardian.del_queue_size)
        self.assertEqual(queues_to_warn_bis, queues_to_warn)

    def test_delete(self):
        """Queues over the deletion threshold are deleted by the guardian."""
        self._create_publisher()
        self._create_consumer_proc(durable=True)
        self._wait_for_queue()
        self._wait_for_queue_record()
        self._terminate_consumer_proc()

        # Queue should still exist.
        self._wait_for_queue()

        # Get the queue's object
        db_session.refresh(self.pulse_user)
        self.assertTrue(len(self.pulse_user.queues) > 0)

        # Queue multiple messages while no consumer exists.
        for i in xrange(self.guardian.del_queue_size + 1):
            msg = self._build_message(i)
            self.publisher.publish(msg)

        # Wait some time for published messages to be taken into account.
        for i in xrange(10):
            time.sleep(0.3)
            queues_to_delete = [
                q_data['name'] for q_data in self.management_api.queues()
                if q_data['messages_ready'] > self.guardian.del_queue_size]
            if queues_to_delete:
                break

        # Test that there are some queues that should be deleted.
        self.assertTrue(len(queues_to_delete) > 0)

        # Setting up a callback to capture deleted queues
        deleted_queues = []

        def on_delete(queue):
            deleted_queues.append(queue)
        self.guardian.on_delete = on_delete

        # Monitor the queues; this should create the queue object and assign
        # it to the user.
        for i in xrange(20):
            self.guardian.monitor_queues(self.management_api.queues())
            time.sleep(0.2)

        # Test that the queues that had to be deleted were deleted...
        # Bug fix: queues() yields dicts, so compare each entry's name to
        # the list of names (the original compared dicts against strings,
        # which made the assertion vacuously true).
        self.assertTrue(not any(q_data['name'] in queues_to_delete
                                for q_data in self.management_api.queues()))
        # And that they were deleted by guardian...
        self.assertEqual(sorted(queues_to_delete), sorted(deleted_queues))
        # And that no queue has overgrown.
        queues_to_delete = [
            q_data['name'] for q_data in self.management_api.queues()
            if q_data['messages_ready'] > self.guardian.del_queue_size]
        self.assertTrue(len(queues_to_delete) == 0)