def test_delete(self):
    """Queues that grow past ``del_queue_size`` must be deleted by the guardian."""
    self._create_publisher()
    self._create_consumer_proc(durable=True)
    self._wait_for_queue()
    self._wait_for_queue_record()
    self._terminate_consumer_proc()

    # Queue should still exist (it was declared durable).
    self._wait_for_queue()

    # Get the queue's object.
    db_session.refresh(self.pulse_user)
    self.assertTrue(len(self.pulse_user.queues) > 0)

    # Queue multiple messages while no consumer exists.
    for i in xrange(self.guardian.del_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait some time for published messages to be taken into account.
    for i in xrange(10):
        time.sleep(0.3)
        queues_to_delete = [q_data['name']
                            for q_data in self.management_api.queues()
                            if q_data['messages_ready'] > self.guardian.del_queue_size]
        if queues_to_delete:
            break

    # Test that there are some queues that should be deleted.
    self.assertTrue(len(queues_to_delete) > 0)

    # Setting up a callback to capture deleted queues.
    deleted_queues = []

    def on_delete(queue):
        deleted_queues.append(queue)
    self.guardian.on_delete = on_delete

    # Monitor the queues; this should create the queue object and assign
    # it to the user.
    for i in xrange(20):
        self.guardian.monitor_queues(self.management_api.queues())
        time.sleep(0.2)

    # Test that the queues that had to be deleted were deleted...
    # BUG FIX: queues() returns dicts while queues_to_delete holds names;
    # the old membership test compared dicts to strings and was vacuously
    # true.  Compare names to names instead.
    remaining = [q_data['name'] for q_data in self.management_api.queues()]
    self.assertTrue(not any(q in remaining for q in queues_to_delete))

    # And that they were deleted by guardian...
    self.assertEqual(sorted(queues_to_delete), sorted(deleted_queues))

    # And that no queue has overgrown.
    queues_to_delete = [q_data['name']
                        for q_data in self.management_api.queues()
                        if q_data['messages_ready'] > self.guardian.del_queue_size]
    self.assertTrue(len(queues_to_delete) == 0)
def test_delete_skip_unbounded(self):
    """Overgrown queues flagged as unbound must NOT be deleted by the guardian."""
    self._setup_queue()

    # Queue should still exist.
    self._wait_for_queue()

    # Get the queue's object.
    db_session.refresh(self.rabbitmq_account)
    self.assertGreater(len(self.rabbitmq_account.queues), 0)

    # Set queues as unbound so they won't be deleted.
    for queue in self.rabbitmq_account.queues:
        queue.unbound = 1

    # Queue multiple messages while no consumer exists.
    for i in range(self.guardian.del_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait some time for published messages to be taken into account.
    for i in range(100):
        time.sleep(0.3)
        queues_to_delete = [
            q_data['name'] for q_data in pulse_management.queues()
            if q_data['messages_ready'] > self.guardian.del_queue_size
        ]
        if queues_to_delete:
            break

    # Test that there are some queues that would normally be deleted.
    self.assertGreater(len(queues_to_delete), 0)

    # Setting up a callback to capture deleted queues.
    deleted_queues = []

    def on_delete(queue):
        deleted_queues.append(queue)
    self.guardian.on_delete = on_delete

    # Run through the code that decides whether to delete a queue
    # that has grown too large.
    # In this case, it should run the check and decide to not delete
    # any queues because they are unbound.
    self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                 pulse_management.bindings(vhost='/'))

    # Test that none of the queues were deleted...
    # BUG FIX: queues() returns dicts while queues_to_delete holds names;
    # the old `all(q in queues_to_delete ...)` compared dicts to strings.
    # Check instead that every overgrown queue name still exists.
    existing = [q_data['name'] for q_data in pulse_management.queues()]
    self.assertTrue(all(q in existing for q in queues_to_delete))

    # And that guardian's delete callback never fired.
    # BUG FIX: the old final assertion re-checked the pre-computed
    # queues_to_delete list (a no-op); the captured callback list is the
    # real evidence that nothing was deleted.
    self.assertEqual(len(deleted_queues), 0)
def test_delete(self):
    """Queues that grow past ``del_queue_size`` must be deleted by the guardian."""
    self._setup_queue()

    # Queue should still exist.
    self._wait_for_queue()

    # Get the queue's object.
    db_session.refresh(self.pulse_user)
    self.assertTrue(len(self.pulse_user.queues) > 0)

    # Queue multiple messages while no consumer exists.
    for i in xrange(self.guardian.del_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait some time for published messages to be taken into account.
    for i in xrange(10):
        time.sleep(0.3)
        queues_to_delete = [
            q_data['name'] for q_data in pulse_management.queues()
            if q_data['messages_ready'] > self.guardian.del_queue_size
        ]
        if queues_to_delete:
            break

    # Test that there are some queues that should be deleted.
    self.assertTrue(len(queues_to_delete) > 0)

    # Setting up a callback to capture deleted queues.
    deleted_queues = []

    def on_delete(queue):
        deleted_queues.append(queue)
    self.guardian.on_delete = on_delete

    # Monitor the queues; this should create the queue object and assign
    # it to the user.
    for i in xrange(20):
        self.guardian.monitor_queues(pulse_management.queues())
        time.sleep(0.2)

    # Test that the queues that had to be deleted were deleted...
    # BUG FIX: queues() returns dicts while queues_to_delete holds names;
    # the old membership test compared dicts to strings and was vacuously
    # true.  Compare names to names instead.
    remaining = [q_data['name'] for q_data in pulse_management.queues()]
    self.assertTrue(not any(q in remaining for q in queues_to_delete))

    # And that they were deleted by guardian...
    self.assertEqual(sorted(queues_to_delete), sorted(deleted_queues))

    # And that no queue has overgrown.
    queues_to_delete = [
        q_data['name'] for q_data in pulse_management.queues()
        if q_data['messages_ready'] > self.guardian.del_queue_size
    ]
    self.assertTrue(len(queues_to_delete) == 0)
def test_delete_skip_unbounded(self):
    """Overgrown queues flagged as unbound must NOT be deleted by the guardian."""
    self._setup_queue()

    # Queue should still exist.
    self._wait_for_queue()

    # Get the queue's object.
    db_session.refresh(self.pulse_user)
    self.assertGreater(len(self.pulse_user.queues), 0)

    # Set queues as unbound so they won't be deleted.
    for queue in self.pulse_user.queues:
        queue.unbound = 1

    # Queue multiple messages while no consumer exists.
    for i in xrange(self.guardian.del_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait some time for published messages to be taken into account.
    for i in xrange(10):
        time.sleep(0.3)
        queues_to_delete = [q_data['name']
                            for q_data in pulse_management.queues()
                            if q_data['messages_ready'] > self.guardian.del_queue_size]
        if queues_to_delete:
            break

    # Test that there are some queues that would normally be deleted.
    self.assertGreater(len(queues_to_delete), 0)

    # Setting up a callback to capture deleted queues.
    deleted_queues = []

    def on_delete(queue):
        deleted_queues.append(queue)
    self.guardian.on_delete = on_delete

    # Run through the code that decides whether to delete a queue
    # that has grown too large.
    # In this case, it should run the check and decide to not delete
    # any queues because they are unbound.
    self.guardian.monitor_queues(pulse_management.queues(),
                                 pulse_management.bindings())

    # Test that none of the queues were deleted...
    # BUG FIX: queues() returns dicts while queues_to_delete holds names;
    # the old `all(q in queues_to_delete ...)` compared dicts to strings.
    # Check instead that every overgrown queue name still exists.
    existing = [q_data['name'] for q_data in pulse_management.queues()]
    self.assertTrue(all(q in existing for q in queues_to_delete))

    # And that guardian's delete callback never fired.
    # BUG FIX: the old final assertion re-checked the pre-computed
    # queues_to_delete list (a no-op); the captured callback list is the
    # real evidence that nothing was deleted.
    self.assertEqual(len(deleted_queues), 0)
def test_warning(self):
    """Queues between ``warn_queue_size`` and ``del_queue_size`` get warned, not deleted."""
    self._create_publisher()
    self._create_consumer_proc(durable=True)
    self._wait_for_queue()
    self._wait_for_queue_record()
    self._terminate_consumer_proc()

    # Queue should still exist (it was declared durable).
    self._wait_for_queue()

    # Get the queue's object.
    db_session.refresh(self.pulse_user)

    # Queue multiple messages while no consumer exists.
    for i in xrange(self.guardian.warn_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait for messages to be taken into account and get the warned
    # messages, if any.
    for i in xrange(10):
        time.sleep(0.3)
        queues_to_warn = set(
            q_data['name'] for q_data in self.management_api.queues()
            if self.guardian.warn_queue_size
            < q_data['messages_ready'] <= self.guardian.del_queue_size)
        if queues_to_warn:
            break

    # Test that no queue has been warned at the beginning of the process.
    self.assertTrue(not any(q.warned for q in self.pulse_user.queues))

    # ... but some queues should be now.
    self.assertTrue(len(queues_to_warn) > 0)

    # Monitor the queues; this should detect queues that should be warned.
    self.guardian.monitor_queues(self.management_api.queues())

    # Refresh the user's queues state.
    db_session.refresh(self.pulse_user)

    # Test that the queues that had to be "warned" were.
    # BUG FIX: queues_to_warn holds queue *names*; the old filter tested
    # the ORM Queue object itself for membership, matched nothing, and
    # made the all() vacuously true.  Compare by name.
    self.assertTrue(all(q.warned for q in self.pulse_user.queues
                        if q.name in queues_to_warn))

    # The queues that needed to be warned haven't been deleted.
    queues_to_warn_bis = set(
        q_data['name'] for q_data in self.management_api.queues()
        if self.guardian.warn_queue_size
        < q_data['messages_ready'] <= self.guardian.del_queue_size)
    self.assertEqual(queues_to_warn_bis, queues_to_warn)
def test_delete(self):
    """Queues that grow past ``del_queue_size`` must be deleted by the guardian."""
    self._setup_queue()

    # Queue should still exist.
    self._wait_for_queue()

    # Get the queue's object.
    db_session.refresh(self.rabbitmq_account)
    self.assertGreater(len(self.rabbitmq_account.queues), 0)

    # Queue multiple messages while no consumer exists.
    for i in xrange(self.guardian.del_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait some time for published messages to be taken into account.
    for i in xrange(100):
        time.sleep(0.3)
        queues_to_delete = [q_data['name']
                            for q_data in pulse_management.queues()
                            if q_data['messages_ready'] > self.guardian.del_queue_size]
        if queues_to_delete:
            break

    # Test that there are some queues that should be deleted.
    self.assertGreater(len(queues_to_delete), 0)

    # Setting up a callback to capture deleted queues.
    deleted_queues = []

    def on_delete(queue):
        deleted_queues.append(queue)
    self.guardian.on_delete = on_delete

    # Monitor the queues; this should delete overgrown queues.
    self.guardian.monitor_queues(pulse_management.queues(),
                                 pulse_management.bindings())

    # Test that the queues that had to be deleted were deleted...
    # BUG FIX: queues() returns dicts while queues_to_delete holds names;
    # the old membership test compared dicts to strings and was vacuously
    # true.  Compare names to names instead.
    remaining = [q_data['name'] for q_data in pulse_management.queues()]
    self.assertTrue(not any(q in remaining for q in queues_to_delete))

    # And that they were deleted by guardian...
    self.assertEqual(sorted(queues_to_delete), sorted(deleted_queues))

    # And that no queue has overgrown.
    queues_to_delete = [q_data['name']
                        for q_data in pulse_management.queues()
                        if q_data['messages_ready'] > self.guardian.del_queue_size]
    self.assertEqual(len(queues_to_delete), 0)
def test_warning(self):
    """Queues between ``warn_queue_size`` and ``del_queue_size`` get warned, not deleted."""
    self._setup_queue()

    # Queue should still exist.
    self._wait_for_queue()

    # Get the queue's object.
    db_session.refresh(self.rabbitmq_account)

    # Queue multiple messages while no consumer exists.
    for i in range(self.guardian.warn_queue_size + 1):
        msg = self._build_message(i)
        self.publisher.publish(msg)

    # Wait for messages to be taken into account and get the warned
    # messages, if any.
    for i in range(100):
        time.sleep(0.3)
        queues_to_warn = set(
            q_data['name'] for q_data in pulse_management.queues()
            if self.guardian.warn_queue_size
            < q_data['messages_ready'] <= self.guardian.del_queue_size)
        if queues_to_warn:
            break

    # Test that no queue has been warned at the beginning of the process.
    self.assertTrue(
        not any(q.warned for q in self.rabbitmq_account.queues))

    # ... but some queues should be now.
    self.assertGreater(len(queues_to_warn), 0)

    # Monitor the queues; this should detect queues that should be warned.
    self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                 pulse_management.bindings(vhost='/'))

    # Refresh the user's queues state.
    db_session.refresh(self.rabbitmq_account)

    # Test that the queues that had to be "warned" were.
    # BUG FIX: queues_to_warn holds queue *names*; the old filter tested
    # the ORM Queue object itself for membership, matched nothing, and
    # made the all() vacuously true.  Compare by name.
    self.assertTrue(
        all(q.warned for q in self.rabbitmq_account.queues
            if q.name in queues_to_warn))

    # The queues that needed to be warned haven't been deleted.
    queues_to_warn_bis = set(
        q_data['name'] for q_data in pulse_management.queues()
        if self.guardian.warn_queue_size
        < q_data['messages_ready'] <= self.guardian.del_queue_size)
    self.assertEqual(queues_to_warn_bis, queues_to_warn)
def test_binding(self):
    """Test that you can get the bindings for a queue"""
    self._setup_queue()

    # Refresh the user so its queue relationship reflects the new queue.
    db_session.refresh(self.pulse_user)
    self.assertEqual(len(self.pulse_user.queues), 1)

    # Exactly one queue should be recorded in the DB.
    all_queues = Queue.query.all()
    self.assertEqual(len(all_queues), 1)

    # That queue should carry a single binding with the expected
    # routing key and exchange.
    queue_bindings = all_queues[0].bindings
    self.assertEqual(len(queue_bindings), 1)
    binding = queue_bindings[0]
    self.assertEqual(binding.routing_key, "#")
    self.assertEqual(binding.exchange, "exchange/pulse/test")
def test_binding(self):
    """Test that you can get the bindings for a queue"""
    self._setup_queue()

    # Refresh the account so its queue relationship reflects the new queue.
    db_session.refresh(self.rabbitmq_account)
    self.assertEqual(len(self.rabbitmq_account.queues), 1)

    # Exactly one queue should be recorded in the DB.
    all_queues = Queue.query.all()
    self.assertEqual(len(all_queues), 1)

    # That queue should carry a single binding with the expected
    # routing key and exchange.
    queue_bindings = all_queues[0].bindings
    self.assertEqual(len(queue_bindings), 1)
    binding = queue_bindings[0]
    self.assertEqual(binding.routing_key, "#")
    self.assertEqual(binding.exchange, "exchange/pulse/test")