Code example #1
    def setUp(self):
        global pulse_cfg

        self.proc = None
        self.publisher = None
        self.guardian = PulseGuardian(warn_queue_size=TEST_WARN_SIZE,
                                      del_queue_size=TEST_DELETE_SIZE,
                                      emails=False)

        dbinit.init_and_clear_db()

        self.consumer_cfg = pulse_cfg.copy()
        self.consumer_cfg['applabel'] = str(uuid.uuid1())
        # Configure/create the test user to be used for message consumption.
        self.consumer_cfg['user'] = CONSUMER_USER
        self.consumer_cfg['password'] = CONSUMER_PASSWORD

        self.user = User.new_user(email=CONSUMER_EMAIL, admin=False)
        # As a default owner for rabbitmq_account in some tests where an owner
        # is not provided.
        self.admin = User.new_user(email=ADMIN_EMAIL, admin=True)

        db_session.add(self.user)
        db_session.commit()

        self.rabbitmq_account = RabbitMQAccount.new_user(
            username=CONSUMER_USER,
            password=CONSUMER_PASSWORD,
            owners=self.user)

        db_session.add(self.rabbitmq_account)
        db_session.commit()
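
The setUp variants above all rely on module-level test constants defined elsewhere in runtests.py. A minimal sketch of what those definitions could look like is shown below; the values, and the exact keys in pulse_cfg, are assumptions for illustration and are not taken from the project.

# Hypothetical values for illustration; runtests.py defines its own.
TEST_WARN_SIZE = 20      # queue depth at which the guardian warns the owner
TEST_DELETE_SIZE = 30    # queue depth at which the guardian deletes the queue

CONSUMER_USER = 'guardtest'
CONSUMER_PASSWORD = 'guardtest'
CONSUMER_EMAIL = 'guardtest@example.com'
ADMIN_EMAIL = 'admin@example.com'

# Broker settings copied into each test consumer's configuration.
pulse_cfg = {
    'host': 'localhost',
    'port': 5672,
    'management_port': 15672,
    'user': 'guest',
    'password': 'guest',
}
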
Code example #2
    def setUp(self):
        global pulse_cfg

        self.proc = None
        self.publisher = None
        self.guardian = PulseGuardian(warn_queue_size=TEST_WARN_SIZE,
                                      del_queue_size=TEST_DELETE_SIZE,
                                      emails=False)

        dbinit.init_and_clear_db()

        self.consumer_cfg = pulse_cfg.copy()
        self.consumer_cfg['applabel'] = str(uuid.uuid1())
        # Configure/create the test user to be used for message consumption.
        self.consumer_cfg['user'] = CONSUMER_USER
        self.consumer_cfg['password'] = CONSUMER_PASSWORD

        self.user = User.new_user(email=CONSUMER_EMAIL, admin=False)

        db_session.add(self.user)
        db_session.commit()

        self.pulse_user = PulseUser.new_user(username=CONSUMER_USER,
                                             password=CONSUMER_PASSWORD,
                                             owner=self.user)

        db_session.add(self.pulse_user)
        db_session.commit()
Code example #3
File: runtests.py  Project: camd/pulseguardian
    def setUp(self):
        global pulse_cfg

        self.proc = None
        self.publisher = None
        self.guardian = PulseGuardian(warn_queue_size=TEST_WARN_SIZE,
                                      del_queue_size=TEST_DELETE_SIZE,
                                      emails=False)

        dbinit.init_and_clear_db()

        self.consumer_cfg = pulse_cfg.copy()
        self.consumer_cfg['applabel'] = str(uuid.uuid1())
        # Configure/create the test user to be used for message consumption.
        self.consumer_cfg['user'] = CONSUMER_USER
        self.consumer_cfg['password'] = CONSUMER_PASSWORD

        self.user = User.new_user(email=CONSUMER_EMAIL, admin=False)
        # As a default owner for pulse_user in some tests where an owner is not
        # provided.
        self.admin = User.new_user(email=ADMIN_EMAIL, admin=True)

        db_session.add(self.user)
        db_session.commit()

        self.pulse_user = PulseUser.new_user(
            username=CONSUMER_USER,
            password=CONSUMER_PASSWORD,
            owners=self.user)

        db_session.add(self.pulse_user)
        db_session.commit()
Code example #4
File: runtests.py  Project: yaoweizhen/pulseguardian
    def setUp(self):
        global pulse_cfg

        self.proc = None
        self.publisher = None
        self.management_api = PulseManagementAPI(
            management_url='http://{}:{}/api'.format(
                pulse_cfg['host'], pulse_cfg['management_port']),
            user=pulse_cfg['user'],
            password=pulse_cfg['password']
        )
        self.guardian = PulseGuardian(self.management_api,
                                      warn_queue_size=TEST_WARN_SIZE,
                                      del_queue_size=TEST_DELETE_SIZE,
                                      emails=False)

        # Hack in a test config.
        dbinit.pulse_management = self.management_api
        dbinit.init_and_clear_db()

        self.consumer_cfg = pulse_cfg.copy()
        self.consumer_cfg['applabel'] = str(uuid.uuid1())
        # Configure/create the test user to be used for message consumption.
        self.consumer_cfg['user'] = CONSUMER_USER
        self.consumer_cfg['password'] = CONSUMER_PASSWORD

        self.user = User.new_user(email=CONSUMER_EMAIL, admin=False)

        db_session.add(self.user)
        db_session.commit()

        self.pulse_user = PulseUser.new_user(
            username=CONSUMER_USER,
            password=CONSUMER_PASSWORD,
            owner=self.user,
            management_api=self.management_api)

        db_session.add(self.pulse_user)
        db_session.commit()
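
This variant constructs the management client explicitly and hands it to both PulseGuardian and dbinit, whereas the newer examples go through a module-level pulse_management helper. Outside a test, a single monitoring pass in this older style would look roughly like the sketch below; the URL and credentials are placeholders, and the one-argument monitor_queues call mirrors code example #7.

# Sketch only: one monitoring pass with the explicit management-API style.
api = PulseManagementAPI(
    management_url='http://localhost:15672/api',  # placeholder URL
    user='guest', password='guest')               # placeholder credentials
guardian = PulseGuardian(api,
                         warn_queue_size=TEST_WARN_SIZE,
                         del_queue_size=TEST_DELETE_SIZE,
                         emails=False)
# One pass: warn owners of large queues and delete queues over the limit.
guardian.monitor_queues(api.queues())
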
Code example #5
File: runtests.py  Project: camd/pulseguardian
class GuardianTest(unittest.TestCase):

    """Launches a consumer process that creates a queue then disconnects,
    and then floods the exchange with messages and checks that PulseGuardian
    warns the queue's owner and deletes the queue if it gets over the maximum
    size.
    """

    # Defaults; can be overridden for particular tests.
    consumer_class = consumers.PulseTestConsumer
    publisher_class = publishers.PulseTestPublisher

    proc = None
    QUEUE_CHECK_PERIOD = 0.05
    QUEUE_CHECK_ATTEMPTS = 4000
    QUEUE_RECORD_CHECK_PERIOD = 0.1
    QUEUE_RECORD_CHECK_ATTEMPTS = 50
    PUBLISHER_CONNECT_ATTEMPTS = 50

    def setUp(self):
        global pulse_cfg

        self.proc = None
        self.publisher = None
        self.guardian = PulseGuardian(warn_queue_size=TEST_WARN_SIZE,
                                      del_queue_size=TEST_DELETE_SIZE,
                                      emails=False)

        dbinit.init_and_clear_db()

        self.consumer_cfg = pulse_cfg.copy()
        self.consumer_cfg['applabel'] = str(uuid.uuid1())
        # Configure/create the test user to be used for message consumption.
        self.consumer_cfg['user'] = CONSUMER_USER
        self.consumer_cfg['password'] = CONSUMER_PASSWORD

        self.user = User.new_user(email=CONSUMER_EMAIL, admin=False)
        # As a default owner for pulse_user in some tests where an owner is not
        # provided.
        self.admin = User.new_user(email=ADMIN_EMAIL, admin=True)

        db_session.add(self.user)
        db_session.commit()

        self.pulse_user = PulseUser.new_user(
            username=CONSUMER_USER,
            password=CONSUMER_PASSWORD,
            owners=self.user)

        db_session.add(self.pulse_user)
        db_session.commit()

    def tearDown(self):
        self._terminate_consumer_proc()  # Just in case.
        for queue in Queue.query.all():
            pulse_management.delete_queue(vhost=DEFAULT_RABBIT_VHOST,
                                          queue=queue.name)

    def _setup_queue(self):
        """Setup a publisher, consumer and Queue, then terminate consumer"""
        self._create_publisher()
        self._create_consumer_proc(durable=True)
        self._wait_for_queue()
        self._wait_for_queue_record()
        self._terminate_consumer_proc()

    def _build_message(self, msg_id):
        msg = TestMessage()
        msg.set_data('id', msg_id)
        return msg

    def _create_publisher(self, create_exchange=True):
        self.publisher = self.publisher_class(**pulse_cfg)

        if create_exchange:
            attempts = 0
            exc = None

            while attempts < self.PUBLISHER_CONNECT_ATTEMPTS:
                attempts += 1
                if attempts > 1:
                    time.sleep(0.1)

                try:
                    self.publisher.publish(self._build_message(0))
                except socket.error as e:
                    exc = e
                else:
                    exc = None
                    break

            if exc:
                raise exc

    def _create_consumer_proc(self, durable=False):
        self.proc = ConsumerSubprocess(self.consumer_class, self.consumer_cfg,
                                       durable)
        self.proc.start()

    def _terminate_consumer_proc(self):
        if self.proc:
            self.proc.terminate()
            self.proc.join()
            self.proc = None

    def _create_passive_consumer(self):
        cfg = self.consumer_cfg.copy()
        cfg['connect'] = False
        consumer = self.consumer_class(**self.consumer_cfg)
        consumer.configure(topic='#', callback=lambda x, y: None)
        return consumer

    def _wait_for_queue(self, queue_should_exist=True):
        '''Wait until queue has been created by consumer process.'''
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_CHECK_PERIOD)
            if consumer.queue_exists() == queue_should_exist:
                break
        self.assertEqual(consumer.queue_exists(), queue_should_exist)

    def _wait_for_queue_record(self):
        '''Wait until one or more queues have been added to the database.'''
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_RECORD_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
            self.guardian.monitor_queues(pulse_management.queues(),
                                         pulse_management.bindings())
            if Queue.query.filter(Queue.name == consumer.queue_name).first():
                break

    def _wait_for_binding_record(self, queue_name, exchange_name, routing_key):
        """Wait until a binding has been added to the database"""
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_RECORD_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
            self.guardian.monitor_queues(pulse_management.queues(),
                                         pulse_management.bindings())
            if Binding.query.filter(
                Binding.queue_name == queue_name,
                Binding.exchange == exchange_name,
                Binding.routing_key == routing_key).first():
                break

    def _wait_for_binding_delete(self, queue_name, exchange_name, routing_key):
        """Wait until a binding has been removed from the database"""
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_RECORD_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
            self.guardian.clear_deleted_queues(pulse_management.queues(),
                                               pulse_management.bindings())
            if not Binding.query.filter(
                Binding.queue_name == queue_name,
                Binding.exchange == exchange_name,
                Binding.routing_key == routing_key).first():
                break

    def test_abnormal_queue_name(self):
        self.consumer_class = AbnormalQueueConsumer
        # Use account with full permissions.
        self.consumer_cfg['user'] = pulse_cfg['user']
        self.consumer_cfg['password'] = pulse_cfg['password']

        self._create_publisher()
        self._create_consumer_proc()
        self._wait_for_queue()
        self._wait_for_queue_record()

        queue = Queue.query.filter(Queue.name ==
                                   AbnormalQueueConsumer.QUEUE_NAME).first()
        owner = queue.owner

        # Queue is not durable and will be cleaned up when the consumer
        # process exits; delete its record from the database to avoid an
        # error in tearDown().
        self._terminate_consumer_proc()
        self._wait_for_queue(False)
        db_session.delete(queue)
        db_session.commit()

        self.assertEqual(owner, None)

    def test_warning(self):
        self._setup_queue()

        # Queue should still exist.
        self._wait_for_queue()

        # Get the queue's object.
        db_session.refresh(self.pulse_user)

        # Queue multiple messages while no consumer exists.
        for i in xrange(self.guardian.warn_queue_size + 1):
            msg = self._build_message(i)
            self.publisher.publish(msg)

        # Wait for messages to be taken into account and get the warned
        # messages, if any.
        for i in xrange(10):
            time.sleep(0.3)
            queues_to_warn = set(q_data['name'] for q_data
                                 in pulse_management.queues()
                                 if self.guardian.warn_queue_size
                                 < q_data['messages_ready']
                                 <= self.guardian.del_queue_size)
            if queues_to_warn:
                break

        # Test that no queue has been warned at the beginning of the process.
        self.assertTrue(not any(q.warned for q in self.pulse_user.queues))
        # ... but some queues should be now.
        self.assertGreater(len(queues_to_warn), 0)

        # Monitor the queues; this should detect queues that should be warned.
        self.guardian.monitor_queues(pulse_management.queues(),
                                     pulse_management.bindings())

        # Refresh the user's queues state.
        db_session.refresh(self.pulse_user)

        # Test that the queues that had to be "warned" were.
        self.assertTrue(all(q.warned for q in self.pulse_user.queues
                            if q in queues_to_warn))

        # The queues that needed to be warned haven't been deleted.
        queues_to_warn_bis = set(q_data['name'] for q_data
                                 in pulse_management.queues()
                                 if self.guardian.warn_queue_size
                                    < q_data['messages_ready']
                                    <= self.guardian.del_queue_size)
        self.assertEqual(queues_to_warn_bis, queues_to_warn)

    def test_delete(self):
        self._setup_queue()

        # Queue should still exist.
        self._wait_for_queue()

        # Get the queue's object
        db_session.refresh(self.pulse_user)

        self.assertGreater(len(self.pulse_user.queues), 0)

        # Queue multiple messages while no consumer exists.
        for i in xrange(self.guardian.del_queue_size + 1):
            msg = self._build_message(i)
            self.publisher.publish(msg)

        # Wait some time for published messages to be taken into account.
        for i in xrange(10):
            time.sleep(0.3)
            queues_to_delete = [q_data['name'] for q_data
                                in pulse_management.queues()
                                if q_data['messages_ready']
                                   > self.guardian.del_queue_size]
            if queues_to_delete:
                break

        # Test that there are some queues that should be deleted.
        self.assertGreater(len(queues_to_delete), 0)

        # Setting up a callback to capture deleted queues
        deleted_queues = []
        def on_delete(queue):
            deleted_queues.append(queue)
        self.guardian.on_delete = on_delete

        # Monitor the queues; this should delete overgrown queues
        self.guardian.monitor_queues(pulse_management.queues(),
                                     pulse_management.bindings())

        # Test that the queues that had to be deleted were deleted...
        self.assertTrue(not any(q in queues_to_delete for q
                                in pulse_management.queues()))
        # And that they were deleted by guardian...
        self.assertEqual(sorted(queues_to_delete), sorted(deleted_queues))
        # And that no queue has overgrown.
        queues_to_delete = [q_data['name'] for q_data
                            in pulse_management.queues()
                            if q_data['messages_ready'] >
                               self.guardian.del_queue_size]
        self.assertEqual(len(queues_to_delete), 0)

    def test_delete_skip_unbounded(self):
        self._setup_queue()

        # Queue should still exist.
        self._wait_for_queue()

        # Get the queue's object
        db_session.refresh(self.pulse_user)

        self.assertGreater(len(self.pulse_user.queues), 0)

        # set queues as unbound so they won't be deleted
        for queue in self.pulse_user.queues:
            queue.unbound = 1

        # Queue multiple messages while no consumer exists.
        for i in xrange(self.guardian.del_queue_size + 1):
            msg = self._build_message(i)
            self.publisher.publish(msg)

        # Wait some time for published messages to be taken into account.
        for i in xrange(10):
            time.sleep(0.3)
            queues_to_delete = [q_data['name'] for q_data
                                in pulse_management.queues()
                                if q_data['messages_ready']
                                   > self.guardian.del_queue_size]
            if queues_to_delete:
                break

        # Test that there are some queues that should be deleted.
        self.assertGreater(len(queues_to_delete), 0)

        # Setting up a callback to capture deleted queues
        deleted_queues = []

        def on_delete(queue):
            deleted_queues.append(queue)
        self.guardian.on_delete = on_delete

        # Run through the code that decides whether to delete a queue
        # that has grown too large.
        # In this case, it should run the check and decide to not delete
        # any queues.
        self.guardian.monitor_queues(pulse_management.queues(),
                                     pulse_management.bindings())

        # Test that none of the queues were deleted...
        self.assertTrue(all(q in queues_to_delete for q
                            in pulse_management.queues()))

        # And that they were not deleted by guardian...
        self.assertGreater(len(queues_to_delete), 0)

    def test_binding(self):
        """Test that you can get the bindings for a queue"""
        self._setup_queue()

        # Get the queue's object
        db_session.refresh(self.pulse_user)

        self.assertEqual(len(self.pulse_user.queues), 1)

        # check queue bindings in the DB
        queues = Queue.query.all()
        self.assertEqual(len(queues), 1)
        queue = queues[0]
        bindings = queue.bindings
        self.assertEqual(len(bindings), 1)
        self.assertEqual(bindings[0].routing_key, "#")
        self.assertEqual(bindings[0].exchange, "exchange/pulse/test")

    def test_create_binding_different_queue_same_exchange_routing_key(self):
        """
        Test create bindings for two queues with same exchange and routing key
        """
        self.consumer_class = consumers.PulseTestConsumer
        self._setup_queue()

        # create a second queue with the same binding
        self.consumer_class = SecondaryQueueConsumer
        self._setup_queue()

        # check queue bindings in the DB
        queues = Queue.query.all()
        self.assertEqual(len(queues), 2)
        for queue in queues:
            bindings = queue.bindings
            self.assertEqual(len(bindings), 1)
            self.assertEqual(bindings[0].routing_key, "#")
            self.assertEqual(bindings[0].exchange, "exchange/pulse/test")

    def test_add_delete_binding(self):
        """Test adding and removing a binding from a queue"""
        self._setup_queue()

        consumer = self._create_passive_consumer()
        exchange = Exchange(consumer.exchange, type='topic')
        queue = consumer._create_queue(exchange)
        bound = queue.bind(consumer.connection.channel())
        routing_key = "foo"
        bound.bind_to(exchange=consumer.exchange, routing_key=routing_key)

        self._wait_for_binding_record(queue.name, exchange.name, routing_key)

        def test_bindings(exp_routing_keys):
            queues = Queue.query.all()
            self.assertEqual(len(queues), 1)
            self.assertEqual(len(queues[0].bindings), len(exp_routing_keys))
            db_queue = queues[0]

            mgmt_bindings = pulse_management.queue_bindings('/', db_queue.name)
            mgmt_routing_keys = {x["routing_key"] for x in mgmt_bindings}
            self.assertEqual(mgmt_routing_keys, exp_routing_keys)

            db_routing_keys = {x.routing_key for x in db_queue.bindings}
            self.assertEqual(db_routing_keys, exp_routing_keys)

        test_bindings({"#", "foo"})

        # test deleting one of the bindings
        bound.unbind_from(exchange=exchange, routing_key=routing_key)
        self._wait_for_binding_delete(queue.name, exchange.name, routing_key)

        test_bindings({"#"})
Code example #6
class GuardianTest(unittest.TestCase):
    """Launches a consumer process that creates a queue then disconnects,
    and then floods the exchange with messages and checks that PulseGuardian
    warns the queue's owner and deletes the queue if it gets over the maximum
    size.
    """

    # Defaults; can be overridden for particular tests.
    consumer_class = consumers.PulseTestConsumer
    publisher_class = publishers.PulseTestPublisher

    proc = None
    QUEUE_CHECK_PERIOD = 0.05
    QUEUE_CHECK_ATTEMPTS = 4000
    QUEUE_RECORD_CHECK_PERIOD = 0.1
    QUEUE_RECORD_CHECK_ATTEMPTS = 50
    PUBLISHER_CONNECT_ATTEMPTS = 50

    def setUp(self):
        global pulse_cfg

        self.proc = None
        self.publisher = None
        self.guardian = PulseGuardian(warn_queue_size=TEST_WARN_SIZE,
                                      del_queue_size=TEST_DELETE_SIZE,
                                      emails=False)

        dbinit.init_and_clear_db()

        self.consumer_cfg = pulse_cfg.copy()
        self.consumer_cfg['applabel'] = str(uuid.uuid1())
        # Configure/create the test user to be used for message consumption.
        self.consumer_cfg['user'] = CONSUMER_USER
        self.consumer_cfg['password'] = CONSUMER_PASSWORD

        self.user = User.new_user(email=CONSUMER_EMAIL, admin=False)
        # As a default owner for rabbitmq_account in some tests where an owner
        # is not provided.
        self.admin = User.new_user(email=ADMIN_EMAIL, admin=True)

        db_session.add(self.user)
        db_session.commit()

        self.rabbitmq_account = RabbitMQAccount.new_user(
            username=CONSUMER_USER,
            password=CONSUMER_PASSWORD,
            owners=self.user)

        db_session.add(self.rabbitmq_account)
        db_session.commit()

    def tearDown(self):
        self._terminate_consumer_proc()  # Just in case.
        for queue in Queue.query.all():
            pulse_management.delete_queue(vhost=DEFAULT_RABBIT_VHOST,
                                          queue=queue.name)

    def _setup_queue(self):
        """Setup a publisher, consumer and Queue, then terminate consumer"""
        self._create_publisher()
        self._create_consumer_proc(durable=True)
        self._wait_for_queue()
        self._wait_for_queue_record()
        self._terminate_consumer_proc()

    def _build_message(self, msg_id):
        msg = TestMessage()
        msg.set_data('id', msg_id)
        return msg

    def _create_publisher(self, create_exchange=True):
        self.publisher = self.publisher_class(**pulse_cfg)

        if create_exchange:
            attempts = 0
            exc = None

            while attempts < self.PUBLISHER_CONNECT_ATTEMPTS:
                attempts += 1
                if attempts > 1:
                    time.sleep(0.1)

                try:
                    self.publisher.publish(self._build_message(0))
                except socket.error as e:
                    exc = e
                else:
                    exc = None
                    break

            if exc:
                raise exc

    def _create_consumer_proc(self, durable=False):
        self.proc = ConsumerSubprocess(self.consumer_class, self.consumer_cfg,
                                       durable)
        self.proc.start()

    def _terminate_consumer_proc(self):
        if self.proc:
            self.proc.terminate()
            self.proc.join()
            self.proc = None

    def _create_passive_consumer(self):
        cfg = self.consumer_cfg.copy()
        cfg['connect'] = False
        consumer = self.consumer_class(**self.consumer_cfg)
        consumer.configure(topic='#', callback=lambda x, y: None)
        return consumer

    def _wait_for_queue(self, queue_should_exist=True):
        '''Wait until queue has been created by consumer process.'''
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_CHECK_PERIOD)
            if consumer.queue_exists() == queue_should_exist:
                break
        self.assertEqual(consumer.queue_exists(), queue_should_exist)

    def _wait_for_queue_record(self):
        '''Wait until one or more queues have been added to the database.'''
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_RECORD_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
            self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                         pulse_management.bindings(vhost='/'))
            if Queue.query.filter(Queue.name == consumer.queue_name).first():
                break

    def _wait_for_binding_record(self, queue_name, exchange_name, routing_key):
        """Wait until a binding has been added to the database"""
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_RECORD_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
            self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                         pulse_management.bindings(vhost='/'))
            if Binding.query.filter(
                    Binding.queue_name == queue_name,
                    Binding.exchange == exchange_name,
                    Binding.routing_key == routing_key).first():
                break

    def _wait_for_binding_delete(self, queue_name, exchange_name, routing_key):
        """Wait until a binding has been removed from the database"""
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_RECORD_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
            self.guardian.clear_deleted_queues(
                pulse_management.queues(vhost='/'),
                pulse_management.bindings(vhost='/'))
            if not Binding.query.filter(Binding.queue_name == queue_name,
                                        Binding.exchange == exchange_name,
                                        Binding.routing_key
                                        == routing_key).first():
                break

    def test_abnormal_queue_name(self):
        self.consumer_class = AbnormalQueueConsumer
        # Use account with full permissions.
        self.consumer_cfg['user'] = pulse_cfg['user']
        self.consumer_cfg['password'] = pulse_cfg['password']

        self._create_publisher()
        self._create_consumer_proc()
        self._wait_for_queue()
        self._wait_for_queue_record()

        queue = Queue.query.filter(
            Queue.name == AbnormalQueueConsumer.QUEUE_NAME).first()
        owner = queue.owner

        # Queue is not durable and will be cleaned up when the consumer
        # process exits; delete its record from the database to avoid an
        # error in tearDown().
        self._terminate_consumer_proc()
        self._wait_for_queue(False)
        db_session.delete(queue)
        db_session.commit()

        self.assertEqual(owner, None)

    def test_reserved_queue_name(self):
        self.consumer_class = ReservedQueueConsumer
        # Use account with full permissions.
        self.consumer_cfg['user'] = pulse_cfg['user']
        self.consumer_cfg['password'] = pulse_cfg['password']

        config.reserved_users_regex = 'reserved-.*'
        try:
            self._create_publisher()
            self._create_consumer_proc()
            self._wait_for_queue()
            self._wait_for_queue_record()
        finally:
            config.reserved_users_regex = None

        queue = Queue.query.filter(
            Queue.name == ReservedQueueConsumer.QUEUE_NAME).first()

        # Queue is not durable and will be cleaned up when the consumer
        # process exits; no database record is expected for a reserved
        # queue name, so there is nothing to delete before tearDown().
        self._terminate_consumer_proc()
        self._wait_for_queue(False)

        self.assertEqual(queue, None)

    def test_warning(self):
        self._setup_queue()

        # Queue should still exist.
        self._wait_for_queue()

        # Get the queue's object.
        db_session.refresh(self.rabbitmq_account)

        # Queue multiple messages while no consumer exists.
        for i in range(self.guardian.warn_queue_size + 1):
            msg = self._build_message(i)
            self.publisher.publish(msg)

        # Wait for messages to be taken into account and get the warned
        # messages, if any.
        for i in range(100):
            time.sleep(0.3)
            queues_to_warn = set(
                q_data['name'] for q_data in pulse_management.queues()
                if self.guardian.warn_queue_size < q_data['messages_ready'] <=
                self.guardian.del_queue_size)
            if queues_to_warn:
                break

        # Test that no queue has been warned at the beginning of the process.
        self.assertTrue(not any(q.warned
                                for q in self.rabbitmq_account.queues))
        # ... but some queues should be now.
        self.assertGreater(len(queues_to_warn), 0)

        # Monitor the queues; this should detect queues that should be warned.
        self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                     pulse_management.bindings(vhost='/'))

        # Refresh the user's queues state.
        db_session.refresh(self.rabbitmq_account)

        # Test that the queues that had to be "warned" were.
        self.assertTrue(
            all(q.warned for q in self.rabbitmq_account.queues
                if q in queues_to_warn))

        # The queues that needed to be warned haven't been deleted.
        queues_to_warn_bis = set(
            q_data['name'] for q_data in pulse_management.queues()
            if self.guardian.warn_queue_size < q_data['messages_ready'] <=
            self.guardian.del_queue_size)
        self.assertEqual(queues_to_warn_bis, queues_to_warn)

    def test_delete(self):
        self._setup_queue()

        # Queue should still exist.
        self._wait_for_queue()

        # Get the queue's object
        db_session.refresh(self.rabbitmq_account)

        self.assertGreater(len(self.rabbitmq_account.queues), 0)

        # Queue multiple messages while no consumer exists.
        for i in range(self.guardian.del_queue_size + 1):
            msg = self._build_message(i)
            self.publisher.publish(msg)

        # Wait some time for published messages to be taken into account.
        for i in range(100):
            time.sleep(0.3)
            queues_to_delete = [
                q_data['name'] for q_data in pulse_management.queues()
                if q_data['messages_ready'] > self.guardian.del_queue_size
            ]
            if queues_to_delete:
                break

        # Test that there are some queues that should be deleted.
        self.assertGreater(len(queues_to_delete), 0)

        # Setting up a callback to capture deleted queues
        deleted_queues = []

        def on_delete(queue):
            deleted_queues.append(queue)

        self.guardian.on_delete = on_delete

        # Monitor the queues; this should delete overgrown queues
        self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                     pulse_management.bindings(vhost='/'))

        # Test that the queues that had to be deleted were deleted...
        self.assertTrue(not any(q in queues_to_delete
                                for q in pulse_management.queues()))
        # And that they were deleted by guardian...
        self.assertEqual(sorted(queues_to_delete), sorted(deleted_queues))
        # And that no queue has overgrown.
        queues_to_delete = [
            q_data['name'] for q_data in pulse_management.queues()
            if q_data['messages_ready'] > self.guardian.del_queue_size
        ]
        self.assertEqual(len(queues_to_delete), 0)

    def test_delete_skip_unbounded(self):
        self._setup_queue()

        # Queue should still exist.
        self._wait_for_queue()

        # Get the queue's object
        db_session.refresh(self.rabbitmq_account)

        self.assertGreater(len(self.rabbitmq_account.queues), 0)

        # set queues as unbound so they won't be deleted
        for queue in self.rabbitmq_account.queues:
            queue.unbound = 1

        # Queue multiple messages while no consumer exists.
        for i in range(self.guardian.del_queue_size + 1):
            msg = self._build_message(i)
            self.publisher.publish(msg)

        # Wait some time for published messages to be taken into account.
        for i in range(100):
            time.sleep(0.3)
            queues_to_delete = [
                q_data['name'] for q_data in pulse_management.queues()
                if q_data['messages_ready'] > self.guardian.del_queue_size
            ]
            if queues_to_delete:
                break

        # Test that there are some queues that should be deleted.
        self.assertGreater(len(queues_to_delete), 0)

        # Setting up a callback to capture deleted queues
        deleted_queues = []

        def on_delete(queue):
            deleted_queues.append(queue)

        self.guardian.on_delete = on_delete

        # Run through the code that decides whether to delete a queue
        # that has grown too large.
        # In this case, it should run the check and decide to not delete
        # any queues.
        self.guardian.monitor_queues(pulse_management.queues(vhost='/'),
                                     pulse_management.bindings(vhost='/'))

        # Test that none of the queues were deleted...
        self.assertTrue(
            all(q in queues_to_delete for q in pulse_management.queues()))

        # And that they were not deleted by guardian...
        self.assertGreater(len(queues_to_delete), 0)

    def test_binding(self):
        """Test that you can get the bindings for a queue"""
        self._setup_queue()

        # Get the queue's object
        db_session.refresh(self.rabbitmq_account)

        self.assertEqual(len(self.rabbitmq_account.queues), 1)

        # check queue bindings in the DB
        queues = Queue.query.all()
        self.assertEqual(len(queues), 1)
        queue = queues[0]
        bindings = queue.bindings
        self.assertEqual(len(bindings), 1)
        self.assertEqual(bindings[0].routing_key, "#")
        self.assertEqual(bindings[0].exchange, "exchange/pulse/test")

    def test_create_binding_different_queue_same_exchange_routing_key(self):
        """
        Test create bindings for two queues with same exchange and routing key
        """
        self.consumer_class = consumers.PulseTestConsumer
        self._setup_queue()

        # create a second queue with the same binding
        self.consumer_class = SecondaryQueueConsumer
        self._setup_queue()

        # check queue bindings in the DB
        queues = Queue.query.all()
        self.assertEqual(len(queues), 2)
        for queue in queues:
            bindings = queue.bindings
            self.assertEqual(len(bindings), 1)
            self.assertEqual(bindings[0].routing_key, "#")
            self.assertEqual(bindings[0].exchange, "exchange/pulse/test")

    def test_add_delete_binding(self):
        """Test adding and removing a binding from a queue"""
        self._setup_queue()

        consumer = self._create_passive_consumer()
        exchange = Exchange(consumer.exchange, type='topic')
        queue = consumer._create_queue(exchange)
        bound = queue.bind(consumer.connection.channel())
        routing_key = "foo"
        bound.bind_to(exchange=consumer.exchange, routing_key=routing_key)

        self._wait_for_binding_record(queue.name, exchange.name, routing_key)

        def test_bindings(exp_routing_keys):
            queues = Queue.query.all()
            self.assertEqual(len(queues), 1)
            self.assertEqual(len(queues[0].bindings), len(exp_routing_keys))
            db_queue = queues[0]

            mgmt_bindings = pulse_management.queue_bindings('/', db_queue.name)
            mgmt_routing_keys = {x["routing_key"] for x in mgmt_bindings}
            self.assertEqual(mgmt_routing_keys, exp_routing_keys)

            db_routing_keys = {x.routing_key for x in db_queue.bindings}
            self.assertEqual(db_routing_keys, exp_routing_keys)

        test_bindings({"#", "foo"})

        # test deleting one of the bindings
        bound.unbind_from(exchange=exchange, routing_key=routing_key)
        self._wait_for_binding_delete(queue.name, exchange.name, routing_key)

        test_bindings({"#"})
Code example #7
File: runtests.py  Project: yaoweizhen/pulseguardian
class GuardianTest(unittest.TestCase):

    """Launches a consumer process that creates a queue then disconnects,
    and then floods the exchange with messages and checks that PulseGuardian
    warns the queue's owner and deletes the queue if it gets over the maximum
    size.
    """

    # Defaults; can be overridden for particular tests.
    consumer_class = consumers.PulseTestConsumer
    publisher_class = publishers.PulseTestPublisher

    proc = None
    QUEUE_CHECK_PERIOD = 0.05
    QUEUE_CHECK_ATTEMPTS = 4000
    QUEUE_RECORD_CHECK_PERIOD = 0.1
    QUEUE_RECORD_CHECK_ATTEMPTS = 50
    PUBLISHER_CONNECT_ATTEMPTS = 50

    def setUp(self):
        global pulse_cfg

        self.proc = None
        self.publisher = None
        self.management_api = PulseManagementAPI(
            management_url='http://{}:{}/api'.format(
                pulse_cfg['host'], pulse_cfg['management_port']),
            user=pulse_cfg['user'],
            password=pulse_cfg['password']
        )
        self.guardian = PulseGuardian(self.management_api,
                                      warn_queue_size=TEST_WARN_SIZE,
                                      del_queue_size=TEST_DELETE_SIZE,
                                      emails=False)

        # Hack in a test config.
        dbinit.pulse_management = self.management_api
        dbinit.init_and_clear_db()

        self.consumer_cfg = pulse_cfg.copy()
        self.consumer_cfg['applabel'] = str(uuid.uuid1())
        # Configure/create the test user to be used for message consumption.
        self.consumer_cfg['user'] = CONSUMER_USER
        self.consumer_cfg['password'] = CONSUMER_PASSWORD

        self.user = User.new_user(email=CONSUMER_EMAIL, admin=False)

        db_session.add(self.user)
        db_session.commit()

        self.pulse_user = PulseUser.new_user(
            username=CONSUMER_USER,
            password=CONSUMER_PASSWORD,
            owner=self.user,
            management_api=self.management_api)

        db_session.add(self.pulse_user)
        db_session.commit()

    def tearDown(self):
        self._terminate_consumer_proc()  # Just in case.
        for queue in Queue.query.all():
            self.management_api.delete_queue(vhost=DEFAULT_RABBIT_VHOST,
                                             queue=queue.name)

    def _build_message(self, msg_id):
        msg = TestMessage()
        msg.set_data('id', msg_id)
        return msg

    def _create_publisher(self, create_exchange=True):
        self.publisher = self.publisher_class(**pulse_cfg)

        if create_exchange:
            attempts = 0
            exc = None

            while attempts < self.PUBLISHER_CONNECT_ATTEMPTS:
                attempts += 1
                if attempts > 1:
                    time.sleep(0.1)

                try:
                    self.publisher.publish(self._build_message(0))
                except socket.error as e:
                    exc = e
                else:
                    exc = None
                    break

            if exc:
                raise exc

    def _create_consumer_proc(self, durable=False):
        self.proc = ConsumerSubprocess(self.consumer_class, self.consumer_cfg,
                                       durable)
        self.proc.start()

    def _terminate_consumer_proc(self):
        if self.proc:
            self.proc.terminate()
            self.proc.join()
            self.proc = None

    def _create_passive_consumer(self):
        cfg = self.consumer_cfg.copy()
        cfg['connect'] = False
        consumer = self.consumer_class(**self.consumer_cfg)
        consumer.configure(topic='#', callback=lambda x, y: None)
        return consumer

    def _wait_for_queue(self, queue_should_exist=True):
        '''Wait until queue has been created by consumer process.'''
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_CHECK_PERIOD)
            if consumer.queue_exists() == queue_should_exist:
                break
        self.assertEqual(consumer.queue_exists(), queue_should_exist)

    def _wait_for_queue_record(self):
        '''Wait until one or more queues have been added to the database.'''
        consumer = self._create_passive_consumer()
        attempts = 0
        while attempts < self.QUEUE_RECORD_CHECK_ATTEMPTS:
            attempts += 1
            if attempts > 1:
                time.sleep(self.QUEUE_RECORD_CHECK_PERIOD)
            self.guardian.monitor_queues(self.management_api.queues())
            if Queue.query.filter(Queue.name == consumer.queue_name).first():
                break

    def test_abnormal_queue_name(self):
        self.consumer_class = AbnormalQueueConsumer
        # Use account with full permissions.
        self.consumer_cfg['user'] = pulse_cfg['user']
        self.consumer_cfg['password'] = pulse_cfg['password']

        self._create_publisher()
        self._create_consumer_proc()
        self._wait_for_queue()
        self._wait_for_queue_record()

        queue = Queue.query.filter(Queue.name ==
                                   AbnormalQueueConsumer.QUEUE_NAME).first()
        owner = queue.owner

        # Queue is not durable and will be cleaned up when the consumer
        # process exits; delete its record from the database to avoid an
        # error in tearDown().
        self._terminate_consumer_proc()
        self._wait_for_queue(False)
        db_session.delete(queue)
        db_session.commit()

        self.assertEqual(owner, None)

    def test_warning(self):
        self._create_publisher()
        self._create_consumer_proc(durable=True)
        self._wait_for_queue()
        self._wait_for_queue_record()
        self._terminate_consumer_proc()

        # Queue should still exist.
        self._wait_for_queue()

        # Get the queue's object.
        db_session.refresh(self.pulse_user)

        # Queue multiple messages while no consumer exists.
        for i in xrange(self.guardian.warn_queue_size + 1):
            msg = self._build_message(i)
            self.publisher.publish(msg)

        # Wait for messages to be taken into account and get the warned
        # messages, if any.
        for i in xrange(10):
            time.sleep(0.3)
            queues_to_warn = set(q_data['name'] for q_data
                                 in self.management_api.queues()
                                 if self.guardian.warn_queue_size
                                 < q_data['messages_ready']
                                 <= self.guardian.del_queue_size)
            if queues_to_warn:
                break

        # Test that no queue has been warned at the beginning of the process.
        self.assertTrue(not any(q.warned for q in self.pulse_user.queues))
        # ... but some queues should be now.
        self.assertTrue(len(queues_to_warn) > 0)

        # Monitor the queues; this should detect queues that should be warned.
        self.guardian.monitor_queues(self.management_api.queues())

        # Refresh the user's queues state.
        db_session.refresh(self.pulse_user)

        # Test that the queues that had to be "warned" were.
        self.assertTrue(all(q.warned for q in self.pulse_user.queues
                            if q in queues_to_warn))

        # The queues that needed to be warned haven't been deleted.
        queues_to_warn_bis = set(q_data['name'] for q_data
                                 in self.management_api.queues()
                                 if self.guardian.warn_queue_size
                                    < q_data['messages_ready']
                                    <= self.guardian.del_queue_size)
        self.assertEqual(queues_to_warn_bis, queues_to_warn)

    def test_delete(self):
        self._create_publisher()
        self._create_consumer_proc(durable=True)
        self._wait_for_queue()
        self._wait_for_queue_record()
        self._terminate_consumer_proc()

        # Queue should still exist.
        self._wait_for_queue()

        # Get the queue's object
        db_session.refresh(self.pulse_user)

        self.assertTrue(len(self.pulse_user.queues) > 0)

        # Queue multiple messages while no consumer exists.
        for i in xrange(self.guardian.del_queue_size + 1):
            msg = self._build_message(i)
            self.publisher.publish(msg)

        # Wait some time for published messages to be taken into account.
        for i in xrange(10):
            time.sleep(0.3)
            queues_to_delete = [q_data['name'] for q_data
                                in self.management_api.queues()
                                if q_data['messages_ready']
                                   > self.guardian.del_queue_size]
            if queues_to_delete:
                break

        # Test that there are some queues that should be deleted.
        self.assertTrue(len(queues_to_delete) > 0)

        # Setting up a callback to capture deleted queues
        deleted_queues = []
        def on_delete(queue):
            deleted_queues.append(queue)
        self.guardian.on_delete = on_delete

        # Monitor the queues repeatedly; this should delete overgrown queues.
        for i in xrange(20):
            self.guardian.monitor_queues(self.management_api.queues())
            time.sleep(0.2)

        # Test that the queues that had to be deleted were deleted...
        self.assertTrue(not any(q in queues_to_delete for q
                                in self.management_api.queues()))
        # And that they were deleted by guardian...
        self.assertEqual(sorted(queues_to_delete), sorted(deleted_queues))
        # And that no queue has overgrown.
        queues_to_delete = [q_data['name'] for q_data
                            in self.management_api.queues()
                            if q_data['messages_ready'] >
                               self.guardian.del_queue_size]
        self.assertTrue(len(queues_to_delete) == 0)