Example #1
0
    def test_multiple_queues_with_same_exchange_and_routing_key(
            self, container_factory, entrypoint_tracker, rabbit_manager,
            exchange, wait_for_result, publish_message, counter, rabbit_config,
            backoff_count, fast_backoff):
        """ Verify that backoff behaviour stays isolated per queue when two
        queues are bound to the same exchange with the same routing key.
        """
        # two queues bound identically, so one publish reaches both services
        queue_one = Queue("one", exchange=exchange, routing_key="message")
        queue_two = Queue("two", exchange=exchange, routing_key="message")

        class ServiceOne(object):
            name = "service_one"

            @consume(queue_one)
            def method(self, payload):
                # keep backing off until the configured attempt count is hit
                if counter["one"].increment() <= backoff_count:
                    raise Backoff()
                return "one"

        class ServiceTwo(object):
            name = "service_two"

            @consume(queue_two)
            def method(self, payload):
                # succeeds first time; must never see service_one's backoffs
                counter["two"].increment()
                return "two"

        container_one = container_factory(ServiceOne, rabbit_config)
        container_two = container_factory(ServiceTwo, rabbit_config)
        container_one.start()
        container_two.start()

        # a single publish; wait until both services have produced a result
        with entrypoint_waiter(
            container_one, 'method', callback=wait_for_result
        ) as result_one, entrypoint_waiter(
            container_two, 'method', callback=wait_for_result
        ) as result_two:
            publish_message(exchange, "msg", routing_key="message")

        # every backoff queue should have drained by now
        vhost = rabbit_config['vhost']
        for delay in fast_backoff:
            queue_name = get_backoff_queue_name(delay)
            assert rabbit_manager.get_queue(vhost, queue_name)['messages'] == 0

        # both service queues should be empty as well
        for service_queue in (queue_one, queue_two):
            info = rabbit_manager.get_queue(vhost, service_queue.name)
            assert info['messages'] == 0

        assert result_one.get() == "one"
        assert result_two.get() == "two"

        # backoff from service_one not seen by service_two
        assert counter['one'] == backoff_count + 1
        assert counter['two'] == 1
Example #2
0
    def test_queues_removed(self, container, publish_message, exchange, queue,
                            counter, rabbit_config, rabbit_manager,
                            fast_expire):
        """ Backoff queues should be deleted once their messages have been
        redelivered and the queue TTL has elapsed.
        """
        delays = [50, 100, 100, 100, 50]

        def all_expired(worker_ctx, res, exc_info):
            # done once every published message has expired its backoff;
            # only count workers that failed with Backoff.Expired
            expired = issubclass(exc_info[0], Backoff.Expired)
            if expired and counter.increment() == len(delays):
                return True

        # raise several backoffs, creating one queue per unique delay
        with entrypoint_waiter(container, 'backoff', callback=all_expired):
            for delay in delays:
                publish_message(exchange, delay, routing_key=queue.routing_key)

        # give every backoff queue enough time to expire
        time.sleep((max(delays) + fast_expire) / 1000.0)

        # each unique delay's queue should now be gone (API returns 404)
        vhost = rabbit_config['vhost']
        for unique_delay in set(delays):
            with pytest.raises(HTTPError) as raised:
                rabbit_manager.get_queue(
                    vhost, get_backoff_queue_name(unique_delay))
            assert raised.value.response.status_code == 404
Example #3
0
 def check_queue(delay):
     # the backoff queue for ``delay`` should hold one message per publish
     queue_name = get_backoff_queue_name(delay)
     info = rabbit_manager.get_queue(vhost, queue_name)
     assert info['messages'] == delays.count(delay)
Example #4
0
 def check_queue(delay):
     # one message per matching publish should sit on the backoff queue
     expected = delays.count(delay)
     assert queue_info(get_backoff_queue_name(delay)).message_count == expected