예제 #1
0
def test_service_pooled_events_multiple_handlers(
        rabbit_manager, rabbit_config, start_containers):
    """ An event consumed by a service with two pool handlers should be
    delivered to both handlers, each via its own queue.

    Relies on module-level ``events`` and ``services`` trackers populated
    by the handler class.
    """
    vhost = rabbit_config['vhost']
    (container,) = start_containers(DoubleServicePoolHandler, ("double",))

    # we should have two queues with a consumer each
    foo_queue_1 = rabbit_manager.get_queue(
        vhost, "evt-srcservice-eventtype--double.handle_1")
    assert len(foo_queue_1['consumer_details']) == 1

    foo_queue_2 = rabbit_manager.get_queue(
        vhost, "evt-srcservice-eventtype--double.handle_2")
    assert len(foo_queue_2['consumer_details']) == 1

    exchange_name = "srcservice.events"

    # wait for both entrypoints to fire before making assertions
    with entrypoint_waiter(container, 'handle_1'):
        with entrypoint_waiter(container, 'handle_2'):
            rabbit_manager.publish(
                vhost, exchange_name, 'eventtype', '"msg"',
                properties=dict(content_type='application/json')
            )

    # both handlers of the service should have received the event
    # (NOTE(review): a previous comment said "3 of them", but only two
    # handlers exist here)
    assert len(events) == 2

    # two worker instances would have been created to deal with the handling
    assert len(services['double']) == 2
    assert services['double'][0].events == ["msg"]
    assert services['double'][1].events == ["msg"]
예제 #2
0
    def test_message_ack_regression(
        self, container, publish, toxiproxy, lock, tracker
    ):
        """ Regression for https://github.com/nameko/nameko/issues/511

        A worker that completes while its connection is down must not lose
        the initiating message: the failed ack results in redelivery once
        the connection is restored.
        """
        # prevent workers from completing
        lock.acquire()

        # fire entrypoint and block the worker;
        # break connection while the worker is active, then release worker
        with entrypoint_waiter(container, 'echo') as result:
            publish('msg1')
            # spin until the worker is actually blocked on the lock
            while not lock._waiters:
                eventlet.sleep()  # pragma: no cover
            toxiproxy.disable()
            # allow connection to close before releasing worker
            eventlet.sleep(.1)
            lock.release()

        # entrypoint will return and attempt to ack initiating message
        assert result.get() == "msg1"

        # enabling connection will re-deliver the initiating message
        # and it will be processed again
        with entrypoint_waiter(container, 'echo') as result:
            toxiproxy.enable()
        assert result.get() == "msg1"

        # connection re-established, container should work again
        with entrypoint_waiter(container, 'echo', timeout=1) as result:
            publish('msg2')
        assert result.get() == 'msg2'
예제 #3
0
File: test_example.py  Project: ahmb/nameko
def test_example_service(container_factory, rabbit_config):
    """ End-to-end check of FooService: starting the container fires the
    'handle_spam' and 'foo' entrypoints and persists a row via the ORM.
    """
    # throwaway sqlite database for the service's ORM dependency
    db_uri = 'sqlite:///{}'.format(tempfile.NamedTemporaryFile().name)
    engine = create_engine(db_uri)
    FooModel.metadata.create_all(engine)

    config = {
        ORM_DB_URIS_KEY: {
            'foo-service:foo_base': db_uri
        }
    }
    config.update(rabbit_config)

    container = container_factory(FooService, config)

    # both entrypoints are expected to fire during startup
    spam_waiter = entrypoint_waiter(container, 'handle_spam')
    foo_waiter = entrypoint_waiter(container, 'foo')
    with spam_waiter, foo_waiter:
        container.start()

    handle_spam_called.assert_called_with('ham & eggs')
    handle_foo_called.assert_called_with('message')

    # the service should have written a row to the spam table
    entries = list(engine.execute('SELECT data FROM spam LIMIT 1'))
    assert entries == [('ham',)]

    container.stop()
예제 #4
0
    def test_normal(self, publisher_container, consumer_container, tracker):
        """ Two consecutive publishes both succeed, and the tracker records
        each send/recv pair in order.
        """
        expected_calls = []

        for payload in ("payload1", "payload2"):
            # publish via the entrypoint hook and wait for the consumer
            with entrypoint_waiter(consumer_container, 'recv'):
                with entrypoint_hook(publisher_container, 'send') as send:
                    send(payload)

            # each round trip appends a send/recv pair to the tracker
            expected_calls.append(call("send", payload))
            expected_calls.append(call("recv", payload))
            assert tracker.call_args_list == expected_calls
예제 #5
0
def test_entrypoint_waiter_duplicates(container_factory, rabbit_config):
    """ Registering two waiters for the same entrypoint on one container
    must raise a RuntimeError.
    """
    container = container_factory(Service, rabbit_config)

    with pytest.raises(RuntimeError) as exc:
        with entrypoint_waiter(container, "working"):
            with entrypoint_waiter(container, "working"):
                pass
    # inspect exc.value: str() of the ExceptionInfo wrapper is unreliable
    # across pytest versions
    assert "already registered" in str(exc.value)
예제 #6
0
def test_reliable_delivery(
    rabbit_manager, rabbit_config, start_containers, container_factory
):
    """ Events sent to queues declared by ``reliable_delivery`` handlers
    should be received even if no service was listening when they were
    dispatched.
    """
    vhost = rabbit_config['vhost']

    (container,) = start_containers(ServicePoolHandler, ('service-pool',))

    # test queue created, with one consumer
    queue_name = "evt-srcservice-eventtype--service-pool.handle"
    queue = rabbit_manager.get_queue(vhost, queue_name)
    assert len(queue['consumer_details']) == 1

    # publish an event
    exchange_name = "srcservice.events"
    with entrypoint_waiter(container, 'handle'):
        rabbit_manager.publish(
            vhost, exchange_name, 'eventtype', '"msg_1"',
            properties=dict(content_type='application/json')
        )

    # wait for the event to be received
    assert events == ["msg_1"]

    # stop container, check queue still exists, without consumers
    container.stop()
    queues = rabbit_manager.get_queues(vhost)
    assert queue_name in [q['name'] for q in queues]
    queue = rabbit_manager.get_queue(vhost, queue_name)
    assert len(queue['consumer_details']) == 0

    # publish another event while nobody is listening
    rabbit_manager.publish(vhost, exchange_name, 'eventtype', '"msg_2"',
                           properties=dict(content_type='application/json'))

    # verify the message gets queued
    messages = rabbit_manager.get_messages(vhost, queue_name, requeue=True)
    assert ['"msg_2"'] == [msg['payload'] for msg in messages]

    # start another container with the same service name, so it consumes
    # from the same queue
    class ServicePool(ServicePoolHandler):
        name = "service-pool"

    container = container_factory(ServicePool, rabbit_config)
    with entrypoint_waiter(container, 'handle'):
        container.start()

    # check the new service to collects the pending event
    assert len(events) == 2
    assert events == ["msg_1", "msg_2"]
예제 #7
0
    def test_reuse_when_recovered(
        self, publisher_container, consumer_container, tracker, toxiproxy
    ):
        """ Verify we detect and recover from stale connections.

        Publish confirms are required for this functionality. Without confirms
        the later messages are silently lost and the test hangs waiting for a
        response.
        """
        # call 1 succeeds
        payload1 = "payload1"
        with entrypoint_waiter(consumer_container, 'recv'):
            with entrypoint_hook(publisher_container, 'send') as send:
                send(payload1)

        assert tracker.call_args_list == [
            call("send", payload1),
            call("recv", payload1),
        ]

        with toxiproxy.disabled():

            # call 2 fails; the broken connection is detected here
            payload2 = "payload2"
            with pytest.raises(IOError) as exc_info:
                with entrypoint_hook(publisher_container, 'send') as send:
                    send(payload2)
            assert (
                # expect the write to raise a BrokenPipe or, if it succeeds,
                # the socket to be closed on the subsequent confirmation read
                "Broken pipe" in str(exc_info.value) or
                "Socket closed" in str(exc_info.value)
            )

            assert tracker.call_args_list == [
                call("send", payload1),
                call("recv", payload1),
                call("send", payload2),
            ]

        # call 3 succeeds: the stale connection was replaced.
        # note there is no "recv" for payload2 — it was lost with the
        # broken connection
        payload3 = "payload3"
        with entrypoint_waiter(consumer_container, 'recv'):
            with entrypoint_hook(publisher_container, 'send') as send:
                send(payload3)

        assert tracker.call_args_list == [
            call("send", payload1),
            call("recv", payload1),
            call("send", payload2),
            call("send", payload3),
            call("recv", payload3),
        ]
예제 #8
0
def test_call_id_over_events(rabbit_config, predictable_call_ids,
                             runner_factory):
    """ Call-id stacks should propagate from an RPC entrypoint through
    dispatched events to both listening services.
    """
    one_called = Mock()
    two_called = Mock()

    # worker context that records each worker's call-id stack
    stack_request = Mock()
    LoggingWorkerContext = get_logging_worker_context(stack_request)

    class EventListeningServiceOne(object):
        name = "listener_one"

        @event_handler('event_raiser', 'hello')
        def hello(self, name):
            one_called()

    class EventListeningServiceTwo(object):
        name = "listener_two"

        @event_handler('event_raiser', 'hello')
        def hello(self, name):
            two_called()

    class EventRaisingService(object):
        name = "event_raiser"
        dispatch = EventDispatcher()

        @rpc
        def say_hello(self):
            self.dispatch('hello', self.name)

    runner = runner_factory(rabbit_config)
    runner.add_service(EventListeningServiceOne, LoggingWorkerContext)
    runner.add_service(EventListeningServiceTwo, LoggingWorkerContext)
    runner.add_service(EventRaisingService, LoggingWorkerContext)
    runner.start()

    container = get_container(runner, EventRaisingService)
    listener1 = get_container(runner, EventListeningServiceOne)
    listener2 = get_container(runner, EventListeningServiceTwo)
    # fire the rpc entrypoint and wait for both listeners to handle the event
    with entrypoint_hook(container, "say_hello") as say_hello:
        waiter1 = entrypoint_waiter(listener1, 'hello')
        waiter2 = entrypoint_waiter(listener2, 'hello')
        with waiter1, waiter2:
            say_hello()

    # one call id for the rpc worker, one per event-handler worker
    assert predictable_call_ids.call_count == 3
    stack_request.assert_has_calls([
        call(None),
        call(['event_raiser.say_hello.0']),
        call(['event_raiser.say_hello.0']),
    ])
    def test_end_to_end(
        self, container_factory, service_cls, config, sentry_stub, tracker
    ):
        """ An exception raised in the 'broken' entrypoint should be
        reported to the sentry stub service.
        """
        container = container_factory(service_cls, config)
        container.start()

        # trigger the failing entrypoint and wait for the sentry stub to
        # receive the resulting report
        with entrypoint_waiter(sentry_stub, 'report'):
            with entrypoint_hook(container, 'broken') as broken:
                with entrypoint_waiter(container, 'broken'):
                    with pytest.raises(CustomException):
                        broken()

        assert tracker.called
예제 #10
0
    def test_message_requeue_regression(
        self, container, publish, toxiproxy, lock, tracker
    ):
        """ Regression for https://github.com/nameko/nameko/issues/511

        A failed requeue over a dead connection must not lose the message:
        it is redelivered once the connection is restored.
        """
        # turn on requeue_on_error
        consumer = get_extension(container, Consumer)
        consumer.requeue_on_error = True

        # make entrypoint raise the first time it's called so that
        # we attempt to requeue it
        class Boom(Exception):
            pass

        def error_once():
            # generator side_effect: raise on the first call, then no-op
            yield Boom("error")
            while True:
                yield
        tracker.side_effect = error_once()

        # prevent workers from completing
        lock.acquire()

        # fire entrypoint and block the worker;
        # break connection while the worker is active, then release worker
        with entrypoint_waiter(container, 'echo') as result:
            publish('msg1')
            # spin until the worker is actually blocked on the lock
            while not lock._waiters:
                eventlet.sleep()  # pragma: no cover
            toxiproxy.disable()
            # allow connection to close before releasing worker
            eventlet.sleep(.1)
            lock.release()

        # entrypoint will return and attempt to requeue initiating message
        with pytest.raises(Boom):
            result.get()

        # enabling connection will re-deliver the initiating message
        # and it will be processed again
        with entrypoint_waiter(container, 'echo', timeout=1) as result:
            toxiproxy.enable()
        assert result.get() == 'msg1'

        # connection re-established, container should work again
        with entrypoint_waiter(container, 'echo', timeout=1) as result:
            publish('msg2')
        assert result.get() == 'msg2'
예제 #11
0
def test_requeue_on_error(rabbit_manager, rabbit_config, start_containers):
    """ A handler configured to requeue on error should see the same event
    redelivered and handled by a fresh service instance each time.
    """
    vhost = rabbit_config['vhost']
    (container,) = start_containers(RequeueingHandler, ('requeue',))

    # the queue should been created and have one consumer
    queue = rabbit_manager.get_queue(
        vhost, "evt-srcservice-eventtype--requeue.handle")
    assert len(queue['consumer_details']) == 1

    counter = itertools.count()

    def entrypoint_fired_twice(worker_ctx, res, exc_info):
        # counter yields 0, 1, 2, ... so this returns True on the third
        # firing (NOTE(review): the name suggests two firings — confirm
        # whether `> 1` or `>= 1` is intended)
        return next(counter) > 1

    with entrypoint_waiter(
        container, 'handle', callback=entrypoint_fired_twice
    ):
        rabbit_manager.publish(
            vhost, "srcservice.events", 'eventtype', '"msg"',
            properties=dict(content_type='application/json')
        )

    # the event will be received multiple times as it gets requeued and then
    # consumed again
    assert len(events) > 1

    # multiple instances of the service should have been instantiated
    assert len(services['requeue']) > 1

    # each instance should have received one event
    for service in services['requeue']:
        assert service.events == ["msg"]
예제 #12
0
def test_runner_with_duplicate_services(
    runner_factory, rabbit_config, service_cls, tracker
):
    """ Adding the same service class to a runner twice hosts it only once;
    both events and rpc reach that single service.
    """
    runner = runner_factory(rabbit_config)
    for _ in range(2):
        runner.add_service(service_cls)  # second add is a no-op
    runner.start()

    # exactly one container should be hosted
    assert len(runner.containers) == 1
    (container,) = runner.containers

    # events: only the single hosted service receives the dispatch
    event_data = "event"
    dispatch = event_dispatcher(rabbit_config)
    with entrypoint_waiter(container, "handle"):
        dispatch('srcservice', "testevent", event_data)
    assert tracker.call_args_list == [call(event_data)]

    # rpc: the proxy call is handled by the same single service
    arg = "arg"
    with ServiceRpcProxy("service", rabbit_config) as proxy:
        proxy.handle(arg)

    assert tracker.call_args_list == [call(event_data), call(arg)]
예제 #13
0
    def test_event_dispatcher_over_ssl(
        self, container_factory, rabbit_ssl_config, rabbit_config
    ):
        """ An EventDispatcher connected over SSL can deliver events to a
        handler connected without SSL.
        """
        class DispatchingService(object):
            name = "dispatch"

            dispatch = EventDispatcher()

            @dummy
            def method(self, payload):
                return self.dispatch("event-type", payload)

        class HandlingService(object):
            name = "handler"

            @event_handler("dispatch", "event-type")
            def echo(self, payload):
                return payload

        # dispatcher connects over SSL; handler over a plain connection
        dispatcher_container = container_factory(
            DispatchingService, rabbit_ssl_config
        )
        dispatcher_container.start()

        handler_container = container_factory(HandlingService, rabbit_config)
        handler_container.start()

        # fire the dispatching entrypoint and wait for the handler's echo
        with entrypoint_waiter(handler_container, 'echo') as result:
            with entrypoint_hook(dispatcher_container, 'method') as dispatch:
                dispatch("payload")
        assert result.get() == "payload"
예제 #14
0
def test_entrypoint_waiter_wait_until_called_with_argument(
    container_factory, rabbit_config, spawn_thread
):
    """ entrypoint_waiter with a callback should only return once the
    entrypoint has been invoked with a specific argument.
    """

    class Service(object):
        name = "service"

        @event_handler('srcservice', 'eventtype')
        def handle_event(self, msg):
            return msg

    container = container_factory(Service, rabbit_config)
    container.start()

    target = 5

    def cb(worker_ctx, res, exc_info):
        # keep waiting until the entrypoint was called with `target`
        return worker_ctx.args == (target,)

    def increment_forever():
        dispatch = event_dispatcher(rabbit_config)
        for count in itertools.count():
            dispatch('srcservice', 'eventtype', count)
            # force yield; pass 0 explicitly — time.sleep() with no
            # argument is a TypeError unless eventlet has patched it
            time.sleep(0)

    with entrypoint_waiter(container, 'handle_event', callback=cb) as result:
        spawn_thread(increment_forever)

    assert result.get() == target
예제 #15
0
    def test_reuse_when_down(
        self, publisher_container, consumer_container, tracker, toxiproxy,
    ):
        """ Verify we detect stale connections.

        Publish confirms are required for this functionality. Without confirms
        the later messages are silently lost and the test hangs waiting for a
        response.
        """
        # call 1 succeeds
        payload1 = "payload1"
        with entrypoint_waiter(consumer_container, 'recv'):
            with entrypoint_hook(publisher_container, 'send') as send:
                send(payload1)

        assert tracker.call_args_list == [
            call("send", payload1),
            call("recv", payload1),
        ]

        with toxiproxy.disabled():

            # call 2 fails: publishing over the broken connection raises
            payload2 = "payload2"
            with pytest.raises(IOError):
                with entrypoint_hook(publisher_container, 'send') as send:
                    send(payload2)

            # the send was attempted but never received by the consumer
            assert tracker.call_args_list == [
                call("send", payload1),
                call("recv", payload1),
                call("send", payload2),
            ]
예제 #16
0
    def test_deadlock_due_to_slow_workers(
        self, service_cls, container_factory, config
    ):
        """ Deadlock will occur if the unack'd messages grows beyond the
        size of the worker pool at any point. The QueueConsumer will block
        waiting for a worker and pending RPC replies will not be ack'd.
        Any running workers therefore never complete, and the worker pool
        remains exhausted.
        """
        container = container_factory(service_cls, config)
        container.start()

        count = 2

        # interleave events for both entrypoints to saturate the worker pool
        dispatch = event_dispatcher(config)
        for _ in range(count):
            dispatch("service", "event1", 1)
            dispatch("service", "event2", 1)

        counter = itertools.count(start=1)

        def cb(worker_ctx, res, exc_info):
            # stop waiting once `handle_event1` has fired `count` times;
            # a deadlock would instead trip the waiter's 5s timeout
            if next(counter) == count:
                return True

        with entrypoint_waiter(
            container, 'handle_event1', timeout=5, callback=cb
        ):
            pass
예제 #17
0
    def test_publisher_over_ssl(
        self, container_factory, rabbit_ssl_config, rabbit_config, queue
    ):
        """ A Publisher connected over SSL can deliver messages to a
        consumer connected without SSL.
        """
        class SslPublishingService(object):
            name = "publisher"

            publish = Publisher()

            @dummy
            def method(self, payload):
                return self.publish(payload, routing_key=queue.name)

        class PlainConsumingService(object):
            name = "consumer"

            @consume(queue)
            def echo(self, payload):
                return payload

        # publisher connects over SSL; consumer over a plain connection
        publisher_container = container_factory(
            SslPublishingService, rabbit_ssl_config
        )
        publisher_container.start()

        consumer_container = container_factory(
            PlainConsumingService, rabbit_config
        )
        consumer_container.start()

        # publish via the entrypoint hook and wait for the consumer's echo
        with entrypoint_waiter(consumer_container, 'echo') as result:
            with entrypoint_hook(publisher_container, 'method') as publish:
                publish("payload")
        assert result.get() == "payload"
예제 #18
0
    def test_downstream_timeout(self, container, publish, toxiproxy):
        """ Verify we detect and recover from sockets timing out.

        This failure mode means that the socket between the rabbit broker and
        the consumer times for out `timeout` milliseconds and then closes.

        Attempting to read from the socket after it's closed raises a
        socket.error and the connection will be re-established. If `timeout`
        is longer than twice the heartbeat interval, the behaviour is the same
        as in `test_downstream_blackhole` below, except that the consumer
        cancel will eventually (`timeout` milliseconds) raise a socket.error,
        which is ignored, allowing the teardown to continue.

        See :meth:`kombu.messsaging.Consumer.__exit__`
        """
        queue_consumer = get_extension(container, QueueConsumer)

        def reset(args, kwargs, result, exc_info):
            # restore the connection as soon as the error is observed
            toxiproxy.reset_timeout()
            return True

        # inject the timeout and wait for the connection error handler
        with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
            toxiproxy.set_timeout(stream="downstream", timeout=100)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
예제 #19
0
def test_entrypoint_waiter_bad_entrypoint(container_factory, rabbit_config):
    """ Waiting on a non-existent entrypoint must raise a RuntimeError. """
    container = container_factory(Service, rabbit_config)

    with pytest.raises(RuntimeError) as exc:
        with entrypoint_waiter(container, "unknown"):
            pass
    # inspect exc.value: str() of the ExceptionInfo wrapper is unreliable
    # across pytest versions
    assert "has no entrypoint" in str(exc.value)
예제 #20
0
def test_entrypoint_waiter(container_factory, rabbit_config):
    """ entrypoint_waiter returns once the watched entrypoint has fired. """
    container = container_factory(Service, rabbit_config)
    container.start()

    # dispatch an event and wait for the 'handle' entrypoint to complete
    with entrypoint_waiter(container, "handle"):
        event_dispatcher(rabbit_config)("srcservice", "eventtype", "")
예제 #21
0
    def test_downstream_blackhole(
        self, container, publish, toxiproxy
    ):  # pragma: no cover
        """ Verify we detect and recover from sockets losing data.

        This failure mode means that all data sent from the rabbit broker to
        the consumer is lost, but the socket remains open.

        Heartbeat acknowledgements from the broker are not received by the
        consumer. After two beats are missed the consumer raises a "too many
        heartbeats missed" error.

        Cancelling the consumer requests an acknowledgement from the broker,
        which is swallowed by the socket. There is no timeout when reading
        the acknowledgement so this hangs forever.

        See :meth:`kombu.messsaging.Consumer.__exit__`
        """
        pytest.skip("skip until kombu supports recovery in this scenario")

        # everything below is unreachable until the skip above is removed;
        # kept as the intended test body
        queue_consumer = get_extension(container, QueueConsumer)

        def reset(args, kwargs, result, exc_info):
            # restore the connection as soon as the error is observed
            toxiproxy.reset_timeout()
            return True

        with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
            toxiproxy.set_timeout(stream="downstream", timeout=0)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
예제 #22
0
def test_entrypoint_waiter_wait_until_stops_raising(
    container_factory, rabbit_config, spawn_thread
):
    """ entrypoint_waiter with a callback should keep waiting while the
    entrypoint raises, and return the first successful result.
    """
    threshold = 5

    class NotEnough(Exception):
        pass

    class Service(object):
        name = "service"

        @event_handler('srcservice', 'eventtype')
        def handle_event(self, msg):
            if msg < threshold:
                raise NotEnough(msg)
            return msg

    container = container_factory(Service, rabbit_config)
    container.start()

    def cb(worker_ctx, res, exc_info):
        # keep waiting while the entrypoint raises
        return exc_info is None

    def increment_forever():
        dispatch = event_dispatcher(rabbit_config)
        for count in itertools.count():
            dispatch('srcservice', 'eventtype', count)
            # force yield; pass 0 explicitly — time.sleep() with no
            # argument is a TypeError unless eventlet has patched it
            time.sleep(0)

    with entrypoint_waiter(container, 'handle_event', callback=cb) as result:
        spawn_thread(increment_forever)

    assert result.get() == threshold
예제 #23
0
    def test_upstream_blackhole(self, container, publish, toxiproxy):
        """ Verify we detect and recover from sockets losing data.

        This failure mode means that all data sent from the consumer to the
        rabbit broker is lost, but the socket remains open.

        Heartbeats sent from the consumer are not received by the broker. After
        two beats are missed the broker closes the connection, and subsequent
        reads from the socket raise a socket.error, so the connection is
        re-established.
        """
        queue_consumer = get_extension(container, QueueConsumer)

        def reset(args, kwargs, result, exc_info):
            # restore the connection as soon as the error is observed
            toxiproxy.reset_timeout()
            return True

        # drop all upstream data and wait for the connection error handler
        with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
            toxiproxy.set_timeout(timeout=0)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
예제 #24
0
def test_multiple_runners_coexist(
    runner_factory, rabbit_config, rabbit_manager, service_cls, tracker
):
    """ Two runners hosting the same service should each get their own
    event queue (both receive events) while sharing the rpc queue (only
    one handles each rpc call).
    """
    runner1 = runner_factory(rabbit_config, service_cls)
    runner1.start()

    runner2 = runner_factory(rabbit_config, service_cls)
    runner2.start()

    vhost = rabbit_config['vhost']

    # verify there are two event queues with a single consumer each
    def check_consumers():
        evt_queues = [queue for queue in rabbit_manager.get_queues(vhost)
                      if queue['name'].startswith('evt-srcservice-testevent')]
        assert len(evt_queues) == 2
        for queue in evt_queues:
            assert queue['consumers'] == 1

    # rabbit's management API seems to lag
    assert_stops_raising(check_consumers)

    # test events (both services will receive if in "broadcast" mode)
    event_data = "event"
    dispatch = event_dispatcher(rabbit_config)

    container1 = list(runner1.containers)[0]
    container2 = list(runner2.containers)[0]

    with entrypoint_waiter(container1, "handle"):
        with entrypoint_waiter(container2, "handle"):
            dispatch('srcservice', "testevent", event_data)
    assert tracker.call_args_list == [call(event_data), call(event_data)]

    # verify there are two consumers on the rpc queue
    rpc_queue = rabbit_manager.get_queue(vhost, 'rpc-service')
    assert rpc_queue['consumers'] == 2

    # test rpc (only one service will respond)
    arg = "arg"
    with ServiceRpcProxy('service', rabbit_config) as proxy:
        proxy.handle(arg)

    assert tracker.call_args_list == [
        call(event_data), call(event_data), call(arg)
    ]
예제 #25
0
def test_entrypoint_waiter_timeout(container_factory, rabbit_config):
    """ The waiter raises EntrypointWaiter.Timeout with a descriptive
    message when the entrypoint never fires within `timeout` seconds.
    """
    container = container_factory(Service, rabbit_config)
    container.start()

    with pytest.raises(EntrypointWaiter.Timeout) as exc_info:
        with entrypoint_waiter(container, "handle", timeout=0.01):
            pass

    expected_message = (
        "Entrypoint service.handle failed to complete "
        "within 0.01 seconds"
    )
    assert str(exc_info.value) == expected_message
예제 #26
0
    def test_timer(self, container_factory, rabbit_config):
        """ The example timer service should fire its 'ping' entrypoint
        shortly after the container starts.
        """
        from examples.timer import Service

        container = container_factory(Service, rabbit_config)

        # the timer starts with the container; wait for the first ping
        with entrypoint_waiter(container, 'ping'):
            container.start()
예제 #27
0
def test_broadcast_events(rabbit_manager, rabbit_config, start_containers):
    """ Broadcast handlers each get their own queue, so a single event is
    delivered to every container regardless of service name.
    """
    vhost = rabbit_config['vhost']
    (c1, c2, c3) = start_containers(BroadcastHandler, ("foo", "foo", "bar"))

    # each broadcast queue should have one consumer
    queues = rabbit_manager.get_queues(vhost)
    queue_names = [queue['name'] for queue in queues
                   if queue['name'].startswith("evt-srcservice-eventtype-")]

    assert len(queue_names) == 3
    for name in queue_names:
        queue = rabbit_manager.get_queue(vhost, name)
        assert len(queue['consumer_details']) == 1

    exchange_name = "srcservice.events"

    # wait for all three containers to handle the event
    with entrypoint_waiter(c1, 'handle'):
        with entrypoint_waiter(c2, 'handle'):
            with entrypoint_waiter(c3, 'handle'):
                rabbit_manager.publish(
                    vhost, exchange_name, 'eventtype', '"msg"',
                    properties=dict(content_type='application/json')
                )

    # a total of three events should be received
    assert len(events) == 3

    # all three handlers should receive the event, but they're only of two
    # different types
    assert len(services) == 2

    # two of them were "foo" handlers
    assert len(services['foo']) == 2
    assert isinstance(services['foo'][0], BroadcastHandler)
    assert isinstance(services['foo'][1], BroadcastHandler)

    # and they both should have received the event
    assert services['foo'][0].events == ["msg"]
    assert services['foo'][1].events == ["msg"]

    # the other was a "bar" handler
    assert len(services['bar']) == 1
    assert isinstance(services['bar'][0], BroadcastHandler)

    # and it too should have received the event
    assert services['bar'][0].events == ["msg"]
예제 #28
0
def test_prefetch_count(rabbit_manager, rabbit_config, container_factory):
    """ With max_workers=1 and non-shared queue consumers, a consumer that
    never ACKs holds exactly one message while the other consumer drains
    the rest.
    """

    class NonShared(QueueConsumer):
        # a unique sharing key forces each Consumer onto its own
        # QueueConsumer (and therefore its own channel/prefetch window)
        @property
        def sharing_key(self):
            return uuid.uuid4()

    messages = []

    class SelfishConsumer1(Consumer):
        queue_consumer = NonShared()

        def handle_message(self, body, message):
            # block (never ACK) until `consumer_continue` is sent.
            # NOTE: `consumer_continue` is the Event created further down;
            # it is resolved lazily, after the test has assigned it
            consumer_continue.wait()
            super(SelfishConsumer1, self).handle_message(body, message)

    class SelfishConsumer2(Consumer):
        queue_consumer = NonShared()

        def handle_message(self, body, message):
            messages.append(body)
            super(SelfishConsumer2, self).handle_message(body, message)

    class Service(object):
        name = "service"

        @SelfishConsumer1.decorator(queue=ham_queue)
        @SelfishConsumer2.decorator(queue=ham_queue)
        def handle(self, payload):
            pass

    rabbit_config['max_workers'] = 1
    container = container_factory(Service, rabbit_config)
    container.start()

    consumer_continue = Event()

    # the two handlers would ordinarily take alternating messages, but are
    # limited to holding one un-ACKed message. Since Handler1 never ACKs, it
    # only ever gets one message, and Handler2 gets the others.

    def wait_for_expected(worker_ctx, res, exc_info):
        return {'m3', 'm4', 'm5'}.issubset(set(messages))

    with entrypoint_waiter(container, 'handle', callback=wait_for_expected):
        vhost = rabbit_config['vhost']
        properties = {'content_type': 'application/data'}
        for message in ('m1', 'm2', 'm3', 'm4', 'm5'):
            rabbit_manager.publish(
                vhost, 'spam', '', message, properties=properties
            )

    # we don't know which handler picked up the first message,
    # but all the others should've been handled by Handler2
    assert messages[-3:] == ['m3', 'm4', 'm5']

    # release the waiting consumer
    consumer_continue.send(None)
예제 #29
0
def test_entrypoint_waiter_timeout(container_factory, rabbit_config):
    """ entrypoint_waiter raises its Timeout with a descriptive message
    when the entrypoint never fires within `timeout` seconds.
    """
    container = container_factory(Service, rabbit_config)
    container.start()

    with pytest.raises(entrypoint_waiter.Timeout) as exc_info:
        with entrypoint_waiter(container, 'handle', timeout=0.01):
            pass
    assert str(exc_info.value) == (
        "Timeout on service.handle after 0.01 seconds")
예제 #30
0
def test_entrypoint_waiter_duplicate(container_factory, rabbit_config):
    """ Two waiters on the same entrypoint both trigger from a single
    firing, and the entrypoint itself runs only once.
    """

    class Service(object):
        name = "service"

        @event_handler('srcservice', 'eventtype')
        def handle_event(self, msg):
            # resolves to the module-level `handle_event` mock, not the
            # method — class scope is not a closure for method bodies
            handle_event(msg)

    container = container_factory(Service, rabbit_config)
    container.start()

    dispatch = event_dispatcher(rabbit_config)
    with entrypoint_waiter(container, 'handle_event'):
        with entrypoint_waiter(container, 'handle_event'):
            dispatch('srcservice', 'eventtype', "msg")

    # a single dispatch fired the entrypoint exactly once
    assert handle_event.call_args_list == [call("msg")]
예제 #31
0
def test_call_id_over_events(rabbit_config, predictable_call_ids,
                             runner_factory, stack_logger, tracker):
    """ Worker call-id stacks should propagate over dispatched events:
    each listener's handler runs with the dispatcher's call id as the
    parent entry of its own stack.
    """
    StackLogger = stack_logger

    one_called = Mock()
    two_called = Mock()

    # two independent services subscribed to the same event type
    class EventListeningServiceOne(object):
        name = "listener_one"

        stack_logger = StackLogger()

        @event_handler('event_raiser', 'hello')
        def hello(self, name):
            one_called()

    class EventListeningServiceTwo(object):
        name = "listener_two"

        stack_logger = StackLogger()

        @event_handler('event_raiser', 'hello')
        def hello(self, name):
            two_called()

    # dispatches the event from inside an RPC-triggered worker
    class EventRaisingService(object):
        name = "event_raiser"
        dispatch = EventDispatcher()

        stack_logger = StackLogger()

        @rpc
        def say_hello(self):
            self.dispatch('hello', self.name)

    runner = runner_factory(rabbit_config)
    runner.add_service(EventListeningServiceOne)
    runner.add_service(EventListeningServiceTwo)
    runner.add_service(EventRaisingService)
    runner.start()

    container = get_container(runner, EventRaisingService)
    listener1 = get_container(runner, EventListeningServiceOne)
    listener2 = get_container(runner, EventListeningServiceTwo)
    # fire the RPC and wait until both listeners have handled the event
    with entrypoint_hook(container, "say_hello") as say_hello:
        waiter1 = entrypoint_waiter(listener1, 'hello')
        waiter2 = entrypoint_waiter(listener2, 'hello')
        with waiter1, waiter2:
            say_hello()

    # three workers ran in total: say_hello plus one handler per listener
    assert predictable_call_ids.call_count == 3

    # order of event handlers and dependencies is non-deterministic,
    # so there are four permutations of valid call stacks: the listeners
    # may be assigned call ids 1/2 in either order, and the tracker may
    # observe the two handler stacks in either order
    possible_call_lists = ([
        call(['event_raiser.say_hello.0']),
        call(['event_raiser.say_hello.0', 'listener_one.hello.1']),
        call(['event_raiser.say_hello.0', 'listener_two.hello.2']),
    ], [
        call(['event_raiser.say_hello.0']),
        call(['event_raiser.say_hello.0', 'listener_one.hello.2']),
        call(['event_raiser.say_hello.0', 'listener_two.hello.1']),
    ], [
        call(['event_raiser.say_hello.0']),
        call(['event_raiser.say_hello.0', 'listener_two.hello.1']),
        call(['event_raiser.say_hello.0', 'listener_one.hello.2']),
    ], [
        call(['event_raiser.say_hello.0']),
        call(['event_raiser.say_hello.0', 'listener_two.hello.2']),
        call(['event_raiser.say_hello.0', 'listener_one.hello.1']),
    ])
    assert tracker.call_args_list in possible_call_lists
예제 #32
0
def test_unreliable_delivery(rabbit_manager, rabbit_config, start_containers):
    """ Without ``reliable_delivery``, a handler's queue is removed when its
    container stops, so events dispatched while no service is listening are
    simply dropped.
    """
    vhost = rabbit_config['vhost']
    json_properties = dict(content_type='application/json')

    (unreliable_container,) = start_containers(
        UnreliableHandler, ('unreliable',))

    # a reliable handler keeps the auto-delete events exchange for
    # srcservice alive while the unreliable container is stopped
    (keepalive_container,) = start_containers(
        ServicePoolHandler, ('keep-exchange-alive',))

    # the unreliable queue exists and has exactly one consumer
    queue_name = "evt-srcservice-eventtype--unreliable.handle"
    queue = rabbit_manager.get_queue(vhost, queue_name)
    assert len(queue['consumer_details']) == 1

    exchange_name = "srcservice.events"

    # publish a first event; both running services should receive it
    with entrypoint_waiter(unreliable_container, 'handle'), \
            entrypoint_waiter(keepalive_container, 'handle'):
        rabbit_manager.publish(
            vhost, exchange_name, 'eventtype', '"msg_1"',
            properties=json_properties
        )
    assert events == ["msg_1", "msg_1"]

    # one worker instance of the unreliable service handled it
    assert len(services['unreliable']) == 1
    assert services['unreliable'][0].events == ["msg_1"]

    # stopping the container should delete the non-durable queue
    unreliable_container.stop()
    remaining_queue_names = [
        q['name'] for q in rabbit_manager.get_queues(vhost)
    ]
    assert queue_name not in remaining_queue_names

    # dispatch while nobody is listening; this event is lost
    rabbit_manager.publish(vhost, exchange_name, 'eventtype', '"msg_2"',
                           properties=json_properties)

    # a fresh container recreates the queue, again with one consumer
    (restarted_container,) = start_containers(
        UnreliableHandler, ('unreliable',))
    queue = rabbit_manager.get_queue(vhost, queue_name)
    assert len(queue['consumer_details']) == 1

    # a third event is delivered normally to the restarted service
    with entrypoint_waiter(restarted_container, 'handle'):
        rabbit_manager.publish(
            vhost, exchange_name, 'eventtype', '"msg_3"',
            properties=json_properties
        )

    # msg_2 was never delivered to the unreliable handler: only the events
    # published while an instance was running arrived
    assert len(services['unreliable']) == 2
    assert services['unreliable'][0].events == ["msg_1"]
    assert services['unreliable'][1].events == ["msg_3"]
예제 #33
0
 def test_normal(self, container, publish):
     """ A published message should round-trip through the echo entrypoint
     and come back unchanged.
     """
     payload = "foo"
     with entrypoint_waiter(container, 'echo') as result:
         publish(payload)
     assert result.get() == payload