def test_kill_closes_connections(rabbit_manager, rabbit_config):
    """``QueueConsumer.kill()`` must tear down every AMQP connection it
    opened, leaving no connections on the test vhost.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = rabbit_config
    container.max_workers = 1
    container.spawn_managed_thread = spawn_thread

    queue_consumer = QueueConsumer().bind(container)
    queue_consumer.setup()

    class Handler(object):
        # minimal provider: just subscribes to the queue, never acks
        queue = ham_queue

        def handle_message(self, body, message):
            pass

    queue_consumer.register_provider(Handler())
    queue_consumer.start()

    # kill should close all connections
    queue_consumer.kill()

    # no connections should remain for our vhost
    vhost = rabbit_config['vhost']
    connections = get_rabbit_connections(vhost, rabbit_manager)
    if connections:
        for connection in connections:
            assert connection['vhost'] != vhost
def test_dispatch_to_rabbit(rabbit_manager, rabbit_config):
    """An ``EventDispatcher`` declares the service's events exchange and
    publishes dispatched events to it, routed by event type.
    """
    vhost = rabbit_config['vhost']

    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.service_name = "srcservice"
    container.config = rabbit_config

    service = Mock()
    worker_ctx = WorkerContext(container, service, DummyProvider())

    dispatcher = EventDispatcher().bind(container, 'dispatch')
    dispatcher.setup()
    dispatcher.start()

    # we should have an exchange but no queues
    exchanges = rabbit_manager.get_exchanges(vhost)
    queues = rabbit_manager.get_queues(vhost)
    assert "srcservice.events" in [exchange['name'] for exchange in exchanges]
    assert queues == []

    # manually add a queue to capture the events
    rabbit_manager.create_queue(vhost, "event-sink", auto_delete=True)
    rabbit_manager.create_queue_binding(
        vhost, "srcservice.events", "event-sink", routing_key="eventtype")

    service.dispatch = dispatcher.get_dependency(worker_ctx)
    service.dispatch("eventtype", "msg")

    # test event received on manually added queue
    messages = rabbit_manager.get_messages(vhost, "event-sink")
    assert ['msg'] == [msg['payload'] for msg in messages]
def test_reconnect_on_socket_error(rabbit_config):
    """The consumer thread must survive socket errors: each reconnection
    fires ``on_connection_revived``, so repeated calls prove it retried.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = rabbit_config
    container.max_workers = 1
    container.spawn_managed_thread = spawn_thread

    connection_revived = Mock()

    queue_consumer = QueueConsumer().bind(container)
    queue_consumer.setup()
    queue_consumer.on_connection_revived = connection_revived

    handler = MessageHandler()

    queue_consumer.register_provider(handler)
    queue_consumer.start()

    # force every drain_events call to fail as if the socket broke
    with patch.object(
            Connection, 'drain_events', autospec=True) as drain_events:
        drain_events.side_effect = socket.error('test-error')

        def check_reconnected():
            # more than one revival => the consumer reconnected repeatedly
            assert connection_revived.call_count > 1
        assert_stops_raising(check_reconnected)

    queue_consumer.unregister_provider(handler)
    queue_consumer.stop()
def test_nova_consumer_bad_provider():
    """A failure while looking up the provider for a message must be
    delivered to ``handle_result`` as a full exc_info triple rather than
    escaping the consumer.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    consumer = NovaRpcConsumer().bind(container)

    msg = Message(
        channel=None,
        delivery_info={'routing_key': 'some route'},
        properties={},
    )

    patcher = patch.multiple(
        consumer,
        get_provider_for_method=DEFAULT,
        handle_result=DEFAULT,
    )
    with patcher as mocked:
        boom = LookupError('broken')
        mocked['get_provider_for_method'].side_effect = boom

        consumer.handle_message({'args': ()}, msg)

        handle_result = mocked['handle_result']
        assert handle_result.call_count == 1

        # the exc_info triple is passed as the last positional argument
        call_args, _ = handle_result.call_args
        _, exc_value, _ = call_args[-1]
        assert exc_value is boom
def test_publish_to_queue(empty_config, maybe_declare, patch_publisher):
    """A ``Publisher`` declares its queue on setup and publishes with the
    worker context's headers plus any extra publish kwargs.

    NOTE(review): a second ``test_publish_to_queue`` is defined later in
    this module with the same name; being defined later, it shadows this
    one at collection time — one of the two should be renamed or removed.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.service_name = "srcservice"
    container.config = empty_config

    ctx_data = {"language": "en"}
    service = Mock()
    worker_ctx = WorkerContext(
        container, service, DummyProvider("publish"), data=ctx_data)

    publisher = Publisher(queue=foobar_queue).bind(container, "publish")

    producer = Mock()
    connection = Mock()

    get_connection, get_producer = patch_publisher(publisher)

    get_connection.return_value = as_context_manager(connection)
    get_producer.return_value = as_context_manager(producer)

    # test declarations
    publisher.setup()
    maybe_declare.assert_called_once_with(foobar_queue, connection)

    # test publish
    msg = "msg"
    headers = {"nameko.language": "en",
               "nameko.call_id_stack": ["srcservice.publish.0"]}
    service.publish = publisher.get_dependency(worker_ctx)
    service.publish(msg, publish_kwarg="value")
    producer.publish.assert_called_once_with(
        msg, headers=headers, exchange=foobar_ex, retry=True,
        retry_policy=DEFAULT_RETRY_POLICY, publish_kwarg="value"
    )
def test_nova_rpc_provider(empty_config):
    """A NovaRpc entrypoint requeues the incoming message when the
    container refuses to spawn a worker (``ContainerBeingKilled``).
    """
    class Service(object):
        def method(self, arg):
            pass

    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.service_cls = Service
    container.worker_ctx_cls = WorkerContext
    container.service_name = "service"
    container.config = empty_config
    # simulate a container that is shutting down
    container.spawn_worker.side_effect = ContainerBeingKilled()

    consumer_mock = Mock()
    entrypoint = NovaRpc().bind(container, "method")
    entrypoint.setup()
    entrypoint.rpc_consumer = consumer_mock

    body = {
        'method': 'method',
        'args': {"arg": "arg_value"},
        'msg_id': 'msg_id',
        '_context_user_id': 'user_id',
    }
    entrypoint.handle_message(body, Mock(headers={}))

    # the un-spawnable work must go back on the queue
    assert consumer_mock.requeue_message.called
def test_consume_provider(empty_config):
    """Exercise a ``Consumer``'s full lifecycle and its ack/requeue policy:
    ack on success, ack on failure when ``requeue_on_error`` is off,
    requeue on failure when it is on, and always requeue when the
    container is being killed.

    NOTE(review): a second ``test_consume_provider`` is defined later in
    this module with the same name; the later definition shadows this one
    at collection time — one of the two should be renamed or removed.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.worker_ctx_cls = WorkerContext
    container.service_name = "service"
    container.config = empty_config

    worker_ctx = WorkerContext(container, None, DummyProvider())

    spawn_worker = container.spawn_worker
    spawn_worker.return_value = worker_ctx

    queue_consumer = Mock()

    consume_provider = Consumer(
        queue=foobar_queue, requeue_on_error=False).bind(container, "consume")
    consume_provider.queue_consumer = queue_consumer

    message = Mock(headers={})

    # test lifecycle
    consume_provider.setup()
    queue_consumer.register_provider.assert_called_once_with(consume_provider)

    consume_provider.stop()
    queue_consumer.unregister_provider.assert_called_once_with(
        consume_provider)

    # test handling successful call
    queue_consumer.reset_mock()
    consume_provider.handle_message("body", message)
    handle_result = spawn_worker.call_args[1]["handle_result"]
    handle_result(worker_ctx, "result")
    queue_consumer.ack_message.assert_called_once_with(message)

    # test handling failed call without requeue
    queue_consumer.reset_mock()
    consume_provider.requeue_on_error = False
    consume_provider.handle_message("body", message)
    handle_result = spawn_worker.call_args[1]["handle_result"]
    handle_result(worker_ctx, None, (Exception, Exception("Error"), "tb"))
    queue_consumer.ack_message.assert_called_once_with(message)

    # test handling failed call with requeue
    queue_consumer.reset_mock()
    consume_provider.requeue_on_error = True
    consume_provider.handle_message("body", message)
    handle_result = spawn_worker.call_args[1]["handle_result"]
    handle_result(worker_ctx, None, (Exception, Exception("Error"), "tb"))
    assert not queue_consumer.ack_message.called
    queue_consumer.requeue_message.assert_called_once_with(message)

    # test requeueing on ContainerBeingKilled (even without requeue_on_error)
    queue_consumer.reset_mock()
    consume_provider.requeue_on_error = False
    spawn_worker.side_effect = ContainerBeingKilled()
    consume_provider.handle_message("body", message)
    assert not queue_consumer.ack_message.called
    queue_consumer.requeue_message.assert_called_once_with(message)
def test_consume_from_rabbit(container_factory, rabbit_manager, rabbit_config):
    """End-to-end: a ``Consumer`` declares its queue/exchange/binding in
    rabbit, receives a published message, spawns a worker with the
    translated context data, and acks via ``handle_result``.

    NOTE(review): a second ``test_consume_from_rabbit`` is defined later
    in this module with the same name; the later definition shadows this
    one at collection time — one of the two should be renamed or removed.
    """
    vhost = rabbit_config["vhost"]

    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.worker_ctx_cls = CustomWorkerContext
    container.service_name = "service"
    container.config = rabbit_config
    container.max_workers = 10

    def spawn_thread(method, protected):
        return eventlet.spawn(method)
    container.spawn_managed_thread = spawn_thread

    worker_ctx = CustomWorkerContext(container, None, DummyProvider())

    consumer = Consumer(
        queue=foobar_queue, requeue_on_error=False).bind(container, "publish")

    # prepare and start extensions
    consumer.setup()
    consumer.queue_consumer.setup()
    consumer.start()
    consumer.queue_consumer.start()

    # test queue, exchange and binding created in rabbit
    exchanges = rabbit_manager.get_exchanges(vhost)
    queues = rabbit_manager.get_queues(vhost)
    bindings = rabbit_manager.get_queue_bindings(vhost, foobar_queue.name)

    assert "foobar_ex" in [exchange["name"] for exchange in exchanges]
    assert "foobar_queue" in [queue["name"] for queue in queues]
    assert "foobar_ex" in [binding["source"] for binding in bindings]

    # test message consumed from queue
    container.spawn_worker.return_value = worker_ctx

    headers = {"nameko.language": "en",
               "nameko.customheader": "customvalue"}
    rabbit_manager.publish(
        vhost, foobar_ex.name, "", "msg", properties=dict(headers=headers))

    ctx_data = {"language": "en", "customheader": "customvalue"}
    with wait_for_call(CONSUME_TIMEOUT, container.spawn_worker) as method:
        method.assert_called_once_with(consumer, ("msg",), {},
                                       context_data=ctx_data,
                                       handle_result=ANY_PARTIAL)
        handle_result = method.call_args[1]["handle_result"]

    # ack message
    handle_result(worker_ctx, "result")

    # stop will hang if the consumer hasn't acked or requeued messages
    with eventlet.timeout.Timeout(CONSUME_TIMEOUT):
        consumer.stop()

    consumer.queue_consumer.kill()
def test_lifecycle(rabbit_manager, rabbit_config):
    """Registering/unregistering a provider: unregistration must block
    until in-flight messages are acked, and new messages published after
    the consumer stops stay on the queue.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = rabbit_config
    container.max_workers = 3
    container.spawn_managed_thread.side_effect = spawn_thread

    queue_consumer = QueueConsumer().bind(container)

    handler = MessageHandler()

    queue_consumer.register_provider(handler)
    queue_consumer.setup()
    queue_consumer.start()

    # making sure the QueueConsumer uses the container to spawn threads
    container.spawn_managed_thread.assert_called_once_with(ANY, protected=True)

    vhost = rabbit_config['vhost']
    rabbit_manager.publish(vhost, 'spam', '', 'shrub')

    message = handler.wait()

    gt = eventlet.spawn(queue_consumer.unregister_provider, handler)

    # wait for the handler to be removed
    with eventlet.Timeout(TIMEOUT):
        while len(queue_consumer._consumers):
            eventlet.sleep()

    # remove_consumer has to wait for all messages to be acked
    assert not gt.dead

    # the consumer should have stopped and not accept any new messages
    rabbit_manager.publish(vhost, 'spam', '', 'ni')

    # this should cause the consumer to finish shutting down
    queue_consumer.ack_message(message)
    with eventlet.Timeout(TIMEOUT):
        gt.wait()

    # there should be a message left on the queue
    messages = rabbit_manager.get_messages(vhost, 'ham')
    assert ['ni'] == [msg['payload'] for msg in messages]

    queue_consumer.kill()
def test_stop_while_starting(rabbit_config):
    """Stopping a QueueConsumer that is still (unsuccessfully) connecting
    must terminate both the starting greenthread and the consumer thread.
    """
    started = Event()

    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = rabbit_config
    container.max_workers = 3
    container.spawn_managed_thread = spawn_thread

    class BrokenConnConsumer(QueueConsumer):
        def consume(self, *args, **kwargs):
            started.send(None)
            # kombu will retry again and again on broken connections
            # so we have to make sure the event is reset to allow consume
            # to be called again
            started.reset()
            return super(BrokenConnConsumer, self).consume(*args, **kwargs)

    queue_consumer = BrokenConnConsumer().bind(container)
    queue_consumer.setup()

    handler = MessageHandler()
    queue_consumer.register_provider(handler)

    with eventlet.Timeout(TIMEOUT):
        with patch.object(Connection, 'connect', autospec=True) as connect:
            # patch connection to raise an error
            connect.side_effect = TimeoutError('test')
            # try to start the queue consumer
            gt = eventlet.spawn(queue_consumer.start)
            # wait for the queue consumer to begin starting and
            # then immediately stop it
            started.wait()

    with eventlet.Timeout(TIMEOUT):
        queue_consumer.unregister_provider(handler)
        queue_consumer.stop()

    with eventlet.Timeout(TIMEOUT):
        # we expect the queue_consumer.start thread to finish
        # almost immediately and when it does the queue_consumer thread
        # should be dead too
        while not gt.dead:
            eventlet.sleep()

    assert queue_consumer._gt.dead
def test_reply_listener(get_rpc_exchange, queue_consumer):
    """A ``ReplyListener`` declares a uniquely-named reply queue, resolves
    reply events by correlation id, and logs unknown correlation ids.

    NOTE(review): a second ``test_reply_listener`` is defined later in
    this module with the same name; the later definition shadows this one
    at collection time — one of the two should be renamed or removed.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = {}
    container.service_name = "exampleservice"

    exchange = Mock()
    get_rpc_exchange.return_value = exchange

    reply_listener = ReplyListener().bind(container)

    forced_uuid = uuid.uuid4().hex

    # pin the uuid so the queue name and routing key are predictable
    with patch('nameko.rpc.uuid', autospec=True) as patched_uuid:
        patched_uuid.uuid4.return_value = forced_uuid
        reply_listener.setup()
        queue_consumer.setup()

    queue = reply_listener.queue
    assert queue.name == "rpc.reply-exampleservice-{}".format(forced_uuid)
    assert queue.exchange == exchange
    assert queue.routing_key == forced_uuid

    queue_consumer.register_provider.assert_called_once_with(reply_listener)

    correlation_id = 1

    reply_event = reply_listener.get_reply_event(correlation_id)
    assert reply_listener._reply_events == {1: reply_event}

    message = Mock()
    message.properties.get.return_value = correlation_id
    reply_listener.handle_message("msg", message)
    queue_consumer.ack_message.assert_called_once_with(message)
    assert reply_event.ready()
    assert reply_event.wait() == "msg"

    # the event is consumed once resolved
    assert reply_listener._reply_events == {}

    # a second message with the same (now unknown) correlation id is logged
    with patch('nameko.rpc._log', autospec=True) as log:
        reply_listener.handle_message("msg", message)
    assert log.debug.call_args == call(
        'Unknown correlation id: %s', correlation_id)
def test_reply_listener(get_rpc_exchange, queue_consumer):
    """A ``ReplyListener`` declares a uniquely-named reply queue, resolves
    reply events by correlation id, and logs unknown correlation ids.

    NOTE(review): this duplicates an earlier ``test_reply_listener``
    definition in this module; being defined later, this copy shadows the
    earlier one at collection time — one of the two should be renamed or
    removed.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = {}
    container.service_name = "exampleservice"

    exchange = Mock()
    get_rpc_exchange.return_value = exchange

    reply_listener = ReplyListener().bind(container)

    forced_uuid = uuid.uuid4().hex

    # pin the uuid so the queue name and routing key are predictable
    with patch('nameko.rpc.uuid', autospec=True) as patched_uuid:
        patched_uuid.uuid4.return_value = forced_uuid
        reply_listener.setup()
        queue_consumer.setup()

    queue = reply_listener.queue
    assert queue.name == "rpc.reply-exampleservice-{}".format(forced_uuid)
    assert queue.exchange == exchange
    assert queue.routing_key == forced_uuid

    queue_consumer.register_provider.assert_called_once_with(reply_listener)

    correlation_id = 1

    reply_event = reply_listener.get_reply_event(correlation_id)
    assert reply_listener._reply_events == {1: reply_event}

    message = Mock()
    message.properties.get.return_value = correlation_id
    reply_listener.handle_message("msg", message)
    queue_consumer.ack_message.assert_called_once_with(message)
    assert reply_event.ready()
    assert reply_event.wait() == "msg"

    # the event is consumed once resolved
    assert reply_listener._reply_events == {}

    # a second message with the same (now unknown) correlation id is logged
    with patch('nameko.rpc._log', autospec=True) as log:
        reply_listener.handle_message("msg", message)
    assert log.debug.call_args == call('Unknown correlation id: %s',
                                       correlation_id)
def test_reentrant_start_stops():
    """Calling ``start()`` on an already-started QueueConsumer is a no-op:
    the managed greenthread must not be replaced.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = {AMQP_URI_CONFIG_KEY: 'memory://'}
    container.max_workers = 3
    container.spawn_managed_thread = spawn_thread

    queue_consumer = QueueConsumer().bind(container)
    queue_consumer.setup()

    queue_consumer.start()
    original_thread = queue_consumer._gt

    # nothing should happen as the consumer has already been started
    queue_consumer.start()
    assert original_thread is queue_consumer._gt

    queue_consumer.kill()
def test_on_consume_error_kills_consumer(self=None):
    """An exception raised from ``on_consume_ready`` must propagate out of
    the consumer's managed greenthread, killing it.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = {AMQP_URI_CONFIG_KEY: 'memory://'}
    container.max_workers = 1
    container.spawn_managed_thread = spawn_thread

    queue_consumer = QueueConsumer().bind(container)
    queue_consumer.setup()

    handler = MessageHandler()
    queue_consumer.register_provider(handler)

    with patch.object(queue_consumer, 'on_consume_ready') as on_consume_ready:
        on_consume_ready.side_effect = Exception('err')
        queue_consumer.start()

        # waiting on the consumer thread re-raises its failure
        with pytest.raises(Exception):
            queue_consumer._gt.wait()
def test_publish_to_queue(empty_config, maybe_declare, patch_publisher):
    """A ``Publisher`` declares its queue on setup and publishes with the
    worker context's headers plus any extra publish kwargs.

    NOTE(review): this duplicates an earlier ``test_publish_to_queue``
    definition in this module; being defined later, this copy shadows the
    earlier one at collection time — one of the two should be renamed or
    removed.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.service_name = "srcservice"
    container.config = empty_config

    ctx_data = {'language': 'en'}
    service = Mock()
    worker_ctx = WorkerContext(
        container, service, DummyProvider("publish"), data=ctx_data)

    publisher = Publisher(queue=foobar_queue).bind(container, "publish")

    producer = Mock()
    connection = Mock()

    get_connection, get_producer = patch_publisher(publisher)

    get_connection.return_value = as_context_manager(connection)
    get_producer.return_value = as_context_manager(producer)

    # test declarations
    publisher.setup()
    maybe_declare.assert_called_once_with(foobar_queue, connection)

    # test publish
    msg = "msg"
    headers = {
        'nameko.language': 'en',
        'nameko.call_id_stack': ['srcservice.publish.0'],
    }
    service.publish = publisher.get_dependency(worker_ctx)
    service.publish(msg, publish_kwarg="value")
    producer.publish.assert_called_once_with(
        msg, headers=headers, exchange=foobar_ex, retry=True,
        retry_policy=DEFAULT_RETRY_POLICY, publish_kwarg="value")
def test_rpc_consumer(get_rpc_exchange, queue_consumer):
    """An ``RpcConsumer`` declares a durable, service-scoped queue, routes
    method routing keys to registered entrypoints, and raises
    ``MethodNotFound`` for unknown methods.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = {}
    container.service_name = "exampleservice"
    container.service_cls = Mock(rpcmethod=lambda: None)

    rpc_exchange = Mock()
    get_rpc_exchange.return_value = rpc_exchange

    consumer = RpcConsumer().bind(container)

    entrypoint = Rpc().bind(container, "rpcmethod")
    entrypoint.rpc_consumer = consumer
    entrypoint.setup()

    consumer.setup()
    queue_consumer.setup()

    # queue is declared per-service, bound to the rpc exchange
    declared_queue = consumer.queue
    assert declared_queue.name == "rpc-exampleservice"
    assert declared_queue.routing_key == "exampleservice.*"
    assert declared_queue.exchange == rpc_exchange
    assert declared_queue.durable

    queue_consumer.register_provider.assert_called_once_with(consumer)

    consumer.register_provider(entrypoint)
    assert consumer._providers == {entrypoint}

    # a known routing key resolves to the registered entrypoint
    assert consumer.get_provider_for_method(
        "exampleservice.rpcmethod") == entrypoint

    # an unknown one raises
    with pytest.raises(MethodNotFound):
        consumer.get_provider_for_method("exampleservice.invalidmethod")

    consumer.unregister_provider(entrypoint)
    assert consumer._providers == set()
def test_error_stops_consumer_thread(self=None):
    """An unexpected error from ``drain_events`` must kill the consumer
    thread and surface the original exception to anyone waiting on it.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = {AMQP_URI_CONFIG_KEY: 'memory://'}
    container.max_workers = 3
    container.spawn_managed_thread = spawn_thread

    queue_consumer = QueueConsumer().bind(container)
    queue_consumer.setup()

    handler = MessageHandler()
    queue_consumer.register_provider(handler)

    with eventlet.Timeout(TIMEOUT):
        with patch.object(
                Connection, 'drain_events', autospec=True) as drain_events:
            drain_events.side_effect = Exception('test')
            queue_consumer.start()
            with pytest.raises(Exception) as exc_info:
                queue_consumer._gt.wait()

    # the original exception, not a wrapper, is re-raised
    assert exc_info.value.args == ('test',)
def test_prefetch_count(rabbit_manager, rabbit_config):
    """With max_workers=1 each consumer has a prefetch_count of 1: while
    the first consumer holds an unacked message, subsequent messages must
    be delivered to the second consumer.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.config = rabbit_config
    container.max_workers = 1
    container.spawn_managed_thread = spawn_thread

    class NonShared(QueueConsumer):
        # unique sharing key => each bind gets its own consumer/connection
        @property
        def sharing_key(self):
            return uuid.uuid4()

    queue_consumer1 = NonShared().bind(container)
    queue_consumer1.setup()
    queue_consumer2 = NonShared().bind(container)
    queue_consumer2.setup()

    consumer_continue = Event()

    class Handler1(object):
        queue = ham_queue

        def handle_message(self, body, message):
            # block without acking until the test signals us
            consumer_continue.wait()
            queue_consumer1.ack_message(message)

    messages = []

    class Handler2(object):
        queue = ham_queue

        def handle_message(self, body, message):
            messages.append(body)
            queue_consumer2.ack_message(message)

    handler1 = Handler1()
    handler2 = Handler2()
    queue_consumer1.register_provider(handler1)
    queue_consumer2.register_provider(handler2)

    queue_consumer1.start()
    queue_consumer2.start()

    vhost = rabbit_config['vhost']
    # the first consumer only has a prefetch_count of 1 and will only
    # consume 1 message and wait in handler1()
    rabbit_manager.publish(vhost, 'spam', '', 'ham')
    # the next message will go to handler2() no matter of any prefetch_count
    rabbit_manager.publish(vhost, 'spam', '', 'eggs')
    # the third message is only going to handler2 because the first consumer
    # has a prefetch_count of 1 and thus is unable to deal with another
    # message until having ACKed the first one
    rabbit_manager.publish(vhost, 'spam', '', 'bacon')

    with eventlet.Timeout(TIMEOUT):
        while len(messages) < 2:
            eventlet.sleep()

    # allow the waiting consumer to ack its message
    consumer_continue.send(None)

    assert messages == ['eggs', 'bacon']

    queue_consumer1.unregister_provider(handler1)
    queue_consumer2.unregister_provider(handler2)

    queue_consumer1.kill()
    queue_consumer2.kill()
def test_consume_provider(empty_config):
    """Exercise a ``Consumer``'s full lifecycle and its ack/requeue policy:
    ack on success, ack on failure when ``requeue_on_error`` is off,
    requeue on failure when it is on, and always requeue when the
    container is being killed.

    NOTE(review): this duplicates an earlier ``test_consume_provider``
    definition in this module; being defined later, this copy shadows the
    earlier one at collection time — one of the two should be renamed or
    removed.
    """
    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.worker_ctx_cls = WorkerContext
    container.service_name = "service"
    container.config = empty_config

    worker_ctx = WorkerContext(container, None, DummyProvider())

    spawn_worker = container.spawn_worker
    spawn_worker.return_value = worker_ctx

    queue_consumer = Mock()

    consume_provider = Consumer(queue=foobar_queue, requeue_on_error=False).bind(
        container, "consume")
    consume_provider.queue_consumer = queue_consumer

    message = Mock(headers={})

    # test lifecycle
    consume_provider.setup()
    queue_consumer.register_provider.assert_called_once_with(consume_provider)

    consume_provider.stop()
    queue_consumer.unregister_provider.assert_called_once_with(
        consume_provider)

    # test handling successful call
    queue_consumer.reset_mock()
    consume_provider.handle_message("body", message)
    handle_result = spawn_worker.call_args[1]['handle_result']
    handle_result(worker_ctx, 'result')
    queue_consumer.ack_message.assert_called_once_with(message)

    # test handling failed call without requeue
    queue_consumer.reset_mock()
    consume_provider.requeue_on_error = False
    consume_provider.handle_message("body", message)
    handle_result = spawn_worker.call_args[1]['handle_result']
    handle_result(worker_ctx, None, (Exception, Exception('Error'), "tb"))
    queue_consumer.ack_message.assert_called_once_with(message)

    # test handling failed call with requeue
    queue_consumer.reset_mock()
    consume_provider.requeue_on_error = True
    consume_provider.handle_message("body", message)
    handle_result = spawn_worker.call_args[1]['handle_result']
    handle_result(worker_ctx, None, (Exception, Exception('Error'), "tb"))
    assert not queue_consumer.ack_message.called
    queue_consumer.requeue_message.assert_called_once_with(message)

    # test requeueing on ContainerBeingKilled (even without requeue_on_error)
    queue_consumer.reset_mock()
    consume_provider.requeue_on_error = False
    spawn_worker.side_effect = ContainerBeingKilled()
    consume_provider.handle_message("body", message)
    assert not queue_consumer.ack_message.called
    queue_consumer.requeue_message.assert_called_once_with(message)
def test_consume_from_rabbit(container_factory, rabbit_manager, rabbit_config):
    """End-to-end: a ``Consumer`` declares its queue/exchange/binding in
    rabbit, receives a published message, spawns a worker with the
    translated context data, and acks via ``handle_result``.

    NOTE(review): this duplicates an earlier ``test_consume_from_rabbit``
    definition in this module; being defined later, this copy shadows the
    earlier one at collection time — one of the two should be renamed or
    removed.
    """
    vhost = rabbit_config['vhost']

    container = Mock(spec=ServiceContainer)
    container.shared_extensions = {}
    container.worker_ctx_cls = CustomWorkerContext
    container.service_name = "service"
    container.config = rabbit_config
    container.max_workers = 10

    def spawn_thread(method, protected):
        return eventlet.spawn(method)
    container.spawn_managed_thread = spawn_thread

    worker_ctx = CustomWorkerContext(container, None, DummyProvider())

    consumer = Consumer(
        queue=foobar_queue, requeue_on_error=False).bind(container, "publish")

    # prepare and start extensions
    consumer.setup()
    consumer.queue_consumer.setup()
    consumer.start()
    consumer.queue_consumer.start()

    # test queue, exchange and binding created in rabbit
    exchanges = rabbit_manager.get_exchanges(vhost)
    queues = rabbit_manager.get_queues(vhost)
    bindings = rabbit_manager.get_queue_bindings(vhost, foobar_queue.name)

    assert "foobar_ex" in [exchange['name'] for exchange in exchanges]
    assert "foobar_queue" in [queue['name'] for queue in queues]
    assert "foobar_ex" in [binding['source'] for binding in bindings]

    # test message consumed from queue
    container.spawn_worker.return_value = worker_ctx

    headers = {'nameko.language': 'en', 'nameko.customheader': 'customvalue'}
    rabbit_manager.publish(
        vhost, foobar_ex.name, '', 'msg', properties=dict(headers=headers))

    ctx_data = {
        'language': 'en',
        'customheader': 'customvalue',
    }
    with wait_for_call(CONSUME_TIMEOUT, container.spawn_worker) as method:
        method.assert_called_once_with(consumer, ('msg', ), {},
                                       context_data=ctx_data,
                                       handle_result=ANY_PARTIAL)
        handle_result = method.call_args[1]['handle_result']

    # ack message
    handle_result(worker_ctx, 'result')

    # stop will hang if the consumer hasn't acked or requeued messages
    with eventlet.timeout.Timeout(CONSUME_TIMEOUT):
        consumer.stop()

    consumer.queue_consumer.kill()