def entrypoint_waiter(container, entrypoint, timeout=30):
    """Helper to wait for entrypoints to fire (and complete)

    Usage::

        container = ServiceContainer(ExampleService, config)
        with entrypoint_waiter(container, 'example_handler'):
            ...  # e.g. rpc call that will result in handler being called
    """
    waiter = EntrypointWaiter(entrypoint)

    # fail fast if the entrypoint doesn't exist or is already being waited on
    if not get_dependency(container, EntrypointProvider, name=entrypoint):
        raise RuntimeError("{} has no entrypoint `{}`".format(
            container.service_name, entrypoint))
    if get_dependency(container, EntrypointWaiter, entrypoint=entrypoint):
        raise RuntimeError("Waiter already registered for {}".format(
            entrypoint))

    # can't mess with dependencies while container is running
    wait_for_worker_idle(container)
    container.dependencies.add(waiter)

    try:
        yield
        # block until the entrypoint has fired, or `timeout` elapses
        with eventlet.Timeout(timeout):
            waiter.wait()
    finally:
        # remove the temporary waiter dependency again, once workers are idle
        wait_for_worker_idle(container)
        container.dependencies.remove(waiter)
def test_greenthread_raise_in_kill(container_factory, rabbit_config, logger):
    """Verify the QueueConsumer warns (rather than re-raising) when its
    greenthread dies with an exception during kill.
    """
    class Service(object):
        @rpc
        def echo(self, arg):
            return arg

    container = container_factory(Service, rabbit_config)
    queue_consumer = get_dependency(container, QueueConsumer)
    rpc_consumer = get_dependency(container, RpcConsumer)

    # an error in rpc_consumer.handle_message will kill the queue_consumer's
    # greenthread. when the container suicides and kills the queue_consumer,
    # it should warn instead of re-raising the original exception
    exc = Exception('error handling message')
    with patch.object(rpc_consumer, 'handle_message') as handle_message:
        handle_message.side_effect = exc

        container.start()

        with RpcProxy('service', rabbit_config) as service_rpc:
            # spawn because `echo` will never respond
            eventlet.spawn(service_rpc.echo, "foo")

        # container will have died with the messaging handling error
        with pytest.raises(Exception) as exc_info:
            container.wait()
        # compare via str() rather than the py2-only `.message` attribute
        assert str(exc_info.value) == "error handling message"

        # queueconsumer will have warned about the exc raised by its
        # greenthread
        assert logger.warn.call_args_list == [
            call("QueueConsumer %s raised `%s` during kill",
                 queue_consumer, exc)]
def test_greenthread_raise_in_kill(container_factory, rabbit_config, logger):
    """Verify the QueueConsumer warns (rather than re-raising) when its
    greenthread dies with an exception during kill.
    """
    class Service(object):
        @rpc
        def echo(self, arg):
            return arg

    container = container_factory(Service, rabbit_config)
    queue_consumer = get_dependency(container, QueueConsumer)
    rpc_consumer = get_dependency(container, RpcConsumer)

    # an error in rpc_consumer.handle_message will kill the queue_consumer's
    # greenthread. when the container suicides and kills the queue_consumer,
    # it should warn instead of re-raising the original exception
    exc = Exception('error handling message')
    with patch.object(rpc_consumer, 'handle_message') as handle_message:
        handle_message.side_effect = exc

        container.start()

        with RpcProxy('service', rabbit_config) as service_rpc:
            # spawn because `echo` will never respond
            eventlet.spawn(service_rpc.echo, "foo")

        # container will have died with the messaging handling error
        with pytest.raises(Exception) as exc_info:
            container.wait()
        # compare via str() rather than the py2-only `.message` attribute
        assert str(exc_info.value) == "error handling message"

        # queueconsumer will have warned about the exc raised by its
        # greenthread
        assert logger.warn.call_args_list == [
            call("QueueConsumer %s raised `%s` during kill",
                 queue_consumer, exc)
        ]
def entrypoint_waiter(container, entrypoint, timeout=30):
    """Helper to wait for entrypoints to fire (and complete)

    Usage::

        container = ServiceContainer(ExampleService, config)
        with entrypoint_waiter(container, 'example_handler'):
            ...  # e.g. rpc call that will result in handler being called
    """
    waiter = EntrypointWaiter(entrypoint)

    # guard clauses: entrypoint must exist and not already have a waiter
    if not get_dependency(container, EntrypointProvider, name=entrypoint):
        raise RuntimeError("{} has no entrypoint `{}`".format(
            container.service_name, entrypoint))
    if get_dependency(container, EntrypointWaiter, entrypoint=entrypoint):
        raise RuntimeError(
            "Waiter already registered for {}".format(entrypoint))

    # can't mess with dependencies while container is running
    wait_for_worker_idle(container)
    container.dependencies.add(waiter)

    try:
        yield
        # wait (bounded by `timeout`) for the entrypoint to complete
        with eventlet.Timeout(timeout):
            waiter.wait()
    finally:
        # detach the waiter once the container is idle again
        wait_for_worker_idle(container)
        container.dependencies.remove(waiter)
def test_expected_exceptions(rabbit_config):
    """The `expected_exceptions` declared on each rpc entrypoint are exposed
    on the corresponding RpcProvider.
    """
    container = ServiceContainer(ExampleService, WorkerContext, rabbit_config)

    # single expected exception class
    broken_provider = get_dependency(container, RpcProvider, name="broken")
    assert broken_provider.expected_exceptions == ExampleError

    # tuple of expected exception classes
    very_broken_provider = get_dependency(
        container, RpcProvider, name="very_broken")
    assert very_broken_provider.expected_exceptions == (KeyError, ValueError)
def test_rpc_container_being_killed_retries(container_factory, rabbit_config):
    """While a container is being killed, incoming rpc messages are requeued
    rather than processed; they complete once the kill flag clears.
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    def wait_for_result():
        with RpcProxy("exampleservice", rabbit_config) as proxy:
            return proxy.task_a()

    # simulate a container mid-kill
    container._being_killed = True

    rpc_provider = get_dependency(container, RpcProvider, name='task_a')

    with patch.object(
        rpc_provider,
        'rpc_consumer',
        wraps=rpc_provider.rpc_consumer,
    ) as wrapped_consumer:
        waiter = eventlet.spawn(wait_for_result)
        with wait_for_call(1, wrapped_consumer.requeue_message):
            pass  # wait until at least one message has been requeued
        assert not waiter.dead  # caller is still blocked, not errored

    # clear the flag; the requeued message should now be handled
    container._being_killed = False
    assert waiter.wait() == 'result_a'  # now completed
def test_dependency_call_lifecycle_errors(container_factory, rabbit_config,
                                          method_name):
    """An error raised from a dependency lifecycle method (parametrized via
    `method_name`) bubbles up to container.wait().
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    dependency = get_dependency(container, EventDispatcher)
    with patch.object(dependency, method_name, autospec=True) as method:
        err = "error in {}".format(method_name)
        method.side_effect = Exception(err)

        # use a standalone rpc proxy to call exampleservice.task()
        with RpcProxy("exampleservice", rabbit_config) as proxy:
            # proxy.task() will hang forever because it generates an error
            # in the remote container (so never receives a response).
            # generate and then swallow a timeout as soon as the thread
            # yields
            try:
                with eventlet.Timeout(0):
                    proxy.task()
            except eventlet.Timeout:
                pass

        # verify that the error bubbles up to container.wait()
        with eventlet.Timeout(1):
            with pytest.raises(Exception) as exc_info:
                container.wait()
            # compare via str() rather than the py2-only `.message` attribute
            assert str(exc_info.value) == err
def test_runner_catches_container_errors(runner_factory, rabbit_config):
    """An exception raised inside a hosted container surfaces from
    runner.wait().
    """
    runner = runner_factory(rabbit_config, ExampleService)
    runner.start()

    container = get_container(runner, ExampleService)

    rpc_consumer = get_dependency(container, RpcConsumer)
    with patch.object(rpc_consumer, 'handle_result',
                      autospec=True) as handle_result:
        exception = Exception("error")
        handle_result.side_effect = exception

        # use a standalone rpc proxy to call exampleservice.task()
        with RpcProxy("exampleservice", rabbit_config) as proxy:
            # proxy.task() will hang forever because it generates an error
            # in the remote container (so never receives a response).
            # generate and then swallow a timeout as soon as the thread
            # yields
            try:
                with eventlet.Timeout(0):
                    proxy.task()
            except eventlet.Timeout:
                pass

        # verify that the error bubbles up to runner.wait()
        with pytest.raises(Exception) as exc_info:
            runner.wait()
        assert exc_info.value == exception
def test_dependency_call_lifecycle_errors(
        container_factory, rabbit_config, method_name):
    """An error raised from a dependency lifecycle method (parametrized via
    `method_name`) bubbles up to container.wait().
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    dependency = get_dependency(container, EventDispatcher)
    with patch.object(dependency, method_name, autospec=True) as method:
        err = "error in {}".format(method_name)
        method.side_effect = Exception(err)

        # use a standalone rpc proxy to call exampleservice.task()
        with RpcProxy("exampleservice", rabbit_config) as proxy:
            # proxy.task() will hang forever because it generates an error
            # in the remote container (so never receives a response).
            # generate and then swallow a timeout as soon as the thread
            # yields
            try:
                with eventlet.Timeout(0):
                    proxy.task()
            except eventlet.Timeout:
                pass

        # verify that the error bubbles up to container.wait()
        with eventlet.Timeout(1):
            with pytest.raises(Exception) as exc_info:
                container.wait()
            # compare via str() rather than the py2-only `.message` attribute
            assert str(exc_info.value) == err
def test_rpc_container_being_killed_retries(
        container_factory, rabbit_config, rabbit_manager):
    """While a container is being killed, incoming rpc messages are requeued
    rather than processed; they complete once the kill flag clears.
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    def wait_for_result():
        with RpcProxy("exampleservice", rabbit_config) as proxy:
            return proxy.task_a()

    # simulate a container mid-kill
    container._being_killed = True

    rpc_provider = get_dependency(container, RpcProvider, name='task_a')

    with patch.object(
        rpc_provider,
        'rpc_consumer',
        wraps=rpc_provider.rpc_consumer,
    ) as wrapped_consumer:
        waiter = eventlet.spawn(wait_for_result)
        with wait_for_call(1, wrapped_consumer.requeue_message):
            pass  # wait until at least one message has been requeued
        assert not waiter.dead  # caller is still blocked, not errored

    # clear the flag; the requeued message should now be handled
    container._being_killed = False
    assert waiter.wait() == 'result_a'  # now completed
def test_runner_catches_container_errors(runner_factory, rabbit_config):
    """An exception raised inside a hosted container surfaces from
    runner.wait().
    """
    runner = runner_factory(rabbit_config, ExampleService)
    runner.start()

    container = get_container(runner, ExampleService)

    rpc_consumer = get_dependency(container, RpcConsumer)
    with patch.object(
            rpc_consumer, 'handle_result', autospec=True) as handle_result:
        exception = Exception("error")
        handle_result.side_effect = exception

        # use a standalone rpc proxy to call exampleservice.task()
        with RpcProxy("exampleservice", rabbit_config) as proxy:
            # proxy.task() will hang forever because it generates an error
            # in the remote container (so never receives a response).
            # generate and then swallow a timeout as soon as the thread
            # yields
            try:
                with eventlet.Timeout(0):
                    proxy.task()
            except eventlet.Timeout:
                pass

        # verify that the error bubbles up to runner.wait()
        with pytest.raises(Exception) as exc_info:
            runner.wait()
        assert exc_info.value == exception
def test_kill_container_with_active_workers(container_factory):
    """Killing a container with an in-flight worker logs a warning naming the
    killed thread.
    """
    waiting = Event()
    wait_forever = Event()

    class Service(object):
        name = 'kill-with-active-workers'

        @foobar
        def spam(self):
            waiting.send(None)
            wait_forever.wait()

    container = container_factory(Service, {})
    dep = get_dependency(container, EntrypointProvider)

    # start the first worker, which should wait for spam_continue
    container.spawn_worker(dep, (), {})
    waiting.wait()

    with patch('nameko.containers._log') as logger:
        container.kill()

    calls = logger.warning.call_args_list
    assert call(
        'killing active thread for %s', 'kill-with-active-workers.spam'
    ) in calls
def test_rpc_headers(container_factory, rabbit_config):
    """Worker context data is propagated as prefixed amqp headers; keys the
    worker context doesn't recognise are dropped.
    """
    container = container_factory(ExampleService, rabbit_config)

    context_data = {'language': 'en', 'bogus_header': '123456789'}

    headers = {}
    rpc_consumer = get_dependency(container, RpcConsumer)
    # keep a reference to the real handler so the patched one can delegate
    handle_message = rpc_consumer.handle_message

    with patch.object(rpc_consumer, 'handle_message',
                      autospec=True) as patched_handler:

        def side_effect(body, message):
            headers.update(message.headers)  # extract message headers
            return handle_message(body, message)

        patched_handler.side_effect = side_effect
        container.start()

        # use a standalone rpc proxy to call exampleservice.say_hello()
        with RpcProxy("exampleservice", rabbit_config, context_data) as proxy:
            proxy.say_hello()

    # bogus_header dropped
    assert headers == {
        'nameko.language': 'en',
        'nameko.call_id_stack': ['standalone_rpc_proxy.call.0'],
    }
def test_get_builtin_providers(provider_name, context_key, container):
    """Built-in context-data providers (parametrized) inject the value stored
    under their context key.
    """
    provider = get_dependency(
        container, ContextDataProvider, name=provider_name)
    # build a worker context carrying the expected key
    worker_ctx = WorkerContext(
        container, "service", provider, data={context_key: 'value'})

    assert provider.acquire_injection(worker_ctx) == "value"
def test_get_unset_value(container):
    """A context-data provider injects None when its key is absent from the
    worker context data.
    """
    provider = get_dependency(
        container, ContextDataProvider, name="custom_value")
    # empty context data -- nothing for the provider to find
    worker_ctx = WorkerContext(
        container, "service", provider, data={})

    assert provider.acquire_injection(worker_ctx) is None
def test_get_custom_context_value(container):
    """A context-data provider injects the value stored under its custom
    context key.
    """
    provider = get_dependency(
        container, ContextDataProvider, name="custom_value")
    # context data carrying the provider's custom key
    worker_ctx = WorkerContext(
        container, "service", provider, data={CUSTOM_CONTEXT_KEY: "hello"})

    assert provider.acquire_injection(worker_ctx) == "hello"
def test_rpc_custom_headers(container_factory, rabbit_config):
    """A custom worker context enables extra context keys to travel as amqp
    headers; unknown keys are still dropped.
    """
    container = container_factory(ExampleService, rabbit_config)

    context_data = {
        'language': 'en',
        'bogus_header': '123456789',
        'custom_header': 'specialvalue',
    }

    headers = {}
    rpc_consumer = get_dependency(container, RpcConsumer)
    # keep a reference to the real handler so the patched one can delegate
    handle_message = rpc_consumer.handle_message

    with patch.object(
            rpc_consumer, 'handle_message', autospec=True) as patched_handler:

        def side_effect(body, message):
            headers.update(message.headers)  # extract message headers
            return handle_message(body, message)

        patched_handler.side_effect = side_effect
        container.start()

        # use a standalone rpc proxy to call exampleservice.say_hello(),
        # with a worker context that enables "custom_header"
        with RpcProxy("exampleservice", rabbit_config, context_data,
                      CustomWorkerContext) as proxy:
            proxy.say_hello()

    # bogus_header dropped, custom_header present
    assert headers == {
        'nameko.language': 'en',
        'nameko.custom_header': 'specialvalue',
        'nameko.call_id_stack': ['standalone_rpc_proxy.call.0']
    }
def test_stops_entrypoints_before_injections(container):
    """On container.stop(), every entrypoint dependency is stopped before the
    injection dependency (checked via recorded call ids).
    """
    container.stop()

    dependencies = container.dependencies
    spam_dep = get_dependency(container, InjectionProvider)

    # a lower call id means the stop happened earlier
    for entrypoint_dep in dependencies.entrypoints:
        assert entrypoint_dep.call_ids[0] < spam_dep.call_ids[0]
def test_rpc_consumer_sharing(container_factory, rabbit_config,
                              rabbit_manager):
    """ Verify that the RpcConsumer unregisters from the queueconsumer when
    the first provider unregisters itself. Otherwise it keeps consuming
    messages for the unregistered provider, raising MethodNotFound.
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    # capture the original stop methods so the patched versions can delegate
    task_a = get_dependency(container, RpcProvider, name="task_a")
    task_a_stop = task_a.stop

    task_b = get_dependency(container, RpcProvider, name="task_b")
    task_b_stop = task_b.stop

    task_a_stopped = Event()

    def patched_task_a_stop():
        task_a_stop()  # stop immediately
        task_a_stopped.send(True)

    def patched_task_b_stop():
        eventlet.sleep(2)  # stop after 2 seconds
        task_b_stop()

    with patch.object(task_b, 'stop', patched_task_b_stop), \
            patch.object(task_a, 'stop', patched_task_a_stop):

        # stop the container and wait for task_a to stop
        # task_b will still be in the process of stopping
        eventlet.spawn(container.stop)
        task_a_stopped.wait()

        # try to call task_a.
        # should timeout, rather than raising MethodNotFound
        with RpcProxy("exampleservice", rabbit_config) as proxy:
            with pytest.raises(eventlet.Timeout):
                with eventlet.Timeout(1):
                    proxy.task_a()

    # kill the container so we don't have to wait for task_b to stop
    container.kill()
def test_handle_killed_worker(container, logger):
    """A worker greenthread killed from outside produces a warning but does
    not kill the container itself.
    """
    dep = get_dependency(container, EntrypointProvider)
    container.spawn_worker(dep, ['sleep'], {})

    # exactly one worker thread should be active
    assert len(container._active_threads) == 1
    (worker_gt,) = container._active_threads

    worker_gt.kill()
    assert logger.warning.call_args == call(
        "%s thread killed by container", container)

    assert not container._died.ready()  # container continues running
def entrypoint_hook(container, name, context_data=None):
    """ Yield a function providing an entrypoint into a hosted service.

    The yielded function may be called as if it were the bare method defined
    in the service class. Intended to be used as an integration testing
    utility.

    **Usage**

    To verify that ServiceX and ServiceY are compatible, make an integration
    test that checks their interaction:

    .. literalinclude:: examples/testing/integration_test.py
    """
    provider = get_dependency(container, EntrypointProvider, name=name)
    if provider is None:
        raise DependencyNotFound("No entrypoint called '{}' found "
                                 "on container {}.".format(name, container))

    def hook(*args, **kwargs):
        result = event.Event()

        def handle_result(worker_ctx, res=None, exc_info=None):
            result.send(res, exc_info)
            return res, exc_info

        container.spawn_worker(provider, args, kwargs,
                               context_data=context_data,
                               handle_result=handle_result)

        # If the container errors (e.g. due to a bad provider), handle_result
        # is never called and we hang. To mitigate, we spawn a greenlet
        # waiting for the container, and if that throws we send the exception
        # back as our result
        def catch_container_errors(container_gt):
            try:
                container_gt.wait()
            except Exception as exc:
                result.send_exception(exc)

        waiter_gt = eventlet.spawn(container.wait)
        waiter_gt.link(catch_container_errors)

        return result.wait()

    yield hook
def test_get_dependency(rabbit_config):
    """get_dependency finds consumers by type, and providers by type plus
    matching attribute filters.
    """
    from nameko.messaging import QueueConsumer
    from nameko.rpc import rpc, RpcProvider, RpcConsumer
    from nameko.containers import ServiceContainer, WorkerContext

    class Service(object):
        @rpc
        def foo(self):
            pass

        @rpc
        def bar(self):
            pass

    container = ServiceContainer(Service, WorkerContext, rabbit_config)

    # shared consumers: looked up by type only
    rpc_consumer = get_dependency(container, RpcConsumer)
    queue_consumer = get_dependency(container, QueueConsumer)
    # per-method providers: disambiguated by name
    foo_rpc = get_dependency(container, RpcProvider, name="foo")
    bar_rpc = get_dependency(container, RpcProvider, name="bar")

    all_deps = container.dependencies
    assert all_deps == set([rpc_consumer, queue_consumer, foo_rpc, bar_rpc])
def test_get_dependency(rabbit_config):
    """get_dependency finds consumers by type, and providers by type plus
    matching attribute filters.
    """
    from nameko.messaging import QueueConsumer
    from nameko.rpc import RpcProvider, RpcConsumer
    from nameko.containers import ServiceContainer, WorkerContext

    class Service(object):
        @rpc
        def foo(self):
            pass

        @rpc
        def bar(self):
            pass

    container = ServiceContainer(Service, WorkerContext, rabbit_config)

    # shared consumers: looked up by type only
    rpc_consumer = get_dependency(container, RpcConsumer)
    queue_consumer = get_dependency(container, QueueConsumer)
    # per-method providers: disambiguated by name
    foo_rpc = get_dependency(container, RpcProvider, name="foo")
    bar_rpc = get_dependency(container, RpcProvider, name="bar")

    all_deps = container.dependencies
    assert all_deps == set([rpc_consumer, queue_consumer, foo_rpc, bar_rpc])
def test_expected_exceptions_integration(container_factory, rabbit_config):
    """The WorkerErrorLogger classifies raised exceptions as expected or
    unexpected according to the entrypoint's declaration.
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    worker_logger = get_dependency(container, WorkerErrorLogger)

    # entrypoint that raises its declared (expected) exception
    with entrypoint_hook(container, 'broken') as broken:
        with pytest.raises(ExampleError):
            broken()

    # entrypoint that raises something it did not declare
    with entrypoint_hook(container, 'very_broken') as very_broken:
        with pytest.raises(AttributeError):
            very_broken()

    wait_for_worker_idle(container)  # wait for worker lifecycle to complete

    assert worker_logger.expected == {'broken': ExampleError}
    assert worker_logger.unexpected == {'very_broken': AttributeError}
def test_rpc_consumer_cannot_exit_with_providers(container_factory,
                                                 rabbit_config):
    """container.stop() blocks while an rpc provider refuses to stop; kill()
    is the escape hatch.
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    task_a = get_dependency(container, RpcProvider, name="task_a")

    def never_stops():
        # a provider.stop() that yields forever, blocking graceful shutdown
        while True:
            eventlet.sleep()

    with patch.object(task_a, 'stop', never_stops):
        with pytest.raises(eventlet.Timeout):
            with eventlet.Timeout(1):
                container.stop()

    # kill off task_a's misbehaving rpc provider
    container.kill()
def test_rpc_consumer_cannot_exit_with_providers(
        container_factory, rabbit_config, rabbit_manager):
    """container.stop() blocks while an rpc provider refuses to stop; kill()
    is the escape hatch.
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    task_a = get_dependency(container, RpcProvider, name="task_a")

    def never_stops():
        # a provider.stop() that yields forever, blocking graceful shutdown
        while True:
            eventlet.sleep()

    with patch.object(task_a, 'stop', never_stops):
        with pytest.raises(eventlet.Timeout):
            with eventlet.Timeout(1):
                container.stop()

    # kill off task_a's misbehaving rpc provider
    container.kill()
def test_kill_bad_dependency(container):
    """ Verify that an exception from a badly-behaved dependency.kill()
    doesn't stop the container's kill process.
    """
    dep = get_dependency(container, InjectionProvider)
    with patch.object(dep, 'kill') as dep_kill:
        dep_kill.side_effect = Exception('dependency error')

        container.start()

        # manufacture an exc_info to kill with.
        # capture it *inside* the handler: on py3, sys.exc_info() returns
        # (None, None, None) once the except block has exited. also narrow
        # the original bare `except:` to `except Exception`.
        try:
            raise Exception('container error')
        except Exception:
            exc_info = sys.exc_info()

        container.kill(exc_info)

    with pytest.raises(Exception) as exc_info:
        container.wait()
    # compare via str() rather than the py2-only `.message` attribute
    assert str(exc_info.value) == "container error"
def test_graceful_stop_on_one_container_error(runner_factory, rabbit_config):
    """When one hosted container dies, the runner gracefully stops the other
    containers before re-raising the error.
    """
    runner = runner_factory(rabbit_config, ExampleService, SecondService)
    runner.start()

    container = get_container(runner, ExampleService)
    second_container = get_container(runner, SecondService)
    # wrap stop so we can assert it was invoked while preserving behaviour
    original_stop = second_container.stop

    with patch.object(second_container, 'stop', autospec=True,
                      wraps=original_stop) as stop:
        rpc_consumer = get_dependency(container, RpcConsumer)
        with patch.object(rpc_consumer, 'handle_result',
                          autospec=True) as handle_result:
            exception = Exception("error")
            handle_result.side_effect = exception

            # use a standalone rpc proxy to call exampleservice.task()
            with RpcProxy("exampleservice", rabbit_config) as proxy:
                # proxy.task() will hang forever because it generates an
                # error in the remote container (so never receives a
                # response). generate and then swallow a timeout as soon as
                # the thread yields
                try:
                    with eventlet.Timeout(0):
                        proxy.task()
                except eventlet.Timeout:
                    pass

            # verify that the error bubbles up to runner.wait()
            with pytest.raises(Exception) as exc_info:
                runner.wait()
            assert exc_info.value == exception

            # Check that the second service was stopped due to the first
            # service being killed
            stop.assert_called_once_with()
def test_wait_for_worker_idle(container_factory, rabbit_config):
    """wait_for_worker_idle returns immediately when no workers run, blocks
    while one is active, and raises on timeout.
    """
    event = Event()

    class Service(object):
        @rpc
        def wait_for_event(self):
            event.wait()

    container = container_factory(Service, rabbit_config)
    container.start()

    max_workers = DEFAULT_MAX_WORKERS

    # verify nothing running
    assert container._worker_pool.free() == max_workers
    with eventlet.Timeout(1):
        wait_for_worker_idle(container)

    # spawn a worker
    wait_for_event = get_dependency(container, rpc.provider_cls)
    container.spawn_worker(wait_for_event, [], {})

    # verify that wait_for_worker_idle does not return while worker active
    assert container._worker_pool.free() == max_workers - 1
    gt = eventlet.spawn(wait_for_worker_idle, container)
    assert not gt.dead  # still waiting

    # verify that wait_for_worker_idle raises when it times out
    with pytest.raises(eventlet.Timeout):
        wait_for_worker_idle(container, timeout=0)

    # complete the worker, verify previous wait_for_worker_idle completes
    event.send()
    with eventlet.Timeout(1):
        gt.wait()
    assert container._worker_pool.free() == max_workers
def test_container_doesnt_exhaust_max_workers(container):
    """With max_workers=1, a second spawn_worker call blocks until the first
    worker has completed.
    """
    spam_called = Event()
    spam_continue = Event()

    class Service(object):
        name = 'max-workers'

        @foobar
        def spam(self, a):
            spam_called.send(a)
            spam_continue.wait()

    container = ServiceContainer(service_cls=Service,
                                 worker_ctx_cls=WorkerContext,
                                 config={MAX_WORKERS_CONFIG_KEY: 1})

    dep = get_dependency(container, EntrypointProvider)

    # start the first worker, which should wait for spam_continue
    container.spawn_worker(dep, ['ham'], {})

    # start the next worker in a separate thread,
    # because it should block until the first one completed
    gt = spawn(container.spawn_worker, dep, ['eggs'], {})

    with Timeout(1):
        assert spam_called.wait() == 'ham'
        # if the container had spawned the second worker, we would see
        # an error indicating that spam_called was fired twice, and the
        # greenthread would now be dead.
        assert not gt.dead
        # reset the calls and allow the waiting worker to complete.
        spam_called.reset()
        spam_continue.send(None)
        # the second worker should now run and complete
        assert spam_called.wait() == 'eggs'
        assert gt.dead
def test_handle_result_error(container_factory, rabbit_config):
    """An exception raised from RpcConsumer.handle_result bubbles up to
    container.wait().
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    rpc_consumer = get_dependency(container, RpcConsumer)

    with patch.object(rpc_consumer, 'handle_result',
                      autospec=True) as handle_result:
        err = "error in handle_result"
        handle_result.side_effect = Exception(err)

        # use a standalone rpc proxy to call exampleservice.task()
        with RpcProxy("exampleservice", rabbit_config) as proxy:
            # proxy.task() will never return, so give up almost immediately
            try:
                with eventlet.Timeout(0):
                    proxy.task()
            except eventlet.Timeout:
                pass

        with eventlet.Timeout(1):
            with pytest.raises(Exception) as exc_info:
                container.wait()
            # compare via str() rather than the py2-only `.message` attribute
            assert str(exc_info.value) == err
def test_graceful_stop_on_one_container_error(runner_factory, rabbit_config):
    """When one hosted container dies, the runner gracefully stops the other
    containers before re-raising the error.
    """
    runner = runner_factory(rabbit_config, ExampleService, SecondService)
    runner.start()

    container = get_container(runner, ExampleService)
    second_container = get_container(runner, SecondService)
    # wrap stop so we can assert it was invoked while preserving behaviour
    original_stop = second_container.stop

    with patch.object(second_container, 'stop', autospec=True,
                      wraps=original_stop) as stop:
        rpc_consumer = get_dependency(container, RpcConsumer)
        with patch.object(
                rpc_consumer, 'handle_result',
                autospec=True) as handle_result:
            exception = Exception("error")
            handle_result.side_effect = exception

            # use a standalone rpc proxy to call exampleservice.task()
            with RpcProxy("exampleservice", rabbit_config) as proxy:
                # proxy.task() will hang forever because it generates an
                # error in the remote container (so never receives a
                # response). generate and then swallow a timeout as soon as
                # the thread yields
                try:
                    with eventlet.Timeout(0):
                        proxy.task()
                except eventlet.Timeout:
                    pass

            # verify that the error bubbles up to runner.wait()
            with pytest.raises(Exception) as exc_info:
                runner.wait()
            assert exc_info.value == exception

            # Check that the second service was stopped due to the first
            # service being killed
            stop.assert_called_once_with()
def test_handle_result_error(container_factory, rabbit_config):
    """An exception raised from RpcConsumer.handle_result bubbles up to
    container.wait().
    """
    container = container_factory(ExampleService, rabbit_config)
    container.start()

    rpc_consumer = get_dependency(container, RpcConsumer)

    with patch.object(
            rpc_consumer, 'handle_result', autospec=True) as handle_result:
        err = "error in handle_result"
        handle_result.side_effect = Exception(err)

        # use a standalone rpc proxy to call exampleservice.task()
        with RpcProxy("exampleservice", rabbit_config) as proxy:
            # proxy.task() will never return, so give up almost immediately
            try:
                with eventlet.Timeout(0):
                    proxy.task()
            except eventlet.Timeout:
                pass

        with eventlet.Timeout(1):
            with pytest.raises(Exception) as exc_info:
                container.wait()
            # compare via str() rather than the py2-only `.message` attribute
            assert str(exc_info.value) == err