Example #1
def test_entrypoints_lifecycle(clients, container_factory, config):
    class Service:

        name = 'sample'

        @rtm.handle_event
        def handle_event(self, event):
            pass

        @rtm.handle_message
        def handle_message(self, event, message):
            pass

    container = container_factory(Service, config)

    event_handler = get_extension(container, rtm.RTMEventHandlerEntrypoint)
    message_handler = get_extension(container, rtm.RTMMessageHandlerEntrypoint)

    container.start()
    assert call(event_handler) in clients.register_provider.mock_calls
    assert call(message_handler) in clients.register_provider.mock_calls

    container.stop()
    assert call(event_handler) in clients.unregister_provider.mock_calls
    assert call(message_handler) in clients.unregister_provider.mock_calls
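Every example on this page uses get_extension from nameko.testing.utils. For reference, here is a minimal sketch of what such a helper does, assuming it simply scans container.extensions for the first matching instance (an illustration, not nameko's actual source):

def get_extension(container, extension_cls, **match_attrs):
    """Return the first extension bound to ``container`` that is an instance
    of ``extension_cls`` and whose attributes match ``match_attrs``.
    """
    for extension in container.extensions:
        if isinstance(extension, extension_cls):
            # only accept the extension if every keyword filter matches
            if all(getattr(extension, key, None) == value
                   for key, value in match_attrs.items()):
                return extension
    return None  # nothing matched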
Example #2
def entrypoint_waiter(container, entrypoint, timeout=30):
    """Helper to wait for entrypoints to fire (and complete)

    Usage::

        container = ServiceContainer(ExampleService, config)
        with entrypoint_waiter(container, 'example_handler'):
            ...  # e.g. rpc call that will result in handler being called
    """

    waiter = EntrypointWaiter(entrypoint)
    if not get_extension(container, Entrypoint, method_name=entrypoint):
        raise RuntimeError("{} has no entrypoint `{}`".format(
            container.service_name, entrypoint))
    if get_extension(container, EntrypointWaiter, entrypoint=entrypoint):
        raise RuntimeError(
            "Waiter already registered for {}".format(entrypoint))

    # can't mess with dependencies while container is running
    wait_for_worker_idle(container)
    container.dependencies.add(waiter)

    try:
        yield
        with eventlet.Timeout(timeout):
            waiter.wait()
    finally:
        wait_for_worker_idle(container)
        container.dependencies.remove(waiter)
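Because the helper above yields, it only supports the documented ``with`` usage once wrapped with contextlib.contextmanager, as it is in nameko's own testing helpers. A hedged usage sketch, assuming a hypothetical ExampleService whose handle_event method is a nameko event_handler entrypoint:

from nameko.standalone.events import event_dispatcher

def test_handle_event(container_factory, rabbit_config):
    container = container_factory(ExampleService, rabbit_config)  # hypothetical service
    container.start()

    dispatch = event_dispatcher(rabbit_config)
    with entrypoint_waiter(container, 'handle_event'):
        # fire the event; the block only exits once the entrypoint has completed
        dispatch('src_service', 'example_event', {'payload': 1})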
Example #3
def test_custom_sharing_key(container_factory):

    class CustomSharedExtension(CallCollectorMixin, SharedExtension):
        def __init__(self, arg):
            self.arg = arg

        @property
        def sharing_key(self):
            return (type(self), self.arg)

    class SimpleDependencyProvider(CallCollectorMixin, DependencyProvider):
        ext_a = CustomSharedExtension("a")
        ext_b = CustomSharedExtension("b")

    class Service(object):
        name = "service"
        dep_1 = SimpleDependencyProvider()
        dep_2 = SimpleDependencyProvider()

    container = container_factory(Service, {})
    container.start()

    assert len(container.extensions) == 4
    calls = CallCollectorMixin.calls
    assert len(calls[SimpleDependencyProvider]['start']) == 2
    assert len(calls[CustomSharedExtension]['start']) == 2

    dep_1 = get_extension(
        container, SimpleDependencyProvider, attr_name="dep_1")
    dep_2 = get_extension(
        container, SimpleDependencyProvider, attr_name="dep_2")

    assert dep_1.ext_a is not dep_2.ext_b
    assert dep_1.ext_a is dep_2.ext_a
    assert dep_1.ext_b is dep_2.ext_b
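The four-extension count above works because the overridden sharing_key deduplicates CustomSharedExtension per (class, argument) pair: two provider instances plus one shared extension for "a" and one for "b". For contrast, a sketch of the default behaviour this test overrides (an assumption mirroring SharedExtension, not copied from it):

class DefaultSharedExtension(SharedExtension):
    @property
    def sharing_key(self):
        # default: one shared instance per extension class, regardless of
        # constructor arguments, so ext_a and ext_b would collapse into one
        return type(self)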
Example #4
def test_get_extension(rabbit_config):

    from nameko.messaging import QueueConsumer
    from nameko.rpc import Rpc, RpcConsumer
    from nameko.containers import ServiceContainer

    class Service(object):
        name = "service"

        @rpc
        def foo(self):
            pass

        @rpc
        def bar(self):
            pass

    container = ServiceContainer(Service, rabbit_config)

    rpc_consumer = get_extension(container, RpcConsumer)
    queue_consumer = get_extension(container, QueueConsumer)
    foo_rpc = get_extension(container, Rpc, method_name="foo")
    bar_rpc = get_extension(container, Rpc, method_name="bar")

    extensions = container.extensions
    assert extensions == set([rpc_consumer, queue_consumer, foo_rpc, bar_rpc])
Example #5
def entrypoint_waiter(container, entrypoint, timeout=30):
    """Helper to wait for entrypoints to fire (and complete)

    Usage::

        container = ServiceContainer(ExampleService, config)
        with entrypoint_waiter(container, 'example_handler'):
            ...  # e.g. rpc call that will result in handler being called
    """

    waiter = EntrypointWaiter(entrypoint)
    if not get_extension(container, Entrypoint, method_name=entrypoint):
        raise RuntimeError("{} has no entrypoint `{}`".format(
            container.service_name, entrypoint))
    if get_extension(container, EntrypointWaiter, entrypoint=entrypoint):
        raise RuntimeError("Waiter already registered for {}".format(
            entrypoint))

    # can't mess with dependencies while container is running
    wait_for_worker_idle(container)
    container.dependencies.add(waiter)

    try:
        yield
        exc = waiter.Timeout(
            "Entrypoint {}.{} failed to complete within {} seconds".format(
                container.service_name, entrypoint, timeout)
        )
        with eventlet.Timeout(timeout, exception=exc):
            waiter.wait()
    finally:
        wait_for_worker_idle(container)
        container.dependencies.remove(waiter)
Example #6
def test_worker_life_cycle(container):

    spam_dep = get_extension(container, DependencyProvider)
    ham_dep = get_extension(container, Entrypoint, method_name="ham")
    egg_dep = get_extension(container, Entrypoint, method_name="egg")

    handle_result = Mock()
    handle_result.side_effect = (
        lambda worker_ctx, res, exc_info: (res, exc_info))

    ham_worker_ctx = container.spawn_worker(
        ham_dep, [], {}, handle_result=handle_result)
    container._worker_pool.waitall()

    egg_worker_ctx = container.spawn_worker(
        egg_dep, [], {}, handle_result=handle_result)
    container._worker_pool.waitall()

    assert spam_dep.calls == [
        ('get_dependency', ham_worker_ctx),
        ('worker_setup', ham_worker_ctx),
        ('worker_result', ham_worker_ctx, ('ham', None)),
        ('worker_teardown', ham_worker_ctx),
        ('get_dependency', egg_worker_ctx),
        ('worker_setup', egg_worker_ctx),
        ('worker_result', egg_worker_ctx, (None, (Exception, egg_error, ANY))),
        ('worker_teardown', egg_worker_ctx),
    ]

    assert handle_result.call_args_list == [
        call(ham_worker_ctx, "ham", None),
        call(egg_worker_ctx, None, (Exception, egg_error, ANY))
    ]
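The spam_dep.calls assertions above imply a DependencyProvider that records each lifecycle hook as it fires. A rough sketch of such a recorder, assuming only the standard DependencyProvider hooks (get_dependency, worker_setup, worker_result, worker_teardown):

from nameko.extensions import DependencyProvider

class CallRecordingProvider(DependencyProvider):
    """Hypothetical provider that records the order of its lifecycle calls."""

    def __init__(self):
        self.calls = []

    def get_dependency(self, worker_ctx):
        self.calls.append(('get_dependency', worker_ctx))
        return 'spam'  # the object injected into the running worker

    def worker_setup(self, worker_ctx):
        self.calls.append(('worker_setup', worker_ctx))

    def worker_result(self, worker_ctx, result=None, exc_info=None):
        self.calls.append(('worker_result', worker_ctx, (result, exc_info)))

    def worker_teardown(self, worker_ctx):
        self.calls.append(('worker_teardown', worker_ctx))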
Example #7
def test_greenthread_raise_in_kill(container_factory, rabbit_config, logger):
    class Service(object):
        name = "service"

        @rpc
        def echo(self, arg):
            return arg  # pragma: no cover

    container = container_factory(Service, rabbit_config)
    queue_consumer = get_extension(container, QueueConsumer)
    rpc_consumer = get_extension(container, RpcConsumer)

    # an error in rpc_consumer.handle_message will kill the queue_consumer's
    # greenthread. when the container suicides and kills the queue_consumer,
    # it should warn instead of re-raising the original exception
    exc = Exception('error handling message')
    with patch.object(rpc_consumer, 'handle_message') as handle_message:
        handle_message.side_effect = exc

        container.start()

        with ServiceRpcProxy('service', rabbit_config) as service_rpc:
            # spawn because `echo` will never respond
            eventlet.spawn(service_rpc.echo, "foo")

    # container will have died with the message handling error
    with pytest.raises(Exception) as exc_info:
        container.wait()
    assert str(exc_info.value) == "error handling message"

    # queueconsumer will have warned about the exc raised by its greenthread
    assert logger.warn.call_args_list == [
        call("QueueConsumer %s raised `%s` during kill", queue_consumer, exc)
    ]
Example #8
def test_greenthread_raise_in_kill(container_factory, rabbit_config, logger):
    class Service(object):
        name = "service"

        @rpc
        def echo(self, arg):
            return arg  # pragma: no cover

    container = container_factory(Service, rabbit_config)
    queue_consumer = get_extension(container, QueueConsumer)
    rpc_consumer = get_extension(container, RpcConsumer)

    # an error in rpc_consumer.handle_message will kill the queue_consumer's
    # greenthread. when the container suicides and kills the queue_consumer,
    # it should warn instead of re-raising the original exception
    exc = Exception("error handling message")
    with patch.object(rpc_consumer, "handle_message") as handle_message:
        handle_message.side_effect = exc

        container.start()

        with ServiceRpcProxy("service", rabbit_config) as service_rpc:
            # spawn because `echo` will never respond
            eventlet.spawn(service_rpc.echo, "foo")

    # container will have died with the message handling error
    with pytest.raises(Exception) as exc_info:
        container.wait()
    assert str(exc_info.value) == "error handling message"

    # queueconsumer will have warned about the exc raised by its greenthread
    assert logger.warn.call_args_list == [call("QueueConsumer %s raised `%s` during kill", queue_consumer, exc)]
Example #9
def test_expected_exceptions(rabbit_config):
    container = ServiceContainer(ExampleService, rabbit_config)

    broken = get_extension(container, Rpc, method_name="broken")
    assert broken.expected_exceptions == ExampleError

    very_broken = get_extension(container, Rpc, method_name="very_broken")
    assert very_broken.expected_exceptions == (KeyError, ValueError)
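The assertions above correspond to entrypoints declared with the expected_exceptions argument. A hedged sketch of the service such a test might exercise (the names are assumptions):

class ExampleError(Exception):
    pass


class ExampleService:
    name = "exampleservice"

    @rpc(expected_exceptions=ExampleError)
    def broken(self):
        raise ExampleError("boom")

    @rpc(expected_exceptions=(KeyError, ValueError))
    def very_broken(self):
        raise KeyError("boom")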
Example #10
def test_expected_exceptions(rabbit_config):
    container = ServiceContainer(ExampleService, rabbit_config)

    broken = get_extension(container, Rpc, method_name="broken")
    assert broken.expected_exceptions == ExampleError

    very_broken = get_extension(container, Rpc, method_name="very_broken")
    assert very_broken.expected_exceptions == (KeyError, ValueError)
Example #11
def test_dependency_uniqueness(container_factory):
    c1 = container_factory(Service, config={})
    c2 = container_factory(Service, config={})

    # dependencyprovider declarations are identical between containers
    assert c1.service_cls.dep == c2.service_cls.dep

    # dependencyprovider instances are different between containers
    dep1 = get_extension(c1, SimpleDependencyProvider)
    dep2 = get_extension(c2, SimpleDependencyProvider)
    assert dep1 != dep2
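The uniqueness tests here and below refer to module-level Service and SimpleDependencyProvider fixtures that are not part of the excerpt. One plausible shape for them, purely as an assumption to make the assertions readable:

from nameko.extensions import DependencyProvider, Extension

class SimpleExtension(Extension):
    pass


class SimpleDependencyProvider(DependencyProvider):
    ext = SimpleExtension()

    def get_dependency(self, worker_ctx):
        return 'dependency'


class Service:
    name = 'service'
    dep = SimpleDependencyProvider()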
Example #12
def test_dependency_uniqueness(container_factory):
    c1 = container_factory(Service, config={})
    c2 = container_factory(Service, config={})

    # dependencyprovider declarations are identical between containers
    assert c1.service_cls.dep == c2.service_cls.dep

    # dependencyprovider instances are different between containers
    dep1 = get_extension(c1, SimpleDependencyProvider)
    dep2 = get_extension(c2, SimpleDependencyProvider)
    assert dep1 != dep2
Example #13
def test_extension_uniqueness(container_factory):
    c1 = container_factory(Service, config={})
    c2 = container_factory(Service, config={})
    dep1 = get_extension(c1, SimpleDependencyProvider)
    dep2 = get_extension(c2, SimpleDependencyProvider)

    # extension declarations are identical between containers
    assert c1.service_cls.dep.ext == c2.service_cls.dep.ext

    # extension instances are different between dependencies
    assert dep1 != dep2
    assert dep1.ext != dep2.ext
Example #14
def test_extension_uniqueness(container_factory):
    c1 = container_factory(Service, config={})
    c2 = container_factory(Service, config={})
    dep1 = get_extension(c1, SimpleDependencyProvider)
    dep2 = get_extension(c2, SimpleDependencyProvider)

    # extension declarations are identical between containers
    assert c1.service_cls.dep.ext == c2.service_cls.dep.ext

    # extension instances are different between dependencies
    assert dep1 != dep2
    assert dep1.ext != dep2.ext
Example #15
def test_get_custom_context_value(container):
    dependency = get_extension(
        container, ContextDataProvider, attr_name="custom_value")
    worker_ctx = WorkerContext(
        container, "service", Mock(), data={CUSTOM_CONTEXT_KEY: "hello"})

    assert dependency.get_dependency(worker_ctx) == "hello"
Example #16
def test_container_doesnt_exhaust_max_workers(container):
    spam_called = Event()
    spam_continue = Event()

    class Service(object):
        name = 'max-workers'

        @foobar
        def spam(self, a):
            spam_called.send(a)
            spam_continue.wait()

    container = ServiceContainer(Service, config={MAX_WORKERS_CONFIG_KEY: 1})

    dep = get_extension(container, Entrypoint)

    # start the first worker, which should wait for spam_continue
    container.spawn_worker(dep, ['ham'], {})

    # start the next worker in a separate thread,
    # because it should block until the first one completes
    gt = spawn(container.spawn_worker, dep, ['eggs'], {})

    with Timeout(1):
        assert spam_called.wait() == 'ham'
        # if the container had spawned the second worker, we would see
        # an error indicating that spam_called was fired twice, and the
        # greenthread would now be dead.
        assert not gt.dead
        # reset the calls and allow the waiting worker to complete.
        spam_called.reset()
        spam_continue.send(None)
        # the second worker should now run and complete
        assert spam_called.wait() == 'eggs'
        assert gt.dead
Example #17
def test_get_unset_value(container):
    dependency = get_extension(container,
                               ContextDataProvider,
                               attr_name="custom_value")
    worker_ctx = WorkerContext(container, "service", Mock(), data={})

    assert dependency.get_dependency(worker_ctx) is None
Example #18
def test_stops_entrypoints_before_dependency_providers(container):
    container.stop()

    provider = get_extension(container, DependencyProvider)

    for entrypoint in container.entrypoints:
        assert entrypoint.call_ids[0] < provider.call_ids[0]
Example #19
        def make(
            service_name,
            proto_name=None,
            compression_algorithm="none",
            compression_level="high",
        ):
            if proto_name is None:
                proto_name = service_name

            stubs = load_stubs(proto_name)
            stub_cls = getattr(stubs, "{}Stub".format(service_name))

            class Service:
                name = "caller"

                example_grpc = GrpcProxy(
                    "//localhost:{}".format(grpc_port),
                    stub_cls,
                    compression_algorithm=compression_algorithm,
                    compression_level=compression_level,
                )

                @dummy
                def call(self):
                    pass

            container = container_factory(Service)
            container.start()

            grpc_proxy = get_extension(container, GrpcProxy)
            clients.append(grpc_proxy)
            return grpc_proxy.get_dependency(Mock(context_data={}))
Example #20
def test_get_builtin_dependencies(attr_name, context_key, container):
    dependency = get_extension(
        container, ContextDataProvider, attr_name=attr_name)
    worker_ctx = WorkerContext(
        container, "service", Mock(), data={context_key: 'value'})

    assert dependency.get_dependency(worker_ctx) == "value"
Example #21
def test_dependency_provider(container_factory):

    config = {DB_URIS_KEY: {"exampleservice:examplebase": "sqlite:///:memory:"}}

    container = container_factory(ExampleService, config)
    container.start()

    session_provider = get_extension(container, Session)

    # verify setup
    assert session_provider.db_uri == "sqlite:///:memory:"

    # verify get_dependency
    worker_ctx = Mock()  # don't need a real worker context
    session = session_provider.get_dependency(worker_ctx)
    assert isinstance(session, SqlalchemySession)
    assert session_provider.sessions[worker_ctx] is session

    # verify multiple workers
    worker_ctx_2 = Mock()
    session_2 = session_provider.get_dependency(worker_ctx_2)
    assert session_provider.sessions == WeakKeyDictionary({worker_ctx: session, worker_ctx_2: session_2})

    # verify weakref
    del worker_ctx_2
    assert session_provider.sessions == WeakKeyDictionary({worker_ctx: session})

    # verify teardown
    session.add(ExampleModel())
    assert session.new
    session_provider.worker_teardown(worker_ctx)
    assert worker_ctx not in session_provider.sessions
    assert not session.new  # session.close() rolls back new objects
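For orientation, a hedged sketch of a service that would use this Session provider; ExampleBase and ExampleModel are assumed to be the declarative base and a mapped model from the test module, and the entrypoint choice is illustrative only:

class ExampleService:
    name = "exampleservice"

    # assumption: ExampleBase is the declarative base named in the DB_URIS config key
    session = Session(ExampleBase)

    @dummy
    def save(self, data):
        # a worker-scoped SQLAlchemy session is injected for each call
        self.session.add(ExampleModel(data=data))
        self.session.commit()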
Example #22
def test_kill_container_with_active_workers(container_factory):
    waiting = Event()
    wait_forever = Event()

    class Service(object):
        name = 'kill-with-active-workers'

        @foobar
        def spam(self):
            waiting.send(None)
            wait_forever.wait()

    container = container_factory(Service, {})
    dep = get_extension(container, Entrypoint)

    # start the first worker, which will block on wait_forever
    container.spawn_worker(dep, (), {})

    waiting.wait()

    with patch('nameko.containers._log') as logger:
        container.kill()

    assert logger.warning.call_args_list == [
        call('killing %s active workers(s)', 1),
        call('killing active worker for %s', ANY)
    ]
Example #23
def test_get_builtin_dependencies(attr_name, context_key, container):
    dependency = get_extension(
        container, ContextDataProvider, attr_name=attr_name)
    worker_ctx = WorkerContext(
        container, "service", Mock(), data={context_key: 'value'})

    assert dependency.get_dependency(worker_ctx) == "value"
Example #24
def test_get_unset_value(container):
    dependency = get_extension(
        container, ContextDataProvider, attr_name="custom_value")
    worker_ctx = WorkerContext(
        container, "service", Mock(), data={})

    assert dependency.get_dependency(worker_ctx) is None
Example #25
def test_get_custom_context_value(container):
    dependency = get_extension(
        container, ContextDataProvider, attr_name="custom_value")
    worker_ctx = WorkerContext(
        container, "service", Mock(), data={CUSTOM_CONTEXT_KEY: "hello"})

    assert dependency.get_dependency(worker_ctx) == "hello"
Example #26
    def test_default(self, service_cls, container_factory, rabbit_config):

        container = container_factory(service_cls, rabbit_config)
        container.start()

        queue_consumer = get_extension(container, QueueConsumer)
        assert queue_consumer.connection.heartbeat == DEFAULT_HEARTBEAT
Example #27
    def test_upstream_blackhole(self, container, publish, toxiproxy):
        """ Verify we detect and recover from sockets losing data.

        This failure mode means that all data sent from the consumer to the
        rabbit broker is lost, but the socket remains open.

        Heartbeats sent from the consumer are not received by the broker. After
        two beats are missed the broker closes the connection, and subsequent
        reads from the socket raise a socket.error, so the connection is
        re-established.
        """
        queue_consumer = get_extension(container, QueueConsumer)

        def reset(args, kwargs, result, exc_info):
            toxiproxy.reset_timeout()
            return True

        with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
            toxiproxy.set_timeout(timeout=0)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
Example #28
    def test_downstream_blackhole(
        self, container, publish, toxiproxy
    ):  # pragma: no cover
        """ Verify we detect and recover from sockets losing data.

        This failure mode means that all data sent from the rabbit broker to
        the consumer is lost, but the socket remains open.

        Heartbeat acknowledgements from the broker are not received by the
        consumer. After two beats are missed the consumer raises a "too many
        heartbeats missed" error.

        Cancelling the consumer requests an acknowledgement from the broker,
        which is swallowed by the socket. There is no timeout when reading
        the acknowledgement so this hangs forever.

        See :meth:`kombu.messaging.Consumer.__exit__`
        """
        pytest.skip("skip until kombu supports recovery in this scenario")

        queue_consumer = get_extension(container, QueueConsumer)

        def reset(args, kwargs, result, exc_info):
            toxiproxy.reset_timeout()
            return True

        with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
            toxiproxy.set_timeout(stream="downstream", timeout=0)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
Example #29
    def test_default(self, service_cls, container_factory, rabbit_config):

        container = container_factory(service_cls, rabbit_config)
        container.start()

        queue_consumer = get_extension(container, QueueConsumer)
        assert queue_consumer.connection.heartbeat == DEFAULT_HEARTBEAT
Example #30
def test_rpc_headers(container_factory, rabbit_config):

    container = container_factory(ExampleService, rabbit_config)

    context_data = {
        'language': 'en',
        'otherheader': 'othervalue'
    }

    headers = {}
    rpc_consumer = get_extension(container, RpcConsumer)
    handle_message = rpc_consumer.handle_message

    with patch.object(
            rpc_consumer, 'handle_message', autospec=True) as patched_handler:
        def side_effect(body, message):
            headers.update(message.headers)  # extract message headers
            return handle_message(body, message)

        patched_handler.side_effect = side_effect
        container.start()

        # use a standalone rpc proxy to call exampleservice.say_hello()
        with ServiceRpcProxy(
            "exampleservice", rabbit_config, context_data
        ) as proxy:
            proxy.say_hello()

    # headers as per context data, plus call stack
    assert headers == {
        'nameko.language': 'en',
        'nameko.otherheader': 'othervalue',
        'nameko.call_id_stack': ['standalone_rpc_proxy.call.0'],
    }
Example #31
    def test_downstream_timeout(self, container, publish, toxiproxy):
        """ Verify we detect and recover from sockets timing out.

        This failure mode means that the socket between the rabbit broker and
        the consumer times out after `timeout` milliseconds and then closes.

        Attempting to read from the socket after it's closed raises a
        socket.error and the connection will be re-established. If `timeout`
        is longer than twice the heartbeat interval, the behaviour is the same
        as in `test_downstream_blackhole` below, except that the consumer
        cancel will eventually (`timeout` milliseconds) raise a socket.error,
        which is ignored, allowing the teardown to continue.

        See :meth:`kombu.messaging.Consumer.__exit__`
        """
        queue_consumer = get_extension(container, QueueConsumer)

        def reset(args, kwargs, result, exc_info):
            toxiproxy.reset_timeout()
            return True

        with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
            toxiproxy.set_timeout(stream="downstream", timeout=100)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
Example #32
    def test_upstream_blackhole(self, container, publish, toxiproxy):
        """ Verify we detect and recover from sockets losing data.

        This failure mode means that all data sent from the consumer to the
        rabbit broker is lost, but the socket remains open.

        Heartbeats sent from the consumer are not received by the broker. After
        two beats are missed the broker closes the connection, and subsequent
        reads from the socket raise a socket.error, so the connection is
        re-established.
        """
        queue_consumer = get_extension(container, QueueConsumer)

        def reset(args, kwargs, result, exc_info):
            toxiproxy.reset_timeout()
            return True

        with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
            toxiproxy.set_timeout(timeout=0)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
Example #33
def test_rpc_container_being_killed_retries(container_factory, rabbit_config):

    container = container_factory(ExampleService, rabbit_config)
    container.start()

    def wait_for_result():
        with ServiceRpcProxy("exampleservice", rabbit_config) as proxy:
            return proxy.task_a()

    container._being_killed = True

    rpc_provider = get_extension(container, Rpc, method_name='task_a')

    with patch.object(
            rpc_provider,
            'rpc_consumer',
            wraps=rpc_provider.rpc_consumer,
    ) as wrapped_consumer:
        waiter = eventlet.spawn(wait_for_result)
        with wait_for_call(1, wrapped_consumer.requeue_message):
            pass  # wait until at least one message has been requeued
        assert not waiter.dead

    container._being_killed = False
    assert waiter.wait() == 'result_a'  # now completed
Example #34
    def test_downstream_timeout(self, container, publish, toxiproxy):
        """ Verify we detect and recover from sockets timing out.

        This failure mode means that the socket between the rabbit broker and
        the consumer times out after `timeout` milliseconds and then closes.

        Attempting to read from the socket after it's closed raises a
        socket.error and the connection will be re-established. If `timeout`
        is longer than twice the heartbeat interval, the behaviour is the same
        as in `test_downstream_blackhole` below, except that the consumer
        cancel will eventually (`timeout` milliseconds) raise a socket.error,
        which is ignored, allowing the teardown to continue.

        See :meth:`kombu.messaging.Consumer.__exit__`
        """
        queue_consumer = get_extension(container, QueueConsumer)

        def reset(args, kwargs, result, exc_info):
            toxiproxy.reset_timeout()
            return True

        with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
            toxiproxy.set_timeout(stream="downstream", timeout=100)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
Example #35
    def test_downstream_blackhole(
        self, container, publish, toxiproxy
    ):  # pragma: no cover
        """ Verify we detect and recover from sockets losing data.

        This failure mode means that all data sent from the rabbit broker to
        the consumer is lost, but the socket remains open.

        Heartbeat acknowledgements from the broker are not received by the
        consumer. After two beats are missed the consumer raises a "too many
        heartbeats missed" error.

        Cancelling the consumer requests an acknowledgement from the broker,
        which is swallowed by the socket. There is no timeout when reading
        the acknowledgement so this hangs forever.

        See :meth:`kombu.messaging.Consumer.__exit__`
        """
        pytest.skip("skip until kombu supports recovery in this scenario")

        queue_consumer = get_extension(container, QueueConsumer)

        def reset(args, kwargs, result, exc_info):
            toxiproxy.reset_timeout()
            return True

        with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
            toxiproxy.set_timeout(stream="downstream", timeout=0)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
Example #36
def test_rpc_headers(container_factory, rabbit_config):

    container = container_factory(ExampleService, rabbit_config)

    context_data = {'language': 'en', 'otherheader': 'othervalue'}

    headers = {}
    rpc_consumer = get_extension(container, RpcConsumer)
    handle_message = rpc_consumer.handle_message

    with patch.object(rpc_consumer, 'handle_message',
                      autospec=True) as patched_handler:

        def side_effect(body, message):
            headers.update(message.headers)  # extract message headers
            return handle_message(body, message)

        patched_handler.side_effect = side_effect
        container.start()

        # use a standalone rpc proxy to call exampleservice.say_hello()
        with ServiceRpcProxy("exampleservice", rabbit_config,
                             context_data) as proxy:
            proxy.say_hello()

    # headers as per context data, plus call stack
    assert headers == {
        'nameko.language': 'en',
        'nameko.otherheader': 'othervalue',
        'nameko.call_id_stack': ['standalone_rpc_proxy.call.0'],
    }
Example #37
def test_graceful_stop_on_one_container_error(runner_factory, rabbit_config):

    runner = runner_factory(rabbit_config, ExampleService, SecondService)
    runner.start()

    container = get_container(runner, ExampleService)
    second_container = get_container(runner, SecondService)
    original_stop = second_container.stop
    with patch.object(second_container,
                      'stop',
                      autospec=True,
                      wraps=original_stop) as stop:
        rpc_consumer = get_extension(container, RpcConsumer)
        with patch.object(rpc_consumer, 'handle_result',
                          autospec=True) as handle_result:
            exception = Exception("error")
            handle_result.side_effect = exception

            # use a standalone rpc proxy to call exampleservice.task()
            with ServiceRpcProxy("exampleservice", rabbit_config) as proxy:
                # proxy.task() will hang forever because it generates an error
                # in the remote container (so never receives a response).
                proxy.task.call_async()

            # verify that the error bubbles up to runner.wait()
            with pytest.raises(Exception) as exc_info:
                runner.wait()
            assert exc_info.value == exception

            # Check that the second service was stopped due to the first
            # service being killed
            stop.assert_called_once_with()
Example #38
def test_graceful_stop_on_one_container_error(runner_factory, rabbit_config):

    runner = runner_factory(rabbit_config, ExampleService, SecondService)
    runner.start()

    container = get_container(runner, ExampleService)
    second_container = get_container(runner, SecondService)
    original_stop = second_container.stop
    with patch.object(second_container, 'stop', autospec=True,
                      wraps=original_stop) as stop:
        rpc_consumer = get_extension(container, RpcConsumer)
        with patch.object(
                rpc_consumer, 'handle_result', autospec=True) as handle_result:
            exception = Exception("error")
            handle_result.side_effect = exception

            # use a standalone rpc proxy to call exampleservice.task()
            with ServiceRpcProxy("exampleservice", rabbit_config) as proxy:
                # proxy.task() will hang forever because it generates an error
                # in the remote container (so never receives a response).
                proxy.task.call_async()

            # verify that the error bubbles up to runner.wait()
            with pytest.raises(Exception) as exc_info:
                runner.wait()
            assert exc_info.value == exception

            # Check that the second service was stopped due to the first
            # service being killed
            stop.assert_called_once_with()
Example #39
def test_rpc_container_being_killed_retries(
        container_factory, rabbit_config):

    container = container_factory(ExampleService, rabbit_config)
    container.start()

    def wait_for_result():
        with ServiceRpcProxy("exampleservice", rabbit_config) as proxy:
            return proxy.task_a()

    container._being_killed = True

    rpc_provider = get_extension(container, Rpc, method_name='task_a')

    with patch.object(
        rpc_provider,
        'rpc_consumer',
        wraps=rpc_provider.rpc_consumer,
    ) as wrapped_consumer:
        waiter = eventlet.spawn(wait_for_result)
        with wait_for_call(1, wrapped_consumer.requeue_message):
            pass  # wait until at least one message has been requeued
        assert not waiter.dead

    container._being_killed = False
    assert waiter.wait() == 'result_a'  # now completed
Example #40
def test_dependency_provider(container_factory, setup_db):
    db = setup_db

    class ExampleService(object):
        name = "exampleservice"

        rdbc = RDB(db)

        @dummy
        def write(self, value):
            result = r.table('test').insert({'data': value}).run(self.rdbc)
            return result

        @dummy
        def read(self, id):
            return r.table('test').get(id).run(self.rdbc)

    config = {
        RDB_KEY: {
            'exampleservice': {
                'RDB_HOST': RDB_HOST,
                'RDB_PORT': RDB_PORT,
            }
        }
    }

    container = container_factory(ExampleService, config)
    container.start()

    rdb_provider = get_extension(container, RDB, db=db)

    # verify setup
    assert rdb_provider.RDB_DB == db

    # verify get_dependency
    worker_ctx = Mock()  # don't need a real worker context
    rdb = rdb_provider.get_dependency(worker_ctx)
    assert rdb_provider.rdb_connections[worker_ctx] is rdb

    # verify multiple workers
    worker_ctx_2 = Mock()
    rdb_2 = rdb_provider.get_dependency(worker_ctx_2)
    assert rdb_provider.rdb_connections == WeakKeyDictionary({
        worker_ctx: rdb,
        worker_ctx_2: rdb_2
    })

    # verify weakref
    del worker_ctx_2
    assert rdb_provider.rdb_connections == WeakKeyDictionary({
        worker_ctx: rdb
    })

    # verify teardown
    # TODO(cad): Add extra testing here
    rdb_provider.worker_teardown(worker_ctx)

    assert not rdb.is_open()
    assert worker_ctx not in rdb_provider.rdb_connections
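A rough sketch of what an RDB provider matching the assertions above might look like; this is illustrative only, and the rethinkdb connection handling is an assumption rather than the project's actual implementation:

from weakref import WeakKeyDictionary

import rethinkdb as r
from nameko.extensions import DependencyProvider

class RDB(DependencyProvider):
    def __init__(self, db):
        self.RDB_DB = db
        self.rdb_connections = WeakKeyDictionary()

    def setup(self):
        config = self.container.config[RDB_KEY][self.container.service_name]
        self.host = config['RDB_HOST']
        self.port = config['RDB_PORT']

    def get_dependency(self, worker_ctx):
        # one connection per worker, discarded when the worker context dies
        connection = r.connect(host=self.host, port=self.port, db=self.RDB_DB)
        self.rdb_connections[worker_ctx] = connection
        return connection

    def worker_teardown(self, worker_ctx):
        connection = self.rdb_connections.pop(worker_ctx, None)
        if connection is not None:
            connection.close()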
Example #41
def entrypoint_hook(container, method_name, context_data=None):
    """ Yield a function providing an entrypoint into a hosted service.

    The yielded function may be called as if it were the bare method defined
    in the service class. Intended to be used as an integration testing
    utility.

    :Parameters:
        container : ServiceContainer
            The container hosting the service owning the entrypoint
        method_name : str
            The name of the entrypoint decorated method on the service class
        context_data : dict
            Context data to provide for the call, e.g. a language, auth
            token or session.

    **Usage**

    To verify that `ServiceX` and `ServiceY` are compatible, make an
    integration test that checks their interaction:

    .. literalinclude:: ../examples/testing/integration_x_y_test.py

    """
    entrypoint = get_extension(container, Entrypoint, method_name=method_name)
    if entrypoint is None:
        raise ExtensionNotFound(
            "No entrypoint for '{}' found on container {}.".format(
                method_name, container))

    def hook(*args, **kwargs):
        result = event.Event()

        def handle_result(worker_ctx, res=None, exc_info=None):
            result.send(res, exc_info)
            return res, exc_info

        container.spawn_worker(entrypoint,
                               args,
                               kwargs,
                               context_data=context_data,
                               handle_result=handle_result)

        # If the container errors (e.g. due to a bad entrypoint), handle_result
        # is never called and we hang. To mitigate, we spawn a greenlet waiting
        # for the container, and if that throws we send the exception back
        # as our result
        def catch_container_errors(gt):
            try:
                gt.wait()
            except Exception as exc:
                result.send_exception(exc)

        gt = eventlet.spawn(container.wait)
        gt.link(catch_container_errors)

        return result.wait()

    yield hook
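A minimal sketch of how the hook is typically used in an integration test (the service and method names are assumptions):

def test_spam(container_factory, rabbit_config):
    container = container_factory(ExampleService, rabbit_config)  # hypothetical service
    container.start()

    # call the 'spam' entrypoint as if it were a plain method
    with entrypoint_hook(container, 'spam', context_data={'language': 'en'}) as spam:
        assert spam('ham') == 'spam: ham'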
Example #42
    def worker_ctx(self, container, service_class):
        entrypoint = get_extension(
            container, Entrypoint, method_name='some_method')
        return WorkerContext(
            container, service_class, entrypoint, args=('some-arg',))
Example #43
def test_entrypoint_uniqueness(container_factory):
    c1 = container_factory(Service, config={})
    c2 = container_factory(Service, config={})

    # entrypoint declarations are identical between containers
    c1_meth1_entrypoints = c1.service_cls.meth1.nameko_entrypoints
    c2_meth1_entrypoints = c2.service_cls.meth1.nameko_entrypoints
    assert c1_meth1_entrypoints == c2_meth1_entrypoints

    # entrypoint instances are different between containers
    c1_simple_meth1 = get_extension(c1, SimpleEntrypoint, method_name="meth1")
    c2_simple_meth1 = get_extension(c2, SimpleEntrypoint, method_name="meth1")
    assert c1_simple_meth1 != c2_simple_meth1

    # entrypoint instances are different within a container
    simple_meth1 = get_extension(c1, SimpleEntrypoint, method_name="meth1")
    simple_meth2 = get_extension(c1, SimpleEntrypoint, method_name="meth2")
    assert simple_meth1 != simple_meth2
Example #44
def test_entrypoint_uniqueness(container_factory):
    c1 = container_factory(Service, config={})
    c2 = container_factory(Service, config={})

    # entrypoint declarations are identical between containers
    c1_meth1_entrypoints = c1.service_cls.meth1.nameko_entrypoints
    c2_meth1_entrypoints = c2.service_cls.meth1.nameko_entrypoints
    assert c1_meth1_entrypoints == c2_meth1_entrypoints

    # entrypoint instances are different between containers
    c1_simple_meth1 = get_extension(c1, SimpleEntrypoint, method_name="meth1")
    c2_simple_meth1 = get_extension(c2, SimpleEntrypoint, method_name="meth1")
    assert c1_simple_meth1 != c2_simple_meth1

    # entrypoint instances are different within a container
    simple_meth1 = get_extension(c1, SimpleEntrypoint, method_name="meth1")
    simple_meth2 = get_extension(c1, SimpleEntrypoint, method_name="meth2")
    assert simple_meth1 != simple_meth2
Example #45
def entrypoint_hook(container, method_name, context_data=None):
    """ Yield a function providing an entrypoint into a hosted service.

    The yielded function may be called as if it were the bare method defined
    in the service class. Intended to be used as an integration testing
    utility.

    :Parameters:
        container : ServiceContainer
            The container hosting the service owning the entrypoint
        method_name : str
            The name of the entrypoint decorated method on the service class
        context_data : dict
            Context data to provide for the call, e.g. a language, auth
            token or session.

    **Usage**

    To verify that `ServiceX` and `ServiceY` are compatible, make an
    integration test that checks their interaction:

    .. literalinclude:: ../examples/testing/integration_x_y_test.py

    """
    entrypoint = get_extension(container, Entrypoint, method_name=method_name)
    if entrypoint is None:
        raise ExtensionNotFound(
            "No entrypoint for '{}' found on container {}.".format(
                method_name, container))

    def hook(*args, **kwargs):
        result = event.Event()

        def handle_result(worker_ctx, res=None, exc_info=None):
            result.send(res, exc_info)
            return res, exc_info

        container.spawn_worker(entrypoint, args, kwargs,
                               context_data=context_data,
                               handle_result=handle_result)

        # If the container errors (e.g. due to a bad entrypoint), handle_result
        # is never called and we hang. To mitigate, we spawn a greenlet waiting
        # for the container, and if that throws we send the exception back
        # as our result
        def catch_container_errors(gt):
            try:
                gt.wait()
            except Exception as exc:
                result.send_exception(exc)

        gt = eventlet.spawn(container.wait)
        gt.link(catch_container_errors)

        return result.wait()

    yield hook
Example #46
    def test_config_value(self, heartbeat, service_cls, container_factory,
                          rabbit_config):
        rabbit_config[HEARTBEAT_CONFIG_KEY] = heartbeat

        container = container_factory(service_cls, rabbit_config)
        container.start()

        queue_consumer = get_extension(container, QueueConsumer)
        assert queue_consumer.connection.heartbeat == heartbeat
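The heartbeat interval is read from the AMQP config, so this test simply seeds it before the container starts. An illustrative config dict, assuming HEARTBEAT_CONFIG_KEY is the plain "HEARTBEAT" key alongside the broker URI:

rabbit_config = {
    'AMQP_URI': 'pyamqp://guest:guest@localhost:5672/',
    'HEARTBEAT': 10,  # assumption: HEARTBEAT_CONFIG_KEY == 'HEARTBEAT'
}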
Example #47
def test_rpc_consumer_sharing(container_factory, rabbit_config,
                              rabbit_manager):
    """ Verify that the RpcConsumer unregisters from the queueconsumer when
    the first provider unregisters itself. Otherwise it keeps consuming
    messages for the unregistered provider, raising MethodNotFound.
    """

    container = container_factory(ExampleService, rabbit_config)
    container.start()

    task_a = get_extension(container, Rpc, method_name="task_a")
    task_a_stop = task_a.stop

    task_b = get_extension(container, Rpc, method_name="task_b")
    task_b_stop = task_b.stop

    task_a_stopped = Event()

    def patched_task_a_stop():
        task_a_stop()  # stop immediately
        task_a_stopped.send(True)

    def patched_task_b_stop():
        eventlet.sleep(2)  # stop after 2 seconds
        task_b_stop()

    with patch.object(task_b, 'stop', patched_task_b_stop), \
            patch.object(task_a, 'stop', patched_task_a_stop):

        # stop the container and wait for task_a to stop
        # task_b will still be in the process of stopping
        eventlet.spawn(container.stop)
        task_a_stopped.wait()

        # try to call task_a.
        # should timeout, rather than raising MethodNotFound
        with ServiceRpcProxy("exampleservice", rabbit_config) as proxy:
            with pytest.raises(eventlet.Timeout):
                with eventlet.Timeout(1):
                    proxy.task_a()

    # kill the container so we don't have to wait for task_b to stop
    container.kill()
Example #48
def test_dependency_attr_name(container_factory):
    c1 = container_factory(Service, config={})

    bound_dep_provider = get_extension(c1, SimpleDependencyProvider)
    assert bound_dep_provider.attr_name == 'dep'

    dep_provider_declaration = c1.service_cls.dep
    assert dep_provider_declaration.attr_name == 'dep'

    assert bound_dep_provider != dep_provider_declaration
Example #49
def test_rpc_consumer_sharing(container_factory, rabbit_config,
                              rabbit_manager):
    """ Verify that the RpcConsumer unregisters from the queueconsumer when
    the first provider unregisters itself. Otherwise it keeps consuming
    messages for the unregistered provider, raising MethodNotFound.
    """

    container = container_factory(ExampleService, rabbit_config)
    container.start()

    task_a = get_extension(container, Rpc, method_name="task_a")
    task_a_stop = task_a.stop

    task_b = get_extension(container, Rpc, method_name="task_b")
    task_b_stop = task_b.stop

    task_a_stopped = Event()

    def patched_task_a_stop():
        task_a_stop()  # stop immediately
        task_a_stopped.send(True)

    def patched_task_b_stop():
        eventlet.sleep(2)  # stop after 2 seconds
        task_b_stop()

    with patch.object(task_b, 'stop', patched_task_b_stop), \
            patch.object(task_a, 'stop', patched_task_a_stop):

        # stop the container and wait for task_a to stop
        # task_b will still be in the process of stopping
        eventlet.spawn(container.stop)
        task_a_stopped.wait()

        # try to call task_a.
        # should timeout, rather than raising MethodNotFound
        with ServiceRpcProxy("exampleservice", rabbit_config) as proxy:
            with pytest.raises(eventlet.Timeout):
                with eventlet.Timeout(1):
                    proxy.task_a()

    # kill the container so we don't have to wait for task_b to stop
    container.kill()
Example #50
    def test_config_value(
        self, heartbeat, service_cls, container_factory, rabbit_config
    ):
        rabbit_config[HEARTBEAT_CONFIG_KEY] = heartbeat

        container = container_factory(service_cls, rabbit_config)
        container.start()

        queue_consumer = get_extension(container, QueueConsumer)
        assert queue_consumer.connection.heartbeat == heartbeat
Example #51
def test_sensitive_event(container_factory):

    container = container_factory(Service, {})
    handler_entrypoint = get_extension(container, EventHandler)

    assert handler_entrypoint.sensitive_variables == "event_data.foo"

    with entrypoint_hook(container, "handle") as handler:
        handler({"foo": "FOO", "bar": "BAR"})

    assert redacted == {"event_data": {"foo": REDACTED, "bar": "BAR"}}
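The test above implies an event-handler entrypoint declared with sensitive_variables and a module-level redacted collector. A hedged sketch of the service it might exercise (source service and event names are assumptions):

from nameko.events import event_handler

class Service:
    name = "service"

    @event_handler("src_service", "example_event",
                   sensitive_variables="event_data.foo")
    def handle(self, event_data):
        pass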
Example #52
def test_frequencies(container_factory, web_container_config):
    test_container = container_factory(HttpService, web_container_config)
    test_container.start()
    storage = get_extension(test_container, RedisStorage)
    storage.store_group('http://example1.com', 'group1')
    storage.store_group('http://example2.com', 'group2')
    storage.store_frequency('http://example1.com', 'group1', 'hourly')
    storage.store_frequency('http://example2.com', 'group2', 'hourly')
    urls = [u for u in storage.get_frequency_urls('hourly')]
    assert 'http://example1.com' in urls
    assert 'http://example2.com' in urls
    assert len(urls) == 2
Example #53
def test_handle_killed_worker(container, logger):

    dep = get_extension(container, Entrypoint)
    container.spawn_worker(dep, ['sleep'], {})

    assert len(container._worker_threads) == 1
    (worker_gt,) = container._worker_threads.values()

    worker_gt.kill()
    assert logger.debug.call_args == call(
        "%s thread killed by container", container)

    assert not container._died.ready()  # container continues running