def test_downstream_timeout(self, container, publish, toxiproxy):
    """ Verify we detect and recover from sockets timing out.

    This failure mode means that the socket between the rabbit broker
    and the consumer times out after `timeout` milliseconds and then
    closes.

    Attempting to read from the socket after it's closed raises a
    socket.error and the connection will be re-established. If `timeout`
    is longer than twice the heartbeat interval, the behaviour is the
    same as in `test_downstream_blackhole` below, except that the
    consumer cancel will eventually (`timeout` milliseconds) raise a
    socket.error, which is ignored, allowing the teardown to continue.

    See :meth:`kombu.messaging.Consumer.__exit__`
    """
    queue_consumer = get_extension(container, QueueConsumer)

    def reset(args, kwargs, result, exc_info):
        # remove the timeout toxic so the reconnect attempt can succeed
        toxiproxy.reset_timeout()
        return True

    with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
        toxiproxy.set_timeout(stream="downstream", timeout=100)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
def test_downstream_blackhole(
    self, container, publish, toxiproxy
):  # pragma: no cover
    """ Verify we detect and recover from sockets losing data.

    This failure mode means that all data sent from the rabbit broker
    to the consumer is lost, but the socket remains open.

    Heartbeat acknowledgements from the broker are not received by the
    consumer. After two beats are missed the consumer raises a "too many
    heartbeats missed" error.

    Cancelling the consumer requests an acknowledgement from the broker,
    which is swallowed by the socket. There is no timeout when reading
    the acknowledgement so this hangs forever.

    See :meth:`kombu.messaging.Consumer.__exit__`
    """
    pytest.skip("skip until kombu supports recovery in this scenario")

    queue_consumer = get_extension(container, QueueConsumer)

    def reset(args, kwargs, result, exc_info):
        # remove the timeout toxic so the reconnect attempt can succeed
        toxiproxy.reset_timeout()
        return True

    with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
        toxiproxy.set_timeout(stream="downstream", timeout=0)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
def test_downstream_timeout(self, container, publish, toxiproxy):
    """ Verify we detect and recover from sockets timing out.

    This failure mode means that the socket between the rabbit broker
    and the consumer times out after `timeout` milliseconds and then
    closes.

    Attempting to read from the socket after it's closed raises a
    socket.error and the connection will be re-established. If `timeout`
    is longer than twice the heartbeat interval, the behaviour is the
    same as in `test_downstream_blackhole` below, except that the
    consumer cancel will eventually (`timeout` milliseconds) raise a
    socket.error, which is ignored, allowing the teardown to continue.

    See :meth:`kombu.messaging.Consumer.__exit__`
    """
    queue_consumer = get_extension(container, QueueConsumer)

    def reset(args, kwargs, result, exc_info):
        # remove the timeout toxic so the reconnect attempt can succeed
        toxiproxy.reset_timeout()
        return True

    with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
        toxiproxy.set_timeout(stream="downstream", timeout=100)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
def test_upstream_blackhole(self, container, publish, toxiproxy):
    """ Verify we detect and recover from sockets losing data.

    This failure mode means that all data sent from the consumer to the
    rabbit broker is lost, but the socket remains open.

    Heartbeats sent from the consumer are not received by the broker.
    After two beats are missed the broker closes the connection, and
    subsequent reads from the socket raise a socket.error, so the
    connection is re-established.
    """
    queue_consumer = get_extension(container, QueueConsumer)

    def on_error(args, kwargs, result, exc_info):
        # undo the toxic so the reconnect attempt can succeed
        toxiproxy.reset_timeout()
        return True

    with patch_wait(
        queue_consumer, 'on_connection_error', callback=on_error
    ):
        toxiproxy.set_timeout(timeout=0)

        # connection re-established; a message roundtrip now succeeds
        payload = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(payload)
        assert result.get() == payload
def test_downstream_blackhole(
    self, container, publish, toxiproxy
):  # pragma: no cover
    """ Verify we detect and recover from sockets losing data.

    This failure mode means that all data sent from the rabbit broker
    to the consumer is lost, but the socket remains open.

    Heartbeat acknowledgements from the broker are not received by the
    consumer. After two beats are missed the consumer raises a "too many
    heartbeats missed" error.

    Cancelling the consumer requests an acknowledgement from the broker,
    which is swallowed by the socket. There is no timeout when reading
    the acknowledgement so this hangs forever.

    See :meth:`kombu.messaging.Consumer.__exit__`
    """
    pytest.skip("skip until kombu supports recovery in this scenario")

    queue_consumer = get_extension(container, QueueConsumer)

    def reset(args, kwargs, result, exc_info):
        # remove the timeout toxic so the reconnect attempt can succeed
        toxiproxy.reset_timeout()
        return True

    with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
        toxiproxy.set_timeout(stream="downstream", timeout=0)

        # connection re-established
        msg = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(msg)
        assert result.get() == msg
def test_upstream_blackhole(self, container, publish, toxiproxy):
    """ Verify we detect and recover from sockets losing data.

    This failure mode means that all data sent from the consumer to the
    rabbit broker is lost, but the socket remains open.

    Heartbeats sent from the consumer are not received by the broker.
    After two beats are missed the broker closes the connection, and
    subsequent reads from the socket raise a socket.error, so the
    connection is re-established.
    """
    queue_consumer = get_extension(container, QueueConsumer)

    def on_error(args, kwargs, result, exc_info):
        # undo the toxic so the reconnect attempt can succeed
        toxiproxy.reset_timeout()
        return True

    with patch_wait(
        queue_consumer, 'on_connection_error', callback=on_error
    ):
        toxiproxy.set_timeout(timeout=0)

        # connection re-established; a message roundtrip now succeeds
        payload = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(payload)
        assert result.get() == payload
def test_with_retry_policy(self, service_rpc, toxiproxy):
    """ Verify we automatically recover from stale connections.

    Publish confirms are required for this functionality. Without
    confirms the later messages are silently lost and the test hangs
    waiting for a response.
    """
    # call 1 succeeds over the healthy connection
    assert service_rpc.echo(1) == 1

    toxiproxy.disable()

    def restore(args, kwargs, res, exc_info):
        # bring the proxy back so the reconnect attempt can succeed
        toxiproxy.enable()
        return True

    # call 2 succeeds (after reconnecting via retry policy)
    with patch_wait(Connection, 'connect', callback=restore):
        assert service_rpc.echo(2) == 2
def test_down(self, container, service_rpc, toxiproxy):
    """ Verify we detect and recover from closed sockets.

    This failure mode closes the socket between the consumer and the
    rabbit broker.

    Attempting to read from the closed socket raises a socket.error
    and the connection is re-established.
    """
    queue_consumer = get_extension(container, QueueConsumer)

    def reset(args, kwargs, result, exc_info):
        # the proxy was taken down with disable(), so the matching undo
        # is enable() (consistent with the other `test_down`); the
        # original called toxiproxy.reset(), which does not re-enable
        # a disabled proxy
        toxiproxy.enable()
        return True

    with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
        toxiproxy.disable()

        # connection re-established
        assert service_rpc.echo("foo") == "foo"
def test_with_retry_policy(
    self, publisher_container, consumer_container, tracker, toxiproxy
):
    """ Verify we automatically recover from stale connections.

    Publish confirms are required for this functionality. Without
    confirms the later messages are silently lost and the test hangs
    waiting for a response.
    """
    expected = []

    def roundtrip(payload):
        # publish via the entrypoint hook and wait for the consumer
        with entrypoint_waiter(consumer_container, 'recv'):
            with entrypoint_hook(publisher_container, 'send') as send:
                send(payload)
        expected.append(call("send", payload))
        expected.append(call("recv", payload))

    # call 1 succeeds
    roundtrip("payload1")
    assert tracker.call_args_list == expected

    toxiproxy.disable()

    def enable_after_retry(args, kwargs, res, exc_info):
        # bring the proxy back so the retried connect can succeed
        toxiproxy.enable()
        return True

    # call 2 succeeds (after reconnecting via retry policy)
    with patch_wait(Connection, 'connect', callback=enable_after_retry):
        roundtrip("payload2")
        assert tracker.call_args_list == expected
def test_with_retry_policy(
    self, publisher_container, consumer_container, tracker, toxiproxy
):
    """ Verify we automatically recover from stale connections.

    Publish confirms are required for this functionality. Without
    confirms the later messages are silently lost and the test hangs
    waiting for a response.
    """
    expected = []

    def roundtrip(payload):
        # publish via the entrypoint hook and wait for the consumer
        with entrypoint_waiter(consumer_container, 'recv'):
            with entrypoint_hook(publisher_container, 'send') as send:
                send(payload)
        expected.append(call("send", payload))
        expected.append(call("recv", payload))

    # call 1 succeeds
    roundtrip("payload1")
    assert tracker.call_args_list == expected

    toxiproxy.disable()

    def enable_after_retry(args, kwargs, res, exc_info):
        # bring the proxy back so the retried connect can succeed
        toxiproxy.enable()
        return True

    # call 2 succeeds (after reconnecting via retry policy)
    with patch_wait(Connection, 'connect', callback=enable_after_retry):
        roundtrip("payload2")
        assert tracker.call_args_list == expected
def test_upstream_timeout(self, container, service_rpc, toxiproxy):
    """ Verify we detect and recover from sockets timing out.

    This failure mode means that the socket between the consumer and the
    rabbit broker times out after `timeout` milliseconds and then
    closes.

    Attempting to read from the socket after it's closed raises a
    socket.error and the connection will be re-established. If `timeout`
    is longer than twice the heartbeat interval, the behaviour is the
    same as in `test_upstream_blackhole` below.
    """
    queue_consumer = get_extension(container, QueueConsumer)

    def reset(args, kwargs, result, exc_info):
        # remove the timeout toxic so the reconnect attempt can succeed
        toxiproxy.reset_timeout()
        return True

    with patch_wait(queue_consumer, 'on_connection_error', callback=reset):
        toxiproxy.set_timeout(timeout=100)

        # connection re-established
        assert service_rpc.echo("foo") == "foo"
def test_down(self, container, publish, toxiproxy):
    """ Verify we detect and recover from closed sockets.

    This failure mode closes the socket between the consumer and the
    rabbit broker.

    Attempting to read from the closed socket raises a socket.error
    and the connection is re-established.
    """
    queue_consumer = get_extension(container, QueueConsumer)

    def re_enable(args, kwargs, result, exc_info):
        # bring the proxy back so the reconnect attempt can succeed
        toxiproxy.enable()
        return True

    with patch_wait(
        queue_consumer, 'on_connection_error', callback=re_enable
    ):
        toxiproxy.disable()

        # connection re-established; a message roundtrip now succeeds
        payload = "foo"
        with entrypoint_waiter(container, 'echo') as result:
            publish(payload)
        assert result.get() == payload