def test_makeUnconnectedRequest(self):
    """
    Ensure that sending a request when not connected will attempt to
    bring up a connection if one isn't already in the process of being
    brought up
    """
    req_id = 65432
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient(
        reactor, 'testmakeUnconnectedRequest', 9092, 'clientId')
    request = KafkaCodec.encode_fetch_request(
        b'testmakeUnconnectedRequest', req_id)

    deferred = broker.makeRequest(req_id, request)
    self.assertIsInstance(deferred, Deferred)
    # The request must not be marked as sent yet
    self.assertFalse(broker.requests[req_id].sent)
    # ...and a connection attempt must have been started
    self.assertTrue(broker.connector)
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    # Pretend the connection came up
    broker.buildProtocol(None)
    # Swap in a mock protocol so we can observe the send
    broker.proto = Mock()
    reactor.advance(1.0)
    # The queued request should now have been written out
    broker.proto.sendString.assert_called_once_with(request)
def test_producer_send_messages_unknown_topic(self):
    # The producer must retry the metadata lookup DEFAULT_REQ_ATTEMPTS
    # times and then fail with UnknownTopicOrPartitionError.
    mock_client = Mock()
    metadata_ds = [Deferred() for _ in range(Producer.DEFAULT_REQ_ATTEMPTS)]
    clock = MemoryReactorClock()
    mock_client.load_metadata_for_topics.side_effect = metadata_ds
    mock_client.metadata_error_for_topic.return_value = 3
    mock_client.topic_partitions = {}
    messages = [self.msg("one"), self.msg("two")]
    producer = Producer(mock_client, ack_timeout=5, clock=clock)

    send_d = producer.send_messages(self.topic, msgs=messages)
    # send_d is waiting on metadata_ds[0] from load_metadata_for_topics
    self.assertNoResult(send_d)
    # Fire every lookup with the client still reporting no metadata
    # for the topic; the producer retries the lookup each time.
    for metadata_d in metadata_ds:
        metadata_d.callback(None)
    # Then wait producer._retry_interval for a call back...
    clock.advance(producer._retry_interval + 0.01)
    self.failureResultOf(send_d, UnknownTopicOrPartitionError)
    self.assertFalse(mock_client.send_produce_request.called)
    producer.stop()
def test_producer_send_messages_unknown_topic(self):
    # The producer must retry the metadata lookup DEFAULT_REQ_ATTEMPTS
    # times and then fail with UnknownTopicOrPartitionError.
    mock_client = Mock()
    metadata_ds = [Deferred() for _ in range(Producer.DEFAULT_REQ_ATTEMPTS)]
    clock = MemoryReactorClock()
    mock_client.load_metadata_for_topics.side_effect = metadata_ds
    mock_client.metadata_error_for_topic.return_value = 3
    mock_client.topic_partitions = {}
    messages = [self.msg("one"), self.msg("two")]
    producer = Producer(mock_client, ack_timeout=5, clock=clock)

    send_d = producer.send_messages(self.topic, msgs=messages)
    # send_d is waiting on metadata_ds[0] from load_metadata_for_topics
    self.assertNoResult(send_d)
    # Fire every lookup with the client still reporting no metadata
    # for the topic; the producer retries the lookup each time.
    for metadata_d in metadata_ds:
        metadata_d.callback(None)
    # Then wait producer._retry_interval for a call back...
    clock.advance(producer._retry_interval + 0.01)
    self.failureResultOf(send_d, UnknownTopicOrPartitionError)
    self.assertFalse(mock_client.send_produce_request.called)
    producer.stop()
def test_makeUnconnectedRequest(self):
    """
    Ensure that sending a request when not connected will attempt to
    bring up a connection if one isn't already in the process of being
    brought up
    """
    req_id = 65432
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient('testmakeUnconnectedRequest',
                               reactor=reactor)
    request = KafkaCodec.encode_fetch_request(
        'testmakeUnconnectedRequest', req_id)

    deferred = broker.makeRequest(req_id, request)
    self.assertIsInstance(deferred, Deferred)
    # The request must not be marked as sent yet
    self.assertFalse(broker.requests[req_id].sent)
    # ...and a connection attempt must have been started
    self.assertTrue(broker.connector)
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    # Pretend the connection came up
    broker.buildProtocol(None)
    # Swap in a mock protocol so we can observe the send
    broker.proto = Mock()
    reactor.advance(1.0)
    # The queued request should now have been written out
    broker.proto.sendString.assert_called_once_with(request)
def test_producer_send_timer_failed(self):
    """
    Test that the looping call is restarted when _send_batch errs

    Somewhat artificial test to confirm that when failures occur in
    _send_batch (which cause the looping call to terminate) that the
    looping call is restarted.
    """
    client = Mock()
    client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    client.metadata_error_for_topic.return_value = False
    batch_t = 5
    clock = MemoryReactorClock()

    with patch.object(aProducer, 'log') as klog:
        producer = Producer(client, batch_send=True,
                            batch_every_t=batch_t, clock=clock)
        msgs = [self.msg("one"), self.msg("two")]
        # The batch never completes in this test; we only need the
        # messages queued so the timer has something to send.
        # (Previously this result was bound to 'd' and then shadowed
        # by the patched Deferred mock below, also named 'd'.)
        send_d = producer.send_messages(self.topic, msgs=msgs)
        self.assertIsInstance(send_d, Deferred)
        # Check no request was yet sent
        self.assertFalse(client.send_produce_request.called)
        # Patch the module's Deferred factory to throw an exception
        with patch.object(aProducer, 'Deferred') as mock_deferred:
            mock_deferred.side_effect = ValueError(
                "test_producer_send_timer_failed induced failure")
            # Advance the clock so the batch timer fires and errs
            clock.advance(batch_t)
        # Check the expected message was logged by the looping call restart
        klog.warning.assert_called_once_with('_send_timer_failed:%r: %s',
                                             ANY, ANY)
    # Check that the looping call was restarted
    self.assertTrue(producer.sendLooper.running)
    producer.stop()
def test_producer_send_timer_failed(self):
    """
    Test that the looping call is restarted when _send_batch errs

    Somewhat artificial test to confirm that when failures occur in
    _send_batch (which cause the looping call to terminate) that the
    looping call is restarted.
    """
    client = Mock()
    client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    client.metadata_error_for_topic.return_value = False
    batch_t = 5
    clock = MemoryReactorClock()

    with patch.object(aProducer, 'log') as klog:
        producer = Producer(client, batch_send=True,
                            batch_every_t=batch_t, clock=clock)
        msgs = [self.msg("one"), self.msg("two")]
        # The batch never completes in this test; we only need the
        # messages queued so the timer has something to send.
        # (Previously this result was bound to 'd' and then shadowed
        # by the patched Deferred mock below, also named 'd'.)
        send_d = producer.send_messages(self.topic, msgs=msgs)
        self.assertIsInstance(send_d, Deferred)
        # Check no request was yet sent
        self.assertFalse(client.send_produce_request.called)
        # Patch the module's Deferred factory to throw an exception
        with patch.object(aProducer, 'Deferred') as mock_deferred:
            mock_deferred.side_effect = ValueError(
                "test_producer_send_timer_failed induced failure")
            # Advance the clock so the batch timer fires and errs
            clock.advance(batch_t)
        # Check the expected message was logged by the looping call restart
        klog.warning.assert_called_once_with('_send_timer_failed:%r: %s',
                                             ANY, ANY)
    # Check that the looping call was restarted
    self.assertTrue(producer.sendLooper.running)
    producer.stop()
def test_makeRequest(self):
    # Exercise both the expectResponse=True and expectResponse=False
    # paths of makeRequest, then confirm close() cancels the pending one.
    first_id = 54321
    second_id = 76543
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient('testmakeRequest', reactor=reactor)
    request = KafkaCodec.encode_fetch_request('testmakeRequest', first_id)

    deferred = broker.makeRequest(first_id, request)
    errback = Mock()
    self.assertIsInstance(deferred, Deferred)
    deferred.addErrback(errback)
    # The request must not be marked as sent yet
    self.assertFalse(broker.requests[first_id].sent)
    # ...and a connection attempt must have been started
    self.assertTrue(broker.connector)
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    # Pretend the connection came up
    broker.buildProtocol(None)
    # Swap in a mock protocol so we can observe the send
    broker.proto = Mock()
    # Advance the clock so sendQueued() will be called
    reactor.advance(1.0)
    # The proto should have been asked to sendString the request
    broker.proto.sendString.assert_called_once_with(request)

    # Now make a request with 'expectResponse=False'
    broker.proto = Mock()
    request = KafkaCodec.encode_fetch_request('testmakeRequest2', second_id)
    deferred2 = broker.makeRequest(second_id, request, expectResponse=False)
    self.assertIsInstance(deferred2, Deferred)
    broker.proto.sendString.assert_called_once_with(request)

    # Closing the client must cancel the outstanding request
    broker.close()
    failure = errback.call_args[0][0]  # The actual failure sent to errback
    self.assertTrue(failure.check(CancelledError))
def test_producer_send_messages_batched(self):
    # Batched send where the first produce request fails and the retry
    # (after _retry_interval) succeeds.
    client = Mock()
    f = Failure(BrokerNotAvailableError())
    # NOTE: the offset here was the Python 2-only long literal ``10L``,
    # which is a SyntaxError on Python 3; a plain int is equal in value.
    ret = [fail(f), succeed([ProduceResponse(self.topic, 0, 0, 10)])]
    client.send_produce_request.side_effect = ret
    client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    client.metadata_error_for_topic.return_value = False
    msgs = [self.msg("one"), self.msg("two")]
    clock = MemoryReactorClock()
    batch_n = 2
    producer = Producer(client, batch_every_n=batch_n, batch_send=True,
                        clock=clock)
    d = producer.send_messages(self.topic, msgs=msgs)
    # Check the expected request was sent
    msgSet = create_message_set(make_send_requests(msgs), producer.codec)
    req = ProduceRequest(self.topic, ANY, msgSet)
    client.send_produce_request.assert_called_once_with(
        [req], acks=producer.req_acks, timeout=producer.ack_timeout,
        fail_on_error=False)
    # At first, there's no result. Have to retry due to first failure
    self.assertNoResult(d)
    clock.advance(producer._retry_interval)
    self.successResultOf(d)
    producer.stop()
def test_makeRequest(self):
    # Exercise both the expectResponse=True and expectResponse=False
    # paths of makeRequest, then confirm close() cancels the pending one.
    first_id = 54321
    second_id = 76543
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient(reactor, 'testmakeRequest', 9092, 'clientId')
    request = KafkaCodec.encode_fetch_request(b'testmakeRequest', first_id)

    deferred = broker.makeRequest(first_id, request)
    errback = Mock()
    self.assertIsInstance(deferred, Deferred)
    deferred.addErrback(errback)
    # The request must not be marked as sent yet
    self.assertFalse(broker.requests[first_id].sent)
    # ...and a connection attempt must have been started
    self.assertTrue(broker.connector)
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    # Pretend the connection came up
    broker.buildProtocol(None)
    # Swap in a mock protocol so we can observe the send
    broker.proto = Mock()
    # Advance the clock so sendQueued() will be called
    reactor.advance(1.0)
    # The proto should have been asked to sendString the request
    broker.proto.sendString.assert_called_once_with(request)

    # Now make a request with 'expectResponse=False'
    broker.proto = Mock()
    request = KafkaCodec.encode_fetch_request(b'testmakeRequest2', second_id)
    deferred2 = broker.makeRequest(second_id, request, expectResponse=False)
    self.assertIsInstance(deferred2, Deferred)
    broker.proto.sendString.assert_called_once_with(request)

    # Closing the client must cancel the outstanding request
    broker.close()
    failure = errback.call_args[0][0]  # The actual failure sent to errback
    self.assertTrue(failure.check(CancelledError))
def test_producer_send_messages_batched(self):
    # Batched send where the first produce request fails and the retry
    # (after _retry_interval) succeeds.
    client = Mock()
    f = Failure(BrokerNotAvailableError())
    # NOTE: the offset here was the Python 2-only long literal ``10L``,
    # which is a SyntaxError on Python 3; a plain int is equal in value.
    ret = [fail(f), succeed([ProduceResponse(self.topic, 0, 0, 10)])]
    client.send_produce_request.side_effect = ret
    client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    client.metadata_error_for_topic.return_value = False
    msgs = [self.msg("one"), self.msg("two")]
    clock = MemoryReactorClock()
    batch_n = 2
    producer = Producer(client, batch_every_n=batch_n, batch_send=True,
                        clock=clock)
    d = producer.send_messages(self.topic, msgs=msgs)
    # Check the expected request was sent
    msgSet = create_message_set(
        make_send_requests(msgs), producer.codec)
    req = ProduceRequest(self.topic, ANY, msgSet)
    client.send_produce_request.assert_called_once_with(
        [req], acks=producer.req_acks, timeout=producer.ack_timeout,
        fail_on_error=False)
    # At first, there's no result. Have to retry due to first failure
    self.assertNoResult(d)
    clock.advance(producer._retry_interval)
    self.successResultOf(d)
    producer.stop()
def test_connect(self):
    reactor = MemoryReactorClock()
    reactor.running = True
    broker = KafkaBrokerClient('test_connect', reactor=reactor)
    # Force a connection attempt
    broker._connect()
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    # Pretend we've connected, which will schedule the notify firing
    broker.buildProtocol(None)
    reactor.advance(1.0)
def test_connect(self):
    reactor = MemoryReactorClock()
    reactor.running = True
    broker = KafkaBrokerClient('test_connect', reactor=reactor)
    # Force a connection attempt
    broker._connect()
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    # Pretend we've connected, which will schedule the notify firing
    broker.buildProtocol(None)
    reactor.advance(1.0)
def test_connect(self):
    reactor = MemoryReactorClock()
    reactor.running = True
    broker = KafkaBrokerClient('test_connect', reactor=reactor)
    # Force a connection attempt
    broker._connect()
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    # Build the protocol, like a real connector would
    broker.buildProtocol(None)
    # Trigger the DelayedCall to _notify
    reactor.advance(1.0)
def test_connectNotify(self):
    from afkak.protocol import KafkaProtocol
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient('test_connectNotify', reactor=reactor)
    broker._connect()  # Force a connection attempt
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    proto = broker.buildProtocol(None)
    self.assertIsInstance(proto, KafkaProtocol)
    reactor.advance(1.0)
    # All scheduled notifications should have fired by now
    self.assertFalse(broker.clock.getDelayedCalls())
def test_connectNotify(self):
    from afkak.protocol import KafkaProtocol
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient(reactor, 'test_connectNotify', 9092,
                               'clientId')
    broker._connect()  # Force a connection attempt
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    proto = broker.buildProtocol(None)
    self.assertIsInstance(proto, KafkaProtocol)
    reactor.advance(1.0)
    # All scheduled notifications should have fired by now
    self.assertEqual([], reactor.getDelayedCalls())
class CreateUserServletTestCase(unittest.TestCase):
    """
    Tests for CreateUserRestServlet.
    """

    def setUp(self):
        # Mock out the registration handler and the application service
        # that authenticates the request.
        self.registration_handler = Mock()
        self.appservice = Mock(sender="@as:test")
        self.datastore = Mock(
            get_app_service_by_token=Mock(return_value=self.appservice))
        handlers = Mock(registration_handler=self.registration_handler)
        self.clock = MemoryReactorClock()
        self.hs_clock = Clock(self.clock)
        # BUG FIX: was the redundant double assignment
        # ``self.hs = self.hs = setup_test_homeserver(...)``.
        self.hs = setup_test_homeserver(
            http_client=None, clock=self.hs_clock, reactor=self.clock)
        self.hs.get_datastore = Mock(return_value=self.datastore)
        self.hs.get_handlers = Mock(return_value=handlers)

    def test_POST_createuser_with_valid_user(self):
        res = JsonResource(self.hs)
        register_servlets(self.hs, res)

        request_data = json.dumps({
            "localpart": "someone",
            "displayname": "someone interesting",
            "duration_seconds": 200,
        })
        url = b'/_matrix/client/api/v1/createUser?access_token=i_am_an_app_service'
        user_id = "@someone:interesting"
        token = "my token"
        self.registration_handler.get_or_create_user = Mock(
            return_value=(user_id, token))

        request, channel = make_request(b"POST", url, request_data)
        request.render(res)

        # Advance the clock because the render completes asynchronously
        self.clock.advance(1)
        self.assertEquals(channel.result["code"], b"200")

        det_data = {
            "user_id": user_id,
            "access_token": token,
            "home_server": self.hs.hostname,
        }
        self.assertDictContainsSubset(det_data,
                                      json.loads(channel.result["body"]))
def test_connected(self):
    reactor = MemoryReactorClock()
    reactor.running = True
    broker = KafkaBrokerClient(reactor, 'test_connect', 9092, 'clientId')
    broker._connect()  # Force a connection attempt
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    # Not connected until the protocol is built
    self.assertFalse(broker.connected())
    # Build the protocol, like a real connector would
    broker.buildProtocol(None)
    self.assertTrue(broker.connected())
    # Trigger the DelayedCall to _notify
    reactor.advance(1.0)
def test_closeNotifyDuringConnect(self):
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient('test_closeNotify', reactor=reactor)
    broker._connect()  # Force a connection attempt
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    reactor.advance(1.0)
    # Nothing should be scheduled yet
    self.assertFalse(broker.clock.getDelayedCalls())
    # Close while the connect attempt is still outstanding
    broker.close()
    broker.clientConnectionFailed(broker.connector, Failure(UserError()))
    reactor.advance(1.0)
    # All notifications must have fired; no stray delayed calls remain
    self.assertFalse(broker.clock.getDelayedCalls())
def test_closeNotifyDuringConnect(self):
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient(reactor, 'test_closeNotify', 9092, 'clientId')
    broker._connect()  # Force a connection attempt
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    reactor.advance(1.0)
    # Nothing should be scheduled yet
    self.assertEqual([], reactor.getDelayedCalls())
    # Close while the connect attempt is still outstanding
    broker.close()
    broker.clientConnectionFailed(broker.connector, Failure(UserError()))
    reactor.advance(1.0)
    # All notifications must have fired; no stray delayed calls remain
    self.assertEqual([], reactor.getDelayedCalls())
def test_converge_complete(self):
    """
    At the end of a convergence iteration, ``_CONVERGE_COMPLETE`` is
    updated to the current time.
    """
    interval = 45
    reactor = MemoryReactorClock()
    # Minimal deployment configuration for the convergence service.
    deploy_config = DeploymentConfiguration(
        domain=u"s4.example.com",
        kubernetes_namespace=u"testing",
        subscription_manager_endpoint=URL.from_text(
            u"http://localhost:8000"),
        s3_access_key_id=u"access key id",
        s3_secret_key=u"secret key",
        introducer_image=u"introducer:abcdefgh",
        storageserver_image=u"storageserver:abcdefgh",
    )
    state_path = FilePath(self.mktemp().decode("ascii"))
    state_path.makedirs()
    subscription_client = memory_client(
        state_path,
        deploy_config.domain,
    )
    k8s_client = KubeClient(k8s=memory_kubernetes().client())
    aws_region = FakeAWSServiceRegion(
        access_key=deploy_config.s3_access_key_id,
        secret_key=deploy_config.s3_secret_key,
    )
    # The convergence iteration needs the hosted zone to exist.
    d = aws_region.get_route53_client().create_hosted_zone(
        u"foo", deploy_config.domain,
    )
    self.successResultOf(d)
    service = _convergence_service(
        reactor,
        interval,
        deploy_config,
        subscription_client,
        k8s_client,
        aws_region,
    )
    service.startService()
    # Let exactly one convergence iteration run.
    reactor.advance(interval)
    # Pull the latest sample of the completion-time gauge from the
    # Prometheus registry.
    last_completed = next(
        iter(
            list(metric.samples[-1][-1] for metric in REGISTRY.collect()
                 if metric.name == u"s4_last_convergence_succeeded")))
    self.assertThat(reactor.seconds(), Equals(last_completed))
def test_converge_complete(self):
    """
    At the end of a convergence iteration, ``_CONVERGE_COMPLETE`` is
    updated to the current time.
    """
    interval = 45
    reactor = MemoryReactorClock()
    # Minimal deployment configuration for the convergence service.
    deploy_config = DeploymentConfiguration(
        domain=u"s4.example.com",
        kubernetes_namespace=u"testing",
        subscription_manager_endpoint=URL.from_text(u"http://localhost:8000"),
        s3_access_key_id=u"access key id",
        s3_secret_key=u"secret key",
        introducer_image=u"introducer:abcdefgh",
        storageserver_image=u"storageserver:abcdefgh",
    )
    state_path = FilePath(self.mktemp().decode("ascii"))
    state_path.makedirs()
    subscription_client = memory_client(
        state_path,
        deploy_config.domain,
    )
    k8s_client = KubeClient(k8s=memory_kubernetes().client())
    aws_region = FakeAWSServiceRegion(
        access_key=deploy_config.s3_access_key_id,
        secret_key=deploy_config.s3_secret_key,
    )
    # The convergence iteration needs the hosted zone to exist.
    d = aws_region.get_route53_client().create_hosted_zone(
        u"foo", deploy_config.domain,
    )
    self.successResultOf(d)
    service = _convergence_service(
        reactor,
        interval,
        deploy_config,
        subscription_client,
        k8s_client,
        aws_region,
    )
    service.startService()
    # Let exactly one convergence iteration run.
    reactor.advance(interval)
    # Pull the latest sample of the completion-time gauge from the
    # Prometheus registry.
    last_completed = next(iter(list(
        metric.samples[-1][-1]
        for metric in REGISTRY.collect()
        if metric.name == u"s4_last_convergence_succeeded"
    )))
    self.assertThat(reactor.seconds(), Equals(last_completed))
def test_closeNotify(self):
    from twisted.internet.error import ConnectionDone
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient(reactor, 'test_closeNotify', 9092, 'clientId')
    broker._connect()  # Force a connection attempt
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    broker.buildProtocol(None)
    reactor.advance(1.0)
    self.assertEqual([], reactor.getDelayedCalls())
    # Stop reconnect attempts, then close the (connected) client
    broker.continueTrying = False
    broker.close()
    broker.clientConnectionLost(broker.connector, Failure(ConnectionDone()))
    reactor.advance(1.0)
    # No stray delayed calls may remain after the close notification
    self.assertEqual([], reactor.getDelayedCalls())
def test_closeNotify(self):
    from twisted.internet.error import ConnectionDone
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient('test_closeNotify', reactor=reactor)
    broker._connect()  # Force a connection attempt
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    broker.buildProtocol(None)
    reactor.advance(1.0)
    self.assertFalse(broker.clock.getDelayedCalls())
    # Stop reconnect attempts, then close the (connected) client
    broker.continueTrying = False
    broker.close()
    broker.clientConnectionLost(broker.connector, Failure(ConnectionDone()))
    reactor.advance(1.0)
    # No stray delayed calls may remain after the close notification
    self.assertFalse(broker.clock.getDelayedCalls())
def test_producer_send_messages_batched_fail(self):
    # Verify the producer retries a failing batched produce request
    # max_req_attempts times, waiting _retry_interval between tries,
    # then delivers the final failure to the caller's deferred.
    client = Mock()
    ret = [Deferred(), Deferred(), Deferred()]
    client.send_produce_request.side_effect = ret
    client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    client.metadata_error_for_topic.return_value = False
    msgs = [self.msg("one"), self.msg("two")]
    batch_t = 5
    clock = MemoryReactorClock()
    producer = Producer(client, batch_every_t=batch_t, batch_send=True,
                        clock=clock, max_req_attempts=3)
    # Advance the clock to ensure when no messages to send no error
    clock.advance(batch_t)
    d = producer.send_messages(self.topic, msgs=msgs)
    # Check no request was yet sent
    self.assertFalse(client.send_produce_request.called)
    # Advance the clock
    clock.advance(batch_t)
    # Check the expected request was sent
    msgSet = create_message_set(
        make_send_requests(msgs), producer.codec)
    req = ProduceRequest(self.topic, 0, msgSet)
    produce_request_call = call([req], acks=producer.req_acks,
                                timeout=producer.ack_timeout,
                                fail_on_error=False)
    produce_request_calls = [produce_request_call]
    client.send_produce_request.assert_has_calls(produce_request_calls)
    self.assertNoResult(d)
    # Fire the failure from the first request to the client
    ret[0].errback(OffsetOutOfRangeError(
        'test_producer_send_messages_batched_fail'))
    # Still no result, producer should retry first
    self.assertNoResult(d)
    # Check retry wasn't immediate
    self.assertEqual(client.send_produce_request.call_count, 1)
    # Advance the clock by the retry delay
    clock.advance(producer._retry_interval)
    # Check 2nd send_produce_request (1st retry) was sent
    produce_request_calls.append(produce_request_call)
    client.send_produce_request.assert_has_calls(produce_request_calls)
    # Fire the failure from the 2nd request to the client
    ret[1].errback(BrokerNotAvailableError(
        'test_producer_send_messages_batched_fail_2'))
    # Still no result, producer should retry one more time
    self.assertNoResult(d)
    # Advance the clock by the retry delay
    clock.advance(producer._retry_interval * 1.1)
    # Check 3rd send_produce_request (2nd retry) was sent
    produce_request_calls.append(produce_request_call)
    client.send_produce_request.assert_has_calls(produce_request_calls)
    # Fire the failure from the 3rd (final) request to the client
    ret[2].errback(LeaderNotAvailableError(
        'test_producer_send_messages_batched_fail_3'))
    self.failureResultOf(d, LeaderNotAvailableError)
    producer.stop()
def test_close(self):
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient(reactor, 'test_close', 9092, 'clientId')
    broker._connect()  # Force a connection attempt
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    broker.connector.state = 'connected'  # set the connector to connected state
    close_d = broker.close()
    self.assertIsInstance(close_d, Deferred)
    self.assertNoResult(close_d)
    # Simulate the connection actually dropping
    reason = Failure(ConnectionDone('test_close'))
    broker.clientConnectionLost(broker.connector, reason)
    self.assertNoResult(close_d)
    # Advance the clock so the notify() call fires
    reactor.advance(0.1)
    result = self.successResultOf(close_d)
    self.assertIs(result, None)
def test_close(self):
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient('test_close', reactor=reactor)
    broker._connect()  # Force a connection attempt
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    broker.connector.state = 'connected'  # set the connector to connected state
    close_d = broker.close()
    self.assertIsInstance(close_d, Deferred)
    self.assertNoResult(close_d)
    # Simulate the connection actually dropping
    reason = Failure(ConnectionDone('test_close'))
    broker.clientConnectionLost(broker.connector, reason)
    self.assertNoResult(close_d)
    # Advance the clock so the notify() call fires
    reactor.advance(0.1)
    result = self.successResultOf(close_d)
    self.assertIs(result, None)
def test_producer_stop_during_request(self):
    # Test stopping producer while it's waiting for reply from client
    mock_client = Mock()
    failure = Failure(BrokerNotAvailableError())
    # First attempt fails; the second deferred never fires, leaving
    # the producer waiting on the client when stop() is called.
    responses = [fail(failure), Deferred()]
    mock_client.send_produce_request.side_effect = responses
    mock_client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    mock_client.metadata_error_for_topic.return_value = False
    messages = [self.msg("one"), self.msg("two")]
    clock = MemoryReactorClock()
    producer = Producer(mock_client, batch_every_n=2, batch_send=True,
                        clock=clock)
    send_d = producer.send_messages(self.topic, msgs=messages)
    # No result yet; the first failure forces a retry after a delay
    self.assertNoResult(send_d)
    clock.advance(producer._retry_interval)
    # Stopping while the retry is outstanding cancels the request
    producer.stop()
    self.failureResultOf(send_d, tid_CancelledError)
def test_producer_stop_during_request(self):
    """
    Test stopping producer while it's waiting for reply from client
    """
    clock = MemoryReactorClock()
    mock_client = Mock(reactor=clock)
    failure = Failure(BrokerNotAvailableError())
    # First attempt fails; the second deferred never fires, leaving
    # the producer waiting on the client when stop() is called.
    responses = [fail(failure), Deferred()]
    mock_client.send_produce_request.side_effect = responses
    mock_client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    mock_client.metadata_error_for_topic.return_value = False
    messages = [self.msg("one"), self.msg("two")]
    producer = Producer(mock_client, batch_every_n=2, batch_send=True)
    send_d = producer.send_messages(self.topic, msgs=messages)
    # No result yet; the first failure forces a retry after a delay
    self.assertNoResult(send_d)
    clock.advance(producer._retry_interval)
    # Stopping while the retry is outstanding cancels the request
    producer.stop()
    self.failureResultOf(send_d, tid_CancelledError)
def test_producer_cancel_getting_topic(self):
    # Test cancelling while waiting to retry getting metadata
    clock = MemoryReactorClock()
    client = Mock()
    client.topic_partitions = {}  # start with no metadata
    rets = [Deferred(), Deferred()]
    client.load_metadata_for_topics.side_effect = rets
    msgs = [self.msg("one"), self.msg("two")]
    producer = Producer(client, clock=clock)
    d1 = producer.send_messages(self.topic, msgs=msgs)
    # Check that no request was sent
    self.assertFalse(client.send_produce_request.called)
    # Fire the result of load_metadata_for_topics, but
    # metadata_error_for_topic is still True, so it'll retry after delay
    rets[0].callback(None)
    # Advance to partway thru the delay (not enough to retry)
    clock.advance(producer._retry_interval / 2)
    # Cancel the request and make sure we got the CancelledError
    d1.cancel()
    self.failureResultOf(d1, CancelledError)
    # Check that still no request was sent
    self.assertFalse(client.send_produce_request.called)
    # Setup the client's topics and trigger the metadata deferred
    client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    client.metadata_error_for_topic.return_value = False
    rets[1].callback(None)
    # Check that still no request was sent
    self.assertFalse(client.send_produce_request.called)
    # Advance the clock again to complete the delay
    clock.advance(producer._retry_interval)
    # Make sure the retry got reset
    self.assertEqual(producer._retry_interval,
                     producer._init_retry_interval)
    producer.stop()
def test_producer_cancel_getting_topic(self):
    # Test cancelling while waiting to retry getting metadata
    clock = MemoryReactorClock()
    client = Mock()
    client.topic_partitions = {}  # start with no metadata
    rets = [Deferred(), Deferred()]
    client.load_metadata_for_topics.side_effect = rets
    msgs = [self.msg("one"), self.msg("two")]
    producer = Producer(client, clock=clock)
    d1 = producer.send_messages(self.topic, msgs=msgs)
    # Check that no request was sent
    self.assertFalse(client.send_produce_request.called)
    # Fire the result of load_metadata_for_topics, but
    # metadata_error_for_topic is still True, so it'll retry after delay
    rets[0].callback(None)
    # Advance to partway thru the delay (not enough to retry)
    clock.advance(producer._retry_interval / 2)
    # Cancel the request and make sure we got the CancelledError
    d1.cancel()
    self.failureResultOf(d1, CancelledError)
    # Check that still no request was sent
    self.assertFalse(client.send_produce_request.called)
    # Setup the client's topics and trigger the metadata deferred
    client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    client.metadata_error_for_topic.return_value = False
    rets[1].callback(None)
    # Check that still no request was sent
    self.assertFalse(client.send_produce_request.called)
    # Advance the clock again to complete the delay
    clock.advance(producer._retry_interval)
    # Make sure the retry got reset
    self.assertEqual(producer._retry_interval,
                     producer._init_retry_interval)
    producer.stop()
def test_producer_stop_waiting_to_retry(self):
    """
    Test stopping producer while it's waiting to retry a request
    """
    clock = MemoryReactorClock()
    mock_client = Mock(reactor=clock)
    failure = Failure(BrokerNotAvailableError())
    # The one and only produce attempt fails immediately
    mock_client.send_produce_request.side_effect = [fail(failure)]
    mock_client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    mock_client.metadata_error_for_topic.return_value = False
    messages = [self.msg("one"), self.msg("two")]
    producer = Producer(mock_client, batch_every_n=2, batch_send=True)
    send_d = producer.send_messages(self.topic, msgs=messages)
    # No result yet; the failure forces a retry after a delay
    self.assertNoResult(send_d)
    # Advance partway through the retry delay
    clock.advance(producer._retry_interval / 2)
    # Stop the producer before the retry happens
    producer.stop()
    self.failureResultOf(send_d, tid_CancelledError)
def test_makeRequest_fails(self):
    # A failure inside the protocol's send must not blow up makeRequest
    req_id = 15432
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient('testmakeRequest', reactor=reactor)
    request = KafkaCodec.encode_fetch_request('testmakeRequest', req_id)
    deferred = broker.makeRequest(req_id, request)
    errback = Mock()
    self.assertIsInstance(deferred, Deferred)
    deferred.addErrback(errback)
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    # Pretend the connection came up
    broker.buildProtocol(None)
    # Swap in a mock protocol whose sendString raises
    broker.proto = Mock()
    broker.proto.sendString.side_effect = StringTooLongError(
        "Tried to send too many bytes")
    # Advance the clock so sendQueued() will be called
    reactor.advance(1.0)
    # The proto should have been asked to sendString the request
    broker.proto.sendString.assert_called_once_with(request)
    # Now close the KafkaBrokerClient
    broker.close()
def test_makeRequest_fails(self):
    # A failure inside the protocol's send must not blow up makeRequest
    req_id = 15432
    reactor = MemoryReactorClock()
    broker = KafkaBrokerClient(reactor, 'testmakeRequest', 9092, 'clientId')
    request = KafkaCodec.encode_fetch_request(b'testmakeRequest', req_id)
    deferred = broker.makeRequest(req_id, request)
    errback = Mock()
    self.assertIsInstance(deferred, Deferred)
    deferred.addErrback(errback)
    broker.connector.factory = broker  # MemoryReactor doesn't make this connection.
    # Pretend the connection came up
    broker.buildProtocol(None)
    # Swap in a mock protocol whose sendString raises
    broker.proto = Mock()
    broker.proto.sendString.side_effect = StringTooLongError(
        "Tried to send too many bytes")
    # Advance the clock so sendQueued() will be called
    reactor.advance(1.0)
    # The proto should have been asked to sendString the request
    broker.proto.sendString.assert_called_once_with(request)
    # Now close the KafkaBrokerClient
    broker.close()
def test_producer_stop_waiting_to_retry(self):
    # Test stopping producer while it's waiting to retry a request
    mock_client = Mock()
    failure = Failure(BrokerNotAvailableError())
    # The one and only produce attempt fails immediately
    mock_client.send_produce_request.side_effect = [fail(failure)]
    mock_client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    mock_client.metadata_error_for_topic.return_value = False
    messages = [self.msg("one"), self.msg("two")]
    clock = MemoryReactorClock()
    producer = Producer(mock_client, batch_every_n=2, batch_send=True,
                        clock=clock)
    send_d = producer.send_messages(self.topic, msgs=messages)
    # No result yet; the failure forces a retry after a delay
    self.assertNoResult(send_d)
    # Advance partway through the retry delay
    clock.advance(producer._retry_interval / 2)
    # Stop the producer before the retry happens
    producer.stop()
    self.failureResultOf(send_d, tid_CancelledError)
def test_requestsRetried(self):
    # A request that was sent but unanswered when the connection drops
    # must be marked unsent again and re-sent on the new connection.
    id1 = 65432
    reactor = MemoryReactorClock()
    c = KafkaBrokerClient('testrequestsRetried', reactor=reactor)
    request = KafkaCodec.encode_fetch_request(
        'testrequestsRetried', id1)
    c.makeRequest(id1, request)
    # Make sure the request shows unsent
    self.assertFalse(c.requests[id1].sent)
    c.connector.factory = c  # MemoryReactor doesn't make this connection.
    # Bring up the "connection"...
    c.buildProtocol(None)
    # Replace the created proto with a mock
    c.proto = Mock()
    reactor.advance(0.1)
    # Now, we should have seen the 'sendString' called
    c.proto.sendString.assert_called_once_with(request)
    # And the request should be 'sent'
    self.assertTrue(c.requests[id1].sent)
    # Before the reply 'comes back' drop the connection
    from twisted.internet.main import CONNECTION_LOST
    c.clientConnectionLost(c.connector, Failure(CONNECTION_LOST))
    # Make sure the proto was reset
    self.assertIs(c.proto, None)
    # Advance the clock again
    reactor.advance(0.1)
    # Make sure the request shows unsent (it is queued for resend)
    self.assertFalse(c.requests[id1].sent)
    # Bring up the "connection"...
    c.buildProtocol(None)
    # Replace the created proto with a (fresh) mock
    c.proto = Mock()
    reactor.advance(0.1)
    # Now, we should have seen the 'sendString' called on the new proto
    c.proto.sendString.assert_called_once_with(request)
    # And the request should be 'sent'
    self.assertTrue(c.requests[id1].sent)
def test_requestsRetried(self):
    # A request that was sent but unanswered when the connection drops
    # must be marked unsent again and re-sent on the new connection.
    id1 = 65432
    reactor = MemoryReactorClock()
    c = KafkaBrokerClient(reactor, 'testrequestsRetried', 9092, 'clientId')
    request = KafkaCodec.encode_fetch_request(b'testrequestsRetried', id1)
    c.makeRequest(id1, request)
    # Make sure the request shows unsent
    self.assertFalse(c.requests[id1].sent)
    c.connector.factory = c  # MemoryReactor doesn't make this connection.
    # Bring up the "connection"...
    c.buildProtocol(None)
    # Replace the created proto with a mock
    c.proto = Mock()
    reactor.advance(0.1)
    # Now, we should have seen the 'sendString' called
    c.proto.sendString.assert_called_once_with(request)
    # And the request should be 'sent'
    self.assertTrue(c.requests[id1].sent)
    # Before the reply 'comes back' drop the connection
    from twisted.internet.main import CONNECTION_LOST
    c.clientConnectionLost(c.connector, Failure(CONNECTION_LOST))
    # Make sure the proto was reset
    self.assertIs(c.proto, None)
    # Advance the clock again
    reactor.advance(0.1)
    # Make sure the request shows unsent (it is queued for resend)
    self.assertFalse(c.requests[id1].sent)
    # Bring up the "connection"...
    c.buildProtocol(None)
    # Replace the created proto with a (fresh) mock
    c.proto = Mock()
    reactor.advance(0.1)
    # Now, we should have seen the 'sendString' called on the new proto
    c.proto.sendString.assert_called_once_with(request)
    # And the request should be 'sent'
    self.assertTrue(c.requests[id1].sent)
class ServiceProtocolTest(TestCase):
    """Tests for ServiceProtocol driven entirely by a fake reactor/process."""

    def setUp(self):
        super(ServiceProtocolTest, self).setUp()
        # Capture log output and build a fake reactor/process pair so no
        # real child process is ever spawned.
        self.logger = self.useFixture(FakeLogger())
        self.reactor = MemoryReactorClock()
        self.process = MemoryProcess()
        self.protocol = ServiceProtocol(self.reactor)
        self.process.proto = self.protocol

    def test_fork(self):
        """
        When the connection is made it means that we successfully forked
        the service process, so we start waiting a bit to see if it stays
        running or exits shortly.
        """
        self.protocol.makeConnection(self.process)
        # Two timers are scheduled: the minUptime check and the timeout.
        [call1, call2] = self.reactor.getDelayedCalls()
        self.assertEqual(call1.time, self.protocol.minUptime)
        self.assertEqual(call2.time, self.protocol.timeout)
        self.assertIn("Service process spawned", self.logger.output)

    def test_min_uptime(self):
        """
        If the process stays running for at least minUptime seconds, the
        'ready' Deferred gets fired.
        """
        self.protocol.makeConnection(self.process)
        self.reactor.advance(0.2)
        self.assertThat(self.protocol.ready, succeeded(Is(None)))
        self.assertIn("Service process alive for 0.2 seconds",
                      self.logger.output)

    def test_expected_output(self):
        """
        If some expected output is required, the 'ready' deferred fires
        only when such output has been received.
        """
        self.protocol.expectedOutput = "hello"
        self.protocol.makeConnection(self.process)
        self.reactor.advance(self.protocol.minUptime)
        # Uptime alone is not enough when output is expected.
        self.assertThat(self.protocol.ready, has_no_result())
        self.protocol.outReceived(b"hello world!\n")
        self.assertThat(self.protocol.ready, succeeded(Is(None)))
        self.assertIn("Service process emitted 'hello'", self.logger.output)

    def test_expected_port(self):
        """
        If some expected port is required, the 'ready' deferred fires
        only when such port has been opened.
        """
        self.protocol.expectedPort = 1234
        self.protocol.makeConnection(self.process)
        self.reactor.advance(self.protocol.minUptime)
        self.assertThat(self.protocol.ready, has_no_result())
        # Complete the probe connection recorded by the memory reactor.
        factory = self.reactor.tcpClients[0][2]
        factory.buildProtocol(None).connectionMade()
        self.assertThat(self.protocol.ready, succeeded(Is(None)))
        self.assertIn("Service opened port 1234", self.logger.output)

    def test_expected_port_probe_failed(self):
        """
        If probing for the expected port fails, the probe will be retried.
        """
        self.protocol.expectedPort = 1234
        self.protocol.makeConnection(self.process)
        self.reactor.advance(self.protocol.minUptime)
        self.assertThat(self.protocol.ready, has_no_result())
        # Fail the first probe; a retry should be scheduled.
        factory = self.reactor.tcpClients[0][2]
        factory.clientConnectionFailed(None, ConnectionRefusedError())
        self.assertIn("Service port probe failed", self.logger.output)
        self.reactor.advance(0.1)
        # The second probe succeeds.
        factory = self.reactor.tcpClients[1][2]
        factory.buildProtocol(None).connectionMade()
        self.assertThat(self.protocol.ready, succeeded(Is(None)))
        self.assertIn("Service opened port 1234", self.logger.output)

    def test_process_dies_shortly_after_fork(self):
        """
        If the service process exits right after having been spawned (for
        example the executable was not found), the 'ready' Deferred fires
        with an errback.
        """
        self.protocol.makeConnection(self.process)
        error = ProcessTerminated(exitCode=1, signal=None)
        self.protocol.processExited(Failure(error))
        self.assertThat(self.protocol.ready,
                        failed(MatchesStructure(value=Is(error))))

    def test_cancel_while_waiting_for_uptime(self):
        """
        If the 'ready' deferred gets cancelled while still waiting for
        the minimum uptime, a proper message is emitted.
        """
        self.protocol.makeConnection(self.process)
        self.protocol.ready.cancel()
        self.assertIn("minimum uptime not yet elapsed", self.logger.output)
        self.assertThat(
            self.protocol.ready,
            failed(MatchesStructure(value=IsInstance(CancelledError))))

    def test_process_dies_while_waiting_expected_output(self):
        """
        If the service process exits while waiting for the expected
        output, the 'ready' Deferred fires with an errback.
        """
        self.protocol.expectedOutput = "hello"
        self.protocol.makeConnection(self.process)
        self.reactor.advance(self.protocol.minUptime)
        error = ProcessTerminated(exitCode=1, signal=None)
        self.protocol.processExited(Failure(error))
        self.assertThat(self.protocol.ready,
                        failed(MatchesStructure(value=Is(error))))
        # Further input received on the file descriptor will be discarded
        self.protocol.ready = Deferred()  # pretend that we didn't get fired
        self.protocol.outReceived(b"hello world!\n")
        self.assertThat(self.protocol.ready, has_no_result())

    def test_timeout_while_waiting_expected_output(self):
        """
        If the timeout elapses while waiting for the expected output, the
        'ready' Deferred fires with an errback.
        """
        self.protocol.expectedOutput = "hello"
        self.protocol.makeConnection(self.process)
        self.reactor.advance(self.protocol.minUptime)
        self.reactor.advance(self.protocol.timeout)
        self.assertThat(
            self.protocol.ready,
            failed(MatchesStructure(value=IsInstance(TimeoutError))))
        self.assertIn("expected output not yet received", self.logger.output)

    def test_process_dies_while_probing_port(self):
        """
        If the service process exits while waiting for the expected port
        to be open, the 'ready' Deferred fires with an errback.
        """
        self.protocol.expectedPort = 1234
        self.protocol.makeConnection(self.process)
        self.reactor.advance(self.protocol.minUptime)
        error = ProcessTerminated(exitCode=1, signal=None)
        self.protocol.processExited(Failure(error))
        self.assertThat(self.protocol.ready,
                        failed(MatchesStructure(value=Is(error))))
        # No further probe will happen
        self.reactor.advance(0.1)
        self.assertEqual(1, len(self.reactor.tcpClients))

    def test_timeout_while_probing_port(self):
        """
        If the service process doesn't listen to the expected port within
        the timeout, 'ready' Deferred fires with an errback.
        """
        self.protocol.expectedPort = 1234
        self.protocol.makeConnection(self.process)
        self.reactor.advance(self.protocol.minUptime)
        self.reactor.advance(self.protocol.timeout)
        self.assertThat(
            self.protocol.ready,
            failed(MatchesStructure(value=IsInstance(TimeoutError))))
        self.assertIn("expected port not yet open", self.logger.output)

    def test_cancel_ready(self):
        """
        If the `ready` deferred gets cancelled, the protocol will stop
        doing anything related to waiting for the service to be ready.
        """
        self.protocol.makeConnection(self.process)
        self.protocol.ready.cancel()
        self.assertThat(
            self.protocol.ready,
            failed(MatchesStructure(value=IsInstance(CancelledError))))
        # All pending timers must have been cancelled as well.
        self.assertEqual(0, len(self.reactor.getDelayedCalls()))

    def test_terminated(self):
        """
        When the process is fully terminated, the 'terminated' deferred
        gets fired.
        """
        self.protocol.makeConnection(self.process)
        self.reactor.advance(self.protocol.minUptime)
        self.protocol.transport.processEnded(0)
        self.assertThat(self.protocol.terminated, succeeded(Is(None)))
class JsonResourceTests(unittest.TestCase):
    """Tests for JsonResource request routing and error translation."""

    def setUp(self):
        # Deterministic fake reactor/clock pair backing a test homeserver.
        self.reactor = MemoryReactorClock()
        self.hs_clock = Clock(self.reactor)
        self.homeserver = setup_test_homeserver(
            http_client=None, clock=self.hs_clock, reactor=self.reactor)

    def test_handler_for_request(self):
        """
        JsonResource.handler_for_request gives correctly decoded URL args to
        the callback, while Twisted will give the raw bytes of URL query
        arguments.
        """
        got_kwargs = {}

        def _callback(request, **kwargs):
            got_kwargs.update(kwargs)
            return (200, kwargs)

        res = JsonResource(self.homeserver)
        res.register_paths("GET",
                           [re.compile("^/foo/(?P<room_id>[^/]*)$")],
                           _callback)
        request, channel = make_request(b"GET", b"/foo/%E2%98%83?a=%E2%98%83")
        request.render(res)
        # Query args stay raw bytes; path captures are percent-decoded text.
        self.assertEqual(request.args, {b'a': [u"\N{SNOWMAN}".encode('utf8')]})
        self.assertEqual(got_kwargs, {u"room_id": u"\N{SNOWMAN}"})

    def test_callback_direct_exception(self):
        """
        If the web callback raises an uncaught exception, it will be
        translated into a 500.
        """
        def _callback(request, **kwargs):
            raise Exception("boo")

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/foo$")], _callback)
        request, channel = make_request(b"GET", b"/foo")
        request.render(res)
        self.assertEqual(channel.result["code"], b'500')

    def test_callback_indirect_exception(self):
        """
        If the web callback raises an uncaught exception in a Deferred, it
        will be translated into a 500.
        """
        def _throw(*args):
            raise Exception("boo")

        def _callback(request, **kwargs):
            # The Deferred only fires (and fails) after the reactor advances.
            d = Deferred()
            d.addCallback(_throw)
            self.reactor.callLater(1, d.callback, True)
            return d

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/foo$")], _callback)
        request, channel = make_request(b"GET", b"/foo")
        request.render(res)
        # No error has been raised yet
        self.assertTrue("code" not in channel.result)
        # Advance time, now there's an error
        self.reactor.advance(1)
        self.assertEqual(channel.result["code"], b'500')

    def test_callback_synapseerror(self):
        """
        If the web callback raises a SynapseError, it returns the
        appropriate status code and message set in it.
        """
        def _callback(request, **kwargs):
            raise SynapseError(403, "Forbidden!!one!", Codes.FORBIDDEN)

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/foo$")], _callback)
        request, channel = make_request(b"GET", b"/foo")
        request.render(res)
        self.assertEqual(channel.result["code"], b'403')
        reply_body = json.loads(channel.result["body"])
        self.assertEqual(reply_body["error"], "Forbidden!!one!")
        self.assertEqual(reply_body["errcode"], "M_FORBIDDEN")

    def test_no_handler(self):
        """
        If there is no handler to process the request, Synapse will return
        400.
        """
        def _callback(request, **kwargs):
            """
            Not ever actually called!
            """
            self.fail("shouldn't ever get here")

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/foo$")], _callback)
        # Request a path the resource has no handler registered for.
        request, channel = make_request(b"GET", b"/foobar")
        request.render(res)
        self.assertEqual(channel.result["code"], b'400')
        reply_body = json.loads(channel.result["body"])
        self.assertEqual(reply_body["error"], "Unrecognized request")
        self.assertEqual(reply_body["errcode"], "M_UNRECOGNIZED")
def test_subscribersListCalls(self):
    """
    test_subscribersListCalls
    Test that a brokerclient's connSubscribers callbacks are called in the
    proper order, and that all the deferreds of a previous call are
    resolved before the next round of calls is done.
    """
    reactor = MemoryReactorClock()
    # NOTE: the subscriber closures below all share this variable via the
    # enclosing function's cell, so rebinding ``callList = []`` later
    # redirects their appends to the fresh list too.
    callList = []

    def c1(c, conn, reason):
        s = 'c1:{0}'.format(conn)
        if reason is not None:
            s += ':' + reason
        callList.append(s)

    def c2(c, conn, reason):
        # c2 returns a Deferred that only fires after 1s of reactor time,
        # which is what delays the *next* notification round.
        def c2_cb(_, c, conn, reason):
            callList.append('c2_cb:{0}'.format(conn))
        d = Deferred()
        d.addCallback(c2_cb, c, conn, reason)
        reactor.callLater(1.0, d.callback, None)
        s = 'c2:{0}'.format(conn)
        if reason is not None:
            s += ':' + reason
        callList.append(s)
        return d

    def c3(c, conn, reason):
        s = 'c3:{0}'.format(conn)
        if reason is not None:
            s += ':' + reason
        callList.append(s)

    def c4(c, conn, reason):
        callList.append('c4:{0}'.format(conn))

    def c5(c, conn, reason):
        callList.append('c5:{0}'.format(conn))

    sublist = [c1, c2, c3]
    c = KafkaBrokerClient('slc', subscribers=sublist, reactor=reactor)

    # Trigger the call to the 3 subscribers
    c._notify(True)
    self.assertEqual(callList, ['c1:True', 'c2:True', 'c3:True'])
    callList = []
    c._notify(False)
    # Nothing should be called yet, because the c2_cb
    # callback hasn't been called yet...
    self.assertEqual(callList, [])
    # advance the clock to trigger the callback to c2_cb
    reactor.advance(1.0)
    self.assertEqual(callList, ['c2_cb:True', 'c1:False',
                                'c2:False', 'c3:False'])
    callList = []
    reactor.advance(1.0)
    self.assertEqual(callList, ['c2_cb:False'])
    callList = []

    # Trigger the call to the subscribers
    c._notify(True, reason='TheReason')
    c.addSubscriber(c4)
    self.assertEqual(callList,
                     ['c1:True:TheReason', 'c2:True:TheReason',
                      'c3:True:TheReason'])
    callList = []
    c._notify(False)
    self.assertEqual(callList, [])
    # Add a subscriber after the notify call, but before the advance
    # and ensure that the new subscriber isn't notified for the event
    # which occurred before it was added
    c.addSubscriber(c5)
    # advance the clock to trigger the callback to c2_cb
    reactor.advance(1.0)
    self.assertEqual(callList,
                     ['c2_cb:True', 'c1:False', 'c2:False',
                      'c3:False', 'c4:False'])
    callList = []

    c.delSubscriber(c2)
    # advance the clock to trigger the callback to c2_cb
    reactor.advance(1.0)
    # We should still get the c2_cb:False here...
    self.assertEqual(callList, ['c2_cb:False'])
    callList = []

    c.delSubscriber(c4)
    # Trigger the call to the subscribers
    c._notify(True)
    reactor.advance(1.0)
    self.assertEqual(callList, ['c1:True', 'c3:True', 'c5:True'])
    callList = []
    c._notify(False)
    reactor.advance(1.0)
    self.assertEqual(callList, ['c1:False', 'c3:False', 'c5:False'])
    callList = []
def test_producer_send_messages_batched_partial_success(self):
    """test_producer_send_messages_batched_partial_success
    This tests the complexity of the error handling for a single batch
    request.
    Scenario: The producer's caller sends 5 requests to two (total) topics
              The client's metadata is such that the producer will produce
                requests to post msgs to 5 separate topic/partition tuples
              The batch size is reached, so the producer sends the request
              The caller then cancels one of the requests
              The (mock) client returns partial success in the form of a
                FailedPayloadsError.
              The Producer then should return the successful results and
                retry the failed.
              The (mock) client then "succeeds" the remaining results.
    """
    client = Mock()
    topic2 = 'tpsmbps_two'
    client.topic_partitions = {self.topic: [0, 1, 2, 3], topic2: [4, 5, 6]}
    client.metadata_error_for_topic.return_value = False
    # Offsets are plain int literals rather than Python-2-only longs
    # (``10L``), which are a SyntaxError under Python 3; on Python 2,
    # int and long compare equal, so behaviour is unchanged.
    init_resp = [
        ProduceResponse(self.topic, 0, 0, 10),
        ProduceResponse(self.topic, 1, 6, 20),
        ProduceResponse(topic2, 5, 0, 30),
    ]
    next_resp = [
        ProduceResponse(self.topic, 2, 0, 10),
        ProduceResponse(self.topic, 1, 0, 20),
        ProduceResponse(topic2, 4, 0, 30),
    ]
    failed_payloads = [
        (ProduceRequest(self.topic, ANY, ANY),
         NotLeaderForPartitionError()),
        (ProduceRequest(topic2, ANY, ANY), BrokerNotAvailableError()),
    ]

    # First call partially fails, second call succeeds the remainder.
    f = Failure(FailedPayloadsError(init_resp, failed_payloads))
    ret = [fail(f), succeed(next_resp)]
    client.send_produce_request.side_effect = ret

    msgs = self.msgs(range(10))
    results = []

    clock = MemoryReactorClock()
    producer = Producer(client, batch_send=True, batch_every_t=0,
                        clock=clock)
    # Send 5 total requests: 4 here, one after we make sure we didn't
    # send early
    results.append(producer.send_messages(self.topic, msgs=msgs[0:3]))
    results.append(producer.send_messages(topic2, msgs=msgs[3:5]))
    results.append(producer.send_messages(self.topic, msgs=msgs[5:8]))
    results.append(producer.send_messages(topic2, msgs=msgs[8:9]))
    # No call yet, not enough messages
    self.assertFalse(client.send_produce_request.called)
    # Enough messages to start the request
    results.append(producer.send_messages(self.topic, msgs=msgs[9:10]))
    # Before the retry, there should be some results
    self.assertEqual(init_resp[0], self.successResultOf(results[0]))
    self.assertEqual(init_resp[2], self.successResultOf(results[3]))
    # Advance the clock
    clock.advance(producer._retry_interval)
    # Check the other results came in
    self.assertEqual(next_resp[0], self.successResultOf(results[4]))
    self.assertEqual(next_resp[1], self.successResultOf(results[2]))
    self.assertEqual(next_resp[2], self.successResultOf(results[1]))
    producer.stop()
def test_delay_reset(self):
    """
    Test that reconnect delay is handled correctly:
    1) That initializer values are respected
    2) That delay maximum is respected
    3) That delay is reset to initial delay on successful connection
    """
    init_delay = last_delay = 0.025
    max_delay = 14
    reactor = MemoryReactorClock()
    c = KafkaBrokerClient(reactor, 'test_delay_reset', 9092, 'clientId',
                          initDelay=init_delay, maxDelay=max_delay)
    c.jitter = 0  # Eliminate randomness for test
    # Ensure KBC was initialized correctly
    self.assertEqual(c.retries, 0)
    self.assertEqual(c.delay, init_delay)
    self.assertEqual(c.maxDelay, max_delay)
    self.assertTrue(c.continueTrying)

    c._connect()  # Force a connection attempt
    c.connector.factory = c  # MemoryReactor doesn't make this connection.
    self.assertTrue(c.connector.connectCalled)
    # Reset it so we can track future calls
    c.connector.connectCalled = False

    # Build the protocol, like a real connector would on successful connect
    c.buildProtocol(None)

    # Fake server connection close
    f = Failure(ConnectionDone('test_delay_reset'))
    c.clientConnectionLost(c.connector, f)

    # Now loop failing connection attempts until we get to the max;
    # each pass checks exponential backoff (delay *= factor) and that
    # the reconnect happens only after the full delay has elapsed.
    while c.delay < max_delay:
        # Assert a reconnect wasn't immediately attempted
        self.assertFalse(c.connector.connectCalled)
        # Assert the new delay was calculated correctly
        self.assertEqual(last_delay * c.factor, c.delay)
        last_delay = c.delay
        # advance the reactor, but not enough to connect
        reactor.advance(0.1 * c.delay)
        # Still no connection
        self.assertFalse(c.connector.connectCalled)
        # Should see a connection attempt after this
        reactor.advance(c.delay)
        self.assertTrue(c.connector.connectCalled)
        c.connector.connectCalled = False  # Reset again
        # Claim the connection failed
        e = ConnectionRefusedError()
        c.connector.connectionFailed(e)

    # Assert the delay was calculated correctly (clamped at the max)
    self.assertEqual(max_delay, c.delay)
    self.assertFalse(c.connector.connectCalled)
    # "run" the reactor, but not enough to connect
    reactor.advance(0.1 * c.delay)
    # Still no connection
    self.assertFalse(c.connector.connectCalled)
    # Should see a connection attempt after this
    reactor.advance(c.delay)
    self.assertTrue(c.connector.connectCalled)
    # Build the protocol, like a real connector would on successful connect
    c.buildProtocol(None)
    # Assert that the delay and retry count were reset
    self.assertEqual(init_delay, c.delay)
    self.assertEqual(c.retries, 0)
def test_producer_send_messages_batched_fail(self):
    """
    A batched produce request that fails on every attempt exhausts the
    producer's max_req_attempts (3) and fails the caller's Deferred with
    the final error.
    """
    client = Mock()
    ret = [Deferred(), Deferred(), Deferred()]
    client.send_produce_request.side_effect = ret
    client.topic_partitions = {self.topic: [0, 1, 2, 3]}
    client.metadata_error_for_topic.return_value = False
    msgs = [self.msg("one"), self.msg("two")]
    batch_t = 5
    clock = MemoryReactorClock()
    producer = Producer(client, batch_every_t=batch_t, batch_send=True,
                        clock=clock, max_req_attempts=3)
    # Advance the clock to ensure when no messages to send no error
    clock.advance(batch_t)
    d = producer.send_messages(self.topic, msgs=msgs)
    # Check no request was yet sent
    self.assertFalse(client.send_produce_request.called)
    # Advance the clock
    clock.advance(batch_t)
    # Check the expected request was sent
    msgSet = create_message_set(make_send_requests(msgs), producer.codec)
    req = ProduceRequest(self.topic, 0, msgSet)
    produce_request_call = call([req], acks=producer.req_acks,
                                timeout=producer.ack_timeout,
                                fail_on_error=False)
    produce_request_calls = [produce_request_call]
    client.send_produce_request.assert_has_calls(produce_request_calls)
    self.assertNoResult(d)
    # Fire the failure from the first request to the client
    ret[0].errback(
        OffsetOutOfRangeError('test_producer_send_messages_batched_fail'))
    # Still no result, producer should retry first
    self.assertNoResult(d)
    # Check retry wasn't immediate
    self.assertEqual(client.send_produce_request.call_count, 1)
    # Advance the clock by the retry delay
    clock.advance(producer._retry_interval)
    # Check 2nd send_produce_request (1st retry) was sent
    produce_request_calls.append(produce_request_call)
    client.send_produce_request.assert_has_calls(produce_request_calls)
    # Fire the failure from the 2nd request to the client
    ret[1].errback(
        BrokerNotAvailableError(
            'test_producer_send_messages_batched_fail_2'))
    # Still no result, producer should retry one more time
    self.assertNoResult(d)
    # Advance the clock by the retry delay
    clock.advance(producer._retry_interval * 1.1)
    # Check 3rd send_produce_request (2nd retry) was sent
    produce_request_calls.append(produce_request_call)
    client.send_produce_request.assert_has_calls(produce_request_calls)
    # Fire the failure from the 3rd request to the client; attempts are
    # now exhausted, so the caller's Deferred fails with this error.
    ret[2].errback(
        LeaderNotAvailableError(
            'test_producer_send_messages_batched_fail_3'))
    self.failureResultOf(d, LeaderNotAvailableError)
    producer.stop()
def make_request_with_cancellation_test(
    test_name: str,
    reactor: MemoryReactorClock,
    site: Site,
    method: str,
    path: str,
    content: Union[bytes, str, JsonDict] = b"",
) -> FakeChannel:
    """Performs a request repeatedly, disconnecting at successive `await`s,
    until one completes.

    Fails if:
        * A logging context is lost during cancellation.
        * A logging context get restarted after it is marked as finished,
          eg. if a request's logging context is used by some processing
          started by the request, but the request neglects to cancel that
          processing or wait for it to complete.

          Note that "Re-starting finished log context" errors get raised
          within the request handling code and may or may not get caught.
          These errors will likely manifest as a different logging context
          error at a later point. When debugging logging context failures,
          setting a breakpoint in `logcontext_error` can prove useful.
        * A request gets stuck, possibly due to a previous cancellation.
        * The request does not return a 499 when the client disconnects.
          This implies that a `CancelledError` was swallowed somewhere.

    It is up to the caller to verify that the request returns the correct
    data when it finally runs to completion.

    Note that this function can only cover a single code path and does not
    guarantee that an endpoint is compatible with cancellation on every
    code path. To allow inspection of the code path that is being tested,
    this function will log the stack trace at every `await` that gets
    cancelled. To view these log lines, `trial` can be run with the
    `SYNAPSE_TEST_LOG_LEVEL=INFO` environment variable, which will include
    the log lines in `_trial_temp/test.log`. Alternatively,
    `_log_for_request` can be modified to write to `sys.stdout`.

    Args:
        test_name: The name of the test, which will be logged.
        reactor: The twisted reactor running the request handler.
        site: The twisted `Site` to use to render the request.
        method: The HTTP request method ("verb").
        path: The HTTP path, suitably URL encoded (e.g. escaped UTF-8 &
            spaces and such).
        content: The body of the request.

    Returns:
        The `FakeChannel` object which stores the result of the final
        request that runs to completion.
    """
    # To process a request, a coroutine run is created for the async method
    # handling the request. That method may then start other coroutine runs,
    # wrapped in `Deferred`s.
    #
    # We would like to trigger a cancellation at the first `await`, re-run
    # the request and cancel at the second `await`, and so on. By patching
    # `Deferred.__next__`, we can intercept `await`s, track which ones we
    # have or have not seen, and force them to block when they wouldn't
    # have.

    # The set of previously seen `await`s.
    # Each element is a stringified stack trace.
    seen_awaits: Set[Tuple[str, ...]] = set()

    _log_for_request(
        0, f"Running make_request_with_cancellation_test for {test_name}..."
    )

    for request_number in itertools.count(1):
        deferred_patch = Deferred__next__Patch(seen_awaits, request_number)

        try:
            with mock.patch(
                "synapse.http.server.respond_with_json",
                wraps=respond_with_json,
            ) as respond_mock:
                with deferred_patch.patch():
                    # Start the request.
                    channel = make_request(
                        reactor, site, method, path, content,
                        await_result=False,
                    )
                    request = channel.request

                    # Run the request until we see a new `await` which we
                    # have not yet cancelled at, or it completes.
                    while (
                        not respond_mock.called
                        and not deferred_patch.new_await_seen
                    ):
                        previous_awaits_seen = deferred_patch.awaits_seen

                        reactor.advance(0.0)

                        if deferred_patch.awaits_seen == previous_awaits_seen:
                            # We didn't see any progress. Try advancing the
                            # clock.
                            reactor.advance(1.0)

                        if deferred_patch.awaits_seen == previous_awaits_seen:
                            # We still didn't see any progress. The request
                            # might be stuck.
                            raise AssertionError(
                                "Request appears to be stuck, possibly due "
                                "to a previous cancelled request"
                            )

                if respond_mock.called:
                    # The request ran to completion and we are done with
                    # testing it.

                    # `respond_with_json` writes the response
                    # asynchronously, so we might have to give the reactor
                    # a kick before the channel gets the response.
                    deferred_patch.unblock_awaits()
                    channel.await_result()

                    return channel

                # Disconnect the client and wait for the response.
                request.connectionLost(reason=ConnectionDone())

                _log_for_request(request_number, "--- disconnected ---")

                # Advance the reactor just enough to get a response.
                # We don't want to advance the reactor too far, because we
                # can only detect re-starts of finished logging contexts
                # after we set the finished flag below.
                for _ in range(2):
                    # We may need to pump the reactor to allow
                    # `delay_cancellation`s to finish.
                    if not respond_mock.called:
                        reactor.advance(0.0)

                    # Try advancing the clock if that didn't work.
                    if not respond_mock.called:
                        reactor.advance(1.0)

                    # `delay_cancellation`s may be waiting for processing
                    # that we've forced to block. Try unblocking them,
                    # followed by another round of pumping the reactor.
                    if not respond_mock.called:
                        deferred_patch.unblock_awaits()

                # Mark the request's logging context as finished. If it
                # gets activated again, an `AssertionError` will be raised
                # and bubble up through request handling code. This
                # `AssertionError` may or may not be caught. Eventually
                # some other code will deactivate the logging context which
                # will raise a different `AssertionError` because resource
                # usage won't have been correctly tracked.
                if isinstance(request, SynapseRequest) and request.logcontext:
                    request.logcontext.finished = True

                # Check that the request finished with a 499,
                # ie. the `CancelledError` wasn't swallowed.
                respond_mock.assert_called_once()

                if request.code != HTTP_STATUS_REQUEST_CANCELLED:
                    raise AssertionError(
                        f"{request.code} != "
                        f"{HTTP_STATUS_REQUEST_CANCELLED} : "
                        "Cancelled request did not finish with the correct "
                        "status code."
                    )
        finally:
            # Unblock any processing that might be shared between requests,
            # if we haven't already done so.
            deferred_patch.unblock_awaits()

    assert False, "unreachable"  # noqa: B011
def test_disconnect(
    reactor: MemoryReactorClock,
    channel: FakeChannel,
    expect_cancellation: bool,
    expected_body: Union[bytes, JsonDict],
    expected_code: Optional[int] = None,
) -> None:
    """Disconnects an in-flight request and checks the response.

    Args:
        reactor: The twisted reactor running the request handler.
        channel: The `FakeChannel` for the request.
        expect_cancellation: `True` if request processing is expected to
            be cancelled, `False` if the request should run to completion.
        expected_body: The expected response for the request.
        expected_code: The expected status code for the request. Defaults
            to `200` or `499` depending on `expect_cancellation`.
    """
    # Determine the expected status code.
    if expected_code is None:
        if expect_cancellation:
            expected_code = HTTP_STATUS_REQUEST_CANCELLED
        else:
            expected_code = HTTPStatus.OK

    request = channel.request
    if channel.is_finished():
        raise AssertionError(
            "Request finished before we could disconnect - "
            "ensure `await_result=False` is passed to `make_request`.",
        )

    # We're about to disconnect the request. This also disconnects the
    # channel, so we have to rely on mocks to extract the response.
    respond_method: Callable[..., Any]
    if isinstance(expected_body, bytes):
        respond_method = respond_with_html_bytes
    else:
        respond_method = respond_with_json

    with mock.patch(
        f"synapse.http.server.{respond_method.__name__}",
        wraps=respond_method,
    ) as respond_mock:
        # Disconnect the request.
        request.connectionLost(reason=ConnectionDone())

        if expect_cancellation:
            # An immediate cancellation is expected.
            respond_mock.assert_called_once()
        else:
            respond_mock.assert_not_called()

            # The handler is expected to run to completion.
            reactor.advance(1.0)
            respond_mock.assert_called_once()

    args, _kwargs = respond_mock.call_args
    code, body = args[1], args[2]

    if code != expected_code:
        raise AssertionError(
            f"{code} != {expected_code} : "
            "Request did not finish with the expected status code."
        )

    if request.code != expected_code:
        raise AssertionError(
            f"{request.code} != {expected_code} : "
            "Request did not finish with the expected status code."
        )

    if body != expected_body:
        # BUG FIX: this message previously said "status code" — a
        # copy-paste from the checks above — which misreported what
        # actually mismatched.
        raise AssertionError(
            f"{body!r} != {expected_body!r} : "
            "Request did not finish with the expected body."
        )
def test_producer_send_messages_batched_partial_success(self):
    """test_producer_send_messages_batched_partial_success
    This tests the complexity of the error handling for a single batch
    request.
    Scenario: The producer's caller sends 5 requests to two (total) topics
              The client's metadata is such that the producer will produce
                requests to post msgs to 5 separate topic/partition tuples
              The batch size is reached, so the producer sends the request
              The caller then cancels one of the requests
              The (mock) client returns partial success in the form of a
                FailedPayloadsError.
              The Producer then should return the successful results and
                retry the failed.
              The (mock) client then "succeeds" the remaining results.
    """
    client = Mock()
    topic2 = 'tpsmbps_two'
    client.topic_partitions = {self.topic: [0, 1, 2, 3], topic2: [4, 5, 6]}
    client.metadata_error_for_topic.return_value = False
    # Offsets are plain int literals rather than Python-2-only longs
    # (``10L``), which are a SyntaxError under Python 3; on Python 2,
    # int and long compare equal, so behaviour is unchanged.
    init_resp = [
        ProduceResponse(self.topic, 0, 0, 10),
        ProduceResponse(self.topic, 1, 6, 20),
        ProduceResponse(topic2, 5, 0, 30),
    ]
    next_resp = [
        ProduceResponse(self.topic, 2, 0, 10),
        ProduceResponse(self.topic, 1, 0, 20),
        ProduceResponse(topic2, 4, 0, 30),
    ]
    failed_payloads = [
        (ProduceRequest(self.topic, ANY, ANY),
         NotLeaderForPartitionError()),
        (ProduceRequest(topic2, ANY, ANY), BrokerNotAvailableError()),
    ]

    # First call partially fails, second call succeeds the remainder.
    f = Failure(FailedPayloadsError(init_resp, failed_payloads))
    ret = [fail(f), succeed(next_resp)]
    client.send_produce_request.side_effect = ret

    msgs = self.msgs(range(10))
    results = []

    clock = MemoryReactorClock()
    producer = Producer(client, batch_send=True, batch_every_t=0,
                        clock=clock)
    # Send 5 total requests: 4 here, one after we make sure we didn't
    # send early
    results.append(producer.send_messages(self.topic, msgs=msgs[0:3]))
    results.append(producer.send_messages(topic2, msgs=msgs[3:5]))
    results.append(producer.send_messages(self.topic, msgs=msgs[5:8]))
    results.append(producer.send_messages(topic2, msgs=msgs[8:9]))
    # No call yet, not enough messages
    self.assertFalse(client.send_produce_request.called)
    # Enough messages to start the request
    results.append(producer.send_messages(self.topic, msgs=msgs[9:10]))
    # Before the retry, there should be some results
    self.assertEqual(init_resp[0], self.successResultOf(results[0]))
    self.assertEqual(init_resp[2], self.successResultOf(results[3]))
    # Advance the clock
    clock.advance(producer._retry_interval)
    # Check the other results came in
    self.assertEqual(next_resp[0], self.successResultOf(results[4]))
    self.assertEqual(next_resp[1], self.successResultOf(results[2]))
    self.assertEqual(next_resp[2], self.successResultOf(results[1]))
    producer.stop()
def test_subscribersListCalls(self):
    """
    test_subscribersListCalls
    Test that a brokerclient's connSubscribers callbacks are called in the
    proper order, and that all the deferreds of a previous call are
    resolved before the next round of calls is done.
    """
    reactor = MemoryReactorClock()
    # NOTE: the subscriber closures below all share this variable via the
    # enclosing function's cell, so rebinding ``callList = []`` later
    # redirects their appends to the fresh list too.
    callList = []

    def c1(c, conn, reason):
        s = 'c1:{0}'.format(conn)
        if reason is not None:
            s += ':' + reason
        callList.append(s)

    def c2(c, conn, reason):
        # c2 returns a Deferred that only fires after 1s of reactor time,
        # which is what delays the *next* notification round.
        def c2_cb(_, c, conn, reason):
            callList.append('c2_cb:{0}'.format(conn))
        d = Deferred()
        d.addCallback(c2_cb, c, conn, reason)
        reactor.callLater(1.0, d.callback, None)
        s = 'c2:{0}'.format(conn)
        if reason is not None:
            s += ':' + reason
        callList.append(s)
        return d

    def c3(c, conn, reason):
        s = 'c3:{0}'.format(conn)
        if reason is not None:
            s += ':' + reason
        callList.append(s)

    def c4(c, conn, reason):
        callList.append('c4:{0}'.format(conn))

    def c5(c, conn, reason):
        callList.append('c5:{0}'.format(conn))

    sublist = [c1, c2, c3]
    c = KafkaBrokerClient('slc', subscribers=sublist, reactor=reactor)

    # Trigger the call to the 3 subscribers
    c._notify(True)
    self.assertEqual(callList, ['c1:True', 'c2:True', 'c3:True'])
    callList = []
    c._notify(False)
    # Nothing should be called yet, because the c2_cb
    # callback hasn't been called yet...
    self.assertEqual(callList, [])
    # advance the clock to trigger the callback to c2_cb
    reactor.advance(1.0)
    self.assertEqual(callList, ['c2_cb:True', 'c1:False',
                                'c2:False', 'c3:False'])
    callList = []
    reactor.advance(1.0)
    self.assertEqual(callList, ['c2_cb:False'])
    callList = []

    # Trigger the call to the subscribers
    c._notify(True, reason='TheReason')
    c.addSubscriber(c4)
    self.assertEqual(callList,
                     ['c1:True:TheReason', 'c2:True:TheReason',
                      'c3:True:TheReason'])
    callList = []
    c._notify(False)
    self.assertEqual(callList, [])
    # Add a subscriber after the notify call, but before the advance
    # and ensure that the new subscriber isn't notified for the event
    # which occurred before it was added
    c.addSubscriber(c5)
    # advance the clock to trigger the callback to c2_cb
    reactor.advance(1.0)
    self.assertEqual(callList,
                     ['c2_cb:True', 'c1:False', 'c2:False',
                      'c3:False', 'c4:False'])
    callList = []

    c.delSubscriber(c2)
    # advance the clock to trigger the callback to c2_cb
    reactor.advance(1.0)
    # We should still get the c2_cb:False here...
    self.assertEqual(callList, ['c2_cb:False'])
    callList = []

    c.delSubscriber(c4)
    # Trigger the call to the subscribers
    c._notify(True)
    reactor.advance(1.0)
    self.assertEqual(callList, ['c1:True', 'c3:True', 'c5:True'])
    callList = []
    c._notify(False)
    reactor.advance(1.0)
    self.assertEqual(callList, ['c1:False', 'c3:False', 'c5:False'])
    callList = []