Example #1
 def test_makeUnconnectedRequest(self):
     """ test_makeUnconnectedRequest
     Ensure that sending a request when not connected will attempt to bring
     up a connection if one isn't already in the process of being brought up
     """
     id1 = 65432
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient('testmakeUnconnectedRequest',
                           reactor=reactor)
     request = KafkaCodec.encode_fetch_request(
         'testmakeUnconnectedRequest', id1)
     d = c.makeRequest(id1, request)
     self.assertIsInstance(d, Deferred)
     # Make sure the request shows unsent
     self.assertFalse(c.requests[id1].sent)
     # Make sure a connection was attempted
     self.assertTrue(c.connector)
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     # Bring up the "connection"...
     c.buildProtocol(None)
     # Replace the created proto with a mock
     c.proto = Mock()
     reactor.advance(1.0)
     # Now we should have seen 'sendString' called
     c.proto.sendString.assert_called_once_with(request)
Example #2
    def test_producer_send_messages_batched(self):
        client = Mock()
        f = Failure(BrokerNotAvailableError())
        ret = [fail(f), succeed([ProduceResponse(self.topic, 0, 0, 10L)])]
        client.send_produce_request.side_effect = ret
        client.topic_partitions = {self.topic: [0, 1, 2, 3]}
        client.metadata_error_for_topic.return_value = False
        msgs = [self.msg("one"), self.msg("two")]
        clock = MemoryReactorClock()
        batch_n = 2

        producer = Producer(client, batch_every_n=batch_n, batch_send=True,
                            clock=clock)
        d = producer.send_messages(self.topic, msgs=msgs)
        # Check the expected request was sent
        msgSet = create_message_set(
            make_send_requests(msgs), producer.codec)
        req = ProduceRequest(self.topic, ANY, msgSet)
        client.send_produce_request.assert_called_once_with(
            [req], acks=producer.req_acks, timeout=producer.ack_timeout,
            fail_on_error=False)
        # At first, there's no result. Have to retry due to first failure
        self.assertNoResult(d)
        clock.advance(producer._retry_interval)
        self.successResultOf(d)

        producer.stop()
Example #3
    def test_makeRequest(self):
        id1 = 54321
        id2 = 76543
        reactor = MemoryReactorClock()
        c = KafkaBrokerClient('testmakeRequest', reactor=reactor)
        request = KafkaCodec.encode_fetch_request('testmakeRequest', id1)
        d = c.makeRequest(id1, request)
        eb1 = Mock()
        self.assertIsInstance(d, Deferred)
        d.addErrback(eb1)
        # Make sure the request shows unsent
        self.assertFalse(c.requests[id1].sent)
        # Make sure a connection was attempted
        self.assertTrue(c.connector)
        c.connector.factory = c  # MemoryReactor doesn't make this connection.
        # Bring up the "connection"...
        c.buildProtocol(None)
        # Replace the created proto with a mock
        c.proto = Mock()
        # Advance the clock so sendQueued() will be called
        reactor.advance(1.0)
        # The proto should have been asked to sendString the request
        c.proto.sendString.assert_called_once_with(request)

        # Now call with 'expectResponse=False'
        c.proto = Mock()
        request = KafkaCodec.encode_fetch_request('testmakeRequest2', id2)
        d2 = c.makeRequest(id2, request, expectResponse=False)
        self.assertIsInstance(d2, Deferred)
        c.proto.sendString.assert_called_once_with(request)

        # Now close the KafkaBrokerClient
        c.close()
        fail1 = eb1.call_args[0][0]  # The actual failure sent to errback
        self.assertTrue(fail1.check(CancelledError))
Example #4
    def test_producer_send_messages_unknown_topic(self):
        client = Mock()
        ds = [Deferred() for _ in range(Producer.DEFAULT_REQ_ATTEMPTS)]
        clock = MemoryReactorClock()
        client.load_metadata_for_topics.side_effect = ds
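        # Error code 3 is UnknownTopicOrPartition, so topic metadata lookups keep failing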
        client.metadata_error_for_topic.return_value = 3
        client.topic_partitions = {}
        msgs = [self.msg("one"), self.msg("two")]
        ack_timeout = 5

        producer = Producer(client, ack_timeout=ack_timeout, clock=clock)
        d = producer.send_messages(self.topic, msgs=msgs)
        # d is waiting on result from ds[0] for load_metadata_for_topics
        self.assertNoResult(d)

        # fire it with client still reporting no metadata for topic
        # The producer will retry the lookup DEFAULT_REQ_ATTEMPTS times...
        for i in range(Producer.DEFAULT_REQ_ATTEMPTS):
            ds[i].callback(None)
            # And then wait producer._retry_interval for a call back...
            clock.advance(producer._retry_interval + 0.01)
        self.failureResultOf(d, UnknownTopicOrPartitionError)
        self.assertFalse(client.send_produce_request.called)

        producer.stop()
Example #5
    def test_producer_send_timer_failed(self):
        """test_producer_send_timer_failed
        Test that the looping call is restarted when _send_batch errs
        Somewhat artificial test to confirm that when a failure occurs in
        _send_batch (which causes the looping call to terminate), the
        looping call is restarted.
        """
        client = Mock()
        client.topic_partitions = {self.topic: [0, 1, 2, 3]}
        client.metadata_error_for_topic.return_value = False
        batch_t = 5
        clock = MemoryReactorClock()

        with patch.object(aProducer, 'log') as klog:
            producer = Producer(client, batch_send=True, batch_every_t=batch_t,
                                clock=clock)
            msgs = [self.msg("one"), self.msg("two")]
            d = producer.send_messages(self.topic, msgs=msgs)
            # Check no request was yet sent
            self.assertFalse(client.send_produce_request.called)
            # Patch the producer module's Deferred so _send_batch raises an exception
            with patch.object(aProducer, 'Deferred') as mock_deferred:
                mock_deferred.side_effect = ValueError(
                    "test_producer_send_timer_failed induced failure")
                # Advance the clock
                clock.advance(batch_t)
            # Check the expected message was logged by the looping call restart
            klog.warning.assert_called_once_with('_send_timer_failed:%r: %s',
                                                 ANY, ANY)
        # Check that the looping call was restarted
        self.assertTrue(producer.sendLooper.running)

        producer.stop()
Example #6
    def _cancelConnectTimeoutTest(self, connect):
        """
        Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
        cancelled after the connection is set up but before it is fired with the
        resulting protocol instance.
        """
        reactor = MemoryReactorClock()
        cc = ClientCreator(reactor, Protocol)
        d = connect(reactor, cc)
        connector = reactor.connectors.pop()
        # Sanity check - there is an outstanding delayed call to fire the
        # Deferred.
        self.assertEqual(len(reactor.getDelayedCalls()), 1)

        # Cancel the Deferred, disconnecting the transport just set up and
        # cancelling the delayed call.
        d.cancel()

        self.assertEqual(reactor.getDelayedCalls(), [])

        # A real connector implementation is responsible for disconnecting the
        # transport as well.  For our purposes, just check that someone told the
        # connector to disconnect.
        self.assertTrue(connector._disconnected)

        return self.assertFailure(d, CancelledError)
Example #7
 def test_connect(self):
     reactor = MemoryReactorClock()
     reactor.running = True
     c = KafkaBrokerClient('test_connect', reactor=reactor)
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     # Let's pretend we've connected, which will schedule the notification firing
     c.buildProtocol(None)
     reactor.advance(1.0)
Example #8
 def test_connectNotify(self):
     from afkak.protocol import KafkaProtocol
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient('test_connectNotify', reactor=reactor)
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     proto = c.buildProtocol(None)
     self.assertIsInstance(proto, KafkaProtocol)
     reactor.advance(1.0)
     self.assertFalse(c.clock.getDelayedCalls())
Example #9
 def test_closeNotifyDuringConnect(self):
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient('test_closeNotify', reactor=reactor)
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     reactor.advance(1.0)
     self.assertFalse(c.clock.getDelayedCalls())
     c.close()
     c.clientConnectionFailed(c.connector, Failure(UserError()))
     reactor.advance(1.0)
     self.assertFalse(c.clock.getDelayedCalls())
Example #10
    def test_producer_send_messages_batched_fail(self):
        client = Mock()
        ret = [Deferred(), Deferred(), Deferred()]
        client.send_produce_request.side_effect = ret
        client.topic_partitions = {self.topic: [0, 1, 2, 3]}
        client.metadata_error_for_topic.return_value = False
        msgs = [self.msg("one"), self.msg("two")]
        batch_t = 5
        clock = MemoryReactorClock()

        producer = Producer(client, batch_every_t=batch_t, batch_send=True,
                            clock=clock, max_req_attempts=3)
        # Advance the clock to ensure that firing with no messages to send causes no error
        clock.advance(batch_t)
        d = producer.send_messages(self.topic, msgs=msgs)
        # Check no request was yet sent
        self.assertFalse(client.send_produce_request.called)
        # Advance the clock
        clock.advance(batch_t)
        # Check the expected request was sent
        msgSet = create_message_set(
            make_send_requests(msgs), producer.codec)
        req = ProduceRequest(self.topic, 0, msgSet)
        produce_request_call = call([req], acks=producer.req_acks,
                                    timeout=producer.ack_timeout,
                                    fail_on_error=False)
        produce_request_calls = [produce_request_call]
        client.send_produce_request.assert_has_calls(produce_request_calls)
        self.assertNoResult(d)
        # Fire the failure from the first request to the client
        ret[0].errback(OffsetOutOfRangeError(
            'test_producer_send_messages_batched_fail'))
        # Still no result, producer should retry first
        self.assertNoResult(d)
        # Check retry wasn't immediate
        self.assertEqual(client.send_produce_request.call_count, 1)
        # Advance the clock by the retry delay
        clock.advance(producer._retry_interval)
        # Check 2nd send_produce_request (1st retry) was sent
        produce_request_calls.append(produce_request_call)
        client.send_produce_request.assert_has_calls(produce_request_calls)
        # Fire the failure from the 2nd request to the client
        ret[1].errback(BrokerNotAvailableError(
            'test_producer_send_messages_batched_fail_2'))
        # Still no result, producer should retry one more time
        self.assertNoResult(d)
        # Advance the clock by the retry delay
        clock.advance(producer._retry_interval * 1.1)
        # Check the 3rd send_produce_request (2nd retry) was sent
        produce_request_calls.append(produce_request_call)
        client.send_produce_request.assert_has_calls(produce_request_calls)
        # Fire the failure from the 3rd request to the client
        ret[2].errback(LeaderNotAvailableError(
            'test_producer_send_messages_batched_fail_3'))

        self.failureResultOf(d, LeaderNotAvailableError)

        producer.stop()
Example #11
    def test_converge_complete(self):
        """
        At the end of a convergence iteration, ``_CONVERGE_COMPLETE`` is updated
        to the current time.
        """
        interval = 45

        reactor = MemoryReactorClock()

        deploy_config = DeploymentConfiguration(
            domain=u"s4.example.com",
            kubernetes_namespace=u"testing",
            subscription_manager_endpoint=URL.from_text(u"http://localhost:8000"),
            s3_access_key_id=u"access key id",
            s3_secret_key=u"secret key",
            introducer_image=u"introducer:abcdefgh",
            storageserver_image=u"storageserver:abcdefgh",
        )

        state_path = FilePath(self.mktemp().decode("ascii"))
        state_path.makedirs()
        subscription_client = memory_client(
            state_path,
            deploy_config.domain,
        )
        k8s_client = KubeClient(k8s=memory_kubernetes().client())
        aws_region = FakeAWSServiceRegion(
            access_key=deploy_config.s3_access_key_id,
            secret_key=deploy_config.s3_secret_key,
        )
        d = aws_region.get_route53_client().create_hosted_zone(
            u"foo", deploy_config.domain,
        )
        self.successResultOf(d)

        service = _convergence_service(
            reactor,
            interval,
            deploy_config,
            subscription_client,
            k8s_client,
            aws_region,
        )
        service.startService()
        reactor.advance(interval)
        last_completed = next(iter(list(
            metric.samples[-1][-1]
            for metric
            in REGISTRY.collect()
            if metric.name == u"s4_last_convergence_succeeded"
        )))
        self.assertThat(reactor.seconds(), Equals(last_completed))
Example #12
 def test_closeNotify(self):
     from twisted.internet.error import ConnectionDone
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient('test_closeNotify', reactor=reactor)
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     c.buildProtocol(None)
     reactor.advance(1.0)
     self.assertFalse(c.clock.getDelayedCalls())
     c.continueTrying = False
     c.close()
     c.clientConnectionLost(c.connector, Failure(ConnectionDone()))
     reactor.advance(1.0)
     self.assertFalse(c.clock.getDelayedCalls())
Example #13
    def test_transfer(self):
        """
        An attempt is made to transfer the zone for the domain the
        L{SecondaryAuthority} was constructed with from the server address it
        was constructed with when L{SecondaryAuthority.transfer} is called.
        """
        secondary = SecondaryAuthority.fromServerAddressAndDomain(
            ('192.168.1.2', 1234), 'example.com')
        secondary._reactor = reactor = MemoryReactorClock()

        secondary.transfer()

        # Verify a connection attempt to the server address above
        host, port, factory, timeout, bindAddress = reactor.tcpClients.pop(0)
        self.assertEqual(host, '192.168.1.2')
        self.assertEqual(port, 1234)

        # See if a zone transfer query is issued.
        proto = factory.buildProtocol((host, port))
        transport = StringTransport()
        proto.makeConnection(transport)

        msg = Message()
        # DNSProtocol.writeMessage length encodes the message by prepending a
        # 2 byte message length to the buffered value.
        msg.decode(BytesIO(transport.value()[2:]))

        self.assertEqual(
            [dns.Query('example.com', dns.AXFR, dns.IN)], msg.queries)
Example #14
 def setUp(self):
     self.factory = BGPPeering(self.testASN, peerAddr='127.0.0.2')
     self.factory.reactor = MemoryReactorClock()
     # Mock factory.log and fsm.FSM.log for less noisy output
     self.factory.log = mock.Mock()
     self._log_patcher = mock.patch.object(fsm.FSM, 'log')
     self._log_patcher.start()
Example #15
    def setUp(self):
        self.testPeers = {'127.0.0.2': mock.Mock(spec=BGPPeering)}
        self.factory = BGPServerFactory(self.testPeers, self.testASN)
        self.factory.reactor = MemoryReactorClock()

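        # Patch the BGPFactory log for less noisy output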
        self._log_patcher = mock.patch.object(bgp.BGPFactory, 'log')
        self._log_patcher.start()
Example #16
    def test_producer_send_messages_bad_response(self):
        first_part = 68
        client = Mock(reactor=MemoryReactorClock())
        ret = Deferred()
        client.send_produce_request.return_value = ret
        client.topic_partitions = {self.topic: [first_part, 101, 102, 103]}
        client.metadata_error_for_topic.return_value = False
        msgs = [self.msg("one"), self.msg("two")]
        ack_timeout = 5

        producer = Producer(client, ack_timeout=ack_timeout)
        d = producer.send_messages(self.topic, msgs=msgs)
        # Check the expected request was sent
        msgSet = create_message_set(make_send_requests(msgs), producer.codec)
        req = ProduceRequest(self.topic, first_part, msgSet)
        client.send_produce_request.assert_called_once_with(
            [req],
            acks=producer.req_acks,
            timeout=ack_timeout,
            fail_on_error=False)
        # Check results when "response" fires
        self.assertNoResult(d)
        ret.callback([])
        self.failureResultOf(d, NoResponseError)
        producer.stop()
Example #17
 def test_close(self):
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient('test_close', reactor=reactor)
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     c.connector.state = 'connected'  # set the connector to connected state
     dd = c.close()
     self.assertIsInstance(dd, Deferred)
     self.assertNoResult(dd)
     f = Failure(ConnectionDone('test_close'))
     c.clientConnectionLost(c.connector, f)
     self.assertNoResult(dd)
     # Advance the clock so the notify() call fires
     reactor.advance(0.1)
     r = self.successResultOf(dd)
     self.assertIs(r, None)
Example #18
 def test_closeNotConnected(self):
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient('test_closeNotConnected', reactor=reactor)
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     d = c.close()
     self.assertIsInstance(d, Deferred)
Example #19
    def test_producer_cancel_one_request_getting_topic(self):
        # Test cancelling a request after it's begun to be processed
        client = Mock(reactor=MemoryReactorClock())
        client.topic_partitions = {}
        ret = Deferred()
        client.load_metadata_for_topics.return_value = ret
        msgs = [self.msg("one"), self.msg("two")]
        msgs2 = [self.msg("three"), self.msg("four")]
        batch_n = 4

        producer = Producer(client, batch_every_n=batch_n, batch_send=True)
        d1 = producer.send_messages(self.topic, msgs=msgs)
        # Check that no request was sent
        self.assertFalse(client.send_produce_request.called)
        # This will trigger the metadata lookup
        d2 = producer.send_messages(self.topic, msgs=msgs2)
        d1.cancel()
        self.failureResultOf(d1, CancelledError)
        # Check that still no request was sent
        self.assertFalse(client.send_produce_request.called)
        self.assertNoResult(d2)
        # Setup the client's topics and trigger the metadata deferred
        client.topic_partitions = {self.topic: [0, 1, 2, 3]}
        client.metadata_error_for_topic.return_value = False
        ret.callback(None)
        # Expect that only the msgs2 messages were sent
        msgSet = create_message_set(make_send_requests(msgs2), producer.codec)
        req = ProduceRequest(self.topic, 1, msgSet)
        client.send_produce_request.assert_called_once_with(
            [req],
            acks=producer.req_acks,
            timeout=producer.ack_timeout,
            fail_on_error=False)

        producer.stop()
Example #20
    def test_producer_send_messages_keyed_same_partition(self):
        """test_producer_send_messages_keyed_same_partition
        Test that messages sent with a key are actually sent with that key,
        even if they go to the same topic/partition (batching preserves keys)
        """
        first_part = 43
        second_part = 55
        client = Mock(reactor=MemoryReactorClock())
        ret1 = Deferred()
        client.send_produce_request.side_effect = [ret1]
        client.topic_partitions = {self.topic: [first_part, second_part]}
        client.metadata_error_for_topic.return_value = False
        msgs1 = [self.msg("one"), self.msg("two")]
        msgs2 = [self.msg("odd_man_out")]
        msgs3 = [self.msg("three"), self.msg("four")]
        key1 = b'99'
        key3 = b'foo'
        ack_timeout = 5

        # Even though we're sending keyed messages, we use the default
        # round-robin partitioner, since the requests are easier to predict
        producer = Producer(client,
                            ack_timeout=ack_timeout,
                            batch_send=True,
                            batch_every_n=4)
        d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)
        d2 = producer.send_messages(self.topic, msgs=msgs2)
        d3 = producer.send_messages(self.topic, key=key3, msgs=msgs3)
        # Check the expected request was sent
        msgSet1 = create_message_set([
            make_send_requests(msgs1, key=key1)[0],
            make_send_requests(msgs3, key=key3)[0]
        ], producer.codec)
        msgSet2 = create_message_set(make_send_requests(msgs2), producer.codec)
        req1 = ProduceRequest(self.topic, first_part, msgSet1)
        req2 = ProduceRequest(self.topic, second_part, msgSet2)
        # Annoying, but order of requests is indeterminate...
        client.send_produce_request.assert_called_once_with(
            ANY,
            acks=producer.req_acks,
            timeout=ack_timeout,
            fail_on_error=False)
        self.assertEqual(sorted([req1, req2]),
                         sorted(client.send_produce_request.call_args[0][0]))
        # Check results when "response" fires
        self.assertNoResult(d1)
        self.assertNoResult(d2)
        self.assertNoResult(d3)
        resp = [
            ProduceResponse(self.topic, first_part, 0, 10),
            ProduceResponse(self.topic, second_part, 0, 23)
        ]
        ret1.callback(resp)
        result = self.successResultOf(d1)
        self.assertEqual(result, resp[0])
        result = self.successResultOf(d2)
        self.assertEqual(result, resp[1])
        result = self.successResultOf(d3)
        self.assertEqual(result, resp[0])
        producer.stop()
Example #21
    def test_stopTryingWhenConnected(self):
        """
        If a L{KafkaBrokerClient} has C{stopTrying} called while it is
        connected, it does not subsequently attempt to reconnect if the
        connection is later lost.
        """
        reactor = MemoryReactorClock()

        class NoConnectConnector(object):
            def stopConnecting(self):
                raise ClientError("Shouldn't be called, "
                                  "we're connected.")  # pragma: no cover

            def connect(self):
                raise ClientError(
                    "Shouldn't be reconnecting.")  # pragma: no cover

        c = KafkaBrokerClient(reactor, 'broker', 9092, 'clientId')
        c.protocol = Protocol
        # Let's pretend we've connected:
        c.buildProtocol(None)
        # Now we stop trying, then disconnect:
        c.stopTrying()
        c.clientConnectionLost(NoConnectConnector(), Failure(ConnectionDone()))
        self.assertFalse(c.continueTrying)
Example #22
 def prepare(self, reactor, clock, hs):
     self.clock = MemoryReactorClock()
     self.hs_clock = Clock(self.clock)
     self.url = "/_matrix/client/r0/register"
     self.registration_handler = Mock()
     self.auth_handler = Mock()
     self.device_handler = Mock()
Example #23
 def test_makeRequest_after_close(self):
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient('test_closeNotConnected', reactor=reactor)
     d = c.close()
     self.assertIsInstance(d, Deferred)
     d2 = c.makeRequest(1, 'fake request')
     self.successResultOf(self.failUnlessFailure(d2, ClientError))
Example #24
 def test_close(self):
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient('test_close', reactor=reactor)
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     c.connector.state = 'connected'  # set the connector to connected state
     dd = c.close()
     self.assertIsInstance(dd, Deferred)
     self.assertNoResult(dd)
     f = Failure(ConnectionDone('test_close'))
     c.clientConnectionLost(c.connector, f)
     self.assertNoResult(dd)
     # Advance the clock so the notify() call fires
     reactor.advance(0.1)
     r = self.successResultOf(dd)
     self.assertIs(r, None)
Example #25
 def setUp(self):
     self.deployer = ControllableDeployer(u"127.0.0.1", [], [])
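     # Drive the agent loop with an in-memory reactor so no real connections or timers are used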
     self.reactor = MemoryReactorClock()
     self.service = AgentLoopService(reactor=self.reactor,
                                     deployer=self.deployer,
                                     host=u"example.com",
                                     port=1234,
                                     context_factory=ClientContextFactory())
Example #26
 def test_repr(self):
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient(reactor,
                           'kafka.example.com',
                           9092,
                           clientId='MyClient')
     self.assertEqual(("<KafkaBrokerClient kafka.example.com:9092 "
                       "clientId='MyClient' unconnected>"), repr(c))
Example #27
 def test_reconnect(self):
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient(reactor, 'test_reconnect', 9092, 'clientId')
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     dd = c.close()
     self.assertIsInstance(dd, Deferred)
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
Example #28
    def setUp(self):
        self.factory = BGPFactory()
        self.factory.reactor = MemoryReactorClock()
        self.factory.protocol = BGP
        self.factory.myASN = 64600

        # Mock factory.log for less noisy output
        self._log_patcher = mock.patch.object(bgp.BGPFactory, 'log')
        self._log_patcher.start()
Example #29
 def test_list_logging(self, logger):
     """
     ``_NetworkClient.list`` logs an Eliot event describing its given type.
     """
     client = network_kubernetes(
         base_url=URL.fromText(u"http://127.0.0.1/"),
         agent=Agent(MemoryReactorClock()),
     ).client()
     client.list(v1.Pod)
Example #30
 def test_closeNotifyDuringConnect(self):
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient(reactor, 'test_closeNotify', 9092, 'clientId')
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     reactor.advance(1.0)
     self.assertEqual([], reactor.getDelayedCalls())
     c.close()
     c.clientConnectionFailed(c.connector, Failure(UserError()))
     reactor.advance(1.0)
     self.assertEqual([], reactor.getDelayedCalls())
Example #31
    def test_default_os_environ(self):
        """
        If no value is passed for the ``environ`` parameter then
        ``network_kubernetes_from_context`` uses ``os.environ`` to look up the
        possible value of ``KUBECONFIG``.
        """
        key_path, cert_path = self_signed_certificate_paths(
            FilePath(self.mktemp()),
            FilePath(self.mktemp()),
            u"x.invalid",
        )
        userauth = {
            "client-certificate": cert_path.path,
            "client-key": key_path.path,
        }
        config = FilePath(self.mktemp())
        yaml = safe_dump({
            "apiVersion":
            "v1",
            "kind":
            "Config",
            "contexts": [
                {
                    "name": "a",
                    "context": {
                        "cluster": "a",
                        "user": "******"
                    }
                },
            ],
            "clusters": [{
                "name": "a",
                "cluster": {
                    "server": "https://a.example.com/",
                    "certificate-authority": cert_path.path,
                },
            }],
            "users": [
                {
                    "name": "a",
                    "user": userauth
                },
            ],
        })
        config.setContent(native_string_to_bytes(yaml))
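        # Point KUBECONFIG at the generated config file via a patched os.environ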
        import os
        self.patch(os, "environ", {u"KUBECONFIG": config.path})

        kubernetes = network_kubernetes_from_context(
            MemoryReactorClock(),
            context=u"a",
        )
        self.assertEqual(
            "https://a.example.com/",
            kubernetes.base_url.asText(),
        )
Example #32
    def test_default_config_path(self):
        """
        When ``network_kubernetes_from_context`` does not find ``KUBECONFIG`` in
        the environment it uses ``default_config_path`` as the path to the
        configuration file.
        """
        key_path, cert_path = self_signed_certificate_paths(
            FilePath(self.mktemp()),
            FilePath(self.mktemp()),
            u"x.invalid",
        )
        userauth = {
            "client-certificate": cert_path.path,
            "client-key": key_path.path,
        }
        config = FilePath(self.mktemp())
        yaml = safe_dump({
            "apiVersion":
            "v1",
            "kind":
            "Config",
            "contexts": [
                {
                    "name": "a",
                    "context": {
                        "cluster": "a",
                        "user": "******"
                    }
                },
            ],
            "clusters": [{
                "name": "a",
                "cluster": {
                    "server": "https://a.example.com/",
                    "certificate-authority": cert_path.path,
                },
            }],
            "users": [
                {
                    "name": "a",
                    "user": userauth
                },
            ],
        })
        config.setContent(native_string_to_bytes(yaml))

        kubernetes = network_kubernetes_from_context(
            MemoryReactorClock(),
            context=u"a",
            environ={},
            default_config_path=config,
        )
        self.assertEqual(
            "https://a.example.com/",
            kubernetes.base_url.asText(),
        )
Example #33
 def test_close_disconnected(self):
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient(reactor, 'test_close', 9092, 'clientId')
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     c.connector.state = 'disconnected'  # set connector's state for test
     dd = c.close()
     self.assertIsInstance(dd, Deferred)
     r = self.successResultOf(dd)
     self.assertIs(r, None)
Example #34
 def test_disconnect(self):
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient(reactor, 'test_close', 9092, 'clientId')
     c._connect()  # Force a connection attempt
     conn = c.connector
     conn.factory = c  # MemoryReactor doesn't make this connection.
     conn.state = 'connected'  # set the connector to connected state
     self.assertIs(conn._disconnected, False)
     c.disconnect()
     self.assertIs(conn._disconnected, True)
Example #35
 def setUp(self):
     super().setUp()
     self.manager = self.create_peer('testnet')
     self.manager.allow_mining_without_peers()
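     # Run the Stratum factory against an in-memory reactor; the protocol is wired to a string transport below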
     self.factory = StratumFactory(self.manager, port=8123, reactor=MemoryReactorClock())
     self.factory.start()
     self.protocol = self.factory.buildProtocol('127.0.0.1')
     self.transport = StringTransportWithDisconnection()
     self.transport.protocol = self.protocol
     self.protocol.makeConnection(self.transport)
Example #36
 def prepare(self, reactor, clock, hs):
     self.clock = MemoryReactorClock()
     self.hs_clock = Clock(self.clock)
     self.url = "/_matrix/client/r0/register"
     self.registration_handler = Mock()
     self.auth_handler = Mock()
     self.device_handler = Mock()
     hs.config.enable_registration = True
     hs.config.registrations_require_3pid = []
     hs.config.auto_join_rooms = []
     hs.config.enable_registration_captcha = False
Example #37
 def __init__(self, rootResource):
     """
     :param rootResource: The Twisted `IResource` at the root of the
         resource tree.
     """
     self._memoryReactor = MemoryReactorClock()
     self._realAgent = Agent.usingEndpointFactory(
         reactor=self._memoryReactor,
         endpointFactory=_EndpointFactory(self._memoryReactor))
     self._rootResource = rootResource
     self._pumps = set()
Example #38
    def test_producer_send_timer_stopped_error(self):
        # Purely for coverage
        client = Mock(reactor=MemoryReactorClock())
        producer = Producer(client, batch_send=True)
        with patch.object(aProducer, 'log') as klog:
            producer._send_timer_stopped('Borg')
            klog.warning.assert_called_once_with(
                'commitTimerStopped with wrong timer:%s not:%s', 'Borg',
                producer._sendLooper)

        producer.stop()
Example #39
 def testOnFailure(self):
     treq_stub = StubTreq(_NonResponsiveTestResource())
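     # Swap in an in-memory reactor and the stub treq client, then advance past the observer's timeout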
     with patch.object(self.observer, 'reactor', MemoryReactorClock()):
         with patch.object(self.observer, 'client', treq_stub):
             self.observer.startObserving()
             self.assertTrue(self.observer.loop.running)
             with patch.object(self.observer,
                               'startObserving') as mockStartObserving:
                 self.observer.reactor.advance(self.observer.timeout)
                 self.assertFalse(self.observer.loop.running)
                 mockStartObserving.assert_called_once_with(now=False)
Example #40
 def test_missing_kubeconfig(self):
     """
     When ``network_kubernetes_from_context`` is given no value for
     ``default_config_path`` it uses ``~/.kube/config`` as the value for
     that parameter.
     """
     callargs = getcallargs(network_kubernetes_from_context,
                            MemoryReactorClock())
     self.assertEqual(
         FilePath(expanduser("~/.kube/config")),
         callargs["default_config_path"],
     )
Example #41
 def test_producer_init_batch(self):
     producer = Producer(Mock(reactor=MemoryReactorClock()),
                         batch_send=True)
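     # batch_send=True should start the internal send LoopingCall immediately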
     looper = producer._sendLooper
     self.assertEqual(type(looper), LoopingCall)
     self.assertTrue(looper.running)
     producer.stop()
     self.assertFalse(looper.running)
     self.assertEqual(
         producer.__repr__(),
         "<Producer <class 'afkak.partitioner.RoundRobinPartitioner'>:"
         "10cnt/32768bytes/30secs:1:1000>")
Example #42
    def test_producer_stop_during_request(self):
        # Test stopping producer while it's waiting for reply from client
        client = Mock()
        f = Failure(BrokerNotAvailableError())
        ret = [fail(f), Deferred()]
        client.send_produce_request.side_effect = ret
        client.topic_partitions = {self.topic: [0, 1, 2, 3]}
        client.metadata_error_for_topic.return_value = False
        msgs = [self.msg("one"), self.msg("two")]
        clock = MemoryReactorClock()
        batch_n = 2

        producer = Producer(client, batch_every_n=batch_n, batch_send=True,
                            clock=clock)
        d = producer.send_messages(self.topic, msgs=msgs)
        # At first, there's no result. Have to retry due to first failure
        self.assertNoResult(d)
        clock.advance(producer._retry_interval)

        producer.stop()
        self.failureResultOf(d, tid_CancelledError)
Example #43
 def test_closeNotify(self):
     from twisted.internet.error import ConnectionDone
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient(reactor, 'test_closeNotify', 9092, 'clientId')
     c._connect()  # Force a connection attempt
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     c.buildProtocol(None)
     reactor.advance(1.0)
     self.assertEqual([], reactor.getDelayedCalls())
     c.continueTrying = False
     c.close()
     c.clientConnectionLost(c.connector, Failure(ConnectionDone()))
     reactor.advance(1.0)
     self.assertEqual([], reactor.getDelayedCalls())
Example #44
    def test_producer_stop_during_request(self):
        """
        Test stopping producer while it's waiting for reply from client
        """
        clock = MemoryReactorClock()
        client = Mock(reactor=clock)
        f = Failure(BrokerNotAvailableError())
        ret = [fail(f), Deferred()]
        client.send_produce_request.side_effect = ret
        client.topic_partitions = {self.topic: [0, 1, 2, 3]}
        client.metadata_error_for_topic.return_value = False
        msgs = [self.msg("one"), self.msg("two")]
        batch_n = 2

        producer = Producer(client, batch_every_n=batch_n, batch_send=True)
        d = producer.send_messages(self.topic, msgs=msgs)
        # At first, there's no result. Have to retry due to first failure
        self.assertNoResult(d)
        clock.advance(producer._retry_interval)

        producer.stop()
        self.failureResultOf(d, tid_CancelledError)
Example #45
    def test_producer_cancel_getting_topic(self):
        # Test cancelling while waiting to retry getting metadata
        clock = MemoryReactorClock()
        client = Mock()
        client.topic_partitions = {}  # start with no metadata
        rets = [Deferred(), Deferred()]
        client.load_metadata_for_topics.side_effect = rets
        msgs = [self.msg("one"), self.msg("two")]

        producer = Producer(client, clock=clock)
        d1 = producer.send_messages(self.topic, msgs=msgs)
        # Check that no request was sent
        self.assertFalse(client.send_produce_request.called)
        # Fire the result of load_metadata_for_topics, but
        # metadata_error_for_topic still indicates an error, so it'll retry
        # after a delay
        rets[0].callback(None)
        # Advance the clock partway through the delay, not enough to retry
        clock.advance(producer._retry_interval / 2)

        # Cancel the request and make sure we got the CancelledError
        d1.cancel()
        self.failureResultOf(d1, CancelledError)
        # Check that still no request was sent
        self.assertFalse(client.send_produce_request.called)

        # Setup the client's topics and trigger the metadata deferred
        client.topic_partitions = {self.topic: [0, 1, 2, 3]}
        client.metadata_error_for_topic.return_value = False
        rets[1].callback(None)
        # Check that still no request was sent
        self.assertFalse(client.send_produce_request.called)
        # Advance the clock again to complete the delay
        clock.advance(producer._retry_interval)
        # Make sure the retry got reset
        self.assertEqual(producer._retry_interval,
                         producer._init_retry_interval)
        producer.stop()
Example #46
    def _cancelConnectFailedTimeoutTest(self, connect):
        """
        Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
        cancelled after the connection attempt has failed but before it is fired
        with the resulting failure.
        """
        reactor = MemoryReactorClock()
        cc = ClientCreator(reactor, Protocol)
        d, factory = connect(reactor, cc)
        connector = reactor.connectors.pop()
        factory.clientConnectionFailed(
            connector, Failure(Exception("Simulated failure")))

        # Sanity check - there is an outstanding delayed call to fire the
        # Deferred.
        self.assertEqual(len(reactor.getDelayedCalls()), 1)

        # Cancel the Deferred, cancelling the delayed call.
        d.cancel()

        self.assertEqual(reactor.getDelayedCalls(), [])

        return self.assertFailure(d, CancelledError)
Example #47
    def test_makeRequest_fails(self):
        id1 = 15432
        reactor = MemoryReactorClock()
        c = KafkaBrokerClient('testmakeRequest', reactor=reactor)
        request = KafkaCodec.encode_fetch_request('testmakeRequest', id1)
        d = c.makeRequest(id1, request)
        eb1 = Mock()
        self.assertIsInstance(d, Deferred)
        d.addErrback(eb1)
        c.connector.factory = c  # MemoryReactor doesn't make this connection.
        # Bring up the "connection"...
        c.buildProtocol(None)
        # Replace the created proto with a mock
        c.proto = Mock()
        c.proto.sendString.side_effect = StringTooLongError(
            "Tried to send too many bytes")
        # Advance the clock so sendQueued() will be called
        reactor.advance(1.0)
        # The proto should have been asked to sendString the request
        c.proto.sendString.assert_called_once_with(request)

        # Now close the KafkaBrokerClient
        c.close()
Example #48
    def test_producer_stop_waiting_to_retry(self):
        """
        Test stopping producer while it's waiting to retry a request
        """
        clock = MemoryReactorClock()
        client = Mock(reactor=clock)
        f = Failure(BrokerNotAvailableError())
        ret = [fail(f)]
        client.send_produce_request.side_effect = ret
        client.topic_partitions = {self.topic: [0, 1, 2, 3]}
        client.metadata_error_for_topic.return_value = False
        msgs = [self.msg("one"), self.msg("two")]
        batch_n = 2

        producer = Producer(client, batch_every_n=batch_n, batch_send=True)
        d = producer.send_messages(self.topic, msgs=msgs)
        # At first, there's no result. Have to retry due to first failure
        self.assertNoResult(d)
        # Advance the clock, some, but not enough to retry
        clock.advance(producer._retry_interval / 2)
        # Stop the producer before the retry
        producer.stop()
        self.failureResultOf(d, tid_CancelledError)
Example #49
 def test_requestsRetried(self):
     id1 = 65432
     reactor = MemoryReactorClock()
     c = KafkaBrokerClient('testrequestsRetried',
                           reactor=reactor)
     request = KafkaCodec.encode_fetch_request(
         'testrequestsRetried', id1)
     c.makeRequest(id1, request)
     # Make sure the request shows unsent
     self.assertFalse(c.requests[id1].sent)
     c.connector.factory = c  # MemoryReactor doesn't make this connection.
     # Bring up the "connection"...
     c.buildProtocol(None)
     # Replace the created proto with a mock
     c.proto = Mock()
     reactor.advance(0.1)
     # Now we should have seen 'sendString' called
     c.proto.sendString.assert_called_once_with(request)
     # And the request should be 'sent'
     self.assertTrue(c.requests[id1].sent)
     # Before the reply 'comes back' drop the connection
     from twisted.internet.main import CONNECTION_LOST
     c.clientConnectionLost(c.connector, Failure(CONNECTION_LOST))
     # Make sure the proto was reset
     self.assertIs(c.proto, None)
     # Advance the clock again
     reactor.advance(0.1)
     # Make sure the request shows unsent
     self.assertFalse(c.requests[id1].sent)
     # Bring up the "connection"...
     c.buildProtocol(None)
     # Replace the created proto with a mock
     c.proto = Mock()
     reactor.advance(0.1)
     # Now we should have seen 'sendString' called
     c.proto.sendString.assert_called_once_with(request)
     # And the request should be 'sent'
     self.assertTrue(c.requests[id1].sent)
Example #50
 def setUp(self):
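     # Build the homeserver against an in-memory reactor and a clock driven by it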
     self.reactor = MemoryReactorClock()
     self.hs_clock = Clock(self.reactor)
     self.homeserver = setup_test_homeserver(
         self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.reactor
     )
Example #51
class JsonResourceTests(unittest.TestCase):
    def setUp(self):
        self.reactor = MemoryReactorClock()
        self.hs_clock = Clock(self.reactor)
        self.homeserver = setup_test_homeserver(
            self.addCleanup, http_client=None, clock=self.hs_clock, reactor=self.reactor
        )

    def test_handler_for_request(self):
        """
        JsonResource.handler_for_request gives correctly decoded URL args to
        the callback, while Twisted will give the raw bytes of URL query
        arguments.
        """
        got_kwargs = {}

        def _callback(request, **kwargs):
            got_kwargs.update(kwargs)
            return (200, kwargs)

        res = JsonResource(self.homeserver)
        res.register_paths(
            "GET", [re.compile("^/_matrix/foo/(?P<room_id>[^/]*)$")], _callback
        )

        request, channel = make_request(
            self.reactor, b"GET", b"/_matrix/foo/%E2%98%83?a=%E2%98%83"
        )
        render(request, res, self.reactor)

        self.assertEqual(request.args, {b'a': [u"\N{SNOWMAN}".encode('utf8')]})
        self.assertEqual(got_kwargs, {u"room_id": u"\N{SNOWMAN}"})

    def test_callback_direct_exception(self):
        """
        If the web callback raises an uncaught exception, it will be translated
        into a 500.
        """

        def _callback(request, **kwargs):
            raise Exception("boo")

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)

        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
        render(request, res, self.reactor)

        self.assertEqual(channel.result["code"], b'500')

    def test_callback_indirect_exception(self):
        """
        If the web callback raises an uncaught exception in a Deferred, it will
        be translated into a 500.
        """

        def _throw(*args):
            raise Exception("boo")

        def _callback(request, **kwargs):
            d = Deferred()
            d.addCallback(_throw)
            self.reactor.callLater(1, d.callback, True)
            return make_deferred_yieldable(d)

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)

        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
        render(request, res, self.reactor)

        self.assertEqual(channel.result["code"], b'500')

    def test_callback_synapseerror(self):
        """
        If the web callback raises a SynapseError, it returns the appropriate
        status code and message set in it.
        """

        def _callback(request, **kwargs):
            raise SynapseError(403, "Forbidden!!one!", Codes.FORBIDDEN)

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)

        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foo")
        render(request, res, self.reactor)

        self.assertEqual(channel.result["code"], b'403')
        self.assertEqual(channel.json_body["error"], "Forbidden!!one!")
        self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN")

    def test_no_handler(self):
        """
        If there is no handler to process the request, Synapse will return 400.
        """

        def _callback(request, **kwargs):
            """
            Not ever actually called!
            """
            self.fail("shouldn't ever get here")

        res = JsonResource(self.homeserver)
        res.register_paths("GET", [re.compile("^/_matrix/foo$")], _callback)

        request, channel = make_request(self.reactor, b"GET", b"/_matrix/foobar")
        render(request, res, self.reactor)

        self.assertEqual(channel.result["code"], b'400')
        self.assertEqual(channel.json_body["error"], "Unrecognized request")
        self.assertEqual(channel.json_body["errcode"], "M_UNRECOGNIZED")
Example #52
    def test_producer_send_messages_batched_partial_success(self):
        """test_producer_send_messages_batched_partial_success
        This tests the complexity of the error handling for a single batch
        request.
        Scenario: The producer's caller sends 5 requests to two (total) topics
                  The client's metadata is such that the producer will produce
                    requests to post msgs to 5 separate topic/partition tuples
                  The batch size is reached, so the producer sends the request
                  The caller then cancels one of the requests
                  The (mock) client returns partial success in the form of a
                    FailedPayloadsError.
                  The Producer then should return the successful results and
                    retry the failed.
                  The (mock) client then "succeeds" the remaining results.
        """
        client = Mock()
        topic2 = 'tpsmbps_two'
        client.topic_partitions = {self.topic: [0, 1, 2, 3], topic2: [4, 5, 6]}
        client.metadata_error_for_topic.return_value = False

        init_resp = [ProduceResponse(self.topic, 0, 0, 10L),
                     ProduceResponse(self.topic, 1, 6, 20L),
                     ProduceResponse(topic2, 5, 0, 30L),
                     ]
        next_resp = [ProduceResponse(self.topic, 2, 0, 10L),
                     ProduceResponse(self.topic, 1, 0, 20L),
                     ProduceResponse(topic2, 4, 0, 30L),
                     ]
        failed_payloads = [(ProduceRequest(self.topic, ANY, ANY),
                            NotLeaderForPartitionError()),
                           (ProduceRequest(topic2, ANY, ANY),
                            BrokerNotAvailableError()),
                           ]

        f = Failure(FailedPayloadsError(init_resp, failed_payloads))
        ret = [fail(f), succeed(next_resp)]
        client.send_produce_request.side_effect = ret

        msgs = self.msgs(range(10))
        results = []
        clock = MemoryReactorClock()

        producer = Producer(client, batch_send=True, batch_every_t=0,
                            clock=clock)
        # Send 5 total requests: 4 here, one after we make sure we didn't
        # send early
        results.append(producer.send_messages(self.topic, msgs=msgs[0:3]))
        results.append(producer.send_messages(topic2, msgs=msgs[3:5]))
        results.append(producer.send_messages(self.topic, msgs=msgs[5:8]))
        results.append(producer.send_messages(topic2, msgs=msgs[8:9]))
        # No call yet, not enough messages
        self.assertFalse(client.send_produce_request.called)
        # Enough messages to start the request
        results.append(producer.send_messages(self.topic, msgs=msgs[9:10]))
        # Before the retry, there should be some results
        self.assertEqual(init_resp[0], self.successResultOf(results[0]))
        self.assertEqual(init_resp[2], self.successResultOf(results[3]))
        # Advance the clock
        clock.advance(producer._retry_interval)
        # Check the other results came in
        self.assertEqual(next_resp[0], self.successResultOf(results[4]))
        self.assertEqual(next_resp[1], self.successResultOf(results[2]))
        self.assertEqual(next_resp[2], self.successResultOf(results[1]))

        producer.stop()
Example #53
    def test_subscribersListCalls(self):
        """
        test_subscribersListCalls
        Test that a brokerclient's connSubscribers callbacks
        are called in the proper order, and that all the deferreds
        of a previous call are resolved before the next round of calls
        is done.
        """
        reactor = MemoryReactorClock()
        callList = []

        def c1(c, conn, reason):
            s = 'c1:{0}'.format(conn)
            if reason is not None:
                s += ':' + reason
            callList.append(s)

        def c2(c, conn, reason):
            def c2_cb(_, c, conn, reason):
                callList.append('c2_cb:{0}'.format(conn))

            d = Deferred()
            d.addCallback(c2_cb, c, conn, reason)
            reactor.callLater(1.0, d.callback, None)
            s = 'c2:{0}'.format(conn)
            if reason is not None:
                s += ':' + reason
            callList.append(s)
            return d

        def c3(c, conn, reason):
            s = 'c3:{0}'.format(conn)
            if reason is not None:
                s += ':' + reason
            callList.append(s)

        def c4(c, conn, reason):
            callList.append('c4:{0}'.format(conn))

        def c5(c, conn, reason):
            callList.append('c5:{0}'.format(conn))

        sublist = [c1, c2, c3]
        c = KafkaBrokerClient('slc', subscribers=sublist,
                              reactor=reactor)

        # Trigger the call to the 3 subscribers
        c._notify(True)
        self.assertEqual(callList, ['c1:True', 'c2:True', 'c3:True'])
        callList = []
        c._notify(False)
        # Nothing should be called yet, because the c2_cb
        # callback hasn't been called yet...
        self.assertEqual(callList, [])

        # advance the clock to trigger the callback to c2_cb
        reactor.advance(1.0)
        self.assertEqual(callList, ['c2_cb:True', 'c1:False',
                                    'c2:False', 'c3:False'])
        callList = []
        reactor.advance(1.0)
        self.assertEqual(callList, ['c2_cb:False'])
        callList = []

        # Trigger the call to the subscribers
        c._notify(True, reason='TheReason')
        c.addSubscriber(c4)
        self.assertEqual(callList, ['c1:True:TheReason', 'c2:True:TheReason',
                                    'c3:True:TheReason'])
        callList = []
        c._notify(False)
        self.assertEqual(callList, [])
        # Add a subscriber after the notify call, but before the advance
        # and ensure that the new subscriber isn't notified for the event
        # which occurred before it was added
        c.addSubscriber(c5)
        # advance the clock to trigger the callback to c2_cb
        reactor.advance(1.0)
        self.assertEqual(callList, ['c2_cb:True', 'c1:False',
                                    'c2:False', 'c3:False', 'c4:False'])
        callList = []

        c.delSubscriber(c2)
        # advance the clock to trigger the callback to c2_cb
        reactor.advance(1.0)
        # We should still get the c2_cb:False here...
        self.assertEqual(callList, ['c2_cb:False'])
        callList = []

        c.delSubscriber(c4)
        # Trigger the call to the subscribers
        c._notify(True)
        reactor.advance(1.0)
        self.assertEqual(callList, ['c1:True', 'c3:True', 'c5:True'])
        callList = []
        c._notify(False)
        reactor.advance(1.0)
        self.assertEqual(callList, ['c1:False', 'c3:False', 'c5:False'])
        callList = []