Example #1
class KafkaIntegrationTestCase(unittest2.TestCase):
    create_client = True
    topic = None

    @deferred(timeout=10)
    @inlineCallbacks
    def setUp(self):
        super(KafkaIntegrationTestCase, self).setUp()
        if not os.environ.get('KAFKA_VERSION'):  # pragma: no cover
            log.error('KAFKA_VERSION unset!')
            return

        if not self.topic:
            self.topic = "%s-%s" % (
                self.id()[self.id().rindex(".") + 1:], random_string(10))

        if self.create_client:
            self.client = KafkaClient(
                '%s:%d' % (self.server.host, self.server.port),
                clientId=self.topic)

        yield ensure_topic_creation(self.client, self.topic,
                                    reactor=self.reactor)

        self._messages = {}

    @deferred(timeout=10)
    @inlineCallbacks
    def tearDown(self):
        super(KafkaIntegrationTestCase, self).tearDown()
        if not os.environ.get('KAFKA_VERSION'):  # pragma: no cover
            log.error('KAFKA_VERSION unset!')
            return

        if self.create_client:
            yield self.client.close()
            # Check for outstanding delayedCalls. Note, this may yield
            # spurious errors if the class's client has an outstanding
            # delayed call due to reconnecting.
            dcs = self.reactor.getDelayedCalls()
            if dcs:  # pragma: no cover
                log.error("Outstanding Delayed Calls at tearDown: %s\n\n",
                          ' '.join([str(dc) for dc in dcs]))
            self.assertFalse(dcs)

    @inlineCallbacks
    def current_offset(self, topic, partition):
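        # A time of -1 asks the broker for the latest offset in the
        # partition; the trailing 1 limits the reply to a single offset.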
        offsets, = yield self.client.send_offset_request(
            [OffsetRequest(topic, partition, -1, 1)])
        returnValue(offsets.offsets[0])

    def msg(self, s):
        if s not in self._messages:
            self._messages[s] = '%s-%s-%s' % (s, self.id(), str(uuid.uuid4()))

        return self._messages[s]
Example #2
    @classmethod
    def setUpClass(cls):
        if not os.environ.get('KAFKA_VERSION'):  # pragma: no cover
            return

        # Single zookeeper, 3 kafka brokers
        zk_chroot = random_string(10)
        replicas = 3
        partitions = 2
        max_bytes = 12 * 1048576  # 12 MB

        cls.zk = ZookeeperFixture.instance()
        kk_args = [
            cls.zk.host, cls.zk.port, zk_chroot, replicas, partitions,
            max_bytes
        ]
        cls.kafka_brokers = [
            KafkaFixture.instance(i, *kk_args) for i in range(replicas)
        ]

        hosts = ['%s:%d' % (b.host, b.port) for b in cls.kafka_brokers]
        cls.client = KafkaClient(hosts, timeout=2500, clientId=__name__)

        # Start up the twisted reactor in a thread. We need this before the
        # KafkaClient can work, since KafkaBrokerClient relies on the
        # reactor for its TCP connection.
        cls.reactor, cls.thread = threaded_reactor()
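The threaded_reactor() helper used by these setUpClass() methods is not shown in any of the examples. A minimal sketch of what such a helper could look like, assuming it only needs to run the global Twisted reactor in a daemon thread and return both objects; this is an illustration, not necessarily afkak's actual test utility:

import threading

from twisted.internet import reactor


def threaded_reactor():
    """
    Start the global Twisted reactor in a background thread and return
    ``(reactor, thread)``. Sketch only; the real helper may differ.
    """
    thread = threading.Thread(
        target=reactor.run,
        # Signal handlers can only be installed from the main thread.
        kwargs={'installSignalHandlers': False},
        name='twisted-reactor',
    )
    thread.daemon = True
    thread.start()
    return reactor, thread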
Example #3
    @classmethod
    def setUpClass(cls):
        if not os.environ.get('KAFKA_VERSION'):  # pragma: no cover
            return

        DEBUGGING = True
        setDebugging(DEBUGGING)
        DelayedCall.debug = DEBUGGING

        zk_chroot = random_string(10)
        replicas = 2
        partitions = 7

        # mini zookeeper, 2 kafka brokers
        cls.zk = ZookeeperFixture.instance()
        kk_args = [cls.zk.host, cls.zk.port, zk_chroot, replicas, partitions]
        cls.kafka_brokers = [
            KafkaFixture.instance(i, *kk_args) for i in range(replicas)
        ]

        hosts = ['%s:%d' % (b.host, b.port) for b in cls.kafka_brokers]
        # We want a short timeout on message sending for this test, since
        # we are expecting failures when we take down the brokers
        cls.client = KafkaClient(hosts, timeout=1000, clientId=__name__)

        # Start up the twisted reactor in a thread. We need this before the
        # KafkaClient can work, since KafkaBrokerClient relies on the
        # reactor for its TCP connection.
        cls.reactor, cls.thread = threaded_reactor()
Example #4
    @inlineCallbacks
    def setUp(self):
        super(KafkaIntegrationTestCase, self).setUp()
        if not os.environ.get('KAFKA_VERSION'):  # pragma: no cover
            log.error('KAFKA_VERSION unset!')
            return

        if not self.topic:
            self.topic = "%s-%s" % (
                self.id()[self.id().rindex(".") + 1:], random_string(10))

        if self.create_client:
            self.client = KafkaClient(
                '%s:%d' % (self.server.host, self.server.port),
                clientId=self.topic)

        yield ensure_topic_creation(self.client, self.topic,
                                    reactor=self.reactor)

        self._messages = {}
Example #5
    @inlineCallbacks
    def setUp(self):
        log.info("Setting up test %s", self.id())

        self.harness = KafkaHarness.start(**self.harness_kw)
        self.addCleanup(self.harness.halt)

        if not self.topic:
            self.topic = "%s-%s" % (
                self.id()[self.id().rindex(".") + 1:], random_string(10))

        self.client = KafkaClient(
            self.harness.bootstrap_hosts,
            clientId=self.__class__.__name__,
            **self.client_kw
        )
        self.addCleanup(self.client.close)

        yield ensure_topic_creation(self.client, self.topic,
                                    fully_replicated=True)

        self._messages = {}
Example #6
    @inlineCallbacks
    def _count_messages(self, topic):
        messages = []
        hosts = '%s:%d,%s:%d' % (
            self.kafka_brokers[0].host, self.kafka_brokers[0].port,
            self.kafka_brokers[1].host, self.kafka_brokers[1].port)
        client = KafkaClient(hosts, clientId="CountMessages", timeout=500)

        try:
            yield ensure_topic_creation(client,
                                        topic,
                                        fully_replicated=False,
                                        reactor=self.reactor)

            # Need to retry this until we have a leader...
            while True:
                # Ask the client to load the latest metadata. This may avoid a
                # NotLeaderForPartitionError I was seeing upon re-start of the
                # broker.
                yield client.load_metadata_for_topics(topic)
                # break out of the loop once the topic metadata shows no error
                if check_error(client.metadata_error_for_topic(topic),
                               False) is None:
                    break
            # Ok, should be safe to get the partitions now...
            partitions = client.topic_partitions[topic]

            requests = [
                FetchRequest(topic, part, 0, 1024 * 1024)
                for part in partitions
            ]
            resps = []
            while not resps:
                try:
                    log.debug("_count_message: Fetching messages")
                    # Prevent log.error() call from causing test failure
                    with patch.object(kclient, 'log'):
                        resps = yield client.send_fetch_request(
                            requests, max_wait_time=400)
                except (NotLeaderForPartitionError,
                        UnknownTopicOrPartitionError,
                        KafkaUnavailableError):  # pragma: no cover
                    log.debug("_count_message: Metadata err, retrying...")
                    yield client.load_metadata_for_topics(topic)
                except FailedPayloadsError as e:  # pragma: no cover
                    if not e.args[1][0][1].check(RequestTimedOutError):
                        raise
                    log.debug("_count_message: Timed out err, retrying...")
        finally:
            yield client.close()
        for fetch_resp in resps:
            messages.extend(list(fetch_resp.messages))

        log.debug("Got %d messages:%r", len(messages), messages)

        returnValue(len(messages))
Example #7
    @inlineCallbacks
    def _count_messages(self, topic):
        log.debug("Counting messages on topic %s", topic)
        messages = []
        client = KafkaClient(self.harness.bootstrap_hosts,
                             clientId="CountMessages",
                             timeout=500,
                             reactor=self.reactor)

        try:
            yield ensure_topic_creation(client, topic, fully_replicated=False)

            # Need to retry this until we have a leader...
            while True:
                # Ask the client to load the latest metadata. This may avoid a
                # NotLeaderForPartitionError I was seeing upon re-start of the
                # broker.
                yield client.load_metadata_for_topics(topic)
                # if there is an error on the metadata for the topic, wait
                errno = client.metadata_error_for_topic(topic)
                if errno == 0:
                    break
                else:
                    log.debug("Topic %s in error errno=%d", topic, errno)
                    yield async_delay(1.0)

            # Ok, should be safe to get the partitions now...
            partitions = client.topic_partitions[topic]

            requests = [
                FetchRequest(topic, part, 0, 1024 * 1024)
                for part in partitions
            ]
            resps = []
            while not resps:
                try:
                    log.debug("_count_message: Fetching messages")
                    resps = yield client.send_fetch_request(requests,
                                                            max_wait_time=400)
                except (NotLeaderForPartitionError,
                        UnknownTopicOrPartitionError,
                        KafkaUnavailableError):  # pragma: no cover
                    log.debug("_count_message: Metadata err, retrying...")
                    yield client.load_metadata_for_topics(topic)
                except FailedPayloadsError as e:  # pragma: no cover
                    if not e.args[1][0][1].check(RequestTimedOutError):
                        raise
                    log.debug("_count_message: Timed out err, retrying...")
        finally:
            yield client.close()
        for fetch_resp in resps:
            messages.extend(list(fetch_resp.messages))

        log.debug("Got %d messages: %r", len(messages), messages)

        returnValue(len(messages))
Example #8
    @inlineCallbacks
    def _count_messages(self, topic):
        messages = []
        hosts = '%s:%d,%s:%d' % (self.kafka_brokers[0].host,
                                 self.kafka_brokers[0].port,
                                 self.kafka_brokers[1].host,
                                 self.kafka_brokers[1].port)
        client = KafkaClient(hosts, clientId="CountMessages", timeout=500)

        try:
            yield ensure_topic_creation(client, topic,
                                        reactor=self.reactor)

            # Need to retry this until we have a leader...
            while True:
                # Ask the client to load the latest metadata. This may avoid a
                # NotLeaderForPartitionError I was seeing upon re-start of the
                # broker.
                yield client.load_metadata_for_topics(topic)
                # break out of the loop once the topic metadata shows no error
                if check_error(
                        client.metadata_error_for_topic(topic), False) is None:
                    break
            # Ok, should be safe to get the partitions now...
            partitions = client.topic_partitions[topic]

            requests = [FetchRequest(topic, part, 0, 1024 * 1024)
                        for part in partitions]
            resps = []
            while not resps:
                try:
                    log.debug("_count_message: Fetching messages")
                    # Prevent log.error() call from causing test failure
                    with patch.object(kclient, 'log'):
                        resps = yield client.send_fetch_request(
                            requests, max_wait_time=400)
                except (NotLeaderForPartitionError,
                        UnknownTopicOrPartitionError,
                        KafkaUnavailableError):  # pragma: no cover
                    log.debug("_count_message: Metadata err, retrying...")
                    yield client.load_metadata_for_topics(topic)
                except FailedPayloadsError as e:  # pragma: no cover
                    if not e.args[1][0][1].check(RequestTimedOutError):
                        raise
                    log.debug("_count_message: Timed out err, retrying...")
        finally:
            yield client.close()
        for fetch_resp in resps:
            messages.extend(list(fetch_resp.messages))

        log.debug("Got %d messages:%r", len(messages), messages)

        returnValue(len(messages))
Example #9
    @classmethod
    def setUpClass(cls):
        replicas = 3
        partitions = 2
        max_bytes = 12 * 1048576  # 12 MB

        cls.harness = KafkaHarness.start(
            replicas=replicas,
            partitions=partitions,
            message_max_bytes=max_bytes,
        )

        cls.client = KafkaClient(cls.harness.bootstrap_hosts,
                                 timeout=2500, clientId=__name__)

        # Start up the twisted reactor in a thread. We need this before the
        # KafkaClient can work, since KafkaBrokerClient relies on the
        # reactor for its TCP connection.
        cls.reactor, cls.thread = threaded_reactor()
Example #10
class IntegrationMixin(object):
    """
    Mixin for tests that require a Kafka cluster.

    The `setUp()` and `tearDown()` methods bring up a Kafka cluster and create
    a topic for the test to use.

    Mix this into a subclass of `twisted.trial.unittest.TestCase`. Note that
    you must override *harness_kw* in the subclass.

    :data dict harness_kw:
        Keyword arguments for `harness`. Subclasses must set this to specify
        ``replicas`` (the number of Kafka brokers) and may specify other
        arguments — see `afkak.fixtures.KafkaHarness.start()`.

    :data dict client_kw:
        Keyword arguments for `client`. Subclasses may inject keyword arguments
        by overriding this. The default is empty.

    :ivar str topic:
        Kafka topic name. This may be set in subclasses. If ``None``, a random
        topic name is generated by `setUp()`.

    :ivar harness:
        `afkak.test.fixtures.KafkaHarness` instance. This is created by the
        `setUp()` method and automatically torn down.

    :ivar client:
        `afkak.KafkaClient` instance created by the `setUp()` method.

    :ivar reactor: Twisted reactor.
    """
    topic = None
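    # Importing the reactor inside the class body binds the global reactor
    # as a class attribute, so tests and helpers can refer to self.reactor.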
    from twisted.internet import reactor
    client_kw = {}

    if not os.environ.get('KAFKA_VERSION'):  # pragma: no cover
        skip = 'KAFKA_VERSION is not set'

    @inlineCallbacks
    def setUp(self):
        log.info("Setting up test %s", self.id())

        self.harness = KafkaHarness.start(**self.harness_kw)
        self.addCleanup(self.harness.halt)

        if not self.topic:
            self.topic = "%s-%s" % (
                self.id()[self.id().rindex(".") + 1:], random_string(10))

        self.client = KafkaClient(
            self.harness.bootstrap_hosts,
            clientId=self.__class__.__name__,
            **self.client_kw
        )
        self.addCleanup(self.client.close)

        yield ensure_topic_creation(self.client, self.topic,
                                    fully_replicated=True)

        self._messages = {}

    def tearDown(self):
        log.info("Tearing down test: %r", self)

    @inlineCallbacks
    def current_offset(self, topic, partition):
        offsets, = yield self.client.send_offset_request(
            [OffsetRequest(topic, partition, -1, 1)])
        returnValue(offsets.offsets[0])

    @inlineCallbacks
    def retry_while_broker_errors(self, f, *a, **kw):
        """
        Call a function, retrying on retriable broker errors.

        If calling the function fails with one of these exception types it is
        called again after a short delay:

        * `afkak.common.RetriableBrokerResponseError` (or a subclass thereof)
        * `afkak.common.PartitionUnavailableError`

        The net effect is to keep trying until topic auto-creation completes.

        :param f: callable, which may return a `Deferred`
        :param a: arbitrary positional arguments
        :param kw: arbitrary keyword arguments
        """
        while True:
            try:
                returnValue((yield f(*a, **kw)))
                break
            except (RetriableBrokerResponseError, PartitionUnavailableError):
                yield async_delay(0.1, clock=self.reactor)

    def msg(self, s):
        if s not in self._messages:
            self._messages[s] = (u'%s-%s-%s' % (s, self.id(), uuid.uuid4())).encode('utf-8')

        return self._messages[s]
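As a usage illustration for the mixin above (not part of afkak itself): the class and test names are hypothetical, the harness_kw and client_kw values are arbitrary, and it assumes afkak's Producer.send_messages() API. retry_while_broker_errors() bridges the window while topic auto-creation is still completing.

from afkak import Producer
from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest


class ProducerRoundtripTests(IntegrationMixin, unittest.TestCase):
    # Required by the mixin: broker count for the harness. One replica and
    # one partition keep the offset check below deterministic.
    harness_kw = dict(replicas=1, partitions=1)
    # Optional: extra keyword arguments forwarded to KafkaClient.
    client_kw = dict(timeout=2500)

    @inlineCallbacks
    def test_roundtrip(self):
        producer = Producer(self.client)
        self.addCleanup(producer.stop)

        # Retry until topic auto-creation has finished and a leader exists.
        yield self.retry_while_broker_errors(
            producer.send_messages, self.topic, msgs=[self.msg('hello')])

        # At least one message is now in the topic's only partition.
        offset = yield self.current_offset(self.topic, 0)
        self.assertTrue(offset >= 1)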
Example #11
    @inlineCallbacks
    def test_consumer_rejoin(self):
        """
        Trigger a rejoin via a consumer commit failure.
        """
        group = 'rejoin_group'
        self.client2 = KafkaClient(self.harness.bootstrap_hosts,
                                   clientId=self.topic + '2')
        self.addCleanup(self.client2.close)

        record_stream = DeferredQueue(backlog=1)

        def processor(consumer, records):
            log.debug('processor(%r, %r)', consumer, records)
            record_stream.put(records)

        coord = ConsumerGroup(
            self.client,
            group,
            topics=[self.topic],
            processor=processor,
            session_timeout_ms=6000,
            retry_backoff_ms=100,
            heartbeat_interval_ms=1000,
            fatal_backoff_ms=3000,
            consumer_kwargs=dict(auto_commit_every_ms=1000),
        )
        coord_start_d = coord.start()
        self.addCleanup(coord.stop)
        # FIXME: This doesn't seem to get fired reliably.
        coord_start_d
        # self.addCleanup(lambda: coord_start_d)

        yield wait_for_assignments(self.topic, self.num_partitions, [coord])

        # kill the heartbeat timer and start joining the second consumer
        while True:
            if coord._heartbeat_looper.running:
                coord._heartbeat_looper.stop()
                break
            else:
                yield async_delay()

        coord2 = ConsumerGroup(
            self.client2,
            group,
            topics=[self.topic],
            processor=processor,
            session_timeout_ms=6000,
            retry_backoff_ms=100,
            heartbeat_interval_ms=1000,
            fatal_backoff_ms=3000,
            consumer_kwargs=dict(auto_commit_every_ms=1000),
        )
        coord2_start_d = coord2.start()
        self.addCleanup(coord2.stop)
        # FIXME: This doesn't seem to get fired reliably.
        coord2_start_d
        # self.addCleanup(lambda: coord2_start_d)

        # send some messages and see that they're processed
        # the commit will eventually fail because we're rebalancing
        for part in range(15):
            yield async_delay()
            values = yield self.send_messages(part % self.num_partitions,
                                              [part])
            msgs = yield record_stream.get()
            if msgs[0].partition != part:
                # once the commit fails, we will see the msg twice
                break
            self.assertEqual(msgs[0].message.value, values[0])

        yield wait_for_assignments(self.topic, self.num_partitions,
                                   [coord, coord2])

        # Once assignments have been received we need to ensure that the record
        # stream is clear of any duplicate messages. We do this by producing
        # a sentinel to each partition and consuming messages from the stream
        # until all the sentinels have appeared at least once. At that point
        # any churn should have cleared up and we can depend on lock-step
        # delivery.
        pending_sentinels = {}
        for part in range(self.num_partitions):
            [value] = yield self.send_messages(part, ['sentinel'])
            pending_sentinels[part] = value
        while pending_sentinels:
            [message] = yield record_stream.get()
            if pending_sentinels.get(
                    message.partition) == message.message.value:
                del pending_sentinels[message.partition]

        # after the cluster has re-formed, send some more messages
        # and check that we get them too (and don't get the old messages again)
        record_stream = DeferredQueue(backlog=1)
        for part in range(self.num_partitions):
            yield async_delay()
            [value] = yield self.send_messages(part, [part])
            log.debug('waiting for messages from partition %d', part)
            [message] = yield record_stream.get()
            self.assertEqual(message.partition, part)
            self.assertEqual(message.message.value, value)
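The sentinel-draining step above is a reusable pattern. A sketch of how it could be factored into a helper method, under the same assumptions as the test (send_messages(), num_partitions and the record_stream queue behave as above; the helper name is hypothetical):

    @inlineCallbacks
    def drain_until_sentinels(self, record_stream):
        """
        Produce one sentinel per partition, then consume from record_stream
        until every sentinel has been seen at least once, discarding any
        duplicates left over from the rebalance.
        """
        pending = {}
        for part in range(self.num_partitions):
            [value] = yield self.send_messages(part, ['sentinel'])
            pending[part] = value
        while pending:
            [message] = yield record_stream.get()
            if pending.get(message.partition) == message.message.value:
                del pending[message.partition]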
Example #12
    @inlineCallbacks
    def test_two_consumergroup_join(self):
        """
        When a second member joins the consumer group it triggers a rebalance.
        After that completes some partitions are distributed to each member.
        """
        group_id = 'group_for_two'
        self.client2 = KafkaClient(self.harness.bootstrap_hosts,
                                   clientId=self.topic + '2')
        self.addCleanup(self.client2.close)

        record_stream = DeferredQueue(backlog=1)

        def processor(consumer, records):
            log.debug("processor(%r, %r)", consumer, records)
            record_stream.put(records)

        coord = ConsumerGroup(
            self.client,
            group_id,
            topics=[self.topic],
            processor=processor,
            retry_backoff_ms=100,
            heartbeat_interval_ms=1000,
            fatal_backoff_ms=3000,
        )
        de = self.when_called(coord, 'on_join_complete')
        coord_start_d = coord.start()
        self.addCleanup(coord.stop)

        # FIXME: This doesn't seem to get fired reliably.
        coord_start_d
        # self.addCleanup(lambda: coord_start_d)

        yield de

        # send some messages and see that they're processed
        for part in range(self.num_partitions):
            values = yield self.send_messages(part, [part])
            msgs = yield record_stream.get()
            self.assertEqual(msgs[0].partition, part)
            self.assertEqual(msgs[0].message.value, values[0])

        coord2 = ConsumerGroup(
            self.client2,
            group_id,
            topics=[self.topic],
            processor=processor,
            retry_backoff_ms=100,
            heartbeat_interval_ms=1000,
            fatal_backoff_ms=3000,
        )
        de = self.when_called(coord, 'on_join_complete')
        de2 = self.when_called(coord2, 'on_join_complete')
        coord2_start_d = coord2.start()
        self.addCleanup(coord2.stop)

        # FIXME: This doesn't seem to get fired reliably
        coord2_start_d
        # self.addCleanup(lambda: coord2_start_d)

        yield de
        yield de2
        self.assertIn(self.topic, coord.consumers)
        self.assertIn(self.topic, coord2.consumers)
        self.assertEqual(len(coord.consumers[self.topic]), 3)
        self.assertEqual(len(coord2.consumers[self.topic]), 3)
        self.assertNotEqual(coord.consumers[self.topic][0].partition,
                            coord2.consumers[self.topic][0].partition)

        # after the cluster has re-formed, send some more messages
        # and check that we get them too (and don't get the old messages again)
        for part in range(self.num_partitions):
            values = yield self.send_messages(part, [part])
            msgs = yield record_stream.get()
            self.assertEqual(msgs[0].partition, part)
            self.assertEqual(msgs[0].message.value, values[0])
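This test relies on a when_called() helper that is not shown in these examples. A minimal sketch of what it might look like, assuming it only needs to fire a twisted.internet.defer.Deferred the next time the named method is invoked; this is an illustration, not necessarily afkak's actual test helper:

    def when_called(self, obj, method_name):
        """
        Return a Deferred that fires with the call's arguments the next
        time obj.method_name is invoked, then restore the original method.
        """
        d = Deferred()
        original = getattr(obj, method_name)

        def wrapper(*args, **kwargs):
            # Restore the original first so re-wrapping later works.
            setattr(obj, method_name, original)
            result = original(*args, **kwargs)
            d.callback((args, kwargs))
            return result

        setattr(obj, method_name, wrapper)
        return d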
Example #13
    @inlineCallbacks
    def test_three_coordinator_join(self):
        self.client2 = KafkaClient(self.harness.bootstrap_hosts,
                                   clientId=self.topic + '2')
        self.addCleanup(self.client2.close)

        self.client3 = KafkaClient(self.harness.bootstrap_hosts,
                                   clientId=self.topic + '3')
        self.addCleanup(self.client3.close)
        coords = [
            Coordinator(client,
                        self.id(),
                        topics=["test-topic"],
                        retry_backoff_ms=100,
                        heartbeat_interval_ms=100,
                        fatal_backoff_ms=1000)
            for client in [self.client, self.client2, self.client3]
        ]
        # startup the first member of the group
        a_joined = Deferred()
        coords[0].on_join_complete = lambda *args: a_joined.callback(*args)
        coords[0].start()
        a_assignment = yield a_joined
        log.warn("first is in")

        self.assertNotEqual(coords[0].generation_id, None)
        self.assertEqual(coords[0].leader_id, coords[0].member_id)
        self.assertEqual(a_assignment, {"test-topic": (0, 1, 2, 3, 4, 5)})
        first_generation_id = coords[0].generation_id

        # now bring someone else into the group
        a_joined, b_joined = Deferred(), Deferred()

        coords[0].on_join_complete = lambda *args: a_joined.callback(*args)
        coords[1].on_join_complete = lambda *args: b_joined.callback(*args)
        log.warn("bringing in second")
        coords[1].start()
        log.warn("waiting for a_joined")
        yield a_joined
        log.warn("waiting for b_joined")
        yield b_joined

        self.assertEqual(coords[0].generation_id, coords[1].generation_id)
        self.assertEqual(coords[0].leader_id, coords[1].leader_id)
        self.assertNotEqual(coords[0].member_id, coords[1].member_id)
        self.assertNotEqual(coords[0].generation_id, first_generation_id)

        # and then bring in a third
        a_joined, b_joined, c_joined = Deferred(), Deferred(), Deferred()

        coords[0].on_join_complete = lambda *args: a_joined.callback(*args)
        coords[1].on_join_complete = lambda *args: b_joined.callback(*args)
        coords[2].on_join_complete = lambda *args: c_joined.callback(*args)
        log.warn("bringing in third")
        coords[2].start()
        log.warn("waiting for a_joined")
        a_assignment = yield a_joined
        log.warn("waiting for b_joined")
        b_assignment = yield b_joined
        log.warn("waiting for c_joined")
        c_assignment = yield c_joined

        self.assertTrue(len(a_assignment["test-topic"]) == 2, a_assignment)
        self.assertTrue(len(b_assignment["test-topic"]) == 2, b_assignment)
        self.assertTrue(len(c_assignment["test-topic"]) == 2, c_assignment)
        self.assertEqual(
            set(a_assignment["test-topic"] + b_assignment["test-topic"] +
                c_assignment["test-topic"]),
            set(range(6)),
        )
        # and remove one
        a_joined, b_joined = Deferred(), Deferred()

        coords[0].on_join_complete = lambda *args: a_joined.callback(*args)
        coords[1].on_join_complete = lambda *args: b_joined.callback(*args)
        log.warn("removing third")
        yield coords[2].stop()

        log.warn("waiting for a_joined")
        yield a_joined
        log.warn("waiting for b_joined")
        yield b_joined

        log.warn("done")
        yield coords[0].stop()
        yield coords[1].stop()
Example #14
class KafkaIntegrationTestCase(unittest.TestCase):
    create_client = True
    topic = None
    server = None
    reactor = None

    def shortDescription(self):
        """
        Show the ID of the test when nose displays its name, rather than
        a snippet of the docstring.
        """
        return self.id()

    @deferred(timeout=10)
    @inlineCallbacks
    def setUp(self):
        log.info("Setting up test %s", self.id())
        super(KafkaIntegrationTestCase, self).setUp()
        if not os.environ.get('KAFKA_VERSION'):  # pragma: no cover
            log.error('KAFKA_VERSION unset!')
            return

        if not self.topic:
            self.topic = "%s-%s" % (self.id()[self.id().rindex(".") + 1:],
                                    random_string(10))

        if self.create_client:
            self.client = KafkaClient('%s:%d' %
                                      (self.server.host, self.server.port),
                                      clientId=self.topic)

        yield ensure_topic_creation(self.client,
                                    self.topic,
                                    fully_replicated=True,
                                    reactor=self.reactor)

        self._messages = {}

    @deferred(timeout=10)
    @inlineCallbacks
    def tearDown(self):
        log.info("Tearing down test: %r", self)
        super(KafkaIntegrationTestCase, self).tearDown()
        if not os.environ.get('KAFKA_VERSION'):  # pragma: no cover
            log.error('KAFKA_VERSION unset!')
            return

        if self.create_client:
            yield self.client.close()
            # Check for outstanding delayedCalls. Note, this may yield
            # spurious errors if the class's client has an outstanding
            # delayed call due to reconnecting.
            dcs = self.reactor.getDelayedCalls()
            if dcs:  # pragma: no cover
                log.error("Outstanding Delayed Calls at tearDown: %s\n\n",
                          ' '.join([str(dc) for dc in dcs]))
            self.assertFalse(dcs)

    @inlineCallbacks
    def current_offset(self, topic, partition):
        offsets, = yield self.client.send_offset_request(
            [OffsetRequest(topic, partition, -1, 1)])
        returnValue(offsets.offsets[0])

    def msg(self, s):
        if s not in self._messages:
            self._messages[s] = (u'%s-%s-%s' %
                                 (s, self.id(), uuid.uuid4())).encode('utf-8')

        return self._messages[s]
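For context on how the class attributes left as None above (server, reactor) get populated, here is a hedged sketch of a concrete subclass. It mirrors the fixture wiring of Example #3 with a single broker; the class and test names are hypothetical, and fixture teardown is omitted for brevity.

class SingleBrokerIntegrationTest(KafkaIntegrationTestCase):
    @classmethod
    def setUpClass(cls):
        if not os.environ.get('KAFKA_VERSION'):  # pragma: no cover
            return

        # One zookeeper, one kafka broker (positional arguments as in
        # Example #3: broker id, zk host, zk port, chroot, replicas,
        # partitions).
        zk_chroot = random_string(10)
        cls.zk = ZookeeperFixture.instance()
        cls.server = KafkaFixture.instance(
            0, cls.zk.host, cls.zk.port, zk_chroot, 1, 1)

        # Start the Twisted reactor in a thread so KafkaClient can connect.
        cls.reactor, cls.thread = threaded_reactor()

    @deferred(timeout=10)
    @inlineCallbacks
    def test_new_topic_latest_offset_is_zero(self):
        # setUp() already created self.client and a fresh self.topic.
        offset = yield self.current_offset(self.topic, 0)
        self.assertEqual(offset, 0)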