Example #1
    def test_no_concurrent_send_on_connection(self):
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=self.hosts,
            metadata_max_age_ms=10000)
        yield from client.bootstrap()
        self.add_cleanup(client.close)

        yield from self.wait_topic(client, self.topic)

        node_id = client.get_random_node()
        wait_request = FetchRequest_v0(
            -1,  # replica_id
            500,  # max_wait_ms
            1024 * 1024,  # min_bytes
            [(self.topic, [(0, 0, 1024)])])
        vanila_request = MetadataRequest([])

        send_time = self.loop.time()
        long_task = self.loop.create_task(
            client.send(node_id, wait_request)
        )
        yield from asyncio.sleep(0.0001, loop=self.loop)
        self.assertFalse(long_task.done())

        yield from client.send(node_id, vanila_request)
        resp_time = self.loop.time()
        fetch_resp = yield from long_task
        # Check error code like resp->topics[0]->partitions[0]->error_code
        self.assertEqual(fetch_resp.topics[0][1][0][1], 0)

        # Check that the vanilla request executed only after the wait request
        self.assertGreaterEqual(resp_time - send_time, 0.5)
Example #2
    def test_concurrent_send_on_different_connection_groups(self):
        client = AIOKafkaClient(loop=self.loop,
                                bootstrap_servers=self.hosts,
                                metadata_max_age_ms=10000)
        yield from client.bootstrap()
        self.add_cleanup(client.close)

        yield from self.wait_topic(client, self.topic)

        node_id = client.get_random_node()
        wait_request = FetchRequest_v0(
            -1,  # replica_id
            500,  # max_wait_ms
            1024 * 1024,  # min_bytes
            [(self.topic, [(0, 0, 1024)])])
        vanila_request = MetadataRequest([])

        send_time = self.loop.time()
        long_task = self.loop.create_task(client.send(node_id, wait_request))
        yield from asyncio.sleep(0.0001, loop=self.loop)
        self.assertFalse(long_task.done())

        yield from client.send(node_id,
                               vanila_request,
                               group=ConnectionGroup.COORDINATION)
        resp_time = self.loop.time()
        self.assertFalse(long_task.done())

        fetch_resp = yield from long_task
        # Check error code like resp->topics[0]->partitions[0]->error_code
        self.assertEqual(fetch_resp.topics[0][1][0][1], 0)

        # Check that the vanilla request did not wait for the fetch request
        self.assertLess(resp_time - send_time, 0.5)
Example #3
    def test_send_timeout_deletes_connection(self):
        correct_response = MetadataResponse([], [])

        @asyncio.coroutine
        def send_exception(*args, **kwargs):
            raise asyncio.TimeoutError()

        @asyncio.coroutine
        def send(*args, **kwargs):
            return correct_response

        @asyncio.coroutine
        def get_conn(self, node_id, *, group=0):
            conn_id = (node_id, group)
            if conn_id in self._conns:
                conn = self._conns[conn_id]
                if not conn.connected():
                    del self._conns[conn_id]
                else:
                    return conn

            conn = mock.MagicMock()
            conn.send.side_effect = send
            self._conns[conn_id] = conn
            return conn

        node_id = 0
        conn = mock.MagicMock()
        conn.send.side_effect = send_exception
        conn.connected.return_value = True
        mocked_conns = {(node_id, 0): conn}
        client = AIOKafkaClient(loop=self.loop,
                                bootstrap_servers=['broker_1:4567'])
        client._conns = mocked_conns
        client._get_conn = types.MethodType(get_conn, client)

        # first send timeouts
        with self.assertRaises(RequestTimedOutError):
            yield from client.send(0, MetadataRequest([]))

        conn.close.assert_called_once_with(
            reason=CloseReason.CONNECTION_TIMEOUT)
        # this happens because conn was closed
        conn.connected.return_value = False

        # second send gets new connection and obtains result
        response = yield from client.send(0, MetadataRequest([]))
        self.assertEqual(response, correct_response)
        self.assertNotEqual(conn, client._conns[(node_id, 0)])
Example #4
    async def test_no_concurrent_send_on_connection(self):
        client = AIOKafkaClient(bootstrap_servers=self.hosts,
                                metadata_max_age_ms=10000)
        await client.bootstrap()
        self.add_cleanup(client.close)

        await self.wait_topic(client, self.topic)

        node_id = client.get_random_node()
        wait_request = FetchRequest_v0(
            -1,  # replica_id
            500,  # max_wait_ms
            1024 * 1024,  # min_bytes
            [(self.topic, [(0, 0, 1024)])])
        vanila_request = MetadataRequest([])

        loop = get_running_loop()
        send_time = loop.time()
        long_task = create_task(client.send(node_id, wait_request))
        await asyncio.sleep(0.0001)
        self.assertFalse(long_task.done())

        await client.send(node_id, vanila_request)
        resp_time = loop.time()
        fetch_resp = await long_task
        # Check error code like resp->topics[0]->partitions[0]->error_code
        self.assertEqual(fetch_resp.topics[0][1][0][1], 0)

        # Check that the vanilla request executed only after the wait request
        self.assertGreaterEqual(resp_time - send_time, 0.5)
Example #5
    def test_send_request(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        yield from client.bootstrap()
        node_id = client.get_random_node()
        resp = yield from client.send(node_id, MetadataRequest([]))
        self.assertTrue(isinstance(resp, MetadataResponse))
        yield from client.close()
Example #6
    def test_compacted_topic_consumption(self):
        # Compacted topics can have offsets skipped
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=[])
        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()

        subscriptions = SubscriptionState(loop=self.loop)
        fetcher = Fetcher(client, subscriptions, loop=self.loop)

        tp = TopicPartition('test', 0)
        req = FetchRequest(
            -1,  # replica_id
            100, 100, [(tp.topic, [(tp.partition, 155, 100000)])])

        builder = LegacyRecordBatchBuilder(
            magic=1, compression_type=0, batch_size=99999999)
        builder.append(160, value=b"12345", key=b"1", timestamp=None)
        builder.append(162, value=b"23456", key=b"2", timestamp=None)
        builder.append(167, value=b"34567", key=b"3", timestamp=None)
        batch = bytes(builder.build())

        resp = FetchResponse(
            [('test', [(
                0, 0, 3000,  # partition, error_code, highwater_offset
                batch  # Batch raw bytes
            )])])

        subscriptions.assign_from_user({tp})
        assignment = subscriptions.subscription.assignment
        tp_state = assignment.state_value(tp)
        client.send.side_effect = asyncio.coroutine(lambda n, r: resp)

        tp_state.seek(155)
        fetcher._in_flight.add(0)
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        # Test successful getone; the closest offset in the batch is 160
        first = buf.getone()
        self.assertEqual(tp_state.position, 161)
        self.assertEqual(
            (first.value, first.key, first.offset),
            (b"12345", b"1", 160))

        # Test successful getmany
        second, third = buf.getall()
        self.assertEqual(tp_state.position, 168)
        self.assertEqual(
            (second.value, second.key, second.offset),
            (b"23456", b"2", 162))
        self.assertEqual(
            (third.value, third.key, third.offset),
            (b"34567", b"3", 167))
Example #7
    async def test_compacted_topic_consumption(self):
        # Compacted topics can have offsets skipped
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=[])
        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()

        subscriptions = SubscriptionState(loop=self.loop)
        fetcher = Fetcher(client, subscriptions, loop=self.loop)

        tp = TopicPartition('test', 0)
        req = FetchRequest(
            -1,  # replica_id
            100, 100, [(tp.topic, [(tp.partition, 155, 100000)])])

        builder = LegacyRecordBatchBuilder(
            magic=1, compression_type=0, batch_size=99999999)
        builder.append(160, value=b"12345", key=b"1", timestamp=None)
        builder.append(162, value=b"23456", key=b"2", timestamp=None)
        builder.append(167, value=b"34567", key=b"3", timestamp=None)
        batch = bytes(builder.build())

        resp = FetchResponse(
            [('test', [(
                0, 0, 3000,  # partition, error_code, highwater_offset
                batch  # Batch raw bytes
            )])])

        subscriptions.assign_from_user({tp})
        assignment = subscriptions.subscription.assignment
        tp_state = assignment.state_value(tp)
        client.send.side_effect = asyncio.coroutine(lambda n, r: resp)

        tp_state.seek(155)
        fetcher._in_flight.add(0)
        needs_wake_up = await fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        # Test successful getone; the closest offset in the batch is 160
        first = buf.getone()
        self.assertEqual(tp_state.position, 161)
        self.assertEqual(
            (first.value, first.key, first.offset),
            (b"12345", b"1", 160))

        # Test successful getmany
        second, third = buf.getall()
        self.assertEqual(tp_state.position, 168)
        self.assertEqual(
            (second.value, second.key, second.offset),
            (b"23456", b"2", 162))
        self.assertEqual(
            (third.value, third.key, third.offset),
            (b"34567", b"3", 167))
Example #8
    def test_update_fetch_positions(self):
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=[])
        subscriptions = SubscriptionState('latest')
        fetcher = Fetcher(client, subscriptions, loop=self.loop)
        partition = TopicPartition('test', 0)
        # partition is not assigned, should be ignored
        yield from fetcher.update_fetch_positions([partition])

        state = TopicPartitionState()
        state.seek(0)
        subscriptions.assignment[partition] = state
        # partition is fetchable, no need to update position
        yield from fetcher.update_fetch_positions([partition])

        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: OffsetResponse([('test', [(0, 0, [4])])]))
        state.await_reset(OffsetResetStrategy.LATEST)
        client.cluster.leader_for_partition = mock.MagicMock()
        client.cluster.leader_for_partition.side_effect = [None, -1, 0]
        yield from fetcher.update_fetch_positions([partition])
        self.assertEqual(state.position, 4)

        client.cluster.leader_for_partition = mock.MagicMock()
        client.cluster.leader_for_partition.return_value = 1
        client.send = mock.MagicMock()
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: OffsetResponse([('test', [(0, 3, [])])]))
        state.await_reset(OffsetResetStrategy.LATEST)
        with self.assertRaises(UnknownTopicOrPartitionError):
            yield from fetcher.update_fetch_positions([partition])

        client.send.side_effect = asyncio.coroutine(
            lambda n, r: OffsetResponse([('test', [(0, -1, [])])]))
        with self.assertRaises(UnknownError):
            yield from fetcher.update_fetch_positions([partition])
        yield from fetcher.close()
Example #9
    def test_update_fetch_positions(self):
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=[])
        subscriptions = SubscriptionState('latest')
        fetcher = Fetcher(client, subscriptions, loop=self.loop)
        partition = TopicPartition('test', 0)
        # partition is not assigned, should be ignored
        yield from fetcher.update_fetch_positions([partition])

        state = TopicPartitionState()
        state.seek(0)
        subscriptions.assignment[partition] = state
        # partition is fetchable, no need to update position
        yield from fetcher.update_fetch_positions([partition])

        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: OffsetResponse[0]([('test', [(0, 0, [4])])]))
        state.await_reset(OffsetResetStrategy.LATEST)
        client.cluster.leader_for_partition = mock.MagicMock()
        client.cluster.leader_for_partition.side_effect = [None, -1, 0]
        yield from fetcher.update_fetch_positions([partition])
        self.assertEqual(state.position, 4)

        client.cluster.leader_for_partition = mock.MagicMock()
        client.cluster.leader_for_partition.return_value = 1
        client.send = mock.MagicMock()
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: OffsetResponse[0]([('test', [(0, 3, [])])]))
        state.await_reset(OffsetResetStrategy.LATEST)
        with self.assertRaises(UnknownTopicOrPartitionError):
            yield from fetcher.update_fetch_positions([partition])

        client.send.side_effect = asyncio.coroutine(
            lambda n, r: OffsetResponse[0]([('test', [(0, -1, [])])]))
        with self.assertRaises(UnknownError):
            yield from fetcher.update_fetch_positions([partition])
        yield from fetcher.close()
Example #10
    def test_load_metadata(self):
        brokers = [(0, 'broker_1', 4567), (1, 'broker_2', 5678)]

        topics = [(NO_ERROR, 'topic_1', [(NO_ERROR, 0, 1, [1, 2], [1, 2])]),
                  (NO_ERROR, 'topic_2', [
                      (NO_LEADER, 0, -1, [], []),
                      (NO_LEADER, 1, 1, [], []),
                  ]), (NO_LEADER, 'topic_no_partitions', []),
                  (UNKNOWN_TOPIC_OR_PARTITION, 'topic_unknown', []),
                  (NO_ERROR, 'topic_3', [(NO_ERROR, 0, 0, [0, 1], [0, 1]),
                                         (NO_ERROR, 1, 1, [1, 0], [1, 0]),
                                         (NO_ERROR, 2, 0, [0, 1], [0, 1])]),
                  (NO_ERROR, 'topic_4', [
                      (NO_ERROR, 0, 0, [0, 1], [0, 1]),
                      (REPLICA_NOT_AVAILABLE, 1, 1, [1, 0], [1, 0]),
                  ])]

        @asyncio.coroutine
        def send(request_id):
            return MetadataResponse(brokers, topics)

        mocked_conns = {0: mock.MagicMock()}
        mocked_conns[0].send.side_effect = send
        client = AIOKafkaClient(loop=self.loop,
                                bootstrap_servers=['broker_1:4567'])
        task = asyncio.ensure_future(
            client._md_synchronizer(), loop=self.loop)
        client._conns = mocked_conns
        client.cluster.update_metadata(MetadataResponse(brokers[:1], []))

        self.loop.run_until_complete(client.force_metadata_update())
        task.cancel()

        md = client.cluster
        c_brokers = md.brokers()
        self.assertEqual(len(c_brokers), 2)
        expected_brokers = [(0, 'broker_1', 4567, None),
                            (1, 'broker_2', 5678, None)]
        self.assertEqual(sorted(expected_brokers), sorted(list(c_brokers)))
        c_topics = md.topics()
        self.assertEqual(len(c_topics), 4)
        self.assertEqual(md.partitions_for_topic('topic_1'), set([0]))
        self.assertEqual(md.partitions_for_topic('topic_2'), set([0, 1]))
        self.assertEqual(md.partitions_for_topic('topic_3'), set([0, 1, 2]))
        self.assertEqual(md.partitions_for_topic('topic_4'), set([0, 1]))
        self.assertEqual(md.available_partitions_for_topic('topic_2'),
                         set([1]))

        mocked_conns[0].connected.return_value = False
        is_ready = self.loop.run_until_complete(client.ready(0))
        self.assertEqual(is_ready, False)
        is_ready = self.loop.run_until_complete(client.ready(1))
        self.assertEqual(is_ready, False)
        self.assertEqual(mocked_conns, {})

        with self.assertRaises(NodeNotReadyError):
            self.loop.run_until_complete(client.send(0, None))
Example #11
    def test_concurrent_send_on_different_connection_groups(self):
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=self.hosts,
            metadata_max_age_ms=10000)
        yield from client.bootstrap()
        self.add_cleanup(client.close)

        yield from self.wait_topic(client, self.topic)

        node_id = client.get_random_node()
        broker = client.cluster.broker_metadata(node_id)
        client.cluster.add_coordinator(
            node_id, broker.host, broker.port, rack=None,
            purpose=(CoordinationType.GROUP, ""))

        wait_request = FetchRequest_v0(
            -1,  # replica_id
            500,  # max_wait_ms
            1024 * 1024,  # min_bytes
            [(self.topic, [(0, 0, 1024)])])
        vanila_request = MetadataRequest([])

        send_time = self.loop.time()
        long_task = self.loop.create_task(
            client.send(node_id, wait_request)
        )
        yield from asyncio.sleep(0.0001, loop=self.loop)
        self.assertFalse(long_task.done())

        yield from client.send(
            node_id, vanila_request, group=ConnectionGroup.COORDINATION)
        resp_time = self.loop.time()
        self.assertFalse(long_task.done())

        fetch_resp = yield from long_task
        # Check error code like resp->topics[0]->partitions[0]->error_code
        self.assertEqual(fetch_resp.topics[0][1][0][1], 0)

        # Check that the vanilla request did not wait for the fetch request
        self.assertLess(resp_time - send_time, 0.5)
Example #12
    async def test_concurrent_send_on_different_connection_groups(self):
        client = AIOKafkaClient(bootstrap_servers=self.hosts,
                                metadata_max_age_ms=10000)
        await client.bootstrap()
        self.add_cleanup(client.close)

        await self.wait_topic(client, self.topic)

        node_id = client.get_random_node()
        broker = client.cluster.broker_metadata(node_id)
        client.cluster.add_coordinator(node_id,
                                       broker.host,
                                       broker.port,
                                       rack=None,
                                       purpose=(CoordinationType.GROUP, ""))

        wait_request = FetchRequest_v0(
            -1,  # replica_id
            500,  # max_wait_ms
            1024 * 1024,  # min_bytes
            [(self.topic, [(0, 0, 1024)])])
        vanila_request = MetadataRequest([])

        loop = get_running_loop()
        send_time = loop.time()
        long_task = create_task(client.send(node_id, wait_request))
        await asyncio.sleep(0.0001)
        self.assertFalse(long_task.done())

        await client.send(node_id,
                          vanila_request,
                          group=ConnectionGroup.COORDINATION)
        resp_time = loop.time()
        self.assertFalse(long_task.done())

        fetch_resp = await long_task
        # Check error code like resp->topics[0]->partitions[0]->error_code
        self.assertEqual(fetch_resp.topics[0][1][0][1], 0)

        # Check that the vanilla request did not wait for the fetch request
        self.assertLess(resp_time - send_time, 0.5)
Example #13
    def test_metadata_updated_on_socket_disconnect(self):
        # Related to issue 176. A disconnect means that either we lost
        # connection to the node, or we have a node failure. In both cases
        # there's a high probability that Leader distribution will also change.
        client = AIOKafkaClient(loop=self.loop,
                                bootstrap_servers=self.hosts,
                                metadata_max_age_ms=10000)
        yield from client.bootstrap()
        self.add_cleanup(client.close)

        # Init a connection
        node_id = client.get_random_node()
        assert node_id is not None
        req = MetadataRequest([])
        yield from client.send(node_id, req)

        # No metadata update pending atm
        self.assertFalse(client._md_update_waiter.done())

        # Connection disconnect should trigger an update
        conn = yield from client._get_conn(node_id)
        conn.close(reason=CloseReason.CONNECTION_BROKEN)
        self.assertTrue(client._md_update_waiter.done())
Example #14
    def test_metadata_updated_on_socket_disconnect(self):
        # Related to issue 176. A disconnect means that either we lost
        # connection to the node, or we have a node failure. In both cases
        # there's a high probability that Leader distribution will also change.
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=self.hosts,
            metadata_max_age_ms=10000)
        yield from client.bootstrap()
        self.add_cleanup(client.close)

        # Init a connection
        node_id = client.get_random_node()
        assert node_id is not None
        req = MetadataRequest([])
        yield from client.send(node_id, req)

        # No metadata update pending atm
        self.assertFalse(client._md_update_waiter.done())

        # Connection disconnect should trigger an update
        conn = yield from client._get_conn(node_id)
        conn.close(reason=CloseReason.CONNECTION_BROKEN)
        self.assertTrue(client._md_update_waiter.done())
Example #15
    def test_proc_fetch_request(self):
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=[])
        subscriptions = SubscriptionState(loop=self.loop)
        fetcher = Fetcher(
            client, subscriptions, auto_offset_reset="latest", loop=self.loop)

        tp = TopicPartition('test', 0)
        tp_info = (tp.topic, [(tp.partition, 4, 100000)])
        req = FetchRequest(
            -1,  # replica_id
            100, 100, [tp_info])

        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()

        builder = LegacyRecordBatchBuilder(
            magic=1, compression_type=0, batch_size=99999999)
        builder.append(offset=4, value=b"test msg", key=None, timestamp=None)
        raw_batch = bytes(builder.build())

        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 0, 9, raw_batch)])]))
        subscriptions.assign_from_user({tp})
        assignment = subscriptions.subscription.assignment
        tp_state = assignment.state_value(tp)

        # The partition has no active position, so will ignore result
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(fetcher._records, {})

        # The partition's position does not match request's fetch offset
        subscriptions.seek(tp, 0)
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(fetcher._records, {})

        subscriptions.seek(tp, 4)
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        self.assertEqual(buf.getone().value, b"test msg")

        # If position changed after fetch request passed
        subscriptions.seek(tp, 4)
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        subscriptions.seek(tp, 10)
        self.assertIsNone(buf.getone())

        # If assignment is lost after fetch request passed
        subscriptions.seek(tp, 4)
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        subscriptions.unsubscribe()
        self.assertIsNone(buf.getone())

        subscriptions.assign_from_user({tp})
        assignment = subscriptions.subscription.assignment
        tp_state = assignment.state_value(tp)

        # error -> no partition found (UnknownTopicOrPartitionError)
        subscriptions.seek(tp, 4)
        fetcher._records.clear()
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 3, 9, raw_batch)])]))
        cc = client.force_metadata_update.call_count
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(client.force_metadata_update.call_count, cc + 1)

        # error -> topic auth failed (TopicAuthorizationFailedError)
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 29, 9, raw_batch)])]))
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(TopicAuthorizationFailedError):
            yield from fetcher.next_record([])

        # error -> unknown
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, -1, 9, raw_batch)])]))
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, False)

        # error -> offset out of range with offset strategy
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 1, 9, raw_batch)])]))
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(tp_state.has_valid_position, False)
        self.assertEqual(tp_state.awaiting_reset, True)
        self.assertEqual(tp_state.reset_strategy, OffsetResetStrategy.LATEST)

        # error -> offset out of range without offset strategy
        subscriptions.seek(tp, 4)
        fetcher._default_reset_strategy = OffsetResetStrategy.NONE
        needs_wake_up = yield from fetcher._proc_fetch_request(
            assignment, 0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(OffsetOutOfRangeError):
            yield from fetcher.next_record([])

        yield from fetcher.close()
Example #16
class AIOKafkaProducer(object):
    """A Kafka client that publishes records to the Kafka cluster.

    The producer consists of a pool of buffer space that holds records that
    haven't yet been transmitted to the server as well as a background task
    that is responsible for turning these records into requests and
    transmitting them to the cluster.

    The send() method is asynchronous. When called it adds the record to a
    buffer of pending record sends and immediately returns. This allows the
    producer to batch together individual records for efficiency.

    The 'acks' config controls the criteria under which requests are considered
    complete. The "all" setting will result in blocking on the full commit of
    the record, the slowest but most durable setting.

    The key_serializer and value_serializer specify how to turn the key and
    value objects the user provides into bytes.

    Keyword Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the producer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client.
            Default: 'aiokafka-producer-#' (appended with a unique number
            per instance)
        key_serializer (callable): used to convert user-supplied keys to bytes
            If not None, called as f(key), should return bytes. Default: None.
        value_serializer (callable): used to convert user-supplied message
            values to bytes. If not None, called as f(value), should return
            bytes. Default: None.
        acks (0, 1, 'all'): The number of acknowledgments the producer requires
            the leader to have received before considering a request complete.
            This controls the durability of records that are sent. The
            following settings are common:

            0: Producer will not wait for any acknowledgment from the server
                at all. The message will immediately be added to the socket
                buffer and considered sent. No guarantee can be made that the
                server has received the record in this case, and the retries
                configuration will not take effect (as the client won't
                generally know of any failures). The offset given back for each
                record will always be set to -1.
            1: The broker leader will write the record to its local log but
                will respond without awaiting full acknowledgement from all
                followers. In this case should the leader fail immediately
                after acknowledging the record but before the followers have
                replicated it then the record will be lost.
            all: The broker leader will wait for the full set of in-sync
                replicas to acknowledge the record. This guarantees that the
                record will not be lost as long as at least one in-sync replica
                remains alive. This is the strongest available guarantee.

            If unset, defaults to acks=1.
        compression_type (str): The compression type for all data generated by
            the producer. Valid values are 'gzip', 'snappy', 'lz4', or None.
            Compression is of full batches of data, so the efficacy of batching
            will also impact the compression ratio (more batching means better
            compression). Default: None.
        max_batch_size (int): Maximum size of buffered data per partition.
            After this amount `send` coroutine will block until batch is
            drained.
            Default: 16384
        linger_ms (int): The producer groups together any records that arrive
            in between request transmissions into a single batched request.
            Normally this occurs only under load when records arrive faster
            than they can be sent out. However in some circumstances the client
            may want to reduce the number of requests even under moderate load.
            This setting accomplishes this by adding a small amount of
            artificial delay; that is, rather than immediately sending out a
            record the producer will wait for up to the given delay to allow
            other records to be sent so that the sends can be batched together.
            This setting defaults to 0 (i.e. no delay). Setting linger_ms=5
            would have the effect of reducing the number of requests sent but
            would add up to 5ms of latency to records sent in the absence of
            load. Default: 0.
        partitioner (callable): Callable used to determine which partition
            each message is assigned to. Called (after key serialization):
            partitioner(key_bytes, all_partitions, available_partitions).
            The default partitioner implementation hashes each non-None key
            using the same murmur2 algorithm as the java client so that
            messages with the same key are assigned to the same partition.
            When a key is None, the message is delivered to a random partition
            (filtered to partitions with available leaders only, if possible).
        max_request_size (int): The maximum size of a request. This is also
            effectively a cap on the maximum record size. Note that the server
            has its own cap on record size which may be different from this.
            This setting will limit the number of record batches the producer
            will send in a single request to avoid sending huge requests.
            Default: 1048576.
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        request_timeout_ms (int): Produce request timeout in milliseconds.
            As it's sent as part of ProduceRequest, maximum waiting time can
            be up to 2 * request_timeout_ms.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        api_version (str): specify which kafka API version to use.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto

    Note:
        Many configuration parameters are taken from Java Client:
        https://kafka.apache.org/documentation.html#producerconfigs
    """
    _PRODUCER_CLIENT_ID_SEQUENCE = 0

    def __init__(self,
                 *,
                 loop,
                 bootstrap_servers='localhost',
                 client_id=None,
                 metadata_max_age_ms=300000,
                 request_timeout_ms=40000,
                 api_version='auto',
                 acks=1,
                 key_serializer=None,
                 value_serializer=None,
                 compression_type=None,
                 max_batch_size=16384,
                 partitioner=DefaultPartitioner(),
                 max_request_size=1048576,
                 linger_ms=0,
                 send_backoff_ms=100,
                 retry_backoff_ms=100):
        if acks not in (0, 1, -1, 'all'):
            raise ValueError("Invalid ACKS parameter")
        if compression_type not in ('gzip', 'snappy', 'lz4', None):
            raise ValueError("Invalid compression type!")
        if api_version not in ('auto', '0.9', '0.8.2', '0.8.1', '0.8.0'):
            raise ValueError("Unsupported Kafka version")

        self._PRODUCER_CLIENT_ID_SEQUENCE += 1
        if client_id is None:
            client_id = 'aiokafka-producer-%s' % \
                self._PRODUCER_CLIENT_ID_SEQUENCE

        if acks == 'all':
            acks = -1
        self._acks = acks
        self._api_version = api_version
        self._key_serializer = key_serializer
        self._value_serializer = value_serializer
        self._compression_type = compression_type
        self._partitioner = partitioner
        self._max_request_size = max_request_size
        self._request_timeout_ms = request_timeout_ms

        self.client = AIOKafkaClient(loop=loop,
                                     bootstrap_servers=bootstrap_servers,
                                     client_id=client_id,
                                     metadata_max_age_ms=metadata_max_age_ms,
                                     request_timeout_ms=request_timeout_ms)
        self._metadata = self.client.cluster
        self._message_accumulator = MessageAccumulator(
            self._metadata, max_batch_size, self._compression_type,
            self._request_timeout_ms / 1000, loop)
        self._sender_task = None
        self._in_flight = set()
        self._closed = False
        self._loop = loop
        self._retry_backoff = retry_backoff_ms / 1000
        self._linger_time = linger_ms / 1000

    @asyncio.coroutine
    def start(self):
        """Connect to Kafka cluster and check server version"""
        log.debug("Starting the Kafka producer")  # trace
        yield from self.client.bootstrap()

        # Check Broker Version if not set explicitly
        if self._api_version == 'auto':
            self._api_version = yield from self.client.check_version()

        # Convert api_version config to tuple for easy comparisons
        self._api_version = tuple(map(int, self._api_version.split('.')))

        if self._compression_type == 'lz4':
            assert self._api_version >= (0, 8, 2), \
                'LZ4 Requires >= Kafka 0.8.2 Brokers'

        self._sender_task = ensure_future(self._sender_routine(),
                                          loop=self._loop)
        log.debug("Kafka producer started")

    @asyncio.coroutine
    def stop(self):
        """Flush all pending data and close all connections to kafka cluser"""
        if self._closed:
            return

        # Wait until all batches are delivered and futures are resolved
        yield from self._message_accumulator.close()

        if self._sender_task:
            self._sender_task.cancel()
            yield from self._sender_task

        yield from self.client.close()
        self._closed = True
        log.debug("The Kafka producer has closed.")

    @asyncio.coroutine
    def partitions_for(self, topic):
        """Returns set of all known partitions for the topic."""
        return (yield from self._wait_on_metadata(topic))

    @asyncio.coroutine
    def _wait_on_metadata(self, topic):
        """
        Wait for cluster metadata including partitions for the given topic to
        be available.

        Arguments:
            topic (str): topic we want metadata for

        Returns:
            set: partition ids for the topic

        Raises:
            UnknownTopicOrPartitionError: if no topic or partitions found
                in cluster metadata
        """
        if topic in self.client.cluster.topics():
            return self._metadata.partitions_for_topic(topic)

        # add topic to metadata topic list if it is not there already.
        self.client.add_topic(topic)
        yield from self.client.force_metadata_update()
        if topic not in self.client.cluster.topics():
            raise UnknownTopicOrPartitionError()

        return self._metadata.partitions_for_topic(topic)

    @asyncio.coroutine
    def send(self, topic, value=None, key=None, partition=None):
        """Publish a message to a topic.

        Arguments:
            topic (str): topic where the message will be published
            value (optional): message value. Must be type bytes, or be
                serializable to bytes via configured value_serializer. If value
                is None, key is required and message acts as a 'delete'.
                See kafka compaction documentation for more details:
                http://kafka.apache.org/documentation.html#compaction
                (compaction requires kafka >= 0.8.1)
            partition (int, optional): optionally specify a partition. If not
                set, the partition will be selected using the configured
                'partitioner'.
            key (optional): a key to associate with the message. Can be used to
                determine which partition to send the message to. If partition
                is None (and producer's partitioner config is left as default),
                then messages with the same key will be delivered to the same
                partition (but if key is None, partition is chosen randomly).
                Must be type bytes, or be serializable to bytes via configured
                key_serializer.

        Returns:
            asyncio.Future: future object that will be set when message is
                            processed

        Note: The returned future will wait based on `request_timeout_ms`
            setting. Cancelling this future will not stop event from being
            sent.
        """
        assert value is not None or self._api_version >= (0, 8, 1), (
            'Null messages require kafka >= 0.8.1')
        assert not (value is None and key is None), \
            'Need at least one: key or value'

        # first make sure the metadata for the topic is available
        yield from self._wait_on_metadata(topic)

        key_bytes, value_bytes = self._serialize(topic, key, value)
        partition = self._partition(topic, partition, key, value, key_bytes,
                                    value_bytes)

        tp = TopicPartition(topic, partition)
        log.debug("Sending (key=%s value=%s) to %s", key, value, tp)

        fut = yield from self._message_accumulator.add_message(
            tp, key_bytes, value_bytes, self._request_timeout_ms / 1000)
        return fut

    @asyncio.coroutine
    def _sender_routine(self):
        """backgroud task that sends message batches to Kafka brokers"""
        tasks = set()
        try:
            while True:
                batches, unknown_leaders_exist = \
                    self._message_accumulator.drain_by_nodes(
                        ignore_nodes=self._in_flight)

                # create produce task for every batch
                for node_id, batches in batches.items():
                    task = ensure_future(self._send_produce_req(
                        node_id, batches),
                                         loop=self._loop)
                    tasks.add(task)

                if unknown_leaders_exist:
                    # we have at least one unknown partition's leader,
                    # try to update cluster metadata and wait backoff time
                    self.client.force_metadata_update()
                    # Just to have at least 1 future in wait() call
                    fut = asyncio.sleep(self._retry_backoff, loop=self._loop)
                    waiters = tasks.union([fut])
                else:
                    fut = self._message_accumulator.data_waiter()
                    waiters = tasks.union([fut])

                # wait when:
                # * At least one of produce task is finished
                # * Data for new partition arrived
                done, _ = yield from asyncio.wait(
                    waiters,
                    return_when=asyncio.FIRST_COMPLETED,
                    loop=self._loop)
                tasks -= done

        except asyncio.CancelledError:
            pass
        except Exception:  # noqa
            log.error("Unexpected error in sender routine", exc_info=True)

    @asyncio.coroutine
    def _send_produce_req(self, node_id, batches):
        """Create produce request to node
        If producer configured with `retries`>0 and produce response contain
        "failed" partitions produce request for this partition will try
        resend to broker `retries` times with `retry_timeout_ms` timeouts.

        Arguments:
            node_id (int): kafka broker identifier
            batches (dict): dictionary of {TopicPartition: MessageBatch}
        """
        self._in_flight.add(node_id)
        t0 = self._loop.time()
        while True:
            topics = collections.defaultdict(list)
            for tp, batch in batches.items():
                topics[tp.topic].append((tp.partition, batch.data()))

            request = ProduceRequest(required_acks=self._acks,
                                     timeout=self._request_timeout_ms,
                                     topics=list(topics.items()))

            try:
                response = yield from self.client.send(node_id, request)
            except KafkaError as err:
                for batch in batches.values():
                    if not err.retriable or batch.expired():
                        batch.done(exception=err)
                log.warning("Got error produce response: %s", err)
                if not err.retriable:
                    break
            else:
                if response is None:
                    # noacks, just "done" batches
                    for batch in batches.values():
                        batch.done()
                    break

                for topic, partitions in response.topics:
                    for partition, error_code, offset in partitions:
                        tp = TopicPartition(topic, partition)
                        error = Errors.for_code(error_code)
                        batch = batches.pop(tp, None)
                        if batch is None:
                            continue

                        if error is Errors.NoError:
                            batch.done(offset)
                        elif not getattr(error, 'retriable', False) or \
                                batch.expired():
                            batch.done(exception=error())
                        else:
                            # Ok, we can retry this batch
                            batches[tp] = batch
                            log.warning(
                                "Got error produce response on topic-partition"
                                " %s, retrying. Error: %s", tp, error)

            if batches:
                yield from asyncio.sleep(self._retry_backoff, loop=self._loop)
            else:
                break

        # if the batches for this node were processed in less than linger
        # seconds, wait for the remaining time
        sleep_time = self._linger_time - (self._loop.time() - t0)
        if sleep_time > 0:
            yield from asyncio.sleep(sleep_time, loop=self._loop)

        self._in_flight.remove(node_id)

    def _serialize(self, topic, key, value):
        if self._key_serializer:
            serialized_key = self._key_serializer(key)
        else:
            serialized_key = key
        if self._value_serializer:
            serialized_value = self._value_serializer(value)
        else:
            serialized_value = value

        message_size = MessageSet.HEADER_SIZE + Message.HEADER_SIZE
        if serialized_key is not None:
            message_size += len(serialized_key)
        if serialized_value is not None:
            message_size += len(serialized_value)
        if message_size > self._max_request_size:
            raise MessageSizeTooLargeError(
                "The message is %d bytes when serialized which is larger than"
                " the maximum request size you have configured with the"
                " max_request_size configuration" % message_size)

        return serialized_key, serialized_value

    def _partition(self, topic, partition, key, value, serialized_key,
                   serialized_value):
        if partition is not None:
            assert partition >= 0
            assert partition in self._metadata.partitions_for_topic(topic), \
                'Unrecognized partition'
            return partition

        all_partitions = list(self._metadata.partitions_for_topic(topic))
        available = list(self._metadata.available_partitions_for_topic(topic))
        return self._partitioner(serialized_key, all_partitions, available)
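
A minimal usage sketch for the producer class above (not part of the source): it assumes the class is importable as aiokafka.AIOKafkaProducer, that a broker listens on localhost:9092, and that the topic name "my_topic" is hypothetical. It uses the same old-style coroutine API (explicit loop, yield from) as the class itself.

# Usage sketch only -- broker address and topic name are assumptions.
import asyncio

from aiokafka import AIOKafkaProducer


@asyncio.coroutine
def produce_one(loop):
    producer = AIOKafkaProducer(
        loop=loop, bootstrap_servers='localhost:9092', acks='all')
    yield from producer.start()
    try:
        # send() only enqueues the message and returns a future; awaiting
        # that future waits for the broker acknowledgement per `acks`.
        fut = yield from producer.send('my_topic', b'some value', key=b'key')
        yield from fut
    finally:
        yield from producer.stop()


loop = asyncio.get_event_loop()
loop.run_until_complete(produce_one(loop))

With acks='all' the returned future resolves only after the full set of in-sync replicas has acknowledged the record, matching the durability notes in the class docstring.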
Example #17
    async def test_proc_fetch_request(self):
        client = AIOKafkaClient(bootstrap_servers=[])
        subscriptions = SubscriptionState()
        fetcher = Fetcher(client, subscriptions, auto_offset_reset="latest")

        tp = TopicPartition('test', 0)
        tp_info = (tp.topic, [(tp.partition, 4, 100000)])
        req = FetchRequest(
            -1,  # replica_id
            100,
            100,
            [tp_info])

        async def ready(conn):
            return True

        def force_metadata_update():
            fut = create_future()
            fut.set_result(False)
            return fut

        client.ready = mock.MagicMock()
        client.ready.side_effect = ready
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = force_metadata_update
        client.send = mock.MagicMock()

        builder = LegacyRecordBatchBuilder(magic=1,
                                           compression_type=0,
                                           batch_size=99999999)
        builder.append(offset=4, value=b"test msg", key=None, timestamp=None)
        raw_batch = bytes(builder.build())

        fetch_response = FetchResponse([('test', [(0, 0, 9, raw_batch)])])

        async def send(node, request):
            nonlocal fetch_response
            return fetch_response

        client.send.side_effect = send
        subscriptions.assign_from_user({tp})
        assignment = subscriptions.subscription.assignment
        tp_state = assignment.state_value(tp)

        # The partition has no active position, so will ignore result
        needs_wake_up = await fetcher._proc_fetch_request(assignment, 0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(fetcher._records, {})

        # The partition's position does not match request's fetch offset
        subscriptions.seek(tp, 0)
        needs_wake_up = await fetcher._proc_fetch_request(assignment, 0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(fetcher._records, {})

        subscriptions.seek(tp, 4)
        needs_wake_up = await fetcher._proc_fetch_request(assignment, 0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        self.assertEqual(buf.getone().value, b"test msg")

        # If position changed after fetch request passed
        subscriptions.seek(tp, 4)
        needs_wake_up = await fetcher._proc_fetch_request(assignment, 0, req)
        subscriptions.seek(tp, 10)
        self.assertIsNone(buf.getone())

        # If assignment is lost after fetch request passed
        subscriptions.seek(tp, 4)
        needs_wake_up = await fetcher._proc_fetch_request(assignment, 0, req)
        subscriptions.unsubscribe()
        self.assertIsNone(buf.getone())

        subscriptions.assign_from_user({tp})
        assignment = subscriptions.subscription.assignment
        tp_state = assignment.state_value(tp)

        # error -> no partition found (UnknownTopicOrPartitionError)
        subscriptions.seek(tp, 4)
        fetcher._records.clear()
        fetch_response = FetchResponse([('test', [(0, 3, 9, raw_batch)])])
        cc = client.force_metadata_update.call_count
        needs_wake_up = await fetcher._proc_fetch_request(assignment, 0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(client.force_metadata_update.call_count, cc + 1)

        # error -> topic auth failed (TopicAuthorizationFailedError)
        fetch_response = FetchResponse([('test', [(0, 29, 9, raw_batch)])])
        needs_wake_up = await fetcher._proc_fetch_request(assignment, 0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(TopicAuthorizationFailedError):
            await fetcher.next_record([])

        # error -> unknown
        fetch_response = FetchResponse([('test', [(0, -1, 9, raw_batch)])])
        needs_wake_up = await fetcher._proc_fetch_request(assignment, 0, req)
        self.assertEqual(needs_wake_up, False)

        # error -> offset out of range with offset strategy
        fetch_response = FetchResponse([('test', [(0, 1, 9, raw_batch)])])
        needs_wake_up = await fetcher._proc_fetch_request(assignment, 0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(tp_state.has_valid_position, False)
        self.assertEqual(tp_state.awaiting_reset, True)
        self.assertEqual(tp_state.reset_strategy, OffsetResetStrategy.LATEST)

        # error -> offset out of range without offset strategy
        subscriptions.seek(tp, 4)
        fetcher._default_reset_strategy = OffsetResetStrategy.NONE
        needs_wake_up = await fetcher._proc_fetch_request(assignment, 0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(OffsetOutOfRangeError):
            await fetcher.next_record([])

        await fetcher.close()
Example #18
    def test_proc_fetch_request(self):
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=[])
        subscriptions = SubscriptionState('latest')
        fetcher = Fetcher(client, subscriptions, loop=self.loop)

        tp = TopicPartition('test', 0)
        tp_info = (tp.topic, [(tp.partition, 155, 100000)])
        req = FetchRequest(
            -1,  # replica_id
            100, 100, [tp_info])

        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()
        msg = Message(b"test msg")
        msg._encode_self()
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 0, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        state = TopicPartitionState()
        state.seek(0)
        subscriptions.assignment[tp] = state
        subscriptions.needs_partition_assignment = False
        fetcher._in_flight.add(0)
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        self.assertEqual(buf.getone(), None)  # invalid offset, msg is ignored

        state.seek(4)
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        self.assertEqual(buf.getone().value, b"test msg")

        # error -> no partition found
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 3, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        # error -> topic auth failed
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 29, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(TopicAuthorizationFailedError):
            yield from fetcher.next_record([])

        # error -> unknown
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, -1, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        # error -> offset out of range
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 1, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(state.is_fetchable(), False)

        state.seek(4)
        subscriptions._default_offset_reset_strategy = OffsetResetStrategy.NONE
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 1, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(OffsetOutOfRangeError):
            yield from fetcher.next_record([])

        yield from fetcher.close()
예제 #21
0
    def test_proc_fetch_request(self):
        client = AIOKafkaClient(
            loop=self.loop,
            bootstrap_servers=[])
        subscriptions = SubscriptionState('latest')
        fetcher = Fetcher(client, subscriptions, loop=self.loop)

        tp = TopicPartition('test', 0)
        tp_info = (tp.topic, [(tp.partition, 155, 100000)])
        req = FetchRequest(
            -1,  # replica_id
            100, 100, [tp_info])

        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()

        builder = LegacyRecordBatchBuilder(
            magic=1, compression_type=0, batch_size=99999999)
        builder.append(offset=4, value=b"test msg", key=None, timestamp=None)
        raw_batch = bytes(builder.build())
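        # `raw_batch` now holds the wire-format bytes of a legacy (magic v1)
        # record batch with a single message at offset 4; the mocked
        # FetchResponse below returns it as the partition data.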

        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 0, 9, raw_batch)])]))
        fetcher._in_flight.add(0)
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        state = TopicPartitionState()
        state.seek(0)
        subscriptions.assignment[tp] = state
        subscriptions.needs_partition_assignment = False
        fetcher._in_flight.add(0)
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        self.assertEqual(buf.getone(), None)  # invalid offset, msg is ignored

        state.seek(4)
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        self.assertEqual(buf.getone().value, b"test msg")

        # error -> no partition found
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 3, 9, raw_batch)])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        # error -> topic auth failed
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 29, 9, raw_batch)])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(TopicAuthorizationFailedError):
            yield from fetcher.next_record([])

        # error -> unknown
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, -1, 9, raw_batch)])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        # error -> offset out of range with offset strategy
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 1, 9, raw_batch)])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        with mock.patch.object(fetcher, "update_fetch_positions") as mocked:
            mocked.side_effect = asyncio.coroutine(lambda o: None)
            needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
            self.assertEqual(needs_wake_up, False)
            self.assertEqual(state.is_fetchable(), False)
            mocked.assert_called_with([tp])

        # error -> offset out of range with strategy errors out
        state.seek(4)
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 1, 9, raw_batch)])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        with mock.patch.object(fetcher, "update_fetch_positions") as mocked:
            # the exception should not fail execution here
            @asyncio.coroutine
            def mock_async_raises(offsets):
                raise Exception()
            mocked.side_effect = mock_async_raises
            needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
            self.assertEqual(needs_wake_up, False)
            self.assertEqual(state.is_fetchable(), False)
            mocked.assert_called_with([tp])

        # error -> offset out of range without offset strategy
        state.seek(4)
        subscriptions._default_offset_reset_strategy = OffsetResetStrategy.NONE
        client.send.side_effect = asyncio.coroutine(
            lambda n, r: FetchResponse(
                [('test', [(0, 1, 9, raw_batch)])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(OffsetOutOfRangeError):
            yield from fetcher.next_record([])

        yield from fetcher.close()
예제 #22
0
    def test_compacted_topic_consumption(self):
        # Compacted topics can have offsets skipped
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=[])
        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()

        subscriptions = SubscriptionState('latest')
        fetcher = Fetcher(client, subscriptions, loop=self.loop)

        tp = TopicPartition('test', 0)
        req = FetchRequest(
            -1,  # replica_id
            100,
            100,
            [(tp.topic, [(tp.partition, 155, 100000)])])
        msg1 = Message(b"12345", key=b"1")
        msg1._encode_self()
        msg2 = Message(b"23456", key=b"2")
        msg2._encode_self()
        msg3 = Message(b"34567", key=b"3")
        msg3._encode_self()
        resp = FetchResponse([(
            'test',
            [(
                0,
                0,
                3000,  # partition, error_code, highwater_offset
                [
                    (160, 5, msg1),  # offset, len_bytes, bytes
                    (162, 5, msg2),
                    (167, 5, msg3),
                ])])])

        client.send.side_effect = asyncio.coroutine(lambda n, r: resp)
        state = TopicPartitionState()
        state.seek(155)
        state.drop_pending_message_set = False
        subscriptions.assignment[tp] = state
        subscriptions.needs_partition_assignment = False
        fetcher._in_flight.add(0)

        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        # Test successful getone
        first = buf.getone()
        self.assertEqual(state.position, 161)
        self.assertEqual((first.value, first.key, first.offset),
                         (msg1.value, msg1.key, 160))

        # Test successful getmany
        second, third = buf.getall()
        self.assertEqual(state.position, 168)
        self.assertEqual((second.value, second.key, second.offset),
                         (msg2.value, msg2.key, 162))
        self.assertEqual((third.value, third.key, third.offset),
                         (msg3.value, msg3.key, 167))
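
The position assertions above rely on a single rule: after consuming a record, the position advances to that record's offset plus one, even when log compaction has removed the offsets in between. A minimal standalone sketch of that bookkeeping (a hypothetical helper, not aiokafka API):

def advance_position(position, consumed_offsets):
    # Offsets on compacted topics may be non-contiguous; the consumer position
    # still jumps to last_consumed_offset + 1 after each record.
    for offset in consumed_offsets:
        position = offset + 1
    return position

assert advance_position(155, [160]) == 161        # matches the getone() check
assert advance_position(161, [162, 167]) == 168   # matches the getall() check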
예제 #23
0
class AIOKafkaProducer(object):
    """A Kafka client that publishes records to the Kafka cluster.

    The producer consists of a pool of buffer space that holds records that
    haven't yet been transmitted to the server as well as a background task
    that is responsible for turning these records into requests and
    transmitting them to the cluster.

    The send() method is asynchronous. When called it adds the record to a
    buffer of pending record sends and immediately returns. This allows the
    producer to batch together individual records for efficiency.

    The 'acks' config controls the criteria under which requests are considered
    complete. The "all" setting will result in blocking on the full commit of
    the record, the slowest but most durable setting.

    The key_serializer and value_serializer instruct how to turn the key and
    value objects the user provides into bytes.

    Keyword Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the producer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client.
            Default: 'aiokafka-producer-#' (appended with a unique number
            per instance)
        key_serializer (callable): used to convert user-supplied keys to bytes
            If not None, called as f(key), should return bytes. Default: None.
        value_serializer (callable): used to convert user-supplied message
            values to bytes. If not None, called as f(value), should return
            bytes. Default: None.
        acks (0, 1, 'all'): The number of acknowledgments the producer requires
            the leader to have received before considering a request complete.
            This controls the durability of records that are sent. The
            following settings are common:

            0: Producer will not wait for any acknowledgment from the server
                at all. The message will immediately be added to the socket
                buffer and considered sent. No guarantee can be made that the
                server has received the record in this case, and the retries
                configuration will not take effect (as the client won't
                generally know of any failures). The offset given back for each
                record will always be set to -1.
            1: The broker leader will write the record to its local log but
                will respond without awaiting full acknowledgement from all
                followers. In this case should the leader fail immediately
                after acknowledging the record but before the followers have
                replicated it then the record will be lost.
            all: The broker leader will wait for the full set of in-sync
                replicas to acknowledge the record. This guarantees that the
                record will not be lost as long as at least one in-sync replica
                remains alive. This is the strongest available guarantee.

            If unset, defaults to acks=1.
        compression_type (str): The compression type for all data generated by
            the producer. Valid values are 'gzip', 'snappy', 'lz4', or None.
            Compression is of full batches of data, so the efficacy of batching
            will also impact the compression ratio (more batching means better
            compression). Default: None.
        max_batch_size (int): Maximum size of buffered data per partition.
            Once this amount is reached, the `send` coroutine will block until
            the batch is drained.
            Default: 16384
        linger_ms (int): The producer groups together any records that arrive
            in between request transmissions into a single batched request.
            Normally this occurs only under load when records arrive faster
            than they can be sent out. However in some circumstances the client
            may want to reduce the number of requests even under moderate load.
            This setting accomplishes this by adding a small amount of
            artificial delay; that is, rather than immediately sending out a
            record the producer will wait for up to the given delay to allow
            other records to be sent so that the sends can be batched together.
            This setting defaults to 0 (i.e. no delay). Setting linger_ms=5
            would have the effect of reducing the number of requests sent but
            would add up to 5ms of latency to records sent in the absence of
            load. Default: 0.
        partitioner (callable): Callable used to determine which partition
            each message is assigned to. Called (after key serialization):
            partitioner(key_bytes, all_partitions, available_partitions).
            The default partitioner implementation hashes each non-None key
            using the same murmur2 algorithm as the java client so that
            messages with the same key are assigned to the same partition.
            When a key is None, the message is delivered to a random partition
            (filtered to partitions with available leaders only, if possible).
        max_request_size (int): The maximum size of a request. This is also
            effectively a cap on the maximum record size. Note that the server
            has its own cap on record size which may be different from this.
            This setting will limit the number of record batches the producer
            will send in a single request to avoid sending huge requests.
            Default: 1048576.
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        request_timeout_ms (int): Produce request timeout in milliseconds.
            As it's sent as part of ProduceRequest, maximum waiting time can
            be up to 2 * request_timeout_ms.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        api_version (str): specify which kafka API version to use.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto

    Note:
        Many configuration parameters are taken from Java Client:
        https://kafka.apache.org/documentation.html#producerconfigs
    """
    _PRODUCER_CLIENT_ID_SEQUENCE = 0

    def __init__(self, *, loop, bootstrap_servers='localhost',
                 client_id=None,
                 metadata_max_age_ms=300000, request_timeout_ms=40000,
                 api_version='auto', acks=1,
                 key_serializer=None, value_serializer=None,
                 compression_type=None, max_batch_size=16384,
                 partitioner=DefaultPartitioner(), max_request_size=1048576,
                 linger_ms=0, send_backoff_ms=100,
                 retry_backoff_ms=100):
        if acks not in (0, 1, -1, 'all'):
            raise ValueError("Invalid ACKS parameter")
        if compression_type not in ('gzip', 'snappy', 'lz4', None):
            raise ValueError("Invalid compression type!")
        if api_version not in ('auto', '0.9', '0.8.2', '0.8.1', '0.8.0'):
            raise ValueError("Unsupported Kafka version")

        self._PRODUCER_CLIENT_ID_SEQUENCE += 1
        if client_id is None:
            client_id = 'aiokafka-producer-%s' % \
                self._PRODUCER_CLIENT_ID_SEQUENCE

        if acks == 'all':
            acks = -1
        self._acks = acks
        self._api_version = api_version
        self._key_serializer = key_serializer
        self._value_serializer = value_serializer
        self._compression_type = compression_type
        self._partitioner = partitioner
        self._max_request_size = max_request_size
        self._request_timeout_ms = request_timeout_ms

        self.client = AIOKafkaClient(
            loop=loop, bootstrap_servers=bootstrap_servers,
            client_id=client_id, metadata_max_age_ms=metadata_max_age_ms,
            request_timeout_ms=request_timeout_ms)
        self._metadata = self.client.cluster
        self._message_accumulator = MessageAccumulator(
            self._metadata, max_batch_size, self._compression_type,
            self._request_timeout_ms/1000, loop)
        self._sender_task = None
        self._in_flight = set()
        self._closed = False
        self._loop = loop
        self._retry_backoff = retry_backoff_ms / 1000
        self._linger_time = linger_ms / 1000

    @asyncio.coroutine
    def start(self):
        """Connect to Kafka cluster and check server version"""
        log.debug("Starting the Kafka producer")  # trace
        yield from self.client.bootstrap()

        # Check Broker Version if not set explicitly
        if self._api_version == 'auto':
            self._api_version = yield from self.client.check_version()

        # Convert api_version config to tuple for easy comparisons
        self._api_version = tuple(
            map(int, self._api_version.split('.')))

        if self._compression_type == 'lz4':
            assert self._api_version >= (0, 8, 2), \
                'LZ4 Requires >= Kafka 0.8.2 Brokers'

        self._sender_task = ensure_future(
            self._sender_routine(), loop=self._loop)
        log.debug("Kafka producer started")

    @asyncio.coroutine
    def stop(self):
        """Flush all pending data and close all connections to kafka cluster"""
        if self._closed:
            return

        # Wait until all batches are delivered and futures resolved
        yield from self._message_accumulator.close()

        if self._sender_task:
            self._sender_task.cancel()
            yield from self._sender_task

        yield from self.client.close()
        self._closed = True
        log.debug("The Kafka producer has closed.")

    @asyncio.coroutine
    def partitions_for(self, topic):
        """Returns set of all known partitions for the topic."""
        return (yield from self._wait_on_metadata(topic))

    @asyncio.coroutine
    def _wait_on_metadata(self, topic):
        """
        Wait for cluster metadata including partitions for the given topic to
        be available.

        Arguments:
            topic (str): topic we want metadata for

        Returns:
            set: partition ids for the topic

        Raises:
            UnknownTopicOrPartitionError: if no topic or partitions found
                in cluster metadata
        """
        if topic in self.client.cluster.topics():
            return self._metadata.partitions_for_topic(topic)

        # add topic to metadata topic list if it is not there already.
        self.client.add_topic(topic)
        yield from self.client.force_metadata_update()
        if topic not in self.client.cluster.topics():
            raise UnknownTopicOrPartitionError()

        return self._metadata.partitions_for_topic(topic)

    @asyncio.coroutine
    def send(self, topic, value=None, key=None, partition=None):
        """Publish a message to a topic.

        Arguments:
            topic (str): topic where the message will be published
            value (optional): message value. Must be type bytes, or be
                serializable to bytes via configured value_serializer. If value
                is None, key is required and message acts as a 'delete'.
                See kafka compaction documentation for more details:
                http://kafka.apache.org/documentation.html#compaction
                (compaction requires kafka >= 0.8.1)
            partition (int, optional): optionally specify a partition. If not
                set, the partition will be selected using the configured
                'partitioner'.
            key (optional): a key to associate with the message. Can be used to
                determine which partition to send the message to. If partition
                is None (and producer's partitioner config is left as default),
                then messages with the same key will be delivered to the same
                partition (but if key is None, partition is chosen randomly).
                Must be type bytes, or be serializable to bytes via configured
                key_serializer.

        Returns:
            asyncio.Future: future object that will be set when message is
                            processed

        Note: The returned future will wait based on `request_timeout_ms`
            setting. Cancelling this future will not stop the event from being
            sent.
        """
        assert value is not None or self._api_version >= (0, 8, 1), (
            'Null messages require kafka >= 0.8.1')
        assert not (value is None and key is None), \
            'Need at least one: key or value'

        # first make sure the metadata for the topic is available
        yield from self._wait_on_metadata(topic)

        key_bytes, value_bytes = self._serialize(topic, key, value)
        partition = self._partition(topic, partition, key, value,
                                    key_bytes, value_bytes)

        tp = TopicPartition(topic, partition)
        log.debug("Sending (key=%s value=%s) to %s", key, value, tp)

        fut = yield from self._message_accumulator.add_message(
            tp, key_bytes, value_bytes, self._request_timeout_ms / 1000)
        return fut

    @asyncio.coroutine
    def _sender_routine(self):
        """Background task that sends message batches to Kafka brokers"""
        tasks = set()
        try:
            while True:
                batches, unknown_leaders_exist = \
                    self._message_accumulator.drain_by_nodes(
                        ignore_nodes=self._in_flight)

                # create produce task for every batch
                for node_id, batches in batches.items():
                    task = ensure_future(
                        self._send_produce_req(node_id, batches),
                        loop=self._loop)
                    tasks.add(task)

                if unknown_leaders_exist:
                    # we have at least one unknown partition's leader,
                    # try to update cluster metadata and wait backoff time
                    self.client.force_metadata_update()
                    # Just to have at least 1 future in wait() call
                    fut = asyncio.sleep(self._retry_backoff, loop=self._loop)
                    waiters = tasks.union([fut])
                else:
                    fut = self._message_accumulator.data_waiter()
                    waiters = tasks.union([fut])

                # wait when:
                # * At least one of produce task is finished
                # * Data for new partition arrived
                done, _ = yield from asyncio.wait(
                    waiters,
                    return_when=asyncio.FIRST_COMPLETED,
                    loop=self._loop)
                tasks -= done

        except asyncio.CancelledError:
            pass
        except Exception:  # noqa
            log.error("Unexpected error in sender routine", exc_info=True)

    @asyncio.coroutine
    def _send_produce_req(self, node_id, batches):
        """Create a produce request for the given node.
        If the producer is configured with `retries` > 0 and the produce
        response contains "failed" partitions, the produce request for those
        partitions is resent to the broker up to `retries` times, waiting
        `retry_timeout_ms` between attempts.

        Arguments:
            node_id (int): kafka broker identifier
            batches (dict): dictionary of {TopicPartition: MessageBatch}
        """
        self._in_flight.add(node_id)
        t0 = self._loop.time()
        while True:
            topics = collections.defaultdict(list)
            for tp, batch in batches.items():
                topics[tp.topic].append((tp.partition, batch.data()))

            request = ProduceRequest(
                required_acks=self._acks,
                timeout=self._request_timeout_ms,
                topics=list(topics.items()))

            try:
                response = yield from self.client.send(node_id, request)
            except KafkaError as err:
                for batch in batches.values():
                    if not err.retriable or batch.expired():
                        batch.done(exception=err)
                log.warning(
                    "Got error produce response: %s", err)
                if not err.retriable:
                    break
            else:
                if response is None:
                    # noacks, just "done" batches
                    for batch in batches.values():
                        batch.done()
                    break

                for topic, partitions in response.topics:
                    for partition, error_code, offset in partitions:
                        tp = TopicPartition(topic, partition)
                        error = Errors.for_code(error_code)
                        batch = batches.pop(tp, None)
                        if batch is None:
                            continue

                        if error is Errors.NoError:
                            batch.done(offset)
                        elif not getattr(error, 'retriable', False) or \
                                batch.expired():
                            batch.done(exception=error())
                        else:
                            # Ok, we can retry this batch
                            batches[tp] = batch
                            log.warning(
                                "Got error produce response on topic-partition"
                                " %s, retrying. Error: %s", tp, error)

            if batches:
                yield from asyncio.sleep(
                    self._retry_backoff, loop=self._loop)
            else:
                break

        # if the batches for this node were processed in less than linger
        # seconds, wait out the remaining time
        sleep_time = self._linger_time - (self._loop.time() - t0)
        if sleep_time > 0:
            yield from asyncio.sleep(sleep_time, loop=self._loop)

        self._in_flight.remove(node_id)

    def _serialize(self, topic, key, value):
        if self._key_serializer:
            serialized_key = self._key_serializer(key)
        else:
            serialized_key = key
        if self._value_serializer:
            serialized_value = self._value_serializer(value)
        else:
            serialized_value = value

        message_size = MessageSet.HEADER_SIZE + Message.HEADER_SIZE
        if serialized_key is not None:
            message_size += len(serialized_key)
        if serialized_value is not None:
            message_size += len(serialized_value)
        if message_size > self._max_request_size:
            raise MessageSizeTooLargeError(
                "The message is %d bytes when serialized which is larger than"
                " the maximum request size you have configured with the"
                " max_request_size configuration" % message_size)

        return serialized_key, serialized_value

    def _partition(self, topic, partition, key, value,
                   serialized_key, serialized_value):
        if partition is not None:
            assert partition >= 0
            assert partition in self._metadata.partitions_for_topic(topic), \
                'Unrecognized partition'
            return partition

        all_partitions = list(self._metadata.partitions_for_topic(topic))
        available = list(self._metadata.available_partitions_for_topic(topic))
        return self._partitioner(
            serialized_key, all_partitions, available)
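
A minimal usage sketch for the producer class above, assuming a broker is reachable at localhost:9092 and that 'my_topic' already exists; the address, topic, and payload are placeholders, not part of the original code.

async def produce_one(loop):
    # start() bootstraps cluster metadata, send() returns a delivery future,
    # and stop() flushes pending batches before closing connections.
    producer = AIOKafkaProducer(loop=loop, bootstrap_servers='localhost:9092')
    await producer.start()
    try:
        fut = await producer.send('my_topic', b'some value', key=b'some key')
        return await fut  # resolves once the broker acknowledges the record
    finally:
        await producer.stop()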
예제 #24
0
    def test_load_metadata(self):
        brokers = [
            (0, 'broker_1', 4567),
            (1, 'broker_2', 5678)
        ]

        topics = [
            (NO_ERROR, 'topic_1', [
                (NO_ERROR, 0, 1, [1, 2], [1, 2])
            ]),
            (NO_ERROR, 'topic_2', [
                (NO_LEADER, 0, -1, [], []),
                (NO_LEADER, 1, 1, [], []),
            ]),
            (NO_LEADER, 'topic_no_partitions', []),
            (UNKNOWN_TOPIC_OR_PARTITION, 'topic_unknown', []),
            (NO_ERROR, 'topic_3', [
                (NO_ERROR, 0, 0, [0, 1], [0, 1]),
                (NO_ERROR, 1, 1, [1, 0], [1, 0]),
                (NO_ERROR, 2, 0, [0, 1], [0, 1])
            ]),
            (NO_ERROR, 'topic_4', [
                (NO_ERROR, 0, 0, [0, 1], [0, 1]),
                (REPLICA_NOT_AVAILABLE, 1, 1, [1, 0], [1, 0]),
            ])
        ]

        @asyncio.coroutine
        def send(request_id):
            return MetadataResponse(brokers, topics)

        mocked_conns = {0: mock.MagicMock()}
        mocked_conns[0].send.side_effect = send
        client = AIOKafkaClient(loop=self.loop,
                                bootstrap_servers=['broker_1:4567'])
        task = asyncio.ensure_future(client._md_synchronizer(), loop=self.loop)
        client._conns = mocked_conns
        client.cluster.update_metadata(MetadataResponse(brokers[:1], []))

        self.loop.run_until_complete(client.force_metadata_update())
        task.cancel()

        md = client.cluster
        c_brokers = md.brokers()
        self.assertEqual(len(c_brokers), 2)
        self.assertEqual(sorted(brokers), sorted(list(c_brokers)))
        c_topics = md.topics()
        self.assertEqual(len(c_topics), 4)
        self.assertEqual(md.partitions_for_topic('topic_1'), set([0]))
        self.assertEqual(md.partitions_for_topic('topic_2'), set([0, 1]))
        self.assertEqual(md.partitions_for_topic('topic_3'), set([0, 1, 2]))
        self.assertEqual(md.partitions_for_topic('topic_4'), set([0, 1]))
        self.assertEqual(
            md.available_partitions_for_topic('topic_2'), set([1]))

        mocked_conns[0].connected.return_value = False
        is_ready = self.loop.run_until_complete(client.ready(0))
        self.assertEqual(is_ready, False)
        is_ready = self.loop.run_until_complete(client.ready(1))
        self.assertEqual(is_ready, False)
        self.assertEqual(mocked_conns, {})

        with self.assertRaises(NodeNotReadyError):
            self.loop.run_until_complete(client.send(0, None))
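
The broker and topic fixtures above follow the v0 MetadataResponse layout; the comments below summarize the field order the assertions rely on and are illustrative only.

# brokers: (node_id, host, port)
# topics:  (topic_error_code, topic_name,
#           [(partition_error_code, partition_id, leader, replicas, isr), ...])
#
# Partition 0 of 'topic_2' has no leader (leader == -1), which is why only
# partition 1 appears in available_partitions_for_topic('topic_2').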
예제 #25
0
class AIOKafkaProducer(object):
    """A Kafka client that publishes records to the Kafka cluster.

    The producer consists of a pool of buffer space that holds records that
    haven't yet been transmitted to the server as well as a background task
    that is responsible for turning these records into requests and
    transmitting them to the cluster.

    The send() method is asynchronous. When called it adds the record to a
    buffer of pending record sends and immediately returns. This allows the
    producer to batch together individual records for efficiency.

    The 'acks' config controls the criteria under which requests are considered
    complete. The "all" setting will result in waiting for all replicas to
    respond, the slowest but most durable setting.

    The key_serializer and value_serializer instruct how to turn the key and
    value objects the user provides into bytes.

    Arguments:
        bootstrap_servers: 'host[:port]' string (or list of 'host[:port]'
            strings) that the producer should contact to bootstrap initial
            cluster metadata. This does not have to be the full node list.
            It just needs to have at least one broker that will respond to a
            Metadata API Request. Default port is 9092. If no servers are
            specified, will default to localhost:9092.
        client_id (str): a name for this client. This string is passed in
            each request to servers and can be used to identify specific
            server-side log entries that correspond to this client.
            Default: 'aiokafka-producer-#' (appended with a unique number
            per instance)
        key_serializer (callable): used to convert user-supplied keys to bytes
            If not None, called as f(key), should return bytes. Default: None.
        value_serializer (callable): used to convert user-supplied message
            values to bytes. If not None, called as f(value), should return
            bytes. Default: None.
        acks (0, 1, 'all'): The number of acknowledgments the producer requires
            the leader to have received before considering a request complete.
            This controls the durability of records that are sent. The
            following settings are common:

            0: Producer will not wait for any acknowledgment from the server
                at all. The message will immediately be added to the socket
                buffer and considered sent. No guarantee can be made that the
                server has received the record in this case, and the retries
                configuration will not take effect (as the client won't
                generally know of any failures). The offset given back for each
                record will always be set to -1.
            1: The broker leader will write the record to its local log but
                will respond without awaiting full acknowledgement from all
                followers. In this case should the leader fail immediately
                after acknowledging the record but before the followers have
                replicated it then the record will be lost.
            all: The broker leader will wait for the full set of in-sync
                replicas to acknowledge the record. This guarantees that the
                record will not be lost as long as at least one in-sync replica
                remains alive. This is the strongest available guarantee.

            If unset, defaults to acks=1.
        compression_type (str): The compression type for all data generated by
            the producer. Valid values are 'gzip', 'snappy', 'lz4', or None.
            Compression is of full batches of data, so the efficacy of batching
            will also impact the compression ratio (more batching means better
            compression). Default: None.
        max_batch_size (int): Maximum size of buffered data per partition.
            Once this amount is reached, the `send` coroutine will block until
            the batch is drained.
            Default: 16384
        linger_ms (int): The producer groups together any records that arrive
            in between request transmissions into a single batched request.
            Normally this occurs only under load when records arrive faster
            than they can be sent out. However in some circumstances the client
            may want to reduce the number of requests even under moderate load.
            This setting accomplishes this by adding a small amount of
            artificial delay; that is, if the first request is processed
            faster than `linger_ms`, the producer will wait out the remaining
            `linger_ms - process_time`.
            This setting defaults to 0 (i.e. no delay).
        partitioner (callable): Callable used to determine which partition
            each message is assigned to. Called (after key serialization):
            partitioner(key_bytes, all_partitions, available_partitions).
            The default partitioner implementation hashes each non-None key
            using the same murmur2 algorithm as the Java client so that
            messages with the same key are assigned to the same partition.
            When a key is None, the message is delivered to a random partition
            (filtered to partitions with available leaders only, if possible).
        max_request_size (int): The maximum size of a request. This is also
            effectively a cap on the maximum record size. Note that the server
            has its own cap on record size which may be different from this.
            This setting will limit the number of record batches the producer
            will send in a single request to avoid sending huge requests.
            Default: 1048576.
        metadata_max_age_ms (int): The period of time in milliseconds after
            which we force a refresh of metadata even if we haven't seen any
            partition leadership changes to proactively discover any new
            brokers or partitions. Default: 300000
        request_timeout_ms (int): Produce request timeout in milliseconds.
            As it's sent as part of ProduceRequest (it's a blocking call),
            maximum waiting time can be up to 2 * request_timeout_ms.
            Default: 40000.
        retry_backoff_ms (int): Milliseconds to backoff when retrying on
            errors. Default: 100.
        api_version (str): specify which kafka API version to use.
            If set to 'auto', will attempt to infer the broker version by
            probing various APIs. Default: auto
        security_protocol (str): Protocol used to communicate with brokers.
            Valid values are: PLAINTEXT, SSL. Default: PLAINTEXT.
        ssl_context (ssl.SSLContext): pre-configured SSLContext for wrapping
            socket connections. Directly passed into asyncio's
            `create_connection`_. For more information see :ref:`ssl_auth`.
            Default: None.

    Note:
        Many configuration parameters are taken from the Java client:
        https://kafka.apache.org/documentation.html#producerconfigs
    """
    _PRODUCER_CLIENT_ID_SEQUENCE = 0

    _COMPRESSORS = {
        'gzip': (has_gzip, LegacyRecordBatchBuilder.CODEC_GZIP),
        'snappy': (has_snappy, LegacyRecordBatchBuilder.CODEC_SNAPPY),
        'lz4': (has_lz4, LegacyRecordBatchBuilder.CODEC_LZ4),
    }

    def __init__(self,
                 *,
                 loop,
                 bootstrap_servers='localhost',
                 client_id=None,
                 metadata_max_age_ms=300000,
                 request_timeout_ms=40000,
                 api_version='auto',
                 acks=1,
                 key_serializer=None,
                 value_serializer=None,
                 compression_type=None,
                 max_batch_size=16384,
                 partitioner=DefaultPartitioner(),
                 max_request_size=1048576,
                 linger_ms=0,
                 send_backoff_ms=100,
                 retry_backoff_ms=100,
                 security_protocol="PLAINTEXT",
                 ssl_context=None):
        if acks not in (0, 1, -1, 'all'):
            raise ValueError("Invalid ACKS parameter")
        if compression_type not in ('gzip', 'snappy', 'lz4', None):
            raise ValueError("Invalid compression type!")
        if compression_type:
            checker, compression_attrs = self._COMPRESSORS[compression_type]
            if not checker():
                raise RuntimeError(
                    "Compression library for {} not found".format(
                        compression_type))
        else:
            compression_attrs = 0

        if api_version not in ('auto', '0.10', '0.9', '0.8.2', '0.8.1',
                               '0.8.0'):
            raise ValueError("Unsupported Kafka version")

        self._PRODUCER_CLIENT_ID_SEQUENCE += 1
        if client_id is None:
            client_id = 'aiokafka-producer-%s' % \
                self._PRODUCER_CLIENT_ID_SEQUENCE

        if acks == 'all':
            acks = -1
        self._acks = acks
        self._key_serializer = key_serializer
        self._value_serializer = value_serializer
        self._compression_type = compression_type
        self._partitioner = partitioner
        self._max_request_size = max_request_size
        self._request_timeout_ms = request_timeout_ms

        self.client = AIOKafkaClient(loop=loop,
                                     bootstrap_servers=bootstrap_servers,
                                     client_id=client_id,
                                     metadata_max_age_ms=metadata_max_age_ms,
                                     request_timeout_ms=request_timeout_ms,
                                     retry_backoff_ms=retry_backoff_ms,
                                     api_version=api_version,
                                     security_protocol=security_protocol,
                                     ssl_context=ssl_context)
        self._metadata = self.client.cluster
        self._message_accumulator = MessageAccumulator(
            self._metadata, max_batch_size, compression_attrs,
            self._request_timeout_ms / 1000, loop)
        self._sender_task = None
        self._in_flight = set()
        self._closed = False
        self._loop = loop
        self._retry_backoff = retry_backoff_ms / 1000
        self._linger_time = linger_ms / 1000
        self._producer_magic = 0

    @asyncio.coroutine
    def start(self):
        """Connect to Kafka cluster and check server version"""
        log.debug("Starting the Kafka producer")  # trace
        yield from self.client.bootstrap()

        if self._compression_type == 'lz4':
            assert self.client.api_version >= (0, 8, 2), \
                'LZ4 Requires >= Kafka 0.8.2 Brokers'

        self._sender_task = ensure_future(self._sender_routine(),
                                          loop=self._loop)
        self._message_accumulator.set_api_version(self.client.api_version)
        self._producer_magic = 0 if self.client.api_version < (0, 10) else 1
        log.debug("Kafka producer started")

    @asyncio.coroutine
    def flush(self):
        """Wait until all batches are delivered and futures resolved"""
        yield from self._message_accumulator.flush()

    @asyncio.coroutine
    def stop(self):
        """Flush all pending data and close all connections to kafka cluster"""
        if self._closed:
            return
        self._closed = True

        yield from self._message_accumulator.close()

        if self._sender_task:
            self._sender_task.cancel()
            yield from self._sender_task

        yield from self.client.close()
        log.debug("The Kafka producer has closed.")

    @asyncio.coroutine
    def partitions_for(self, topic):
        """Returns set of all known partitions for the topic."""
        return (yield from self.client._wait_on_metadata(topic))

    @asyncio.coroutine
    def send(self,
             topic,
             value=None,
             key=None,
             partition=None,
             timestamp_ms=None):
        """Publish a message to a topic.

        Arguments:
            topic (str): topic where the message will be published
            value (optional): message value. Must be type bytes, or be
                serializable to bytes via configured value_serializer. If value
                is None, key is required and message acts as a 'delete'.
                See kafka compaction documentation for more details:
                http://kafka.apache.org/documentation.html#compaction
                (compaction requires kafka >= 0.8.1)
            partition (int, optional): optionally specify a partition. If not
                set, the partition will be selected using the configured
                'partitioner'.
            key (optional): a key to associate with the message. Can be used to
                determine which partition to send the message to. If partition
                is None (and producer's partitioner config is left as default),
                then messages with the same key will be delivered to the same
                partition (but if key is None, partition is chosen randomly).
                Must be type bytes, or be serializable to bytes via configured
                key_serializer.
            timestamp_ms (int, optional): epoch milliseconds (from Jan 1 1970
                UTC) to use as the message timestamp. Defaults to current time.

        Returns:
            asyncio.Future: object that will be set when message is
            processed

        Raises:
            kafka.KafkaTimeoutError: if we can't schedule this record (
                pending buffer is full) in up to `request_timeout_ms`
                milliseconds.

        Note:
            The returned future will wait based on `request_timeout_ms`
            setting. Cancelling the returned future **will not** stop the event
            from being sent, but cancelling the ``send`` coroutine itself
            **will**.
        """
        assert value is not None or self.client.api_version >= (0, 8, 1), (
            'Null messages require kafka >= 0.8.1')
        assert not (value is None and key is None), \
            'Need at least one: key or value'

        # first make sure the metadata for the topic is available
        yield from self.client._wait_on_metadata(topic)

        key_bytes, value_bytes = self._serialize(topic, key, value)
        partition = self._partition(topic, partition, key, value, key_bytes,
                                    value_bytes)

        tp = TopicPartition(topic, partition)
        log.debug("Sending (key=%s value=%s) to %s", key, value, tp)

        fut = yield from self._message_accumulator.add_message(
            tp,
            key_bytes,
            value_bytes,
            self._request_timeout_ms / 1000,
            timestamp_ms=timestamp_ms)
        return fut

    @asyncio.coroutine
    def send_and_wait(self,
                      topic,
                      value=None,
                      key=None,
                      partition=None,
                      timestamp_ms=None):
        """Publish a message to a topic and wait the result"""
        future = yield from self.send(topic, value, key, partition,
                                      timestamp_ms)
        return (yield from future)

    @asyncio.coroutine
    def _sender_routine(self):
        """ Background task that sends pending batches to the leader nodes of
        each batch's partition. This encapsulates the same logic as Java's
        `Sender` background thread, but because we use asyncio it is an
        event-based loop rather than one that counts down the timeout to the
        next possible event as in Java.

            The procedure:
            * Group pending batches by partition leaders (write nodes)
            * Ignore nodes that are not ready (disconnected) or that already
              have a pending request.
            * If we have unknown leaders for partitions, we request a metadata
              update.
            * Wait for any event, that can change the above procedure, like
              new metadata or pending send is finished and a new one can be
              done.
        """
        tasks = set()
        try:
            while True:
                batches, unknown_leaders_exist = \
                    self._message_accumulator.drain_by_nodes(
                        ignore_nodes=self._in_flight)

                # create produce task for every batch
                for node_id, batches in batches.items():
                    task = ensure_future(self._send_produce_req(
                        node_id, batches),
                                         loop=self._loop)
                    self._in_flight.add(node_id)
                    tasks.add(task)

                if unknown_leaders_exist:
                    # we have at least one unknown partition's leader,
                    # try to update cluster metadata and wait backoff time
                    fut = self.client.force_metadata_update()
                    waiters = tasks.union([fut])
                else:
                    fut = self._message_accumulator.data_waiter()
                    waiters = tasks.union([fut])

                # wait when:
                # * At least one of produce task is finished
                # * Data for new partition arrived
                # * Metadata update if partition leader unknown
                done, _ = yield from asyncio.wait(
                    waiters,
                    return_when=asyncio.FIRST_COMPLETED,
                    loop=self._loop)

                # done tasks should never produce errors, if they are it's a
                # bug
                for task in done:
                    task.result()

                tasks -= done

        except asyncio.CancelledError:
            # done tasks should never produce errors, if they are it's a bug
            for task in tasks:
                yield from task
        except Exception:  # pragma: no cover
            log.error("Unexpected error in sender routine", exc_info=True)

    @asyncio.coroutine
    def _send_produce_req(self, node_id, batches):
        """ Create a produce request for the given node.
        If the producer is configured with `retries` > 0 and the produce
        response contains "failed" partitions, the produce request for those
        partitions is resent to the broker up to `retries` times, waiting
        `retry_timeout_ms` between attempts.

        Arguments:
            node_id (int): kafka broker identifier
            batches (dict): dictionary of {TopicPartition: MessageBatch}
        """
        t0 = self._loop.time()

        topics = collections.defaultdict(list)
        for tp, batch in batches.items():
            topics[tp.topic].append((tp.partition, batch.get_data_buffer()))

        if self.client.api_version >= (0, 10):
            version = 2
        elif self.client.api_version == (0, 9):
            version = 1
        else:
            version = 0

        request = ProduceRequest[version](required_acks=self._acks,
                                          timeout=self._request_timeout_ms,
                                          topics=list(topics.items()))

        reenqueue = []
        try:
            response = yield from self.client.send(node_id, request)
        except KafkaError as err:
            log.warning("Got error produce response: %s", err)
            if getattr(err, "invalid_metadata", False):
                self.client.force_metadata_update()

            for batch in batches.values():
                if not self._can_retry(err, batch):
                    batch.failure(exception=err)
                else:
                    reenqueue.append(batch)
        else:
            # noacks, just mark batches as "done"
            if request.required_acks == 0:
                for batch in batches.values():
                    batch.done_noack()
            else:
                for topic, partitions in response.topics:
                    for partition_info in partitions:
                        if response.API_VERSION < 2:
                            partition, error_code, offset = partition_info
                            # Mimic CREATE_TIME to take user provided timestamp
                            timestamp = -1
                        else:
                            partition, error_code, offset, timestamp = \
                                partition_info
                        tp = TopicPartition(topic, partition)
                        error = Errors.for_code(error_code)
                        batch = batches.pop(tp, None)
                        if batch is None:
                            continue

                        if error is Errors.NoError:
                            batch.done(offset, timestamp)
                        elif not self._can_retry(error(), batch):
                            batch.failure(exception=error())
                        else:
                            log.warning(
                                "Got error produce response on topic-partition"
                                " %s, retrying. Error: %s", tp, error)
                            # Ok, we can retry this batch
                            if getattr(error, "invalid_metadata", False):
                                self.client.force_metadata_update()
                            reenqueue.append(batch)

        if reenqueue:
            # Wait the backoff time before re-enqueueing
            yield from asyncio.sleep(self._retry_backoff, loop=self._loop)

            for batch in reenqueue:
                self._message_accumulator.reenqueue(batch)
            # If some error triggered a metadata refresh, wait for it to
            # complete before trying again
            yield from self.client._maybe_wait_metadata()

        # if the batches for this node were processed in less than linger
        # seconds, wait out the remaining time
        sleep_time = self._linger_time - (self._loop.time() - t0)
        if sleep_time > 0:
            yield from asyncio.sleep(sleep_time, loop=self._loop)

        self._in_flight.remove(node_id)

    def _can_retry(self, error, batch):
        if batch.expired():
            return False
        # XXX: remove unknown topic check as we fix
        #      https://github.com/dpkp/kafka-python/issues/1155
        if error.retriable or isinstance(error, UnknownTopicOrPartitionError)\
                or error is UnknownTopicOrPartitionError:
            return True
        return False

    def _serialize(self, topic, key, value):
        if self._key_serializer:
            serialized_key = self._key_serializer(key)
        else:
            serialized_key = key
        if self._value_serializer:
            serialized_value = self._value_serializer(value)
        else:
            serialized_value = value

        message_size = LegacyRecordBatchBuilder.record_overhead(
            self._producer_magic)
        if serialized_key is not None:
            message_size += len(serialized_key)
        if serialized_value is not None:
            message_size += len(serialized_value)
        if message_size > self._max_request_size:
            raise MessageSizeTooLargeError(
                "The message is %d bytes when serialized which is larger than"
                " the maximum request size you have configured with the"
                " max_request_size configuration" % message_size)

        return serialized_key, serialized_value

    def _partition(self, topic, partition, key, value, serialized_key,
                   serialized_value):
        if partition is not None:
            assert partition >= 0
            assert partition in self._metadata.partitions_for_topic(topic), \
                'Unrecognized partition'
            return partition

        all_partitions = list(self._metadata.partitions_for_topic(topic))
        available = list(self._metadata.available_partitions_for_topic(topic))
        return self._partitioner(serialized_key, all_partitions, available)

    def create_batch(self):
        """Create and return an empty BatchBuilder.

        The batch is not queued for send until submission to ``send_batch``.

        Returns:
            BatchBuilder: empty batch to be filled and submitted by the caller.
        """
        return self._message_accumulator.create_builder()

    @asyncio.coroutine
    def send_batch(self, batch, topic, *, partition):
        """Submit a BatchBuilder for publication.

        Arguments:
            batch (BatchBuilder): batch object to be published.
            topic (str): topic where the batch will be published.
            partition (int): partition where this batch will be published.

        Returns:
            asyncio.Future: object that will be set when the batch is
                delivered.
        """
        partition = self._partition(topic, partition, None, None, None, None)
        tp = TopicPartition(topic, partition)
        log.debug("Sending batch to %s", tp)
        future = yield from self._message_accumulator.add_batch(
            batch, tp, self._request_timeout_ms / 1000)
        return future
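
For reference, a minimal usage sketch of the batch API defined above, written in the same yield-from style as the rest of these examples. The broker address and topic name are placeholders, and the keyword-only ``append(timestamp=, key=, value=)`` signature is assumed from aiokafka's BatchBuilder:

import asyncio
from aiokafka import AIOKafkaProducer

@asyncio.coroutine
def produce_batch(loop):
    # Placeholder broker address; adjust to the test cluster in use
    producer = AIOKafkaProducer(
        loop=loop, bootstrap_servers='localhost:9092')
    yield from producer.start()
    try:
        batch = producer.create_batch()
        # append() is assumed to return None once the batch has no room left
        batch.append(timestamp=None, key=None, value=b"msg 1")
        batch.append(timestamp=None, key=None, value=b"msg 2")
        # send_batch() requires an explicit partition; the returned future
        # resolves to delivery metadata once the broker acknowledges the batch
        fut = yield from producer.send_batch(batch, 'test_topic', partition=0)
        record_meta = yield from fut
        return record_meta
    finally:
        yield from producer.stop()

loop = asyncio.get_event_loop()
loop.run_until_complete(produce_batch(loop))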
Example #26
    def test_proc_fetch_request(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=[])
        subscriptions = SubscriptionState('latest')
        fetcher = Fetcher(client, subscriptions, loop=self.loop)

        tp = TopicPartition('test', 0)
        # (partition, fetch offset, max_bytes)
        tp_info = (tp.topic, [(tp.partition, 155, 100000)])
        req = FetchRequest(
            -1,  # replica_id
            100,  # max_wait_ms
            100,  # min_bytes
            [tp_info])

        client.ready = mock.MagicMock()
        client.ready.side_effect = asyncio.coroutine(lambda a: True)
        client.force_metadata_update = mock.MagicMock()
        client.force_metadata_update.side_effect = asyncio.coroutine(
            lambda: False)
        client.send = mock.MagicMock()
        msg = Message(b"test msg")
        msg._encode_self()
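        # Mocked FetchResponse payload layout used below (per topic):
        # (topic, [(partition, error_code, highwatermark,
        #           [(offset, message_size, message)])])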
        client.send.side_effect = asyncio.coroutine(lambda n, r: FetchResponse(
            [('test', [(0, 0, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        state = TopicPartitionState()
        state.seek(0)
        subscriptions.assignment[tp] = state
        subscriptions.needs_partition_assignment = False
        fetcher._in_flight.add(0)
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        self.assertEqual(buf.getone(), None)  # invalid offset, msg is ignored

        state.seek(4)
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        buf = fetcher._records[tp]
        self.assertEqual(buf.getone().value, b"test msg")

        # error -> no partition found
        client.send.side_effect = asyncio.coroutine(lambda n, r: FetchResponse(
            [('test', [(0, 3, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        # error -> topic auth failed
        client.send.side_effect = asyncio.coroutine(lambda n, r: FetchResponse(
            [('test', [(0, 29, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(TopicAuthorizationFailedError):
            yield from fetcher.next_record([])

        # error -> unknown
        client.send.side_effect = asyncio.coroutine(lambda n, r: FetchResponse(
            [('test', [(0, -1, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)

        # error -> offset out of range
        client.send.side_effect = asyncio.coroutine(lambda n, r: FetchResponse(
            [('test', [(0, 1, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, False)
        self.assertEqual(state.is_fetchable(), False)

        state.seek(4)
        subscriptions._default_offset_reset_strategy = OffsetResetStrategy.NONE
        client.send.side_effect = asyncio.coroutine(lambda n, r: FetchResponse(
            [('test', [(0, 1, 9, [(4, 10, msg)])])]))
        fetcher._in_flight.add(0)
        fetcher._records.clear()
        needs_wake_up = yield from fetcher._proc_fetch_request(0, req)
        self.assertEqual(needs_wake_up, True)
        with self.assertRaises(OffsetOutOfRangeError):
            yield from fetcher.next_record([])

        yield from fetcher.close()
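
As a rough cross-check, the numeric error codes hard-coded into the mocked FetchResponse payloads above correspond to kafka-python exception classes; a minimal sketch, assuming kafka-python's kafka.errors.for_code helper is available:

from kafka.errors import (
    for_code, OffsetOutOfRangeError, UnknownTopicOrPartitionError,
    TopicAuthorizationFailedError, UnknownError)

# Error codes used in the mocked fetch responses in the test above
assert for_code(3) is UnknownTopicOrPartitionError    # "no partition found" case
assert for_code(29) is TopicAuthorizationFailedError  # raised from next_record()
assert for_code(-1) is UnknownError                   # generic unknown error case
assert for_code(1) is OffsetOutOfRangeError           # offset reset or raise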