Example #1
    async def test_send__KafkaError(self, producer, _producer):
        # A KafkaError raised by the underlying client must surface to the
        # caller as ProducerSendError.
        _producer.send.coro.side_effect = KafkaError()
        with pytest.raises(ProducerSendError):
            await producer.send(
                'topic', 'k', 'v', 3, None, None,
                transactional_id='tid',
            )
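
For context, here is a minimal, self-contained sketch of the same mocking pattern using only the standard library: an async send is forced to raise so the caller's error handling can be exercised. FakeProducer and the error message are purely illustrative; the test above relies on the project's own fixtures instead.

import asyncio
from unittest import mock


class FakeProducer:
    # Hypothetical stand-in for the real producer client, not a project API.
    async def send(self, topic, value):
        return 'ok'


async def demo():
    producer = FakeProducer()
    # AsyncMock (Python 3.8+) lets an awaited call raise via side_effect,
    # much like the `.coro.side_effect` assignment in the test above.
    producer.send = mock.AsyncMock(side_effect=RuntimeError('broker down'))
    try:
        await producer.send('topic', b'v')
    except RuntimeError as exc:
        print('raised:', exc)


asyncio.run(demo())
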
Example #2
    @asyncio.coroutine
    def fetch_all_metadata(self):
        # Build a fresh, full cluster metadata snapshot; fail if the update
        # could not be performed against any known broker.
        cluster_md = ClusterMetadata(
            metadata_max_age_ms=self._metadata_max_age_ms)
        updated = yield from self._metadata_update(cluster_md, [])
        if not updated:
            raise KafkaError(
                'Unable to get cluster metadata over all known brokers')
        return cluster_md
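
A rough usage sketch for the method above, assuming a broker reachable at localhost:9092 and a recent aiokafka release where fetch_all_metadata is a native coroutine (older releases use the yield from spelling shown here):

import asyncio
from aiokafka.client import AIOKafkaClient


async def main():
    client = AIOKafkaClient(bootstrap_servers='localhost:9092')
    await client.bootstrap()              # connect to the cluster
    metadata = await client.fetch_all_metadata()
    print(sorted(metadata.topics()))      # topic names known to the cluster
    await client.close()


asyncio.run(main())
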
Example #3
    @run_until_complete  # test-suite helper that runs this coroutine on self.loop
    def test_failed_sync_group(self):
        client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts)
        subscription = SubscriptionState('latest')
        subscription.subscribe(topics=('topic1', ))
        coordinator = GroupCoordinator(client,
                                       subscription,
                                       loop=self.loop,
                                       heartbeat_interval_ms=20000)

        @asyncio.coroutine
        def do_sync_group():
            rebalance = CoordinatorGroupRebalance(
                coordinator,
                coordinator.group_id,
                coordinator.coordinator_id,
                subscription.subscription,
                coordinator._assignors,
                coordinator._session_timeout_ms,
                coordinator._retry_backoff_ms,
                loop=self.loop)
            yield from rebalance._on_join_follower()

        with self.assertRaises(GroupCoordinatorNotAvailableError):
            yield from do_sync_group()

        mocked = mock.MagicMock()
        coordinator._client = mocked

        coordinator.member_id = 'some_invalid_member_id'
        coordinator.coordinator_unknown = asyncio.coroutine(lambda: False)
        mocked.send.side_effect = Errors.UnknownMemberIdError()
        with self.assertRaises(Errors.UnknownMemberIdError):
            yield from do_sync_group()
        self.assertEqual(coordinator.member_id,
                         JoinGroupRequest.UNKNOWN_MEMBER_ID)

        mocked.send.side_effect = Errors.NotCoordinatorForGroupError()
        coordinator.coordinator_id = 'some_id'
        with self.assertRaises(Errors.NotCoordinatorForGroupError):
            yield from do_sync_group()
        self.assertEqual(coordinator.coordinator_id, None)

        mocked.send.side_effect = KafkaError()
        with self.assertRaises(KafkaError):
            yield from do_sync_group()

        # The client sends a LeaveGroupRequest to the group coordinator if
        # generation > 0 (i.e. the client is a member of the group). No
        # exception is expected here: errors are ignored in the close() method.
        coordinator.generation = 33
        yield from coordinator.close()
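
The test above is written in the legacy generator-based coroutine style. As a reference, a small sketch of how the @asyncio.coroutine / yield from spelling maps onto a native coroutine; the decorator was removed in Python 3.11, so the legacy half only runs on 3.10 and earlier, and both function names are invented for illustration:

import asyncio


@asyncio.coroutine            # legacy style, as in the test above (<= 3.10)
def legacy_step():
    yield from asyncio.sleep(0)
    return 'joined'


async def native_step():      # equivalent native coroutine
    await asyncio.sleep(0)
    return 'joined'


loop = asyncio.new_event_loop()
print(loop.run_until_complete(legacy_step()))
print(loop.run_until_complete(native_step()))
loop.close()
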
Example #4
    async def _find_coordinator(self, coordinator_type, coordinator_key):
        assert self._txn_manager is not None
        if coordinator_type in self._coordinators:
            return self._coordinators[coordinator_type]
        while True:
            try:
                coordinator_id = await self.client.coordinator_lookup(
                    coordinator_type, coordinator_key)
            except Errors.TransactionalIdAuthorizationFailed:
                err = Errors.TransactionalIdAuthorizationFailed(
                    self._txn_manager.transactional_id)
                raise err
            except Errors.GroupAuthorizationFailedError:
                err = Errors.GroupAuthorizationFailedError(coordinator_key)
                raise err
            except Errors.CoordinatorNotAvailableError:
                await self.client.force_metadata_update()
                await asyncio.sleep(self._retry_backoff)
                continue
            except Errors.KafkaError as err:
                log.error("FindCoordinator Request failed: %s", err)
                raise KafkaError(repr(err))

            # Try to connect to confirm that the connection can be
            # established.
            ready = await self.client.ready(
                coordinator_id, group=ConnectionGroup.COORDINATION)
            if not ready:
                await asyncio.sleep(self._retry_backoff)
                continue

            self._coordinators[coordinator_type] = coordinator_id

            if coordinator_type == CoordinationType.GROUP:
                log.info(
                    "Discovered coordinator %s for group id %s",
                    coordinator_id,
                    coordinator_key
                )
            else:
                log.info(
                    "Discovered coordinator %s for transactional id %s",
                    coordinator_id,
                    coordinator_key
                )
            return coordinator_id
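
A stripped-down sketch of the retry-with-backoff loop this method is built around; find_with_retry, flaky_lookup and RETRY_BACKOFF are hypothetical stand-ins, not aiokafka APIs:

import asyncio

RETRY_BACKOFF = 0.1  # seconds between attempts (illustrative value)


async def find_with_retry(lookup, attempts=5):
    # Retry transient failures with a fixed backoff, the way the method above
    # handles CoordinatorNotAvailableError; give up after `attempts` tries.
    for _ in range(attempts):
        try:
            return await lookup()
        except ConnectionError:
            await asyncio.sleep(RETRY_BACKOFF)
    raise RuntimeError('coordinator lookup did not succeed')


async def demo():
    calls = {'n': 0}

    async def flaky_lookup():
        calls['n'] += 1
        if calls['n'] < 3:
            raise ConnectionError('not available yet')
        return 42  # pretend this is the discovered node id

    print(await find_with_retry(flaky_lookup))


asyncio.run(demo())
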
Example #5
    async def _sender_routine(self):
        """ Background task, that sends pending batches to leader nodes for
        batch's partition. This incapsulates same logic as Java's `Sender`
        background thread. Because we use asyncio this is more event based
        loop, rather than counting timeout till next possible even like in
        Java.
        """

        tasks = set()
        txn_task = None  # Track a single task for transaction interactions
        try:
            while True:
                # If idempotence or transactions are turned on, we need a
                # valid PID to send any request below
                await self._maybe_wait_for_pid()

                waiters = set()
                # As transaction coordination is done via a single, separate
                # socket we do not need to pump it to several nodes, as we do
                # with produce requests.
                # We will only have 1 task at a time and will try to spawn
                # another once that is done.
                txn_manager = self._txn_manager
                muted_partitions = self._muted_partitions
                if txn_manager is not None and \
                        txn_manager.transactional_id is not None:
                    if txn_task is None or txn_task.done():
                        txn_task = self._maybe_do_transactional_request()
                        if txn_task is not None:
                            tasks.add(txn_task)
                        else:
                            # Waiters will not be awaited on exit, tasks will
                            waiters.add(txn_manager.make_task_waiter())
                    # We can't have a race condition between
                    # AddPartitionsToTxnRequest and a ProduceRequest, so we
                    # mute the partition until added.
                    muted_partitions = (muted_partitions
                                        | txn_manager.partitions_to_add())
                batches, unknown_leaders_exist = \
                    self._message_accumulator.drain_by_nodes(
                        ignore_nodes=self._in_flight,
                        muted_partitions=muted_partitions)

                # create a produce task for every node that has drained
                # batches
                for node_id, node_batches in batches.items():
                    task = create_task(self._send_produce_req(
                        node_id, node_batches))
                    self._in_flight.add(node_id)
                    for tp in node_batches:
                        self._muted_partitions.add(tp)
                    tasks.add(task)

                if unknown_leaders_exist:
                    # we have at least one unknown partition's leader,
                    # try to update cluster metadata and wait backoff time
                    fut = self.client.force_metadata_update()
                    waiters |= tasks.union([fut])
                else:
                    fut = self._message_accumulator.data_waiter()
                    waiters |= tasks.union([fut])

                # wait when:
                # * At least one of produce task is finished
                # * Data for new partition arrived
                # * Metadata update if partition leader unknown
                done, _ = await asyncio.wait(
                    waiters, return_when=asyncio.FIRST_COMPLETED)

                # done tasks should never produce errors; if they do, it's a
                # bug
                for task in done:
                    task.result()

                tasks -= done

        except asyncio.CancelledError:
            # On cancellation, wait for the outstanding produce tasks to
            # finish; they should never raise (if one does, it's a bug)
            for task in tasks:
                await task
        except (ProducerFenced, OutOfOrderSequenceNumber,
                TransactionalIdAuthorizationFailed):
            raise
        except Exception:  # pragma: no cover
            log.error("Unexpected error in sender routine", exc_info=True)
            raise KafkaError("Unexpected error during batch delivery")
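
For reference, a self-contained sketch of the asyncio.wait(..., return_when=FIRST_COMPLETED) pattern the routine relies on; the job coroutine and the delays are invented for illustration:

import asyncio


async def job(delay):
    await asyncio.sleep(delay)
    return delay


async def demo():
    pending = {asyncio.ensure_future(job(d)) for d in (0.05, 0.2, 0.4)}
    while pending:
        # Wake up as soon as any task finishes and keep waiting on the rest,
        # the same wakeup strategy _sender_routine uses for produce tasks.
        done, pending = await asyncio.wait(
            pending, return_when=asyncio.FIRST_COMPLETED)
        for task in done:
            print('finished after', task.result(), 'seconds')


asyncio.run(demo())
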