def metadata_updated():
    client = PythonLibrdkafka(self.redpanda)
    brokers = client.brokers()
    self.redpanda.logger.debug(f"brokers metadata: {brokers}")
    ports = [b.port for b in brokers.values()]
    ports.sort()
    return ports == [10091, 10092, 10093]
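A predicate like this is meant to be polled rather than called once. A minimal usage sketch, assuming ducktape's wait_until helper; the timeout and backoff values are illustrative, not part of the example above:

from ducktape.utils.util import wait_until

# Poll the predicate until the advertised ports converge, or fail with a
# descriptive error after the timeout.
wait_until(metadata_updated,
           timeout_sec=30,
           backoff_sec=1,
           err_msg="Timed out waiting for advertised ports to be updated")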
Example #2
def registered(self, node):
    idx = self.idx(node)
    self.logger.debug(
        f"Checking if broker {idx} ({node.name}) is registered")
    client = PythonLibrdkafka(self)
    brokers = client.brokers()
    broker = brokers.get(idx, None)
    self.logger.debug(f"Found broker info: {broker}")
    return broker is not None
def metadata_updated():
    client = PythonLibrdkafka(self.redpanda)
    brokers = client.brokers()
    self.logger.debug(f"brokers metadata: {brokers}")
    if brokers[1].port != 10091:
        return False
    if brokers[2].port != 10092:
        return False
    if brokers[3].port != 10093:
        return False
    return True
Example #4
    def registered(self, node):
        """
        Check if a newly added node is fully registered with the cluster, such
        that a kafka metadata request to any node in the cluster will include it.

        We first check the admin API to do a kafka-independent check, and then verify
        that kafka clients see the same thing.
        """
        idx = self.idx(node)
        self.logger.debug(
            f"registered: checking if broker {idx} ({node.name} is registered..."
        )

        # Query all nodes' admin APIs, so that we don't advance during setup until
        # the node is stored in raft0 AND has been replayed on all nodes.  Otherwise
        # a kafka metadata request to the last node to join could return incomplete
        # metadata and cause strange issues within a test.
        admin = Admin(self)
        for peer in self._started:
            try:
                admin_brokers = admin.get_brokers(node=peer)
            except requests.exceptions.RequestException as e:
                # We run during startup, when admin API may not even be listening yet: tolerate
                # API errors but presume that if some APIs are not up yet, then node registration
                # is also not complete.
                self.logger.debug(
                    f"registered: peer {peer.name} admin API not yet available ({e})"
                )
                return False

            found = idx in [b['node_id'] for b in admin_brokers]
            if not found:
                self.logger.info(
                    f"registered: node {node.name} not yet found in peer {peer.name}'s broker list ({admin_brokers})"
                )
                return False
            else:
                self.logger.debug(
                    f"registered: node {node.name} now visible in peer {peer.name}'s broker list ({admin_brokers})"
                )

        client = PythonLibrdkafka(self)
        brokers = client.brokers()
        broker = brokers.get(idx, None)
        if broker is None:
            # This should never happen, because we already checked via the admin API
            # that the node of interest had become visible to all peers.
            self.logger.error(
                f"registered: node {node.name} not found in kafka metadata!")
            assert broker is not None

        self.logger.debug(f"registered: found broker info: {broker}")
        return True
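In a test, this check is normally combined with a polling helper so the test blocks until registration has propagated to every peer. A hedged sketch of such a call site, assuming ducktape's wait_until and a start_node step on the service; names outside the method above are illustrative:

from ducktape.utils.util import wait_until

# Hypothetical call site: start a new node, then block until every peer
# reports it, per the registered() check above.
self.redpanda.start_node(node)
wait_until(lambda: self.redpanda.registered(node),
           timeout_sec=60,
           backoff_sec=2,
           err_msg=f"Node {node.name} did not register with the cluster")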
Example #5
def brokers(self):
    """
    Note for implementers: this method is expected to return the set of
    brokers reported by the cluster, rather than the nodes allocated for use
    in the self._redpanda service.
    """
    client = PythonLibrdkafka(self._redpanda)
    brokers = client.brokers()
    return {
        b.id: BrokerDescription(id=b.id, host=b.host, port=b.port)
        for b in brokers.values()
    }
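The method only relies on BrokerDescription carrying an id, host, and port. Its real definition is not shown here; a minimal sketch of a compatible shape, assuming a simple immutable record:

from dataclasses import dataclass

@dataclass(frozen=True)
class BrokerDescription:
    # Fields inferred from the construction above; the actual class may
    # carry additional metadata.
    id: int
    host: str
    port: int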
Example #6
    def test_enable_sasl_live(self):
        """
        Verify that when enable_sasl is set to true at runtime, subsequent
        unauthenticated kafka clients are rejected.
        """

        unauthenticated_client = PythonLibrdkafka(self.redpanda)
        topic = TopicSpec(replication_factor=1)
        unauthenticated_client.create_topic(topic)
        assert len(unauthenticated_client.topics()) == 1

        # Switch on authentication
        admin = Admin(self.redpanda)
        admin.patch_cluster_config(upsert={'enable_sasl': True})

        # An unauthenticated client should be rejected
        try:
            unauthenticated_client.topics()
        except Exception as e:
            self.logger.exception(f"Unauthenticated: {e}")
        else:
            self.logger.error(
                "Unauthenticated client should have been rejected")
            assert False

        # Switch off authentication
        admin.patch_cluster_config(upsert={'enable_sasl': False})

        # An unauthenticated client should be accepted again
        assert len(unauthenticated_client.topics()) == 1
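The inverse case is that an authenticated client keeps working while enable_sasl is on. A sketch of that check, reusing the superuser-credential construction shown in Example #8 below; the exact assertion is illustrative:

# While enable_sasl is true, a client presenting superuser SASL
# credentials should still be accepted.
username, password, algorithm = self.redpanda.SUPERUSER_CREDENTIALS
authenticated_client = PythonLibrdkafka(self.redpanda,
                                        username=username,
                                        password=password,
                                        algorithm=algorithm)
assert len(authenticated_client.topics()) == 1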
    def test_custom_assignment_validation(self):
        client = PythonLibrdkafka(self.redpanda).get_client()

        def expect_failed_create_topic(name, custom_assignment,
                                       expected_error):
            topics = [
                NewTopic(name,
                         num_partitions=len(custom_assignment),
                         replica_assignment=custom_assignment)
            ]
            res = client.create_topics(topics, request_timeout=10)
            assert len(res) == 1
            fut = res[name]
            try:
                fut.result()
                assert False
            except KafkaException as e:
                kafka_error = e.args[0]
                self.redpanda.logger.debug(
                    f"topic {name} creation failed: {kafka_error}, expected error: {expected_error}"
                )
                assert kafka_error.code() == expected_error

        # not unique replicas
        expect_failed_create_topic("invalid-1", [[1, 1, 2]],
                                   KafkaError.INVALID_REQUEST)
        # not existing broker
        expect_failed_create_topic("invalid-1", [[1, 10, 2]],
                                   KafkaError.BROKER_NOT_AVAILABLE)

        # different replication factors
        expect_failed_create_topic("invalid-1", [[1, 2, 3], [4]],
                                   KafkaError.INVALID_REPLICATION_FACTOR)
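For contrast, a well-formed assignment should be accepted. A minimal sketch of the happy path with the same client, assuming three live brokers with ids 1 through 3; the topic name is illustrative. With confluent_kafka futures, result() raises KafkaException on failure, so reaching the line after it means the creation succeeded:

topics = [
    NewTopic("valid-1",
             num_partitions=2,
             replica_assignment=[[1, 2, 3], [2, 3, 1]])
]
res = client.create_topics(topics, request_timeout=10)
# result() raises on failure; a clean return means the custom
# assignment was accepted.
res["valid-1"].result()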
Example #8
def make_superuser_client(self, password_override=None):
    username, password, algorithm = self.redpanda.SUPERUSER_CREDENTIALS
    password = password_override or password
    return PythonLibrdkafka(self.redpanda,
                            username=username,
                            password=password,
                            algorithm=algorithm)

def broker_configuration_updated():
    client = PythonLibrdkafka(self.redpanda)
    brokers = client.brokers()
    self.logger.debug(f"brokers metadata: {brokers}")
    return brokers[2].port == 10091
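A sketch of how the first helper might be used in a test body, assuming the SASL setup from Example #6; the surrounding test context is illustrative. The topics() call is the same one used by the unauthenticated client earlier:

# Hypothetical usage: once enable_sasl is switched on, a client built
# with superuser credentials can still list topics.
superuser_client = self.make_superuser_client()
topics = superuser_client.topics()
self.logger.debug(f"visible topics: {topics}")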