Example #1
 def _get_partition_leaders(self):
     kcat = KafkaCat(self.redpanda)
     m = kcat.metadata()
     self.logger.info(f"kcat.metadata() == {m}")
     brokers = {}
     for b in m['brokers']:
         id = b['id']
         # kcat reports each broker as "host:port"; keep only the host so it
         # can be matched against the ducktape node hostnames below.
         ip = b['name']
         ip = ip[:ip.index(':')]
         for n in self.redpanda.nodes:
             n_ip = n.account.hostname
             self.logger.debug(f"matching {n_ip} over {ip}")
             if n_ip == ip:
                 brokers[id] = n
                 break
     self.logger.debug(f"found brokers {brokers}")
     # every broker of the three-node cluster should appear in the metadata
     assert len(brokers) == 3
     leaders = {}
     for topic in m['topics']:
         if topic['topic'] == ArchivalTest.s3_topic_name:
             for part in topic['partitions']:
                 leader_id = part['leader']
                 partition_id = part['partition']
                 leader = brokers[leader_id]
                 leaders[partition_id] = leader
     return leaders
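
For orientation, the kcat.metadata() dictionary these examples index into has roughly the shape sketched below. The field names are taken from the accesses in the examples themselves; the values (broker ids, addresses, topic name) are illustrative only.

    # Rough shape of KafkaCat.metadata() output, inferred from the fields the
    # surrounding examples access; all values here are made up.
    metadata = {
        "controllerid": 1,
        "brokers": [
            {"id": 1, "name": "10.0.0.1:9092"},
            {"id": 2, "name": "10.0.0.2:9092"},
            {"id": 3, "name": "10.0.0.3:9092"},
        ],
        "topics": [
            {
                "topic": "panda-topic",
                "partitions": [
                    {"partition": 0, "leader": 1,
                     "replicas": [{"id": 1}, {"id": 2}, {"id": 3}]},
                ],
            },
        ],
    }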
Example #2
 def controller(self):
     kc = KafkaCat(self)
     cid = kc.metadata()["controllerid"]
     self.logger.debug("Controller reported with id: {}".format(cid))
     # kcat reports -1 when no controller has been elected yet
     if cid != -1:
         node = self.get_node(cid)
         self.logger.debug("Controller node found: {}".format(node))
         return node
Example #3
 def registered(self, node):
     idx = self.idx(node)
     self.logger.debug("Checking if broker %d/%s is registered", idx, node)
     kc = KafkaCat(self)
     brokers = kc.metadata()["brokers"]
     brokers = {b["id"]: b for b in brokers}
     broker = brokers.get(idx, None)
     self.logger.debug("Found broker info: %s", broker)
     return broker is not None
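
A typical caller polls registered() after restarting a broker until it reappears in Kafka metadata. The sketch below is an assumed usage only: it relies on ducktape's wait_until helper (used in the later examples) and on a test class that holds the service as self.redpanda; timeouts are illustrative.

    # Assumed usage sketch: wait for a restarted node to re-register in
    # Kafka metadata. Names and timeouts are illustrative, not from the source.
    from ducktape.utils.util import wait_until

    wait_until(lambda: self.redpanda.registered(node),
               timeout_sec=30,
               backoff_sec=2,
               err_msg="Broker did not re-register in Kafka metadata")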
Example #4
 def _registered(self, service, node):
     idx = service.idx(node)
     service.logger.debug("Checking if broker %d/%s is registered", idx,
                          node)
     kc = KafkaCat(RedpandaMuServiceServiceProxy(service, self))
     brokers = kc.metadata()["brokers"]
     brokers = {b["id"]: b for b in brokers}
     broker = brokers.get(idx, None)
     service.logger.debug("Found broker info: %s", broker)
     return broker is not None
Example #5
 def registered(self, node):
     idx = self.idx(node)
     self.logger.debug(
         f"Checking if broker {idx} ({node.name}) is registered")
     kc = KafkaCat(self)
     brokers = kc.metadata()["brokers"]
     brokers = {b["id"]: b for b in brokers}
     broker = brokers.get(idx, None)
     self.logger.debug(f"Found broker info: {broker}")
     return broker is not None
Example #6
    def test_produce_topic(self):
        """
        Create a topic and verify that pandaproxy can produce to it.
        """
        name = "pandaproxy-topic-{}".format(uuid.uuid4())
        self.logger.debug("Topic name %s", name)

        prev = set(self._get_topics())
        self.logger.debug("Existing topics %s", prev)
        assert name not in prev

        data = '{"records": [{"value": "dmVjdG9yaXplZA==", "partition": 0},{"value": "cGFuZGFwcm94eQ==", "partition": 1},{"value": "bXVsdGlicm9rZXI=", "partition": 2}]}'

        self.logger.debug("Producing to non-existant topic")
        produce_result = self._produce_topic(name, data)
        for o in produce_result["offsets"]:
            assert o["error_code"] == 3
            assert o["offset"] == -1

        kc = KafkaCat(self.redpanda)

        self.logger.debug("Creating test topic")
        kafka_tools = KafkaCliTools(self.redpanda)
        kafka_tools.create_topic(
            TopicSpec(name=name, replication_factor=1, partition_count=3))

        self.logger.debug("Waiting for leaders to settle")
        has_leaders = False
        while not has_leaders:
            topics = kc.metadata()["topics"]
            maybe_leaders = True
            for t in topics:
                if t["topic"] == name:
                    for p in t["partitions"]:
                        if p["leader"] == -1:
                            maybe_leaders = False
            has_leaders = maybe_leaders
        # TODO:
        #  Despite the above test, Pandaproxy can still get back no leaders
        #  Query Pandaproxy metadata to see when leaders have settled
        #  The retry logic for produce should have sufficient time for this
        #  additional settle time.

        self.logger.debug("Producing to topic")
        produce_result = self._produce_topic(name, data)
        self.logger.debug("Producing to topic: %s", produce_result)
        for o in produce_result["offsets"]:
            assert o["offset"] == 1, f'error_code {o["error_code"]}'

        self.logger.debug(f"Consuming topic: {name}")
        assert kc.consume_one(name, 0, 1)["payload"] == "vectorized"
        assert kc.consume_one(name, 1, 1)["payload"] == "pandaproxy"
        assert kc.consume_one(name, 2, 1)["payload"] == "multibroker"
Example #7
 def _wait_for_topic(self, name):
     kc = KafkaCat(self.redpanda)
     # Poll metadata until every partition of the topic reports a leader.
     has_leaders = False
     while not has_leaders:
         topics = kc.metadata()["topics"]
         maybe_leaders = True
         for t in topics:
             if t["topic"] == name:
                 for p in t["partitions"]:
                     if p["leader"] == -1:
                         maybe_leaders = False
         has_leaders = maybe_leaders
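
The loop above polls indefinitely. A bounded variant of the same check, sketched here with ducktape's wait_until helper (the helper the later examples use; the import path, timeout, and the behaviour when the topic is missing from metadata are assumptions), could look like this:

    from ducktape.utils.util import wait_until

    def _wait_for_topic(self, name, timeout_sec=30):
        kc = KafkaCat(self.redpanda)

        def topic_has_leaders():
            # True once every partition of the topic reports a leader (not -1).
            for t in kc.metadata()["topics"]:
                if t["topic"] == name:
                    return all(p["leader"] != -1 for p in t["partitions"])
            return False

        wait_until(topic_has_leaders,
                   timeout_sec=timeout_sec,
                   backoff_sec=1,
                   err_msg=f"Leaders did not settle for topic {name}")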
Example #8
    def test_produce_topic(self):
        """
        Create a topic and verify that pandaproxy can produce to it.
        """
        name = create_topic_names(1)[0]
        data = '''
        {
            "records": [
                {"value": "dmVjdG9yaXplZA==", "partition": 0},
                {"value": "cGFuZGFwcm94eQ==", "partition": 1},
                {"value": "bXVsdGlicm9rZXI=", "partition": 2}
            ]
        }'''

        self.logger.info(f"Producing to non-existant topic: {name}")
        produce_result = self._produce_topic(name, data)
        for o in produce_result["offsets"]:
            assert o["error_code"] == 3
            assert o["offset"] == -1

        kc = KafkaCat(self.redpanda)

        self.logger.info(f"Creating test topic: {name}")
        self._create_topics([name], partitions=3)

        self.logger.debug("Waiting for leaders to settle")
        has_leaders = False
        while not has_leaders:
            topics = kc.metadata()["topics"]
            maybe_leaders = True
            for t in topics:
                if t["topic"] == name:
                    for p in t["partitions"]:
                        if p["leader"] == -1:
                            maybe_leaders = False
            has_leaders = maybe_leaders
        # TODO:
        #  Despite the above test, Pandaproxy can still get back no leaders
        #  Query Pandaproxy metadata to see when leaders have settled
        #  The retry logic for produce should have sufficient time for this
        #  additional settle time.

        self.logger.info(f"Producing to topic: {name}")
        produce_result = self._produce_topic(name, data)
        for o in produce_result["offsets"]:
            assert o["offset"] == 1, f'error_code {o["error_code"]}'

        self.logger.info(f"Consuming from topic: {name}")
        assert kc.consume_one(name, 0, 1)["payload"] == "vectorized"
        assert kc.consume_one(name, 1, 1)["payload"] == "pandaproxy"
        assert kc.consume_one(name, 2, 1)["payload"] == "multibroker"
Example #9
    def partitions(self, topic):
        """
        Return partition metadata for the topic.
        """
        kc = KafkaCat(self)
        md = kc.metadata()
        topic = next(filter(lambda t: t["topic"] == topic, md["topics"]))

        def make_partition(p):
            index = p["partition"]
            leader_id = p["leader"]
            leader = None if leader_id == -1 else self.get_node(leader_id)
            replicas = [self.get_node(r["id"]) for r in p["replicas"]]
            return Partition(index, leader, replicas)

        return [make_partition(p) for p in topic["partitions"]]
Example #10
    def test_controller_recovery(self):
        kc = KafkaCat(self.redpanda)

        # choose a partition and a target node
        partition = self._get_partition(kc)
        target_node_id = next(
            filter(lambda r: r["id"] != partition["leader"],
                   partition["replicas"]))["id"]
        self.logger.debug(
            f"Transfering leader from {partition['leader']} to {target_node_id}"
        )

        # build the transfer url
        meta = kc.metadata()
        brokers = meta["brokers"]
        source_broker = next(
            filter(lambda b: b["id"] == partition["leader"], brokers))
        target_broker = next(
            filter(lambda b: b["id"] == target_node_id, brokers))
        self.logger.debug(f"Source broker {source_broker}")
        self.logger.debug(f"Target broker {target_broker}")
        host = source_broker["name"]
        host = host.split(":")[0]
        partition_id = partition["partition"]
        url = "http://{}:9644/v1/kafka/{}/{}/transfer_leadership?target={}".format(
            host, self.topic, partition["partition"], target_node_id)

        def try_transfer():
            self.logger.debug(url)
            res = requests.post(url)
            self.logger.debug(res.text)
            for _ in range(3):  # just give it a moment
                time.sleep(1)
                meta = kc.metadata()
                partition = next(
                    filter(lambda p: p["partition"] == partition_id,
                           meta["topics"][0]["partitions"]))
                if partition["leader"] == target_node_id:
                    return True
            return False

        wait_until(lambda: try_transfer(),
                   timeout_sec=30,
                   backoff_sec=5,
                   err_msg="Transfer did not complete")
Example #11
    def test_controller_recovery(self):
        kc = KafkaCat(self.redpanda)

        # choose a partition and a target node
        partition = self._get_partition(kc)
        target_node_id = next(
            filter(lambda r: r["id"] != partition["leader"],
                   partition["replicas"]))["id"]
        self.logger.debug(
            f"Transfering leader from {partition['leader']} to {target_node_id}"
        )

        # build the transfer url
        meta = kc.metadata()
        brokers = meta["brokers"]
        source_broker = next(
            filter(lambda b: b["id"] == partition["leader"], brokers))
        target_broker = next(
            filter(lambda b: b["id"] == target_node_id, brokers))
        self.logger.debug(f"Source broker {source_broker}")
        self.logger.debug(f"Target broker {target_broker}")

        # Send the request to any host, they should redirect to
        # the leader of the partition.
        partition_id = partition['partition']

        admin = Admin(self.redpanda)
        admin.partition_transfer_leadership("kafka", self.topic, partition_id,
                                            target_node_id)

        def transfer_complete():
            for _ in range(3):  # just give it a moment
                time.sleep(1)
                meta = kc.metadata()
                partition = next(
                    filter(lambda p: p["partition"] == partition_id,
                           meta["topics"][0]["partitions"]))
                if partition["leader"] == target_node_id:
                    return True
            return False

        wait_until(lambda: transfer_complete(),
                   timeout_sec=30,
                   backoff_sec=5,
                   err_msg="Transfer did not complete")
Example #12
 def _get_leaders_by_node(self):
     kc = KafkaCat(self.redpanda)
     md = kc.metadata()
     topic = next(filter(lambda t: t["topic"] == self.topic, md["topics"]))
     leaders = (p["leader"] for p in topic["partitions"])
     return collections.Counter(leaders)
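
For context, the Counter returned here maps a leader node id to the number of partitions that node leads. A small self-contained illustration of that tallying (values made up):

    import collections

    # One leader id per partition, e.g. partitions 0 and 1 led by node 1,
    # partition 2 led by node 2 (illustrative values only).
    leaders = collections.Counter([1, 1, 2])
    assert leaders[1] == 2 and leaders[2] == 1
    assert leaders.most_common(1) == [(1, 2)]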