    def test_consume_from_blocked_s3(self):
        produce_until_segments(redpanda=self.redpanda,
                               topic=self.s3_topic_name,
                               partition_idx=0,
                               count=5,
                               acks=-1)

        self.rpk.alter_topic_config(self.s3_topic_name,
                                    TopicSpec.PROPERTY_RETENTION_BYTES,
                                    self.retention_bytes)

        wait_for_segments_removal(redpanda=self.redpanda,
                                  topic=self.s3_topic_name,
                                  partition_idx=0,
                                  count=4)
        """Disconnect redpanda from S3 and try to read starting with offset 0"""
        with firewall_blocked(self.redpanda.nodes, self._s3_port):
            try:
                out = self.rpk.consume(topic=self.s3_topic_name)
            except RpkException as e:
                assert 'timed out' in e.msg
            else:
                raise RuntimeError(
                    f"RPK consume should have timed out, but ran with output: {out}"
                )
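
For context, firewall_blocked is a context manager that cuts the given nodes off from a port for the duration of the with block. Below is a minimal sketch of such a helper, assuming ducktape-style nodes that expose account.ssh and hosts with iptables available; the rule details are illustrative, not the test suite's actual implementation:

from contextlib import contextmanager

@contextmanager
def firewall_blocked(nodes, port):
    """Drop TCP traffic to/from `port` on every node while the block runs."""
    rules = [
        f"iptables -A OUTPUT -p tcp --dport {port} -j DROP",
        f"iptables -A INPUT -p tcp --sport {port} -j DROP",
    ]
    try:
        for node in nodes:
            for rule in rules:
                node.account.ssh(rule)  # ducktape remote account (assumed)
        yield
    finally:
        # -D deletes the exact rule that -A appended, restoring connectivity.
        for node in nodes:
            for rule in rules:
                node.account.ssh(rule.replace(" -A ", " -D "))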
Example #2
    def test_retention_archival_coordination(self, acks):
        """
        Test that only archived segments can be evicted and that eviction
        restarts once the segments have been archived.
        """
        self.kafka_tools.alter_topic_config(
            self.topic,
            {
                TopicSpec.PROPERTY_RETENTION_BYTES: 5 * self.log_segment_size,
            },
        )

        with firewall_blocked(self.redpanda.nodes, self._s3_port):
            produce_until_segments(redpanda=self.redpanda,
                                   topic=self.topic,
                                   partition_idx=0,
                                   count=10,
                                   acks=acks)

            # Sleep long enough for log eviction to happen under normal
            # conditions and check that no segment has been evicted (because
            # we can't upload segments to cloud storage).
            time.sleep(3 * self.log_compaction_interval_ms / 1000.0)
            counts = list(
                segments_count(self.redpanda, self.topic, partition_idx=0))
            self.logger.info(f"node segment counts: {counts}")
            assert len(counts) == len(self.redpanda.nodes)
            assert all(c >= 10 for c in counts)

        # Check that eviction restarts after we restored the connection to cloud
        # storage.
        wait_for_segments_removal(redpanda=self.redpanda,
                                  topic=self.topic,
                                  partition_idx=0,
                                  count=6)
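
wait_for_segments_removal polls until eviction has caught up on every node. A minimal sketch of what such a helper could look like, built on ducktape's wait_until and the segments_count helper used above (the timeout and backoff values are illustrative assumptions):

from ducktape.utils.util import wait_until

def wait_for_segments_removal(redpanda, topic, partition_idx, count):
    """Wait until every node reports at most `count` segments for the
    partition; eviction is asynchronous, so poll with a timeout."""
    def segments_removed():
        counts = list(segments_count(redpanda, topic, partition_idx))
        return all(c <= count for c in counts)

    wait_until(segments_removed,
               timeout_sec=120,
               backoff_sec=5,
               err_msg=f"segments not removed from {topic}/{partition_idx}")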
Example #3
    def test_connection_drop(self):
        """Disconnect redpanda from S3 during an active upload, restore the
        connection and check that everything is uploaded"""
        self.kafka_tools.produce(self.topic, 10000, 1024)
        with firewall_blocked(self.redpanda.nodes, self._s3_port):
            # Sleep to make sure the reconciliation loop has kicked in and
            # started uploading data; otherwise we could rejoin before archival
            # storage even tries to upload new segments.
            time.sleep(10)
        validate(self._quick_verify, self.logger, 90)
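
The call validate(self._quick_verify, self.logger, 90) implies retry-until-timeout semantics: verification can fail transiently while uploads catch up after the firewall is lifted. An illustrative sketch of such a wrapper (an assumption, not the suite's actual implementation):

import time

def validate(check, logger, timeout_sec, backoff_sec=5):
    """Re-run `check` until it passes or `timeout_sec` elapses."""
    deadline = time.time() + timeout_sec
    while True:
        try:
            check()
            return
        except Exception as e:
            if time.time() > deadline:
                raise
            logger.info(f"validation failed, retrying: {e}")
            time.sleep(backoff_sec)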
Example #4
    def test_one_node_reconnect(self):
        """Disconnect one redpanda node from S3, write data, reconnect the node
        and check that the data is uploaded"""
        self.kafka_tools.produce(self.topic, 1000, 1024)
        leaders = list(self._get_partition_leaders().values())
        with firewall_blocked(leaders[0:1], self._s3_port):
            self.kafka_tools.produce(self.topic, 9000, 1024)
            # Sleep to make sure the reconciliation loop has kicked in and
            # started uploading data; otherwise we could rejoin before archival
            # storage even tries to upload new segments.
            time.sleep(10)
        validate(self._quick_verify, self.logger, 90)
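
_get_partition_leaders has to map each partition to the node currently leading it, so the test can firewall exactly one leader. A hypothetical sketch against Redpanda's admin API (GET /v1/partitions/kafka/<topic>/<partition> reports a leader_id); it assumes node IDs line up with ducktape service indexes and a partition_count attribute on the test, both of which are assumptions here:

import requests

def _get_partition_leaders(self):
    """Map partition index -> leader node via the admin API."""
    node_by_id = {self.redpanda.idx(n): n for n in self.redpanda.nodes}
    leaders = {}
    for partition in range(self.partition_count):  # hypothetical attribute
        url = (f"http://{self.redpanda.nodes[0].account.hostname}:9644"
               f"/v1/partitions/kafka/{self.topic}/{partition}")
        leader_id = requests.get(url).json()["leader_id"]
        leaders[partition] = node_by_id.get(leader_id)
    return leaders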
Example #5
    def test_connection_flicker(self):
        """Disconnect redpanda from S3 for short periods of time during an
        active upload and check that everything is uploaded"""
        con_enabled = True
        for _ in range(20):
            # Upload data in batches, alternating between blocked and
            # unblocked connectivity to S3.
            if con_enabled:
                with firewall_blocked(self.redpanda.nodes, self._s3_port):
                    self.kafka_tools.produce(self.topic, 500, 1024)
            else:
                self.kafka_tools.produce(self.topic, 500, 1024)
            con_enabled = not con_enabled
            time.sleep(1)
        time.sleep(10)
        validate(self._quick_verify, self.logger, 90)
Example #6
    def test_isolate(self):
        """Verify that our isolate/rejoin facilities actually work"""
        with firewall_blocked(self.redpanda.nodes, self._s3_port):
            self.kafka_tools.produce(self.topic, 10000, 1024)
            time.sleep(10)  # can't busy wait here

            # The topic manifest can be present in the bucket because the topic
            # is created before the firewall is blocked. No segments or
            # partition manifest should be present.
            topic_manifest_id = "d0000000/meta/kafka/panda-topic/topic_manifest.json"
            objects = self.s3_client.list_objects(self.s3_bucket_name)
            keys = [x.Key for x in objects]

            assert len(keys) < 2, \
                f"Bucket should be empty or contain only {topic_manifest_id}, but contains {keys}"

            if len(keys) == 1:
                assert topic_manifest_id == keys[0], \
                    f"Bucket should be empty or contain only {topic_manifest_id}, but contains {keys[0]}"