Example #1
    def test_end_to_end_latency(self,
                                compression_type="none",
                                security_protocol="PLAINTEXT",
                                interbroker_security_protocol=None,
                                client_version=str(DEV_BRANCH),
                                broker_version=str(DEV_BRANCH)):
        """
        Setup: 1 node zk + 3 node kafka cluster
        Produce (acks = 1) and consume 10e3 messages to/from a topic with 6 partitions and replication-factor 3,
        measuring the latency between production and consumption of each message.

        Return aggregate latency statistics.

        (Under the hood, this simply runs EndToEndLatency.scala)
        """
        client_version = KafkaVersion(client_version)
        broker_version = KafkaVersion(broker_version)
        self.validate_versions(client_version, broker_version)
        if interbroker_security_protocol is None:
            interbroker_security_protocol = security_protocol
        self.start_kafka(security_protocol, interbroker_security_protocol,
                         broker_version)
        self.logger.info("BENCHMARK: End to end latency")
        self.perf = EndToEndLatencyService(self.test_context,
                                           1,
                                           self.kafka,
                                           topic=TOPIC_REP_THREE,
                                           num_records=10000,
                                           compression_type=compression_type,
                                           version=client_version)
        self.perf.run()
        return latency(self.perf.results[0]['latency_50th_ms'],
                       self.perf.results[0]['latency_99th_ms'],
                       self.perf.results[0]['latency_999th_ms'])
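
Both this example and the next return their results through a latency(...) helper that is not reproduced on this page. A minimal sketch of such a helper, assuming it does nothing more than package the three percentile values from the call site into a result dictionary (the function name and the percentile keys come from the calls above; everything else is an assumption):

def latency(latency_50th_ms, latency_99th_ms, latency_999th_ms):
    """Package the 50th, 99th and 99.9th percentile end-to-end latencies
    (in milliseconds) into a single result dictionary. Sketch only; the
    real helper used by these tests may differ."""
    return {
        'latency_50th_ms': latency_50th_ms,
        'latency_99th_ms': latency_99th_ms,
        'latency_999th_ms': latency_999th_ms,
    }
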
Example #2
    def test_end_to_end_latency(self, compression_type="none", security_protocol="PLAINTEXT",
                                interbroker_security_protocol=None, client_version=str(DEV_BRANCH),
                                broker_version=str(DEV_BRANCH)):
        """
        Setup: 1 node zk + 3 node kafka cluster
        Produce (acks = 1) and consume 10e3 messages to/from a topic with 6 partitions and replication-factor 3,
        measuring the latency between production and consumption of each message.

        Return aggregate latency statistics.

        (Under the hood, this simply runs EndToEndLatency.scala)
        """
        client_version = KafkaVersion(client_version)
        broker_version = KafkaVersion(broker_version)
        self.validate_versions(client_version, broker_version)
        if interbroker_security_protocol is None:
            interbroker_security_protocol = security_protocol
        self.start_kafka(security_protocol, interbroker_security_protocol, broker_version)
        self.logger.info("BENCHMARK: End to end latency")
        self.perf = EndToEndLatencyService(
            self.test_context, 1, self.kafka,
            topic=TOPIC_REP_THREE, num_records=10000,
            compression_type=compression_type, version=client_version
        )
        self.perf.run()
        return latency(self.perf.results[0]['latency_50th_ms'],
                       self.perf.results[0]['latency_99th_ms'],
                       self.perf.results[0]['latency_999th_ms'])
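
Both variants also call self.validate_versions(client_version, broker_version) before starting the cluster. That check is not reproduced on this page; a plausible sketch, assuming the rule is simply that the client must not be newer than the broker and that KafkaVersion instances support ordered comparison (both points are assumptions here):

    def validate_versions(self, client_version, broker_version):
        # Sketch only: assume the test merely refuses to pair a client that is
        # newer than the broker it talks to.
        assert client_version <= broker_version, \
            "Client version %s must not be newer than broker version %s" % \
            (client_version, broker_version)
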
Example #3
    def test_version(self, version=str(LATEST_0_9), new_consumer=True, metadata_quorum=quorum.zk):
        """
        Sanity check of the producer performance service - verify that we can run the service with a small
        number of messages. The actual stats here are pretty meaningless since the number of messages is quite small.
        """
        version = KafkaVersion(version)
        self.kafka = KafkaService(
            self.test_context, 1,
            self.zk, topics={self.topic: {'partitions': 1, 'replication-factor': 1}}, version=version)
        self.kafka.start()

        # check basic run of producer performance
        self.producer_perf = ProducerPerformanceService(
            self.test_context, 1, self.kafka, topic=self.topic,
            num_records=self.num_records, record_size=self.record_size,
            throughput=1000000000,  # Set impossibly high so there is effectively no throttling, for equivalent behavior between 0.8.X and 0.9.X
            version=version,
            settings={
                'acks': 1,
                'batch.size': 8*1024,
                'buffer.memory': 64*1024*1024})
        self.producer_perf.run()
        producer_perf_data = compute_aggregate_throughput(self.producer_perf)
        assert producer_perf_data['records_per_sec'] > 0

        # check basic run of end to end latency
        self.end_to_end = EndToEndLatencyService(
            self.test_context, 1, self.kafka,
            topic=self.topic, num_records=self.num_records, version=version)
        self.end_to_end.run()
        end_to_end_data = latency(self.end_to_end.results[0]['latency_50th_ms'],
                                  self.end_to_end.results[0]['latency_99th_ms'],
                                  self.end_to_end.results[0]['latency_999th_ms'])

        # check basic run of consumer performance service
        self.consumer_perf = ConsumerPerformanceService(
            self.test_context, 1, self.kafka, new_consumer=new_consumer,
            topic=self.topic, version=version, messages=self.num_records)
        self.consumer_perf.group = "test-consumer-group"
        self.consumer_perf.run()
        consumer_perf_data = compute_aggregate_throughput(self.consumer_perf)
        assert consumer_perf_data['records_per_sec'] > 0

        return {
            "producer_performance": producer_perf_data,
            "end_to_end_latency": end_to_end_data,
            "consumer_performance": consumer_perf_data
        }
Example #4
    def test_version(self, version=str(LATEST_0_9), new_consumer=False):
        """
        Sanity check of the producer performance service - verify that we can run the service with a small
        number of messages. The actual stats here are pretty meaningless since the number of messages is quite small.
        """
        version = KafkaVersion(version)
        self.kafka = KafkaService(
            self.test_context, 1,
            self.zk, topics={self.topic: {'partitions': 1, 'replication-factor': 1}}, version=version)
        self.kafka.start()

        # check basic run of producer performance
        self.producer_perf = ProducerPerformanceService(
            self.test_context, 1, self.kafka, topic=self.topic,
            num_records=self.num_records, record_size=self.record_size,
            throughput=1000000000,  # Set impossibly high so there is effectively no throttling, for equivalent behavior between 0.8.X and 0.9.X
            version=version,
            settings={
                'acks': 1,
                'batch.size': 8*1024,
                'buffer.memory': 64*1024*1024})
        self.producer_perf.run()
        producer_perf_data = compute_aggregate_throughput(self.producer_perf)

        # check basic run of end to end latency
        self.end_to_end = EndToEndLatencyService(
            self.test_context, 1, self.kafka,
            topic=self.topic, num_records=self.num_records, version=version)
        self.end_to_end.run()
        end_to_end_data = latency(self.end_to_end.results[0]['latency_50th_ms'],
                                  self.end_to_end.results[0]['latency_99th_ms'],
                                  self.end_to_end.results[0]['latency_999th_ms'])

        # check basic run of consumer performance service
        self.consumer_perf = ConsumerPerformanceService(
            self.test_context, 1, self.kafka, new_consumer=new_consumer,
            topic=self.topic, version=version, messages=self.num_records)
        self.consumer_perf.group = "test-consumer-group"
        self.consumer_perf.run()
        consumer_perf_data = compute_aggregate_throughput(self.consumer_perf)

        return {
            "producer_performance": producer_perf_data,
            "end_to_end_latency": end_to_end_data,
            "consumer_performance": consumer_perf_data
        }
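
Both test_version variants summarize producer and consumer throughput with a compute_aggregate_throughput(...) helper that, like latency(...), is not shown above. A minimal sketch, assuming it just sums the per-worker figures reported by the performance service (only the 'records_per_sec' key is confirmed by the call sites; the 'mbps' field is an assumption):

def compute_aggregate_throughput(perf):
    """Sum per-worker throughput figures from a finished performance service.
    Sketch only; any field other than 'records_per_sec' is an assumption."""
    return {
        'records_per_sec': sum(r['records_per_sec'] for r in perf.results),
        'mbps': sum(r['mbps'] for r in perf.results),
    }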