Example #1
    def test_metadata_upgrade(self, from_version, to_version):
        """
        Starts 3 KafkaStreams instances with version <from_version> and upgrades one-by-one to <to_version>
        """

        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()

        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, topics=self.topics)
        self.kafka.start()

        self.driver = StreamsSmokeTestDriverService(self.test_context, self.kafka)
        self.driver.disable_auto_terminate()
        self.processor1 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
        self.processor2 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
        self.processor3 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)

        self.driver.start()
        self.start_all_nodes_with(from_version)

        self.processors = [self.processor1, self.processor2, self.processor3]

        counter = 1
        random.seed()

        # first rolling bounce
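        # each instance restarts on <to_version> but is passed <from_version>
        # minus its bugfix digit ([:-2], e.g. "0.10.1.1" -> "0.10.1") so it keeps
        # using the old metadata version while older instances are still running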
        random.shuffle(self.processors)
        for p in self.processors:
            p.CLEAN_NODE_ENABLED = False
            self.do_stop_start_bounce(p, from_version[:-2], to_version, counter)
            counter = counter + 1

        # second rolling bounce
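        # with every instance already on <to_version>, bouncing again with None as
        # the old-version hint lets the group switch to the new metadata version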
        random.shuffle(self.processors)
        for p in self.processors:
            self.do_stop_start_bounce(p, None, to_version, counter)
            counter = counter + 1

        # shutdown
        self.driver.stop()

        random.shuffle(self.processors)
        for p in self.processors:
            node = p.node
            with node.account.monitor_log(p.STDOUT_FILE) as monitor:
                p.stop()
                monitor.wait_until("UPGRADE-TEST-CLIENT-CLOSED",
                                   timeout_sec=60,
                                   err_msg="Never saw output 'UPGRADE-TEST-CLIENT-CLOSED' on" + str(node.account))
Example #2
    def __init__(self, test_context, num_zk, num_brokers, topics=None):
        super(KafkaTest, self).__init__(test_context)
        self.num_zk = num_zk
        self.num_brokers = num_brokers
        self.topics = topics

        self.zk = (ZookeeperService(test_context, self.num_zk)
                   if quorum.for_test(test_context) == quorum.zk else None)
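        # self.zk is None under a Raft-based (KRaft) quorum; KafkaService then
        # brings up its own controllers, sized by controller_num_nodes_override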

        self.kafka = KafkaService(test_context,
                                  self.num_brokers,
                                  self.zk,
                                  topics=self.topics,
                                  controller_num_nodes_override=self.num_zk)
Example #3
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ClientCompatibilityProduceConsumeTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=3)
        self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk, topics={self.topic:{
                                                                    "partitions": 10,
                                                                    "replication-factor": 2}})
        self.num_partitions = 10
        self.timeout_sec = 60
        self.producer_throughput = 1000
        self.num_producers = 2
        self.messages_per_producer = 1000
        self.num_consumers = 1
Example #4
    def __init__(self, test_context):
        super(GetOffsetShellTest, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 1
        self.messages_received_count = 0
        self.topics = {
            TOPIC: {
                'partitions': NUM_PARTITIONS,
                'replication-factor': REPLICATION_FACTOR
            }
        }

        self.zk = (ZookeeperService(test_context, self.num_zk)
                   if quorum.for_test(test_context) == quorum.zk else None)
Example #5
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(SecurityTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context, num_nodes=1, zk=self.zk, topics={self.topic: {
                                                                    "partitions": 2,
                                                                    "replication-factor": 1}
                                                                })
        self.num_partitions = 2
        self.timeout_sec = 10000
        self.producer_throughput = 1000
        self.num_producers = 1
        self.num_consumers = 1
Example #6
    def setup_system(self):
        # Setup phase
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()
        
        self.kafka = KafkaService(self.test_context, num_nodes=self.replication,
                                  zk=self.zk, topics=self.topics)
        self.kafka.start()
        # Start test harness
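        # the driver produces the smoke test's input data and verifies the results,
        # while the job runner hosts the Streams application under test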
        self.driver = StreamsSmokeTestDriverService(self.test_context, self.kafka)
        self.processor1 = StreamsSmokeTestJobRunnerService(self.test_context, self.kafka)

        
        self.driver.start()
        self.processor1.start()
Example #7
    def __init__(self, test_context):
        super(StreamsBrokerCompatibility, self).__init__(test_context=test_context)
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics={
                                      self.input: {'partitions': 1, 'replication-factor': 1},
                                      self.output: {'partitions': 1, 'replication-factor': 1}
                                  })
        self.consumer = VerifiableConsumer(test_context,
                                           1,
                                           self.kafka,
                                           self.output,
                                           "stream-broker-compatibility-verify-consumer")
Example #8
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ReplicationTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk,
                                  topics={self.topic: {
                                      "partitions": 3,
                                      "replication-factor": 3,
                                      'configs': {"min.insync.replicas": 2}}
                                  })
        self.producer_throughput = 1000
        self.num_producers = 1
        self.num_consumers = 1
Example #9
    def test_upgrade_brokers(self, from_version, to_version):
        """
        Start a smoke test client then perform rolling upgrades on the broker.
        """
        if from_version != to_version:
            # Setup phase
            self.zk = ZookeeperService(self.test_context, num_nodes=1)
            self.zk.start()

            # number of nodes needs to be >= 3 for the smoke test
            self.kafka = KafkaService(self.test_context,
                                      num_nodes=3,
                                      zk=self.zk,
                                      version=KafkaVersion(from_version),
                                      topics=self.topics)
            self.kafka.start()

            # allow some time for topics to be created
            time.sleep(10)

            # use the current (dev) version driver
            self.driver = StreamsSmokeTestDriverService(
                self.test_context, self.kafka)
            self.driver.node.version = KafkaVersion(from_version)
            self.driver.start()

            self.processor1 = StreamsSmokeTestJobRunnerService(
                self.test_context, self.kafka)
            self.processor1.node.version = KafkaVersion(from_version)
            self.processor1.start()

            time.sleep(15)
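            # give the clients time to process records on the old brokers before
            # the rolling upgrade, and again on the new version afterwards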

            self.perform_broker_upgrade(to_version)

            time.sleep(15)
            self.driver.wait()
            self.driver.stop()

            self.processor1.stop()

            self.driver.node.account.ssh("grep ALL-RECORDS-DELIVERED %s" %
                                         self.driver.STDOUT_FILE,
                                         allow_fail=False)
            self.processor1.node.account.ssh_capture(
                "grep SMOKE-TEST-CLIENT-CLOSED %s" %
                self.processor1.STDOUT_FILE,
                allow_fail=False)
Example #10
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ClientCompatibilityFeaturesTest, self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, num_nodes=3) if quorum.for_test(test_context) == quorum.zk else None

        # Generate a unique topic name
        topic_name = "client_compat_features_topic_%d%d" % (int(time.time()), randint(0, 2147483647))
        self.topics = { topic_name: {
            "partitions": 1, # Use only one partition to avoid worrying about ordering
            "replication-factor": 3
            }}
        self.kafka = KafkaService(test_context, num_nodes=3, zk=self.zk, topics=self.topics)
        # Always use the latest version of org.apache.kafka.tools.ClientCompatibilityTest
        # so store away the path to the DEV version before we set the Kafka version
        self.dev_script_path = self.kafka.path.script("kafka-run-class.sh", self.kafka.nodes[0])
Example #11
    def __init__(self, test_context):
        super(StreamsStaticMembershipTest, self).__init__(test_context)
        self.topics = {
            self.input_topic: {'partitions': 18},
        }

        self.zookeeper = ZookeeperService(self.test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=3,
                                  zk=self.zookeeper, topics=self.topics)

        self.producer = VerifiableProducer(self.test_context,
                                           1,
                                           self.kafka,
                                           self.input_topic,
                                           throughput=1000,
                                           acks=1)
Example #12
    def __init__(self, test_context):
        super(Benchmark, self).__init__(test_context)
        self.num_zk = 1
        self.num_brokers = 3
        self.topics = {
            TOPIC_REP_ONE: {'partitions': 6, 'replication-factor': 1},
            TOPIC_REP_THREE: {'partitions': 6, 'replication-factor': 3}
        }
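        # same partition count at replication factor 1 vs 3 lets the benchmark
        # isolate the cost of replication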

        self.zk = ZookeeperService(test_context, self.num_zk)

        self.msgs_large = 10000000
        self.batch_size = 8*1024
        self.buffer_memory = 64*1024*1024
        self.msg_sizes = [10, 100, 1000, 10000, 100000]
        self.target_data_size = 128*1024*1024
        self.target_data_size_gb = self.target_data_size/float(1024*1024*1024)
Example #13
    def __init__(self, test_context):
        super(StreamsBrokerDownResilience, self).__init__(test_context=test_context)
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics={
                                      self.inputTopic: {
                                          'partitions': 3,
                                          'replication-factor': 1
                                      },
                                      self.outputTopic: {
                                          'partitions': 1,
                                          'replication-factor': 1
                                      }
                                  })
Example #14
    def test_version_probing_upgrade(self):
        """
        Starts 3 KafkaStreams instances, and upgrades one-by-one to "future version"
        """

        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()

        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, topics=self.topics)
        self.kafka.start()

        self.driver = StreamsSmokeTestDriverService(self.test_context, self.kafka)
        self.driver.disable_auto_terminate()
        self.processor1 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
        self.processor2 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)
        self.processor3 = StreamsUpgradeTestJobRunnerService(self.test_context, self.kafka)

        self.driver.start()
        self.start_all_nodes_with("") # run with TRUNK

        self.processors = [self.processor1, self.processor2, self.processor3]
        self.old_processors = [self.processor1, self.processor2, self.processor3]
        self.upgraded_processors = []

        counter = 1
        current_generation = 3
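        # the three instances joined the group one after another, so the
        # rebalance generation is already at 3 before the first bounce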

        random.seed()
        random.shuffle(self.processors)

        for p in self.processors:
            p.CLEAN_NODE_ENABLED = False
            current_generation = self.do_rolling_bounce(p, counter, current_generation)
            counter = counter + 1

        # shutdown
        self.driver.stop()

        random.shuffle(self.processors)
        for p in self.processors:
            node = p.node
            with node.account.monitor_log(p.STDOUT_FILE) as monitor:
                p.stop()
                monitor.wait_until("UPGRADE-TEST-CLIENT-CLOSED",
                                   timeout_sec=60,
                                   err_msg="Never saw output 'UPGRADE-TEST-CLIENT-CLOSED' on" + str(node.account))
Example #15
    def __init__(self, test_context):
        super(ConsoleConsumerTest, self).__init__(test_context)

        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(
            self.test_context,
            num_nodes=1,
            zk=self.zk,
            topics={self.topic: {
                "partitions": 1,
                "replication-factor": 1
            }})
        self.consumer = ConsoleConsumer(self.test_context,
                                        num_nodes=1,
                                        kafka=self.kafka,
                                        topic=self.topic)
Example #16
    def __init__(self, test_context):
        super(StreamsCooperativeRebalanceUpgradeTest, self).__init__(test_context)
        self.topics = {
            self.source_topic: {'partitions': 9},
            self.sink_topic: {'partitions': 9}
        }

        self.zookeeper = ZookeeperService(self.test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=3,
                                  zk=self.zookeeper, topics=self.topics)

        self.producer = VerifiableProducer(self.test_context,
                                           1,
                                           self.kafka,
                                           self.source_topic,
                                           throughput=1000,
                                           acks=1)
Example #17
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(EverythingRunsTest, self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, num_nodes=2)
        self.kafka = KafkaService(test_context, 1, self.zk)
        self.schema_registry = SchemaRegistryService(test_context, 1, self.zk,
                                                     self.kafka)
        self.rest_proxy = KafkaRestService(test_context, 1, self.zk,
                                           self.kafka, self.schema_registry)
        self.register_driver = RegisterSchemasService(test_context,
                                                      1,
                                                      self.schema_registry,
                                                      retry_wait_sec=.02,
                                                      num_tries=5,
                                                      max_time_seconds=10,
                                                      max_schemas=50)
Example #18
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(QuotaTest, self).__init__(test_context=test_context)

        self.topic = 'test_topic'
        self.logger.info('use topic ' + self.topic)

        # quota related parameters
        self.quota_config = {
            'quota_producer_default': 2500000,
            'quota_consumer_default': 2000000,
            'quota_producer_bytes_per_second_overrides':
            'overridden_id=3750000',
            'quota_consumer_bytes_per_second_overrides':
            'overridden_id=3000000'
        }
        self.maximum_client_deviation_percentage = 100.0
        self.maximum_broker_deviation_percentage = 5.0
        self.num_records = 100000
        self.record_size = 3000
        self.security_protocol = 'PLAINTEXT'
        self.interbroker_security_protocol = 'PLAINTEXT'

        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(
            test_context,
            num_nodes=1,
            zk=self.zk,
            security_protocol=self.security_protocol,
            interbroker_security_protocol=self.interbroker_security_protocol,
            topics={
                self.topic: {
                    'partitions': 6,
                    'replication-factor': 1,
                    'min.insync.replicas': 1
                }
            },
            quota_config=self.quota_config,
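            # sample the brokers' one-minute byte-in/byte-out rates via JMX so the
            # test can compare observed throughput against the configured quotas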
            jmx_object_names=[
                'kafka.server:type=BrokerTopicMetrics,name=BytesInPerSec',
                'kafka.server:type=BrokerTopicMetrics,name=BytesOutPerSec'
            ],
            jmx_attributes=['OneMinuteRate'])
        self.num_producers = 1
        self.num_consumers = 2
Example #19
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(RoundTripFaultTest, self).__init__(test_context)
        self.zk = ZookeeperService(test_context, num_nodes=3)
        self.kafka = KafkaService(test_context, num_nodes=4, zk=self.zk)
        self.workload_service = RoundTripWorkloadService(
            test_context, self.kafka)
        self.trogdor = TrogdorService(
            context=self.test_context,
            client_services=[self.zk, self.kafka, self.workload_service])
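        # the workload starts immediately (start time 0) and runs until the task
        # is stopped (MAX_DURATION_MS), round-tripping up to 100000 messages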
        self.round_trip_spec = RoundTripWorkloadSpec(
            0,
            TaskSpec.MAX_DURATION_MS,
            self.workload_service.client_node,
            self.workload_service.bootstrap_servers,
            target_messages_per_sec=10000,
            partition_assignments={0: [0, 1, 2]},
            max_messages=100000)
Example #20
    def __init__(self, test_context, num_zk, num_brokers, num_hadoop, num_schema_registry, num_rest,
                 hadoop_distro='cdh', hadoop_version=2, topics=None):

        super(CamusTest, self).__init__(test_context)
        self.num_zk = num_zk
        self.num_brokers = num_brokers
        self.num_hadoop = num_hadoop
        self.num_schema_registry = num_schema_registry
        self.num_rest = num_rest
        self.topics = topics
        self.hadoop_distro = hadoop_distro
        self.hadoop_version = hadoop_version

        self.zk = ZookeeperService(test_context, self.num_zk)
        self.kafka = KafkaService(test_context, self.num_brokers, self.zk, topics=self.topics)
        self.hadoop = create_hadoop_service(test_context, self.num_hadoop, self.hadoop_distro, self.hadoop_version)
        self.schema_registry = SchemaRegistryService(test_context, self.num_schema_registry, self.zk, self.kafka)
        self.rest = KafkaRestService(test_context, self.num_rest, self.zk, self.kafka, self.schema_registry)
Example #21
    def __init__(self, test_context):
        super(TestBounce, self).__init__(test_context)

        self.topic = "topic"
        self.zk = (ZookeeperService(test_context, num_nodes=1)
                   if quorum.for_test(test_context) == quorum.zk else None)
        self.kafka = KafkaService(
            test_context,
            num_nodes=1,
            zk=self.zk,
            topics={self.topic: {
                "partitions": 1,
                "replication-factor": 1
            }},
            controller_num_nodes_override=(
                3 if quorum.for_test(test_context) == quorum.remote_raft else 1))
        self.num_messages = 1000
Example #22
    def __init__(self, test_context):
        super(StreamsBrokerCompatibility, self).__init__(test_context=test_context)
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=1,
                                  zk=self.zk,
                                  topics={
                                      self.input: {'partitions': 1, 'replication-factor': 1},
                                      self.output: {'partitions': 1, 'replication-factor': 1}
                                  },
                                  server_prop_overrides=[
                                      ["transaction.state.log.replication.factor", "1"],
                                      ["transaction.state.log.min.isr", "1"]
                                  ])
        self.consumer = VerifiableConsumer(test_context,
                                           1,
                                           self.kafka,
                                           self.output,
                                           "stream-broker-compatibility-verify-consumer")
Example #23
    def setUp(self):
        self.topic = "test_topic"
        self.producer_throughput = 100
        self.num_producers = 1
        self.num_consumers = 1
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  topics={
                                      self.topic: {
                                          "partitions": 3,
                                          "replication-factor": 3,
                                          'configs': {
                                              "min.insync.replicas": 2
                                          }
                                      }
                                  })
        self.zk.start()
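        # note: only ZooKeeper is started here; Kafka is presumably started by the
        # individual tests once any per-test configuration has been applied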
Example #24
    def test_upgrade_downgrade_streams(self, from_version, to_version):
        """
        Start a smoke test client, then abort (kill -9) and restart it a few times.
        Ensure that all records are delivered.

        Note that, just like tests/core/upgrade_test.py, a prerequisite for this test to succeed
        is the inclusion of all parametrized versions of kafka in kafka/vagrant/base.sh
        (search for get_kafka()). For streams in particular, that means that someone has manually
        copied the kafka-stream-$version-test.jar into the right S3 bucket as shown in base.sh.
        """
        if from_version != to_version:
            # Setup phase
            self.zk = ZookeeperService(self.test_context, num_nodes=1)
            self.zk.start()

            # number of nodes needs to be >= 3 for the smoke test
            self.kafka = KafkaService(self.test_context, num_nodes=3,
                                      zk=self.zk, version=KafkaVersion(from_version), topics=self.topics)
            self.kafka.start()

            # allow some time for topics to be created
            time.sleep(10)

            self.driver = StreamsSmokeTestDriverService(self.test_context, self.kafka)
            self.driver.node.version = KafkaVersion(from_version)
            self.driver.start()

            self.processor1 = StreamsSmokeTestJobRunnerService(self.test_context, self.kafka)
            self.processor1.node.version = KafkaVersion(from_version)
            self.processor1.start()

            time.sleep(15)

            self.perform_streams_upgrade(to_version)

            time.sleep(15)
            self.driver.wait()
            self.driver.stop()

            self.processor1.stop()

            self.driver.node.account.ssh("grep ALL-RECORDS-DELIVERED %s" % self.driver.STDOUT_FILE, allow_fail=False)
            self.processor1.node.account.ssh_capture("grep SMOKE-TEST-CLIENT-CLOSED %s" % self.processor1.STDOUT_FILE, allow_fail=False)
Example #25
    def __init__(self, test_context):
        super(StreamsStandbyTask, self).__init__(test_context=test_context)
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  topics={
                                      self.streams_source_topic: {
                                          'partitions': 6,
                                          'replication-factor': 1
                                      },
                                      self.streams_sink_topic_1: {
                                          'partitions': 1,
                                          'replication-factor': 1
                                      },
                                      self.streams_sink_topic_2: {
                                          'partitions': 1,
                                          'replication-factor': 1
                                      }
                                  })
Example #26
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(TransactionsTest, self).__init__(test_context=test_context)

        self.input_topic = "input-topic"
        self.output_topic = "output-topic"

        self.num_brokers = 3

        # Test parameters
        self.num_input_partitions = 2
        self.num_output_partitions = 3
        self.num_seed_messages = 100000
        self.transaction_size = 750
        self.consumer_group = "transactions-test-consumer-group"

        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(test_context,
                                  num_nodes=self.num_brokers,
                                  zk=self.zk)
Example #27
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(CompressionTest, self).__init__(test_context=test_context)

        self.topic = "test_topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(
            test_context,
            num_nodes=1,
            zk=self.zk,
            topics={self.topic: {
                "partitions": 10,
                "replication-factor": 1
            }})
        self.num_partitions = 10
        self.timeout_sec = 60
        self.producer_throughput = 1000
        self.num_producers = len(self.COMPRESSION_TYPES)
        self.messages_per_producer = 1000
        self.num_consumers = 1
Example #28
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(LogDirFailureTest, self).__init__(test_context=test_context)

        self.topic1 = "test_topic_1"
        self.topic2 = "test_topic_2"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(
            test_context,
            num_nodes=3,
            zk=self.zk,
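            # the two topics differ only in min.insync.replicas (1 vs 2), so the
            # test covers both lenient and strict durability settings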
            topics={
                self.topic1: {
                    "partitions": 1,
                    "replication-factor": 3,
                    "configs": {
                        "min.insync.replicas": 1
                    }
                },
                self.topic2: {
                    "partitions": 1,
                    "replication-factor": 3,
                    "configs": {
                        "min.insync.replicas": 2
                    }
                }
            },
            # Set log.roll.ms to 3 seconds so that broker will detect disk error sooner when it creates log segment
            # Otherwise broker will still be able to read/write the log file even if the log directory is inaccessible.
            server_prop_overides=[
                [config_property.OFFSETS_TOPIC_NUM_PARTITIONS, "1"],
                [config_property.LOG_FLUSH_INTERVAL_MESSAGE, "5"],
                [config_property.REPLICA_HIGHWATERMARK_CHECKPOINT_INTERVAL_MS, "60000"],
                [config_property.LOG_ROLL_TIME_MS, "3000"]
            ])

        self.producer_throughput = 1000
        self.num_producers = 1
        self.num_consumers = 1
Example #29
    def __init__(self, test_context):
        """:type test_context: ducktape.tests.test.TestContext"""
        super(ClientCompatibilityFeaturesTest, self).__init__(test_context=test_context)

        self.zk = ZookeeperService(test_context, num_nodes=3)

        # Generate a unique topic name
        topic_name = "client_compat_features_topic_%d%d" % (int(time.time()), randint(0, 2147483647))
        self.topics = {
            topic_name: {
                "partitions": 1,  # Use only one partition to avoid worrying about ordering
                "replication-factor": 3
            }
        }
        self.kafka = KafkaService(test_context,
                                  num_nodes=3,
                                  zk=self.zk,
                                  topics=self.topics)
Example #30
    def setup_system(self, start_processor=True):
        # Setup phase
        self.zk = ZookeeperService(self.test_context, num_nodes=1)
        self.zk.start()

        self.kafka = KafkaService(self.test_context, num_nodes=self.replication, zk=self.zk, topics=self.topics)
        self.kafka.start()

        # allow some time for topics to be created
        wait_until(lambda: self.confirm_topics_on_all_brokers(set(self.topics.keys())),
                   timeout_sec=60,
                   err_msg="Broker did not create all topics in 60 seconds ")

        # Start test harness
        self.driver = StreamsSmokeTestDriverService(self.test_context, self.kafka)
        self.processor1 = StreamsSmokeTestJobRunnerService(self.test_context, self.kafka)

        self.driver.start()

        if start_processor:
            self.processor1.start()