Example 1
    def test_upgrade_optimized_topology(self):
        self.zookeeper.start()
        self.kafka.start()

        processor1 = StreamsOptimizedUpgradeTestService(
            self.test_context, self.kafka)
        processor2 = StreamsOptimizedUpgradeTestService(
            self.test_context, self.kafka)
        processor3 = StreamsOptimizedUpgradeTestService(
            self.test_context, self.kafka)

        processors = [processor1, processor2, processor3]

        self.logger.info("produce records continually during the test")
        self.producer.start()

        self.logger.info("start all processors unoptimized")
        for processor in processors:
            self.set_topics(processor)
            processor.CLEAN_NODE_ENABLED = False
            self.verify_running_repartition_topic_count(processor, 4)

        self.logger.info("verify unoptimized")
        self.verify_processing(processors, verify_individual_operations=False)

        self.logger.info("stop unoptimized")
        stop_processors(processors, self.stopped_message)

        self.logger.info("reset")
        self.reset_application()
        for processor in processors:
            # rotate log, stdout, stderr, and config files so the next run starts clean
            for f in [processor.LOG_FILE, processor.STDOUT_FILE,
                      processor.STDERR_FILE, processor.CONFIG_FILE]:
                processor.node.account.ssh("mv %s %s.1" % (f, f),
                                           allow_fail=False)

        self.logger.info("start again with topology optimized")
        for processor in processors:
            processor.OPTIMIZED_CONFIG = 'all'
            self.verify_running_repartition_topic_count(processor, 1)

        self.logger.info("verify optimized")
        self.verify_processing(processors, verify_individual_operations=True)

        self.logger.info("stop optimized")
        stop_processors(processors, self.stopped_message)

        self.logger.info("teardown")
        self.producer.stop()
        self.kafka.stop()
        self.zookeeper.stop()
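
The helpers verify_running, verify_stopped, and stop_processors used throughout these examples are not shown. A minimal sketch, assuming ducktape-style services whose stdout can be watched via node.account.monitor_log (the exact messages and the 60-second timeout are assumptions, not the test suite's actual utilities):

    def verify_running(processor, message):
        # start the client and wait for `message` to show up on its stdout
        node = processor.node
        with node.account.monitor_log(processor.STDOUT_FILE) as monitor:
            processor.start()
            monitor.wait_until(message,
                               timeout_sec=60,
                               err_msg="Never saw '%s' on %s" % (message, str(node.account)))

    def verify_stopped(processor, message):
        # stop the client and wait for `message` to confirm a clean shutdown
        node = processor.node
        with node.account.monitor_log(processor.STDOUT_FILE) as monitor:
            processor.stop()
            monitor.wait_until(message,
                               timeout_sec=60,
                               err_msg="Never saw '%s' on %s" % (message, str(node.account)))

    def stop_processors(processors, stopped_message):
        # stop clients one at a time so each shutdown is verified individually
        for processor in processors:
            verify_stopped(processor, stopped_message)
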
Example 2
    def test_rolling_bounces_will_not_trigger_rebalance_under_static_membership(
            self):
        self.zookeeper.start()
        self.kafka.start()

        numThreads = 3
        processor1 = StaticMemberTestService(self.test_context, self.kafka,
                                             "consumer-A", numThreads)
        processor2 = StaticMemberTestService(self.test_context, self.kafka,
                                             "consumer-B", numThreads)
        processor3 = StaticMemberTestService(self.test_context, self.kafka,
                                             "consumer-C", numThreads)

        processors = [processor1, processor2, processor3]

        self.producer.start()

        for processor in processors:
            processor.CLEAN_NODE_ENABLED = False
            self.set_topics(processor)
            verify_running(processor, self.running_message)

        self.verify_processing(processors)

        # do several rolling bounces
        num_bounces = 3
        for _ in range(num_bounces):
            for processor in processors:
                verify_stopped(processor, self.stopped_message)
                verify_running(processor, self.running_message)

        stable_generation = -1
        for processor in processors:
            generations = extract_generation_from_logs(processor)
            num_bounce_generations = num_bounces * numThreads
            assert num_bounce_generations <= len(generations), \
                "Expected at least %d generation messages, but found %d" % (num_bounce_generations, len(generations))

            for generation in generations[-num_bounce_generations:]:
                generation = int(generation)
                if stable_generation == -1:
                    stable_generation = generation
                assert stable_generation == generation, \
                    "Rolling bounces caused an unexpected generation bump to %d" % generation

        self.verify_processing(processors)

        stop_processors(processors, self.stopped_message)

        self.producer.stop()
        self.kafka.stop()
        self.zookeeper.stop()
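
Example 2's generation check relies on extract_generation_from_logs, which is not shown. A minimal sketch, assuming the consumer coordinator writes lines containing 'Successfully joined group with generation N' to the client log (the grep pattern and log format are assumptions):

    def extract_generation_from_logs(processor):
        # pull every generation number out of the client log, oldest first;
        # returned as strings, which the test converts with int(generation)
        lines = processor.node.account.ssh_capture(
            "grep -o 'Successfully joined group with generation [0-9]*' %s" % processor.LOG_FILE,
            allow_fail=True)
        return [line.strip().split()[-1] for line in lines]
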
Example 3
    def test_upgrade_optimized_topology(self):
        self.zookeeper.start()
        self.kafka.start()

        processor1 = StreamsOptimizedUpgradeTestService(
            self.test_context, self.kafka)
        processor2 = StreamsOptimizedUpgradeTestService(
            self.test_context, self.kafka)
        processor3 = StreamsOptimizedUpgradeTestService(
            self.test_context, self.kafka)

        processors = [processor1, processor2, processor3]

        # produce records continually during the test
        self.producer.start()

        # start all processors unoptimized
        for processor in processors:
            self.set_topics(processor)
            processor.CLEAN_NODE_ENABLED = False
            self.verify_running_repartition_topic_count(processor, 4)

        self.verify_processing(processors, verify_individual_operations=False)

        stop_processors(processors, self.stopped_message)

        self.reset_application()

        # start again with topology optimized
        for processor in processors:
            processor.OPTIMIZED_CONFIG = 'all'
            self.verify_running_repartition_topic_count(processor, 1)

        self.verify_processing(processors, verify_individual_operations=True)

        stop_processors(processors, self.stopped_message)

        self.producer.stop()
        self.kafka.stop()
        self.zookeeper.stop()
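
Examples 1 and 3 both call self.verify_running_repartition_topic_count, which is not shown. A minimal sketch, assuming the client prints its repartition topic count when it transitions to RUNNING (the exact stdout message is an assumption):

    def verify_running_repartition_topic_count(self, processor, repartition_topic_count):
        # start the client and wait until it reports the expected repartition topic count
        expected = 'REBALANCING -> RUNNING with REPARTITION TOPIC COUNT=%s' % repartition_topic_count
        with processor.node.account.monitor_log(processor.STDOUT_FILE) as monitor:
            processor.start()
            monitor.wait_until(expected,
                               timeout_sec=120,
                               err_msg="Never saw '%s' on %s" % (expected, str(processor.node.account)))
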
Example 4
    def test_upgrade_topology_with_named_repartition_topic(self):
        self.zookeeper.start()
        self.kafka.start()

        processor1 = StreamsNamedRepartitionTopicService(
            self.test_context, self.kafka)
        processor2 = StreamsNamedRepartitionTopicService(
            self.test_context, self.kafka)
        processor3 = StreamsNamedRepartitionTopicService(
            self.test_context, self.kafka)

        processors = [processor1, processor2, processor3]

        self.producer.start()

        for processor in processors:
            processor.CLEAN_NODE_ENABLED = False
            self.set_topics(processor)
            verify_running(processor, 'REBALANCING -> RUNNING')

        self.verify_processing(processors)

        # do rolling upgrade
        for processor in processors:
            verify_stopped(processor, self.stopped_message)
            # tells the app to add operations before the repartition topic
            processor.ADD_ADDITIONAL_OPS = 'true'
            verify_running(processor, 'UPDATED Topology')

        self.verify_processing(processors)

        stop_processors(processors, self.stopped_message)

        self.producer.stop()
        self.kafka.stop()
        self.zookeeper.stop()
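
Examples 2 and 4 call self.verify_processing(processors) without the verify_individual_operations flag used in Examples 1 and 3. A minimal sketch of this flag-less variant, assuming each client periodically logs a line matching 'processed [0-9]* records' (the pattern and timeout are assumptions):

    def verify_processing(self, processors):
        # every client must make progress: wait for a fresh "processed ... records" line
        for processor in processors:
            with processor.node.account.monitor_log(processor.STDOUT_FILE) as monitor:
                monitor.wait_until('processed [0-9]* records',
                                   timeout_sec=60,
                                   err_msg="Never saw processing of records on " + str(processor.node.account))
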
Example 5
    def test_upgrade_to_cooperative_rebalance(self, upgrade_from_version):
        self.zookeeper.start()
        self.kafka.start()

        processor1 = CooperativeRebalanceUpgradeService(
            self.test_context, self.kafka)
        processor2 = CooperativeRebalanceUpgradeService(
            self.test_context, self.kafka)
        processor3 = CooperativeRebalanceUpgradeService(
            self.test_context, self.kafka)

        processors = [processor1, processor2, processor3]

        # produce records continually during the test
        self.producer.start()

        # start all processors without upgrade_from config; normal operations mode
        self.logger.info("Starting all streams clients in normal running mode")
        for processor in processors:
            processor.set_version(upgrade_from_version)
            self.set_props(processor)
            processor.CLEAN_NODE_ENABLED = False
            # can't rely on state transitions since older versions don't have
            # a state listener, so just verify the client is up and running
            verify_running(processor, self.processing_message)

        # all clients running; rebalancing has ceased
        for processor in processors:
            self.verify_processing(processor, self.processing_message)

        # first rolling bounce with "upgrade.from" config set
        previous_phase = ""
        self.maybe_upgrade_rolling_bounce_and_verify(processors,
                                                     previous_phase,
                                                     self.first_bounce_phase,
                                                     upgrade_from_version)

        # All nodes processing, rebalancing has ceased
        for processor in processors:
            self.verify_processing(
                processor, self.first_bounce_phase + self.processing_message)

        # second rolling bounce without "upgrade.from" config
        self.maybe_upgrade_rolling_bounce_and_verify(processors,
                                                     self.first_bounce_phase,
                                                     self.second_bounce_phase)

        # All nodes processing, rebalancing has ceased
        for processor in processors:
            self.verify_processing(
                processor, self.second_bounce_phase + self.processing_message)

        # now verify tasks are unique
        for processor in processors:
            self.get_tasks_for_processor(processor)
            self.logger.info("Active tasks %s" % processor.active_tasks)

        overlapping_tasks = processor1.active_tasks.intersection(
            processor2.active_tasks)
        assert len(overlapping_tasks) == 0, \
            "Final task assignments are not unique %s %s" % (processor1.active_tasks, processor2.active_tasks)

        overlapping_tasks = processor1.active_tasks.intersection(
            processor3.active_tasks)
        assert len(overlapping_tasks) == 0, \
            "Final task assignments are not unique %s %s" % (processor1.active_tasks, processor3.active_tasks)

        overlapping_tasks = processor2.active_tasks.intersection(
            processor3.active_tasks)
        assert len(overlapping_tasks) == 0, \
            "Final task assignments are not unique %s %s" % (processor2.active_tasks, processor3.active_tasks)

        # test done, shut everything down
        stop_processors(processors,
                        self.second_bounce_phase + self.stopped_message)

        self.producer.stop()
        self.kafka.stop()
        self.zookeeper.stop()
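
Example 5's overlap check depends on self.get_tasks_for_processor filling in processor.active_tasks. A minimal sketch, assuming the client prints its latest assignment as a stdout line such as 'TASK-ASSIGNMENTS: 0_0,0_1,1_2' (the marker and format are assumptions):

    def get_tasks_for_processor(self, processor):
        # take the most recent assignment line from stdout and parse it into a set
        lines = list(processor.node.account.ssh_capture(
            "grep TASK-ASSIGNMENTS %s | tail -n 1" % processor.STDOUT_FILE,
            allow_fail=True))
        processor.active_tasks = set()
        if lines:
            assignment = lines[0].split("TASK-ASSIGNMENTS:")[-1].strip()
            processor.active_tasks = set(task for task in assignment.split(',') if task)
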