Example #1
    def test_mixed_cluster_load_caches_old_server(self):
        """
        1. start mixed cluster (new version servers + old version servers)
        2. activate from new version control.sh
        3. start old version server
        4. add it to baseline
        5. smoke check:
        5.1. create dynamic caches from old server node
        5.2. do some load from old server node
        """

        self.ignite_new_version.cu.activate()
        created_caches = []
        self.server_config = Ignite.config_builder.get_config(
            'server', config_set_name='base')
        ignite = self.ignite_old_version
        with PiClient(ignite, self.server_config, nodes_num=1) as piclient:
            ignite.cu.add_node_to_baseline(
                ignite.get_node_consistent_id(piclient.node_ids[0]))

            dynamic_caches_factory = DynamicCachesFactory()
            async_ops = []
            for method in dynamic_caches_factory.dynamic_cache_configs:
                cache_name = "cache_group_%s" % method
                log_print('Loading {}...'.format(cache_name), color='green')

                # bind the client-side API to its own name so it does not
                # shadow the cluster object assigned to `ignite` above
                ignite_api = piclient.get_ignite()

                ignite_api.getOrCreateCache(
                    getattr(dynamic_caches_factory, method)(cache_name))

                async_operation = create_async_operation(
                    create_put_all_operation,
                    cache_name,
                    1,
                    1001,
                    10,
                    value_type=self.data_model)
                async_ops.append(async_operation)
                async_operation.evaluate()
                created_caches.append(cache_name)

            log_print('Waiting async results...', color='debug')
            # wait for the putAll operations to complete
            for async_op in async_ops:
                async_op.getResult()

            with TransactionalLoading(MixedTestLoadingAdapter(self),
                                      config_file=self.server_config,
                                      loading_profile=LoadingProfile(
                                          delay=1,
                                          transaction_timeout=100000)):
                sleep(60)
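
The create-caches-then-load loop in Example #1 reappears almost verbatim in the examples below. As a rough sketch, it could be factored into a shared helper that reuses only names already shown on this page (DynamicCachesFactory, create_async_operation, create_put_all_operation, log_print); the helper name and the start_key/end_key/batch_size parameters are illustrative, not part of the original suite:

def load_dynamic_caches(piclient, value_type, start_key=1, end_key=1001,
                        batch_size=10):
    """Create every factory cache and load it via async putAll.

    A sketch of the pattern repeated in the examples on this page;
    returns the created cache names once all loads have finished.
    """
    dynamic_caches_factory = DynamicCachesFactory()
    created_caches, async_ops = [], []
    for method in dynamic_caches_factory.dynamic_cache_configs:
        cache_name = "cache_group_%s" % method
        log_print('Loading {}...'.format(cache_name), color='green')

        # create the cache from its factory-built configuration
        piclient.get_ignite().getOrCreateCache(
            getattr(dynamic_caches_factory, method)(cache_name))

        # kick off the putAll load asynchronously
        async_operation = create_async_operation(
            create_put_all_operation, cache_name,
            start_key, end_key, batch_size, value_type=value_type)
        async_ops.append(async_operation)
        async_operation.evaluate()
        created_caches.append(cache_name)

    # block until every putAll operation completes
    for async_op in async_ops:
        async_op.getResult()
    return created_caches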
Example #2
    def test_old_cluster_load_caches_new_client(self):
        """
        1. start old version grid
        2. activate from old version control.sh
        3. start new version client
        4. smoke check:
        4.1. create dynamic caches
        4.2. do some load
        """
        created_caches = []

        self.ignite_old_version.cu.activate()
        with PiClient(self.ignite_new_version, self.client_config,
                      nodes_num=1) as piclient:
            dynamic_caches_factory = DynamicCachesFactory()
            async_ops = []
            for method in dynamic_caches_factory.dynamic_cache_configs:
                cache_name = "cache_group_%s" % method
                log_print('Loading {}...'.format(cache_name), color='green')
                piclient.get_ignite().getOrCreateCache(
                    getattr(dynamic_caches_factory, method)(cache_name))

                async_operation = create_async_operation(
                    create_put_all_operation,
                    cache_name,
                    1,
                    1001,
                    10,
                    value_type=self.data_model)
                async_ops.append(async_operation)
                async_operation.evaluate()
                created_caches.append(cache_name)

            log_print('Waiting async results...', color='debug')
            # wait for the putAll operations to complete
            for async_op in async_ops:
                async_op.getResult()

            with TransactionalLoading(MixedTestLoadingAdapter(self),
                                      config_file=self.client_config,
                                      loading_profile=LoadingProfile(
                                          delay=1,
                                          transaction_timeout=100000)):
                sleep(60)
Example #3
    def test_pme_bench_dynamic_caches_client(self):
        self.start_grid_no_activate()
        self.ignite.jmx.activate(1)
        self.ignite.cu.set_current_topology_as_baseline()
        self.load_data_with_streamer(0,
                                     1 * self.LOAD_FACTOR,
                                     ignite=self.ignite.ignite_cli_load,
                                     nodes_num=self.num_loading_nodes)

        expected_total_num_clients = len(self.ignite.get_all_client_nodes() +
                                         self.ignite.get_all_common_nodes())

        num_clients = 32

        # start num_clients client nodes on 'flaky' hosts
        with PiClient(self.ignite.ignite_cli_flaky,
                      self.get_client_config(),
                      nodes_num=num_clients,
                      new_instance=True) as piclient:
            self.ignite.ignite_srvs.wait_for_topology_snapshot(
                client_num=expected_total_num_clients + num_clients)

            from itertools import cycle
            client_nodes = cycle(
                self.ignite.ignite_cli_flaky.get_all_common_nodes() +
                self.ignite.ignite_cli_flaky.get_all_client_nodes())

            dynamic_caches_factory = DynamicCachesFactory()
            for method in dynamic_caches_factory.dynamic_cache_configs:
                cache_name = "cache_group_%s" % method
                node_id = next(client_nodes)
                log_print('Starting %s on node %s ...' % (cache_name, node_id))

                gateway = piclient.get_gateway(node_id)
                ignite = piclient.get_ignite(node_id)

                ignite.getOrCreateCache(
                    getattr(dynamic_caches_factory, method)(cache_name,
                                                            gateway=gateway))
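
Example #3 round-robins cache creation across the client nodes started on the 'flaky' hosts. A condensed sketch of the same idea, assuming the piclient helpers used above; the function name and node_ids parameter are illustrative:

from itertools import cycle

def create_caches_round_robin(piclient, node_ids):
    """Create each factory cache on the next client node in turn.

    node_ids is any sequence of client node ids, e.g. the concatenated
    common/client node lists from Example #3.
    """
    nodes = cycle(node_ids)
    factory = DynamicCachesFactory()
    for method in factory.dynamic_cache_configs:
        cache_name = "cache_group_%s" % method
        node_id = next(nodes)
        # build the cache configuration through the chosen node's gateway
        # so the object is created inside that node's JVM
        gateway = piclient.get_gateway(node_id)
        piclient.get_ignite(node_id).getOrCreateCache(
            getattr(factory, method)(cache_name, gateway=gateway))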
Example #4
    def _create_dynamic_caches_with_data(self, with_index=False):
        log_print("Create dynamic caches and load data")

        data_model = ModelTypes.VALUE_ALL_TYPES.value
        created_caches = []
        with PiClient(self.ignite, self.get_client_config(),
                      nodes_num=1) as piclient:
            dynamic_caches_factory = DynamicCachesFactory()
            async_ops = []
            for method in dynamic_caches_factory.dynamic_cache_configs:
                cache_name = "cache_group_%s" % method
                print_green('Loading %s...' % cache_name)

                gateway = piclient.get_gateway()
                ignite = piclient.get_ignite()

                ignite.getOrCreateCache(
                    getattr(dynamic_caches_factory, method)(cache_name,
                                                            gateway=gateway))

                if with_index:
                    # NOTE: this reassigns the same model selected above, so
                    # as written with_index does not change the loaded model
                    data_model = ModelTypes.VALUE_ALL_TYPES.value
                async_operation = create_async_operation(
                    create_streamer_operation,
                    cache_name,
                    1,
                    self.max_key + 2,
                    value_type=data_model)
                async_ops.append(async_operation)
                async_operation.evaluate()
                created_caches.append(cache_name)

            log_print('Waiting async results...', color='blue')
            # wait for streamer to complete
            for async_op in async_ops:
                async_op.getResult()

        log_print("Dynamic caches with data created")
        return created_caches
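
Example #4 loads through a streamer operation instead of the putAll used elsewhere on this page; only the operation factory and key range differ. A minimal single-cache sketch, with stream_cache being an illustrative name:

def stream_cache(cache_name, max_key, data_model):
    """Stream keys [1, max_key + 2) of data_model into cache_name.

    A sketch of the per-cache streamer load from Example #4; blocks
    until the streamer has finished.
    """
    async_operation = create_async_operation(
        create_streamer_operation, cache_name,
        1, max_key + 2, value_type=data_model)
    async_operation.evaluate()
    return async_operation.getResult()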
Example #5
    def _calc_checksums_over_dynamic_caches(self):
        log_print("Calculating checksums")

        with PiClient(self.ignite, self.get_client_config()):
            dynamic_caches_factory = DynamicCachesFactory()

            async_operations = []
            for method in dynamic_caches_factory.dynamic_cache_configs:
                cache_name = "cache_group_%s" % method

                checksum_operation = create_checksum_operation(
                    cache_name, 1, 1000)
                async_operation = create_async_operation(checksum_operation)
                async_operations.append(async_operation)
                async_operation.evaluate()

            checksums = ''

            for async_operation in async_operations:
                checksums += str(async_operation.getResult())

        log_print("Calculating checksums done")

        return checksums
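
A typical use of the checksum string above is a before/after comparison around a disruptive operation such as a restore or restart. A minimal sketch, assuming the two helpers from Examples #4 and #5 live on the same test class; _verify_caches_survive and disruptive_action are illustrative names:

    def _verify_caches_survive(self, disruptive_action):
        """Load data, run the disruptive action, then compare checksums.

        disruptive_action is any zero-argument callable, e.g. a snapshot
        restore or a cluster restart.
        """
        self._create_dynamic_caches_with_data()
        before = self._calc_checksums_over_dynamic_caches()
        disruptive_action()
        after = self._calc_checksums_over_dynamic_caches()
        assert before == after, 'cache checksums changed after the action'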
Example #6
    def test_24_fitness_two_clients_with_snapshot(self):
        """
        """
        created_caches = []

        self.ignite_new_version.cu.activate()

        with PiClient(self.ignite_new_version, self.client_config,
                      nodes_num=1) as piclient_new:

            dynamic_caches_factory = DynamicCachesFactory()
            async_ops = []
            for method in dynamic_caches_factory.dynamic_cache_configs:
                cache_name = "cache_group_%s" % method
                log_print('Loading {}...'.format(cache_name), color='green')
                piclient_new.get_ignite().getOrCreateCache(
                    getattr(dynamic_caches_factory, method)(cache_name))

                async_operation = create_async_operation(
                    create_put_all_operation,
                    cache_name,
                    1,
                    1001,
                    10,
                    value_type=self.data_model)
                async_ops.append(async_operation)
                async_operation.evaluate()
                created_caches.append(cache_name)

            log_print('Waiting async results...', color='debug')
            # wait for the putAll operations to complete
            for async_op in async_ops:
                async_op.getResult()

            with PiClient(self.ignite_old_version,
                          self.client_config,
                          nodes_num=2,
                          new_instance=True) as piclient_old:

                log_print('Taking a full snapshot', color='green')
                self.ignite_new_version.su.snapshot_utility(
                    'snapshot', '-type=full')
                log_print('Snapshot completed', color='debug')

                sorted_cache_names = sorted(
                    piclient_old.get_ignite().cacheNames().toArray())

                async_operations = []
                cache_operation = {}
                cache_checksum = {}
                for cache_name in sorted_cache_names:
                    async_operation = create_async_operation(
                        create_checksum_operation, cache_name, 1, 10000)
                    async_operations.append(async_operation)
                    cache_operation[async_operation] = cache_name
                    async_operation.evaluate()

                checksums = ''

                for async_operation in async_operations:
                    result = str(async_operation.getResult())
                    cache_checksum[cache_operation.get(
                        async_operation)] = result
                    checksums += result

                log_print(checksums, color='debug')
                self.ignite_new_version.su.snapshot_utility(
                    'restore', '-id={}'.format(
                        self.ignite_new_version.su.get_created_snapshot_id(1)))
                log_print('Test completed', color='debug')
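
Example #6 pairs a full snapshot with a restore by id via snapshot_utility. A condensed sketch of that pair, reusing the wrapper calls shown above; snapshot_and_restore is an illustrative name:

def snapshot_and_restore(ignite):
    """Take a full snapshot, then restore it by its id.

    get_created_snapshot_id(1) looks up the id of the snapshot that was
    just taken, as in Example #6.
    """
    ignite.su.snapshot_utility('snapshot', '-type=full')
    snapshot_id = ignite.su.get_created_snapshot_id(1)
    ignite.su.snapshot_utility('restore', '-id={}'.format(snapshot_id))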
Example #7
    def test_24_fitness_set_baseline_with_properties(self):
        """
        This test checks the cluster behaviour with option GG_DISABLE_SNAPSHOT_ON_BASELINE_CHANGE_WITH_ENABLED_PITR
        that could be set in different ways:
            1. Set at one of the server nodes.
            2. Set on some client node/nodes.
        """
        created_caches = []

        self.ignite_old_version.cu.activate()

        # Preloading
        with PiClient(self.ignite_new_version, self.client_config,
                      nodes_num=1) as piclient:

            dynamic_caches_factory = DynamicCachesFactory()
            async_ops = []
            for method in dynamic_caches_factory.dynamic_cache_configs:
                cache_name = "cache_group_%s" % method
                log_print('Loading {}...'.format(cache_name), color='green')
                piclient.get_ignite().getOrCreateCache(
                    getattr(dynamic_caches_factory, method)(cache_name))

                async_operation = create_async_operation(
                    create_put_all_operation,
                    cache_name,
                    1,
                    1001,
                    10,
                    value_type=self.data_model)
                async_ops.append(async_operation)
                async_operation.evaluate()
                created_caches.append(cache_name)

            log_print('Waiting async results...', color='debug')
            # wait for the putAll operations to complete
            for async_op in async_ops:
                async_op.getResult()

        util_sleep_for_a_while(20)

        new_client_config = Ignite.config_builder.get_config(
            'client', config_set_name='24_fit_with_consist_id')
        # append the property to node 1's JVM options; the commented-out
        # alternative below would set it on the client node via PiClient
        jvm_options = self.ignite_new_version.get_jvm_options(1)
        jvm_options.append(
            '-DGG_DISABLE_SNAPSHOT_ON_BASELINE_CHANGE_WITH_ENABLED_PITR=true')
        # with PiClient(self.ignite_new_version, self.client_config,
        #               jvm_options=jvm_options, nodes_num=1) as piclient:
        with PiClient(self.ignite_new_version, self.client_config,
                      nodes_num=1) as piclient:
            for i in range(1, 5):
                self.ignite_old_version.cu.control_utility('--baseline')
                log_print('Stopping node {}'.format(i), color='green')

                jvm_options = self.ignite_new_version.get_jvm_options(i)
                jvm_options.append(
                    '-DGG_DISABLE_SNAPSHOT_ON_BASELINE_CHANGE_WITH_ENABLED_PITR=false'
                )
                self.ignite_new_version.set_node_option(
                    '*', 'config',
                    Ignite.config_builder.get_config(
                        'server', config_set_name='24_fit_with_consist_id'))
                log_print("Starting node {} with new consistent id".format(i),
                          color='debug')
                self.ignite_new_version.start_nodes(i,
                                                    already_nodes=4,
                                                    other_nodes=4,
                                                    timeout=240)
                log_print("Changing baseline", color='debug')
                self.ignite_old_version.cu.set_current_topology_as_baseline()
                util_sleep_for_a_while(
                    60, msg='Wait for rebalance to complete')

        log_print('Test is done')
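
Both fitness tests toggle GG_DISABLE_SNAPSHOT_ON_BASELINE_CHANGE_WITH_ENABLED_PITR by appending a -D flag to a node's JVM options before that node is (re)started. A small sketch of that step, assuming, as the tests imply, that get_jvm_options returns the node's live options list; the function name is illustrative:

def set_pitr_baseline_property(ignite, node_idx, enabled):
    """Append the GG_DISABLE_SNAPSHOT_ON_BASELINE_CHANGE_WITH_ENABLED_PITR
    -D flag to a node's JVM options; takes effect on the node's next start.
    """
    jvm_options = ignite.get_jvm_options(node_idx)
    jvm_options.append(
        '-DGG_DISABLE_SNAPSHOT_ON_BASELINE_CHANGE_WITH_ENABLED_PITR=%s'
        % ('true' if enabled else 'false'))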
Example #8
    def test_24_fitness_rolling_upgrade(self):
        """
        This test checks the main rolling upgrade scenario under the load:
            1. Old cluster up and running (consistent_id's are not set).
            2. First cycle (upgrade to new version and set property
                GG_DISABLE_SNAPSHOT_ON_BASELINE_CHANGE_WITH_ENABLED_PITR):
            3. Second cycle (set correct consistent_id with adding to baseline topology).

        """
        created_caches = []

        self.ignite_old_version.cu.activate()

        with PiClient(self.ignite_new_version, self.client_config,
                      nodes_num=1) as piclient:

            dynamic_caches_factory = DynamicCachesFactory()
            async_ops = []
            for method in dynamic_caches_factory.dynamic_cache_configs:
                cache_name = "cache_group_%s" % method
                log_print('Loading {}...'.format(cache_name), color='green')
                piclient.get_ignite().getOrCreateCache(
                    getattr(dynamic_caches_factory, method)(cache_name))

                async_operation = create_async_operation(
                    create_put_all_operation,
                    cache_name,
                    1,
                    1001,
                    10,
                    value_type=self.data_model)
                async_ops.append(async_operation)
                async_operation.evaluate()
                created_caches.append(cache_name)

            log_print('Waiting async results...', color='debug')
            # wait for the putAll operations to complete
            for async_op in async_ops:
                async_op.getResult()

        util_sleep_for_a_while(60)

        with PiClient(self.ignite_old_version, self.client_config,
                      nodes_num=4) as piclient:
            cache_names = piclient.get_ignite().cacheNames()

            # Start transaction loading for TTL caches
            with TransactionalLoading(MixedTestLoadingAdapter(self),
                                      config_file=self.client_config,
                                      loading_profile=LoadingProfile(
                                          delay=0,
                                          transaction_timeout=100000,
                                          run_for_seconds=600)):
                util_sleep_for_a_while(20)
                log_print('Rolling upgrade', color='green')
                async_ops = []
                for cache_name in [
                        name for name in cache_names.toArray()
                        if name.startswith("M2_PRODUCT")
                ]:
                    async_operation = create_async_operation(
                        create_put_all_operation,
                        cache_name,
                        1001,
                        400001,
                        10,
                        value_type=ModelTypes.VALUE_ALL_TYPES.value)
                    async_ops.append(async_operation)
                    async_operation.evaluate()

                # First cycle: upgrade version and set property.
                for i in range(1, 5):
                    self.ignite_old_version.cu.control_utility('--baseline')
                    log_print('Stopping node {}'.format(i), color='green')
                    self.ignite_old_version.kill_nodes(i)

                    self.ignite_new_version.cleanup_work_dir(i)
                    folder = self.ignite_old_version.get_work_dir(i)
                    log_print(folder, color='debug')
                    self.ignite_new_version.copy_work_dir_from(i, folder)

                    jvm_options = self.ignite_new_version.get_jvm_options(i)
                    jvm_options.append(
                        '-DGG_DISABLE_SNAPSHOT_ON_BASELINE_CHANGE_WITH_ENABLED_PITR=true'
                    )

                    util_sleep_for_a_while(10)
                    self.ignite_new_version.start_nodes(i,
                                                        already_nodes=(4 - i),
                                                        other_nodes=(4 - i),
                                                        timeout=240)
                    self.ignite_new_version.cu.control_utility('--baseline')

                for async_op in async_ops:
                    async_op.getResult()

                util_sleep_for_a_while(30)
                log_print('Change consistent ID', color='green')

                self.ignite_new_version.set_node_option(
                    '*', 'config',
                    Ignite.config_builder.get_config(
                        'server', config_set_name='24_fit_with_consist_id'))

                # Second cycle - change consistent_id and add to baseline topology.
                for i in range(1, 5):
                    self.ignite_new_version.cu.control_utility('--baseline')
                    log_print('Stopping node {}'.format(i), color='green')
                    self.ignite_new_version.kill_nodes(i)
                    log_print(
                        "Starting node {} with new consistent id".format(i),
                        color='debug')
                    self.ignite_new_version.start_nodes(i, timeout=240)
                    log_print("Changing baseline", color='debug')
                    self.ignite_new_version.cu.set_current_topology_as_baseline(
                    )
                    util_sleep_for_a_while(
                        60, msg='Wait for rebalance to complete')

                log_print('Transactional loading done', color='green')

            # Verify the client node can still interact with the cluster by
            # calculating checksums from it.
            sorted_cache_names = sorted(
                piclient.get_ignite().cacheNames().toArray())

            async_operations = []
            cache_operation = {}
            for cache_name in sorted_cache_names:
                async_operation = create_async_operation(
                    create_checksum_operation, cache_name, 1, 10000)
                async_operations.append(async_operation)
                cache_operation[async_operation] = cache_name
                async_operation.evaluate()

            checksums = ''
            cache_checksum = {}
            for async_operation in async_operations:
                result = str(async_operation.getResult())
                cache_checksum[cache_operation.get(async_operation)] = result
                checksums += result

            log_print('Calculating checksums done')
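
Examples #6 and #8 collect per-cache checksums through the same async pattern. A sketch of that collection as a standalone helper, reusing the operation helpers shown above; collect_cache_checksums is an illustrative name, and ignite_api stands for the object returned by piclient.get_ignite():

def collect_cache_checksums(ignite_api, start_key=1, end_key=10000):
    """Return a {cache_name: checksum} dict over all caches in the grid."""
    async_operations = {}
    for cache_name in sorted(ignite_api.cacheNames().toArray()):
        async_operation = create_async_operation(
            create_checksum_operation, cache_name, start_key, end_key)
        async_operations[async_operation] = cache_name
        async_operation.evaluate()

    # gather results in submission order; dicts preserve insertion order
    return {
        cache_name: str(async_operation.getResult())
        for async_operation, cache_name in async_operations.items()
    }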