Code example #1
    def setup(self):
        default_context = self.contexts['default']
        default_context.add_context_variables(
            authentication_enabled=False,
            zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
        )

        auth_context = self.create_test_context('auth_enabled')
        auth_context.add_context_variables(
            authentication_enabled=True,
            zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
            auth_login='******',
            auth_password='******',
            client_auth_login='******',
            client_auth_password='******',
        )

        context_auth_server = self.create_test_context(
            'auth_enabled_server_creds')
        context_auth_server.add_context_variables(
            authentication_enabled=True,
            zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
            auth_login='******',
            auth_password='******',
            client_auth_login='******',
            client_auth_password='******',
        )

        super().setup()
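
All of the snippets in this list gate optional features (ZooKeeper, snapshots, authentication, PITR, ...) through an is_enabled helper applied to raw config values. The helper belongs to the test framework these excerpts come from (Tiden) and its real implementation is not shown here; the function below is only a minimal illustrative stand-in, assuming the helper simply normalizes the usual truthy spellings found in YAML configs.

def is_enabled(value, default=False):
    # Illustrative stand-in only, not the framework's implementation:
    # True, 'true', 'yes', 'on' and '1' (case-insensitive) count as enabled,
    # None falls back to `default`, anything else is treated as disabled.
    if value is None:
        return default
    if isinstance(value, bool):
        return value
    return str(value).strip().lower() in ('true', 'yes', 'on', '1')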
Code example #2
    def setup(self):
        default_context = self.contexts['default']
        default_context.add_context_variables(
            authentication_enabled=True,
            zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
            auth_login='******',
            auth_password='******',
        )

        self.auth_creds = [
            'no_access',
            'task_execute',
            'admin_ops',
            'admin_cache',
        ]

        for auth_cred_name in self.auth_creds:
            context = self.create_test_context(auth_cred_name)
            context.add_context_variables(
                authentication_enabled=True,
                zookeeper_enabled=is_enabled(
                    self.config.get('zookeeper_enabled')),
                auth_login=auth_cred_name,
                auth_password=auth_cred_name,
            )

        pp = PrettyPrinter()
        print_blue('Credentials: \n' + pp.pformat(self.auth_creds))

        super().setup()
Code example #3
    def test_two_blinking_nodes_clean_lfs(self):
        with PiClient(self.ignite, self.get_client_config()):

            with ExitStack() as stack:
                if is_enabled(self.config.get('zookeeper_enabled')) and \
                        is_enabled(self.config.get('zookeeper_nodes_restart')):
                    stack.enter_context(ZkNodesRestart(self.zoo, 2))

                for iteration in range(0, self.iterations):
                    log_print("Iteration {}/{}".format(str(iteration + 1),
                                                       str(self.iterations)),
                              color='blue')

                    self.assert_nodes_alive()

                    self.ignite.kill_node(2)
                    self.cleanup_lfs(2)
                    # self.ignite.start_node(2)
                    self.start_node(2)

                    self.ignite.kill_node(3)

                    util_sleep(10)

                    # self.ignite.start_node(3)
                    self.start_node(3)

                    self.ignite.jmx.wait_for_finish_rebalance(
                        self.rebalance_timeout, self.group_names)

        tiden_assert_equal(
            0, self.ignite.find_exception_in_logs('java.lang.AssertionError'),
            "# of AssertionError")
Code example #4
    def setup(self):
        zookeeper_enabled = is_enabled(
            self.tiden.config.get('zookeeper_enabled'))

        self.create_app_config_set(
            Ignite,
            'base',
            additional_configs=['caches.tmpl.xml'],
            snapshots_enabled=True,
            logger=False,
            caches_list_file='caches_base.xml',
            logger_path='%s/ignite-log4j2.xml' %
            self.tiden.config['rt']['remote']['test_module_dir'],
            disabled_cache_configs=False,
            zookeeper_enabled=zookeeper_enabled,
            num_backups=self.pme_config['num_backups'],
            num_partitions=self.pme_config['num_partitions'],
        )

        # self.create_app_config_set(Ignite, 'snapshot',
        #                            snapshots_enabled=False,
        #                            logger=False,
        #                            logger_path='%s/ignite-log4j2.xml' % self.tiden.config['rt']['remote'][
        #                                'test_module_dir'],
        #                            disabled_cache_configs=False,
        #                            zookeeper_enabled=False)

        self._fix_max_servers()
        super().setup()
Code example #5
    def setup(self):
        self.ignite.jmx.rebalance_collect_timeout = 5

        self.ignite.activate_module('ignite-log4j2')

        self.the_glue_timeout = self.config.get('the_glue_timeout', None)

        default_context = self.contexts['default']
        default_context.add_context_variables(
            caches_file='caches.xml',
            snapshots_enabled=is_enabled(self.config.get('snapshots_enabled')),
            pitr_enabled=is_enabled(self.config.get('pitr_enabled')),
            zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
            rebalance_pool_size=4,
            caches=True,
            logger=True,
            logger_path='%s/ignite-log4j2.xml' %
            self.config['rt']['remote']['test_module_dir'],
        )

        indexed_types = self.create_test_context('indexed_types')
        indexed_types.add_context_variables(
            caches_file='caches_index.xml',
            snapshots_enabled=is_enabled(self.config.get('snapshots_enabled')),
            pitr_enabled=is_enabled(self.config.get('pitr_enabled')),
            zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
            rebalance_pool_size=4,
            caches=True,
            logger=True,
            logger_path='%s/ignite-log4j2.xml' %
            self.config['rt']['remote']['test_module_dir'],
        )

        in_memory_context = self.create_test_context('in_memory')
        in_memory_context.add_context_variables(
            caches_file='caches.xml',
            snapshots_enabled=is_enabled(self.config.get('snapshots_enabled')),
            pitr_enabled=is_enabled(self.config.get('pitr_enabled')),
            zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
            rebalance_pool_size=4,
            in_memory=True,
            caches=False,
            logger=True,
            logger_path='%s/ignite-log4j2.xml' %
            self.config['rt']['remote']['test_module_dir'],
        )

        super().setup()

        self.logger = get_logger('tiden')
        self.logger.set_suite('[TestStressGrid]')
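
The three contexts registered above ('default', 'indexed_types', 'in_memory') are selected per test at run time; code example #12 further down activates the 'in_memory' context with set_current_context before starting the grid. The method below is only a hypothetical sketch of that selection step (the test name is made up; start_grid and its skip_activation flag are borrowed from code example #12).

    def test_with_in_memory_context(self):
        # Hypothetical sketch: switch to the 'in_memory' context created in
        # setup() so the rendered configs carry in_memory=True / caches=False,
        # then start the grid the same way code example #12 does.
        self.set_current_context('in_memory')
        self.start_grid(skip_activation=True)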
Code example #6
    def setup(self):
        default_context = self.contexts['default']
        self.auth_login = self.config.get('auth_login', 'server_user')
        self.auth_password = self.config.get('auth_password',
                                             'server_password')
        default_context.add_context_variables(
            authentication_enabled=True,
            zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
            auth_login=self.auth_login,
            auth_password=self.auth_password,
        )

        insecure_context = self.create_test_context('insecure')
        insecure_context.add_context_variables(
            authentication_enabled=False,
            zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
        )

        super().setup()
Code example #7
    def test_loading_blinking_node_baseline(self):
        with PiClient(self.ignite, self.get_client_config()) as piclient:
            self.wait_for_running_clients_num(piclient.nodes_num, 90)

            with ExitStack() as stack:
                stack.enter_context(
                    TransactionalLoading(
                        self,
                        cross_cache_batch=2,
                        skip_atomic=True,
                        post_checksum_action=self.idle_verify_action))

                if is_enabled(self.config.get('zookeeper_enabled')) and \
                        is_enabled(self.config.get('zookeeper_nodes_restart')):
                    stack.enter_context(ZkNodesRestart(self.zoo, 2))

                for iteration in range(0, self.iterations):
                    log_print("Iteration {}/{}".format(str(iteration + 1),
                                                       str(self.iterations)),
                              color='blue')

                    self.assert_nodes_alive()

                    self.ignite.kill_node(2)
                    self.ignite.wait_for_topology_snapshot(
                        server_num=len(self.ignite.get_alive_default_nodes()))

                    self.cu.set_current_topology_as_baseline()

                    util_sleep(5)

                    self.start_node(2)
                    self.ignite.wait_for_topology_snapshot(
                        server_num=len(self.ignite.get_alive_default_nodes()))
                    self.cu.set_current_topology_as_baseline()

                    self.ignite.jmx.wait_for_finish_rebalance(
                        self.rebalance_timeout, self.group_names)

        tiden_assert_equal(
            0, self.ignite.find_exception_in_logs('java.lang.AssertionError'),
            "# of AssertionError")
Code example #8
    def test_loading_blinking_two_nodes_blt_and_extra_node(self):
        with PiClient(self.ignite, self.get_client_config()):
            additional_node = self.ignite.add_additional_nodes(
                self.get_server_config())[0]
            self.ignite.start_additional_nodes(additional_node)

            with ExitStack() as stack:
                stack.enter_context(
                    TransactionalLoading(
                        self,
                        cross_cache_batch=2,
                        skip_atomic=True,
                        post_checksum_action=self.idle_verify_action))

                if is_enabled(self.config.get('zookeeper_enabled')) and \
                        is_enabled(self.config.get('zookeeper_nodes_restart')):
                    stack.enter_context(ZkNodesRestart(self.zoo, 2))

                for iteration in range(0, self.iterations):
                    log_print("Iteration {}/{}".format(str(iteration + 1),
                                                       str(self.iterations)),
                              color='blue')

                    self.assert_nodes_alive()

                    self.ignite.kill_node(2)
                    self.ignite.kill_node(additional_node)

                    # self.ignite.start_node(2)
                    # self.ignite.start_additional_nodes(additional_node)
                    self.start_node(2)
                    self.start_node(additional_node)

                    self.ignite.jmx.wait_for_finish_rebalance(
                        self.rebalance_timeout, self.group_names)

        tiden_assert_equal(
            0, self.ignite.find_exception_in_logs('java.lang.AssertionError'),
            "# of AssertionError")
Code example #9
    def setup(self):
        self.ignite = IgniteBenchAdapter(self.config, self.ssh)
        self.cu = self.ignite.cu
        self.su = self.ignite.su
        self.zoo = self.ignite.zoo

        self.consistency_check = is_enabled(
            self.config.get('consistency_check_enabled', False))

        self.optimistic_possibility = self.config.get('optimistic_possibility',
                                                      0.5)
        self.cross_cache_batch = self.config.get('cross_cache_batch', 4)

        self.tx_delay = self.config.get('tx_delay', 4)
        self.collect_timeout = self.config.get('collect_timeout', 100)
        self.collect_timeout_metrics_thread = self.config.get(
            'collect_timeout_metrics_thread', 100)

        zookeeper_enabled = is_enabled(self.config.get('zookeeper_enabled'))
        sbt_model_enabled = is_enabled(self.config.get('sbt_model_enabled'))

        default_context = self.contexts['default']
        default_context.add_config('caches.xml', 'caches_default.xml')
        default_context.add_context_variables(
            caches_file="caches_default.xml",
            zookeeper_enabled=zookeeper_enabled,
            sbt_model_enabled=sbt_model_enabled,
            num_backups=self.num_backups,
            num_partitions=self.num_partitions,
        )

        if zookeeper_enabled:
            self.zoo.deploy_zookeeper()
            default_context.add_context_variables(
                zoo_connection=self.zoo._get_zkConnectionString(), )

        super().setup()
Code example #10
    def setup(self):
        self.zoo = Zookeeper(self.config, self.ssh)

        zookeeper_enabled = is_enabled(self.config.get('zookeeper_enabled'))
        default_context = self.contexts['default']
        default_context.add_context_variables(
            caches_file="caches.xml",
            zookeeper_enabled=zookeeper_enabled,
        )

        if zookeeper_enabled:
            self.zoo.deploy_zookeeper()
            default_context.add_context_variables(
                zoo_connection=self.zoo._get_zkConnectionString(), )

        super().setup()
Code example #11
    def setup(self):
        self.auth_creds = {
            '': {
                'name': 'server',
                'context': 'default',
            },
            'read_only': {
                'name': 'read_only',
                'context': 'read_only',
            },
            'no_access': {
                'name': 'no_access',
                'context': 'no_access',
            },
        }
        pp = PrettyPrinter()

        for auth_cred_name in self.auth_creds:
            # aka 'server_login' / 'server_password', etc.
            default_login = self.auth_creds[auth_cred_name]['name'] + '_user'
            default_password = self.auth_creds[auth_cred_name][
                'name'] + '_password'
            self.auth_creds[auth_cred_name].update({
                'login': self.config.get(
                    'auth_' + auth_cred_name + 'login', default_login),
                'password': self.config.get(
                    'auth_' + auth_cred_name + 'password', default_password),
            })
            context_name = self.auth_creds[auth_cred_name]['context']
            if context_name != 'default':
                context = self.create_test_context(context_name)
            else:
                context = self.contexts['default']

            context.add_context_variables(
                authentication_enabled=True,
                zookeeper_enabled=is_enabled(
                    self.config.get('zookeeper_enabled')),
                auth_login=self.auth_creds[auth_cred_name]['login'],
                auth_password=self.auth_creds[auth_cred_name]['password'],
            )

        print_blue('Credentials: \n' + pp.pformat(self.auth_creds))
        super().setup()
Code example #12
    def test_cycling_restart_grid_dynamic_caches_no_client(self):
        """
        Scenario The Glue
        (Assertions should be enabled)

        1. Start grid, load some data
        2. In the loop:
            2.1 define node restart timeout (0.5 - 2.0 seconds)
            2.2 Load more data
            2.3 Restart each node with defined timeout (DOES NOT LOOK ON TOPOLOGY SNAPSHOT)
            2.4 Try to activate, check AssertionErrors
            2.5 Try to baseline (If 2 operation failed -> PME, kill all nodes, start new test iteration)
            2.6 Try to load data
            2.7 Try to calculate checksum

        :return:
        """
        import random

        PiClient.read_timeout = 240

        # sleep_for_time = float(random.randrange(1, 15, 1)) / 5

        self.set_current_context('in_memory')

        self.util_copy_piclient_model_to_libs()
        self.ignite.set_activation_timeout(240)
        self.ignite.set_snapshot_timeout(240)
        self.ignite.set_node_option('*', 'jvm_options', ['-ea'])
        self.su.clear_snapshots_list()
        self.start_grid(skip_activation=True)

        self.start_dynamic_caches_with_node_filter()

        last_loaded_key = 1000
        PiClientIgniteUtils.load_data_with_streamer(self.ignite,
                                                    self.get_client_config(),
                                                    end_key=last_loaded_key,
                                                    jvm_options=['-ea'])

        nodes_before = self.ignite.get_alive_default_nodes()

        iterations = 50
        last_loaded_key += 1
        for i in range(0, iterations):
            with ExitStack() as stack:
                # load data before start zk restart thread
                self.start_dynamic_caches_with_node_filter()
                # PiClientIgniteUtils.wait_for_running_clients_num(self.ignite, 0, 120)
                PiClientIgniteUtils.load_data_with_streamer(
                    self.ignite,
                    self.get_client_config(),
                    start_key=last_loaded_key,
                    end_key=last_loaded_key + 500,
                    jvm_options=['-ea'],
                    check_clients=True)
                last_loaded_key += 500

                if self.get_context_variable('zookeeper_enabled') and \
                        is_enabled(self.config.get('zookeeper_nodes_restart')):
                    stack.enter_context(ZkNodesRestart(self.zoo, 3))

                log_print('Current iteration %s from %s' % (i, iterations),
                          color='debug')

                sleep_for_time = float(
                    self.the_glue_timeout) if self.the_glue_timeout else round(
                        random.uniform(0.5, 2.5), 1)
                log_print(
                    "In this run we are going to sleep for {} seconds after each node restart"
                    .format(sleep_for_time),
                    color='green')

                log_print('Trying to load data into created/existing caches',
                          color='yellow')

                log_print("Round restart")
                for node_id in self.ignite.get_alive_default_nodes():
                    self.ignite.kill_node(node_id)
                    self.ignite.start_node(node_id, skip_topology_check=True)
                    sleep(sleep_for_time)

                log_print("Wait for topology messages")
                for node_id in self.ignite.get_all_default_nodes():
                    self.ignite.update_started_node_status(node_id)

                sleep(15)

            last_loaded_key = self.verify_cluster(nodes_before,
                                                  last_loaded_key)
Code example #13
    def setup(self):
        self.auth_creds = {
            '': {
                'user': '******',
                'pwd': 'qwe123',
                'context': 'default',
                'description': '{ "defaultAllow": "true" }'
            },
            'read_only': {
                'user': '******',
                'pwd': 'qwe123',
                'context': 'read_only',
                'description': (
                    '{'
                    '{ "cache": "cache_ro*", "permissions":["CACHE_READ"] }, '
                    '{ "cache": "cache_rw*", "permissions":["CACHE_READ", "CACHE_PUT", "CACHE_REMOVE"] }, '
                    '{ "task": "*", "permissions":["TASK_EXECUTE"] }, '
                    '{ "system":["ADMIN_CACHE", "CACHE_CREATE"] }, '
                    '"defaultAllow":"false"'
                    '}'
                )
            },
            'utility': {
                'user': '******',
                'pwd': 'qwe123',
                'context': 'utility',
                'description': '{ "defaultAllow":"true" }'
            },
        }
        pp = PrettyPrinter()

        for auth_cred_name in self.auth_creds:
            context_name = self.auth_creds[auth_cred_name]['context']
            if context_name != 'default':
                context = self.create_test_context(context_name)
            else:
                context = self.contexts['default']

            context.add_context_variables(
                ssl_enabled=is_enabled(self.config.get('ssl_enabled')),
                authentication_enabled=True,
                client_key_store_file_path='%s/client.jks' %
                self.config['rt']['remote']['test_module_dir'],
                server_key_store_file_path='%s/server.jks' %
                self.config['rt']['remote']['test_module_dir'],
                trust_store_file_path='%s/trust.jks' %
                self.config['rt']['remote']['test_module_dir'],
                zookeeper_enabled=is_enabled(
                    self.config.get('zookeeper_enabled')),
                pitr_enabled=is_enabled(self.config.get('pitr_enabled')),
                rolling_updates_enabled=is_enabled(
                    self.config.get('rolling_updates_enabled')),
                auth_login=self.auth_creds[auth_cred_name]['user'],
                auth_password=self.auth_creds[auth_cred_name]['pwd'],
            )

        self.cu.enable_authentication('utility', 'qwe123')
        self.su.enable_authentication('utility', 'qwe123')

        print_blue('Credentials: \n' + pp.pformat(self.auth_creds))
        super().setup()
Code example #14
    def _run_iteration(self, ignite, iteration):
        """
        One iteration of server PME benchmark is as follows:

            1. start transactional loading, sleep `warmup_servers_delay` so that load stabilize
            2.   kill random N nodes, measure LEAVE exchange time, sleep `stabilization_delay`
            3.   restart killed nodes, measure JOIN exchange time, sleep `cooldown_delay`
            4. stop load

        :param ignite:
        :param iteration:
        :return:
        """
        log_print(
            "===> PME {} Server(s) Left-Join Benchmark iteration {}/{} started "
            .format(self.config['num_servers_to_kill'], iteration,
                    self.config['iterations']),
            color='green')

        # if debug:
        #     from pt.util import read_yaml_file
        #     from os.path import join
        #     base_path = 'pt/tests/res/exchanges'
        #     exch_test = iteration
        #     start_exch = read_yaml_file(join(base_path, 'start_exch.%d.yaml' % exch_test))
        #     finish_exch = read_yaml_file(join(base_path, 'finish_exch.%d.yaml' % exch_test))
        #     merge_exch = read_yaml_file(join(base_path, 'merge_exch.%d.yaml' % exch_test))
        #     self.test_class.exchanges = ExchangesCollection.create_from_log_data(start_exch, finish_exch, merge_exch)
        #     self.test_class.new_topVer = 5
        #     x1_leave_time, x2_time = self.test_class._measurements_after_test('test_leave', skip_exch=1)
        #     self.test_class.new_topVer = 6
        #     x1_join_time, x2_time = self.test_class._measurements_after_test('test_join', skip_exch=1)
        #
        #     return x1_leave_time, x1_join_time

        loading_client_hosts = self._get_loading_client_hosts()
        num_servers = self._get_num_server_nodes()
        num_servers_to_kill = self.config['num_servers_to_kill']
        kill_coordinator = self.config['kill_coordinator']

        metrics = None
        ex = None
        x1_join_time = None
        x1_leave_time = None

        try:
            # start loading clients ...
            with PiClient(ignite,
                          self.test_class.client_config,
                          client_hosts=loading_client_hosts,
                          clients_per_host=self.config.get(
                              'loading_clients_per_host', 1)):
                # ... and initiate transactional load
                with TransactionalLoading(
                        self.test_class,
                        ignite=ignite,
                        kill_transactions_on_exit=(
                            self.config['kill_transactions_on_exit']),
                        cross_cache_batch=self.config['cross_cache_batch'],
                        skip_atomic=self.config['skip_atomic'],
                        skip_consistency_check=(
                            not self.config['consistency_check_enabled']),
                        loading_profile=LoadingProfile(
                            delay=self.config['tx_delay'],
                            commit_possibility=(
                                self.config['commit_possibility']),
                            start_key=1,
                            end_key=self.config['load_factor'] - 1,
                            transaction_timeout=(
                                self.config['transaction_timeout'])),
                        tx_metrics=[
                            'txCreated', 'txCommit', 'txFailed', 'txRollback'
                        ]) as tx_loading:
                    metrics = tx_loading.metrics

                    # pick random server nodes
                    node_ids = ignite.get_random_server_nodes(
                        num_servers_to_kill,
                        use_coordinator=kill_coordinator,
                        node_ids=self.test_class.server_node_ids,
                    )

                    expected_total_server_num = num_servers - len(node_ids)

                    # ... wait load stabilize
                    util_sleep_for_a_while(self.config['warmup_servers_delay'],
                                           "Before LEAVE")

                    if is_enabled(self.config.get('jfr_enabled', False)):
                        ignite.make_cluster_jfr(60)

                    util_sleep_for_a_while(2)
                    self.test_class._prepare_before_test(
                        ignite, tx_loading,
                        'LEAVE %d server(s)' % len(node_ids))

                    # ... kill selected random nodes
                    ignite.kill_nodes(*node_ids)
                    ignite.wait_for_topology_snapshot(
                        server_num=expected_total_server_num)
                    tx_loading.metrics_thread.add_custom_event(
                        '%d server(s) left' % len(node_ids))

                    new_topVer = self.test_class._get_new_top_after_test(
                        ignite)
                    self.test_class._wait_exchange_finished(ignite, new_topVer)

                    x1_leave_time, x2_time = self.test_class._measurements_after_test(
                        'LEAVE %d server(s)' % len(node_ids), skip_exch=1)

                    if is_enabled(self.config.get('heapdump_enabled', False)):
                        ignite.make_cluster_heapdump(
                            [1], 'after_%d_server_leave' % len(node_ids))

                    # ... wait exchange stabilize
                    util_sleep_for_a_while(self.config['stabilization_delay'],
                                           "After LEAVE, before JOIN")

                    if self.config['measure_restart_nodes']:
                        self.test_class._prepare_before_test(
                            ignite, tx_loading,
                            'JOIN %d server(s)' % len(node_ids))

                    # ... restart killed nodes
                    ignite.start_nodes(*node_ids)
                    ignite.wait_for_topology_snapshot(
                        server_num=expected_total_server_num + len(node_ids))

                    if self.config['measure_restart_nodes']:
                        tx_loading.metrics_thread.add_custom_event(
                            '%d server(s) joined' % len(node_ids))

                        new_topVer = self.test_class._get_new_top_after_test(
                            ignite)
                        self.test_class._wait_exchange_finished(
                            ignite, new_topVer)
                        x1_join_time, x2_time = self.test_class._measurements_after_test(
                            'JOIN %d server(s)' % len(node_ids), skip_exch=1)
                        # if is_enabled(self.config.get('heapdump_enabled', False)):
                        #     ignite.make_cluster_heapdump([1], 'after_%d_server_join' % len(node_ids))

                    # ... wait exchange cooldown
                    util_sleep_for_a_while(self.config['cooldown_delay'],
                                           "After JOIN")

            ignite.wait_for_topology_snapshot(client_num=0)
        except Exception as e:
            ex = e
        if metrics:
            self.test_class.create_loading_metrics_graph(
                'pme_%d_servers_left_join_%s_%d' %
                (num_servers_to_kill, self.run_id, iteration),
                metrics,
                dpi_factor=0.75)
        if ex:
            raise ex

        return {
            'Exchange Server Join': x1_join_time,
            'Exchange Server Leave': x1_leave_time,
        }
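
The dict returned above carries one LEAVE and one JOIN exchange time per iteration, so a benchmark runner would normally aggregate these values over self.config['iterations'] runs. The helper below is a hypothetical aggregation sketch, not part of the framework; it only assumes statistics.mean from the Python standard library.

from statistics import mean

def summarize_pme_results(results):
    # Hypothetical helper: average the per-iteration exchange times returned
    # by _run_iteration, skipping iterations where a phase was not measured
    # (its value stayed None).
    summary = {}
    for key in ('Exchange Server Join', 'Exchange Server Leave'):
        values = [r[key] for r in results if r.get(key) is not None]
        summary[key] = mean(values) if values else None
    return summary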
Code example #15
    def setup(self):
        super().setup()
        self.snapshot_arch_enabled = is_enabled(
            self.config.get('snapshot_arch'))
        self.logger = get_logger('tiden')
        self.logger.set_suite('[TessUtils]')
Code example #16
    def setup(self):
        default_context = self.contexts['default']
        authentication_enabled = is_enabled(
            self.config.get('authentication_enabled'))
        self.reusable_lfs = is_enabled(self.config.get('reusable_lfs_enable'))

        default_context.add_context_variables(
            persistence_enabled=True,
            snapshots_enabled=is_enabled(
                self.config.get('snapshots_enabled', True)),
            pitr_enabled=is_enabled(self.config.get('pitr_enabled')),
            zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
            compaction_enabled=is_enabled(
                self.config.get('compaction_enabled')),
            authentication_enabled=authentication_enabled,
            sbt_model_enabled=is_enabled(self.config.get('sbt_model_enabled')),
            caches='caches_sbt.xml' if is_enabled(
                self.config.get('sbt_model_enabled')) else 'caches.xml',
            snapshot_archive_enabled=is_enabled(
                self.config.get('snapshot_arch')),
            dynamic_cache_enabled=is_enabled(
                self.config.get('dynamic_cache_enabled')),
            blt_auto_adjust_enabled=is_enabled(
                self.config.get('blt_auto_adjust_enabled')),
            community_edition_enabled=is_enabled(
                self.config.get('community_edition_enabled')))

        if authentication_enabled:
            auth_login = self.config.get('auth_login', 'server_user')
            auth_password = self.config.get('auth_password', 'server_password')

            default_context.add_context_variables(
                auth_login=auth_login,
                auth_password=auth_password,
            )

            self.cu.enable_authentication(auth_login, auth_password)
            self.su.enable_authentication(auth_login, auth_password)
            self.ignite.enable_authentication(auth_login, auth_password)

        if not is_enabled(self.config.get('community_edition_enabled')):
            self.gg_version = self.config['artifacts']['ignite'][
                'gridgain_version']

        super().setup()
        self.load_multiplier = float(
            self.config.get('pitr_load_multiplier',
                            UltimateUtils.load_multiplier))
        self.ignite.set_activation_timeout(20)
        self.ignite.set_snapshot_timeout(60)
        self.blt_auto_adjust_timeout = 60000
        self.logger = get_logger('tiden')
        self.logger.set_suite('[TestSnapshots]')
Code example #17
    def test_indexes_rebuilded(self):
        """
        https://ggsystems.atlassian.net/browse/GG-17428

        1. Start cluster.
        2. Start transactional loading.
        3. Stop one node and remove index.bin files for the caches.
        4. Start node and let it finish rebalance.
        5. Check indexes are not broken after rebalance.
        :return:
        """
        self.need_delete_lfs_on_teardown = True
        debug = False

        with PiClient(self.ignite, self.get_client_config()) as piclient:
            self.wait_for_running_clients_num(piclient.nodes_num, 90)

            with ExitStack() as stack:
                # todo unreachable code
                if False:
                    stack.enter_context(
                        TransactionalLoading(
                            self,
                            cross_cache_batch=2,
                            skip_atomic=True,
                            post_checksum_action=self.idle_verify_action))

                if is_enabled(self.config.get('zookeeper_enabled')) and \
                        is_enabled(self.config.get('zookeeper_nodes_restart')):
                    stack.enter_context(ZkNodesRestart(self.zoo, 2))

                for iteration in range(0, self.iterations):
                    log_print("Iteration {}/{}".format(str(iteration + 1),
                                                       str(self.iterations)),
                              color='blue')

                    self.assert_nodes_alive()

                    with TransactionalLoading(self,
                                              cross_cache_batch=2,
                                              skip_atomic=True):

                        util_sleep(20)
                        self.ignite.kill_node(2)

                    if debug:
                        self.cu.control_utility(
                            '--cache idle_verify --dump --skip-zeros')

                    self.remove_index_bin_files(2)
                    util_sleep(10)

                    if debug:
                        self.cu.control_utility(
                            '--cache idle_verify --dump --skip-zeros')

                    self.start_node(2)

                    self.ignite.jmx.wait_for_finish_rebalance(
                        self.rebalance_timeout, self.group_names)
                    util_sleep(30)
                    log_print("Check indexes")
                    try:
                        if debug:
                            self.cu.control_utility(
                                '--cache idle_verify --dump --skip-zeros')
                        self.idle_verify_action(None)
                    except TidenException:
                        if debug:
                            self.cu.control_utility(
                                '--cache idle_verify --dump --skip-zeros')
                        raise TidenException('validate_index failed')

        tiden_assert_equal(
            0, self.ignite.find_exception_in_logs('java.lang.AssertionError'),
            "# of AssertionError")