def setup_testcase(self):
        self.logger.info('TestSetup is called')

        if self.get_context_variable('zookeeper_enabled'):
            self.start_zookeeper()

        self.ignite.set_activation_timeout(240)
        self.ignite.set_snapshot_timeout(240)
        self.su.clear_snapshots_list()
        self.start_grid(activate_on_particular_node=1)

        PiClientIgniteUtils.load_data_with_streamer(self.ignite,
                                                    self.get_client_config(),
                                                    end_key=1000)

        if self.get_context_variable(
                'pitr_enabled') and self.get_context_variable(
                    'snapshots_enabled'):
            self.su.wait_no_snapshots_activity_in_cluster()

        self.group_names = PiClientIgniteUtils.collect_cache_group_names(
            self.ignite, self.get_client_config())

        # if not self.ignite.jmx.is_started():
        #     self.ignite.jmx.start_utility()

        # wait for no client on cluster
        self.ignite.wait_for_topology_snapshot(client_num=0)

        print_debug(repr(self.ignite))
    def test_sim(self):
        version, ignite = self.start_ignite_grid(True)

        ignite.jmx.start_utility()

        client_config = Ignite.config_builder.get_config(
            'client', config_set_name='base')
        group_names = PiClientIgniteUtils.collect_cache_group_names(
            ignite, client_config)

        PiClientIgniteUtils.load_data_with_streamer(ignite,
                                                    client_config,
                                                    end_key=50)

        server_nodes_num = ignite.get_nodes_num('server')
        sim_engine = PigeonSimulation(server_nodes_num)
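        # PigeonSimulation (as used below) yields (event, node) pairs, and
        # self.get_pigeon() is expected to map each event name to a handler
        # that injects that failure on the given node.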

        for running_iteration in range(1, DEFAULT_ITERATIONS + 1):
            log_print("Running iteration %s" % running_iteration)

            ev, node = sim_engine.next_event()
            log_print("Evaluating event %s on node %s" % (ev, node))

            pigeon = self.get_pigeon(ignite, node)

            pigeon[ev]()

            ignite.jmx.wait_for_finish_rebalance(120, group_names)

            self.verify_cluster(ignite)

        ignite.jmx.kill_utility()
    def base_test(self, **kwargs):
        cache_to_test = 'cache_group_1_001'
        check_commands = [
            '!tables', '!index',
            '\'select count(*) from \"%s\".ALLTYPESINDEXED;\'' % cache_to_test
        ]

        expected = [r'COUNT\(\*\)', '1000']
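        # the entries above are patterns checked against the sqlline output:
        # the COUNT(*) header and the expected row count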

        if 'ssl_connection' in kwargs:
            self.set_current_context('ssl_enabled')

        self.start_grid(activate_on_particular_node=1)

        PiClientIgniteUtils.load_data_with_streamer(
            self.ignite,
            self.get_client_config('ssl_enabled'),
            value_type=ModelTypes.VALUE_ALL_TYPES_INDEXED.value,
            end_key=1000,
            allow_overwrite=True)

        sql_tool = Sqlline(self.ignite, **kwargs)
        output = sql_tool.run_sqlline(check_commands)
        self.su.check_content_all_required(output, expected)

        # based on GG-17465 (validate indexes with a secured cluster)
        self.cu.control_utility('--cache', 'validate_indexes')
    def setup_testcase(self):
        if self.lfs_stored:
            self.restore_lfs('snapshot_util', timeout=1200)
        self.setup_testcase_without_start_gid()

        activation_timeout = 60
        if self.get_context_variable('sbt_model_enabled'):
            activation_timeout = 200
        self.start_grid(timeout=activation_timeout,
                        activate_on_particular_node=1)

        if not self.lfs_stored:
            if self.get_context_variable('dynamic_cache_enabled'):
                self.start_caches_dynamic(
                    caches_file_name=self.get_context_variable('caches'),
                    batch_size=10000)

            if self.get_context_variable('sbt_model_enabled'):
                PiClientIgniteUtils.load_data_with_txput_sbt_model(
                    self.config,
                    self.ignite,
                    self.get_client_config(),
                    only_caches_batch=None,
                    end_key=int(self.max_key * self.load_multiplier))

            else:
                PiClientIgniteUtils.load_data_with_streamer(
                    self.ignite,
                    self.get_client_config(),
                    end_key=int(self.max_key * self.load_multiplier),
                    allow_overwrite=True)
        log_print(repr(self.ignite), color='debug')
    def test_master_master_master_blinking_blt(self):
        self.prepare_clusters()

        client_config = self.preconfigure_cluster_0()

        iterations = 10
        last_loaded_key = START_DATA_SIZE
        nodes_before = 6
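        # scenario: under transactional load, repeatedly kill node 2, reset
        # the baseline topology without it, bring it back, reset the baseline
        # again and verify the cluster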

        with PiClient(self.clusters[0].grid,
                      client_config,
                      jvm_options=['-ea']) as piclient:
            PiClientIgniteUtils.load_data_with_streamer(
                self.clusters[0].grid,
                client_config,
                end_key=last_loaded_key,
                jvm_options=['-ea'],
                check_clients=False)

            sleep(60)

            with TransactionalLoading(self,
                                      ignite=self.clusters[0].grid,
                                      config_file=client_config,
                                      skip_consistency_check=True):
                for i in range(0, iterations):
                    log_print(f'Current iteration {i + 1} of {iterations}',
                              color='debug')

                    self.clusters[0].grid.kill_node(2)

                    utility_baseline_log = 'control-utility-baseline.log'

                    self.clusters[0].grid.cu.set_current_topology_as_baseline(
                        background=True, log=utility_baseline_log)

                    self.clusters[0].grid.start_node(2,
                                                     skip_topology_check=True)

                    self.clusters[0].grid.wait_for_topology_snapshot(
                        server_num=6)

                    self.clusters[0].grid.update_started_node_status(2)

                    self.clusters[0].grid.cu.set_current_topology_as_baseline(
                        background=True, log=utility_baseline_log)

                    self.verify_cluster(0, nodes_before, last_loaded_key)
    def _setup_with_context(self, context_name, **kwargs):
        self._start_grid_no_preload(context_name, **kwargs)

        if self.preloading_size > 0:
            if self.preloading_with_streamer:
                PiClientIgniteUtils.load_data_with_streamer(
                    self.ignite,
                    self.get_client_config(),
                    end_key=self.preloading_size)
            else:
                PiClientIgniteUtils.load_data_with_putall(
                    self.ignite,
                    self.get_client_config(),
                    end_key=self.preloading_size)

        self._wait_cluster_ready()
    def verify_cluster(self, nodes_before, last_loaded_key=None):
        if len(nodes_before) != self.ignite.get_nodes_num('server'):
            log_print("There are missing nodes in the cluster.", color='yellow')

            self.verify_no_assertion_errors()

            log_print("Wait for topology messages again.", color='yellow')
            for node_id in self.ignite.get_all_default_nodes():
                self.ignite.update_started_node_status(node_id)
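            # with statuses refreshed, re-check the server count and restart
            # any nodes that are reported dead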

            log_print("Missing nodes confirmed. Trying to restart them.",
                      color='red')
            if len(nodes_before) != self.ignite.get_nodes_num('server'):
                nodes_to_start = []

                for node_id in self.ignite.get_alive_default_nodes():
                    # if the node is reported dead, schedule it for restart below
                    if not self.ignite.check_node_status(node_id):
                        log_print("Restarting node %s" % node_id,
                                  color='yellow')
                        nodes_to_start.append(node_id)

                for node_id in nodes_to_start:
                    self.ignite.start_node(node_id,
                                           skip_nodes_check=True,
                                           check_only_servers=True)

                if len(nodes_before) != self.ignite.get_nodes_num('server'):
                    for node_id in self.ignite.get_alive_default_nodes():
                        self.util_get_threads_from_jstack(node_id, "FAILED")

                    assert False, "Failed to restart node"

        self.cu.control_utility('--activate')
        self.verify_no_assertion_errors()

        activate_failed = False
        log_print('Check that there is no Error in activate logs',
                  color='yellow')
        if 'Error' in self.cu.latest_utility_output:
            activate_failed = True
            log_print('Failed!', color='red')
        sleep(5)

        self.cu.control_utility('--baseline')
        self.verify_no_assertion_errors()
        log_print('Check that there is no Error in control.sh --baseline logs',
                  color='yellow')

        if 'Error' in self.cu.latest_utility_output:
            log_print('Failed! Retrying after a 60 second sleep', color='red')
            sleep(60)

            self.cu.control_utility('--baseline')

            if 'Error' in self.cu.latest_utility_output or activate_failed:
                log_print('Cluster looks hung.')

        log_print('Check that there is no AssertionError in logs',
                  color='yellow')
        self.verify_no_assertion_errors()

        if last_loaded_key:
            try:
                log_print('Trying to load data into survivor caches',
                          color='yellow')
                PiClientIgniteUtils.load_data_with_streamer(
                    self.ignite,
                    self.get_client_config(),
                    start_key=last_loaded_key,
                    end_key=last_loaded_key + 500,
                    allow_overwrite=True,
                    check_clients=False,
                )

                last_loaded_key += 500

                log_print('Printing checksums of existing caches',
                          color='yellow')

                print(
                    PiClientIgniteUtils.calc_checksums_distributed(
                        self.ignite,
                        self.get_client_config(),
                        check_clients=False))

                log_print('Check that there is no AssertionError in logs',
                          color='yellow')
            except Exception as e:
                for node_id in self.ignite.get_alive_default_nodes():
                    self.util_get_threads_from_jstack(node_id, "FAILED")

                assert False, "Unable to connect client"
            finally:
                self.verify_no_assertion_errors()

        return last_loaded_key
    def test_cycling_restart_grid_dynamic_caches_with_atomic_on_restart(self):
        """
        Scenario The Glue
        (Assertions should be enabled)

        1. Start grid, load some data
        2. In a loop:
            2.1 Define the node restart timeout (0.5 - 2.5 seconds)
            2.2 Load more data
            2.3 Restart each node with the defined timeout (the topology snapshot is NOT checked)
            2.4 Try to activate, check for AssertionErrors
            2.5 Try to set the baseline (if both operations failed -> PME problem: kill all nodes, start a new test iteration)
            2.6 Try to load data
            2.7 Try to calculate checksums

        :return:
        """
        import random

        PiClient.read_timeout = 240
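        # generous client read timeout, since nodes are killed and restarted
        # aggressively in this scenario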

        # sleep_for_time = float(random.randrange(1, 15, 1)) / 5

        self.set_current_context('in_memory')

        self.util_copy_piclient_model_to_libs()
        self.ignite.set_activation_timeout(240)
        self.ignite.set_snapshot_timeout(240)
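        # '-ea' turns on Java assertions on every node, as the scenario
        # description above requires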
        self.ignite.set_node_option('*', 'jvm_options', ['-ea'])
        self.su.clear_snapshots_list()
        self.start_grid(skip_activation=True)

        with PiClient(self.ignite,
                      self.get_client_config(),
                      jvm_options=['-ea']) as piclient:
            # ignite = piclient.get_ignite()

            self.start_dynamic_caches_with_node_filter()

            last_loaded_key = 1000
            PiClientIgniteUtils.load_data_with_streamer(
                self.ignite,
                self.get_client_config(),
                end_key=last_loaded_key,
                jvm_options=['-ea'])

            nodes_before = self.ignite.get_alive_default_nodes()

            iterations = 50
            last_loaded_key += 1
            for i in range(0, iterations):
                log_print('Current iteration %s of %s' % (i, iterations),
                          color='debug')
                # sleep_for_time = float(self.the_glue_timeout) if self.the_glue_timeout else random.choice([0.7, 0.9, 2.0])
                sleep_for_time = (float(self.the_glue_timeout)
                                  if self.the_glue_timeout
                                  else round(random.uniform(0.5, 2.5), 1))
                log_print(
                    "In this run we are going to sleep for {} seconds after each node restart"
                    .format(sleep_for_time),
                    color='green')

                log_print('Trying to load data into created/existing caches',
                          color='yellow')
                self.start_dynamic_caches_with_node_filter()
                PiClientIgniteUtils.load_data_with_streamer(
                    self.ignite,
                    self.get_client_config(),
                    start_key=last_loaded_key,
                    end_key=last_loaded_key + 500,
                    jvm_options=['-ea'])
                last_loaded_key += 500

                log_print("Round restart")
                for node_id in self.ignite.get_alive_default_nodes():
                    self.ignite.kill_node(node_id)
                    self.ignite.start_node(node_id, skip_topology_check=True)
                    sleep(sleep_for_time)

                    try:
                        log_print(
                            "Incrementing atomics using distributed compute")
                        create_async_operation(
                            create_distributed_atomic_long).evaluate()
                    except Exception as e:
                        log_print("Failed to increment atomics")

                        # just print exception (https://issues.apache.org/jira/browse/IGNITE-11535)
                        traceback.print_exc()

                log_print("Wait for topology messages")
                for node_id in self.ignite.get_all_default_nodes():
                    self.ignite.update_started_node_status(node_id)

                sleep(15)

                log_print("Validating cluster")
                last_loaded_key = self.verify_cluster(nodes_before,
                                                      last_loaded_key)
    def test_cycling_restart_grid_dynamic_caches_no_client(self):
        """
        Scenario The Glue
        (Assertions should be enabled)

        1. Start grid, load some data
        2. In a loop:
            2.1 Define the node restart timeout (0.5 - 2.5 seconds)
            2.2 Load more data
            2.3 Restart each node with the defined timeout (the topology snapshot is NOT checked)
            2.4 Try to activate, check for AssertionErrors
            2.5 Try to set the baseline (if both operations failed -> PME problem: kill all nodes, start a new test iteration)
            2.6 Try to load data
            2.7 Try to calculate checksums

        :return:
        """
        import random

        PiClient.read_timeout = 240

        # sleep_for_time = float(random.randrange(1, 15, 1)) / 5

        self.set_current_context('in_memory')

        self.util_copy_piclient_model_to_libs()
        self.ignite.set_activation_timeout(240)
        self.ignite.set_snapshot_timeout(240)
        self.ignite.set_node_option('*', 'jvm_options', ['-ea'])
        self.su.clear_snapshots_list()
        self.start_grid(skip_activation=True)

        self.start_dynamic_caches_with_node_filter()

        last_loaded_key = 1000
        PiClientIgniteUtils.load_data_with_streamer(self.ignite,
                                                    self.get_client_config(),
                                                    end_key=last_loaded_key,
                                                    jvm_options=['-ea'])

        nodes_before = self.ignite.get_alive_default_nodes()

        iterations = 50
        last_loaded_key += 1
        for i in range(0, iterations):
            with ExitStack() as stack:
                # load data before starting the ZooKeeper restart thread
                self.start_dynamic_caches_with_node_filter()
                # PiClientIgniteUtils.wait_for_running_clients_num(self.ignite, 0, 120)
                PiClientIgniteUtils.load_data_with_streamer(
                    self.ignite,
                    self.get_client_config(),
                    start_key=last_loaded_key,
                    end_key=last_loaded_key + 500,
                    jvm_options=['-ea'],
                    check_clients=True)
                last_loaded_key += 500

                if self.get_context_variable('zookeeper_enabled') and \
                        is_enabled(self.config.get('zookeeper_nodes_restart')):
                    stack.enter_context(ZkNodesRestart(self.zoo, 3))
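                    # ZooKeeper nodes are also blinked in the background for
                    # this iteration; the ExitStack presumably stops that when
                    # the block exits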

                log_print('Current iteration %s of %s' % (i, iterations),
                          color='debug')

                sleep_for_time = (float(self.the_glue_timeout)
                                  if self.the_glue_timeout
                                  else round(random.uniform(0.5, 2.5), 1))
                log_print(
                    "In this run we are going to sleep for {} seconds after each node restart"
                    .format(sleep_for_time),
                    color='green')

                log_print('Trying to load data into created/existing caches',
                          color='yellow')

                log_print("Round restart")
                for node_id in self.ignite.get_alive_default_nodes():
                    self.ignite.kill_node(node_id)
                    self.ignite.start_node(node_id, skip_topology_check=True)
                    sleep(sleep_for_time)

                log_print("Wait for topology messages")
                for node_id in self.ignite.get_all_default_nodes():
                    self.ignite.update_started_node_status(node_id)

                sleep(15)

            last_loaded_key = self.verify_cluster(nodes_before,
                                                  last_loaded_key)
    def test_clients_killed_few_coordinators(self):
        """
        1. Start grid, load some data
        2. Repeat:
            2.1. Start a client thread that loads data (putAll operations)
            2.2. Kill the first node (coordinator) and the second node (possible coordinator)
            2.3. Kill the next 4 coordinators, with a small timeout between kills
            2.4. Sleep for 2 minutes to let the cluster process the failures
            2.5. Run the verification procedure
            2.6. Stop the client put thread

        :return:
        """

        self.set_current_context('default')

        self.util_copy_piclient_model_to_libs()
        self.util_deploy_sbt_model()
        self.ignite.set_activation_timeout(240)
        self.ignite.set_snapshot_timeout(240)
        self.ignite.set_node_option('*', 'jvm_options', ['-ea'])
        self.su.clear_snapshots_list()
        self.start_grid()

        sleep_for_time = (float(self.the_glue_timeout)
                          if self.the_glue_timeout
                          else round(random.uniform(0.1, 2.9), 1))

        PiClientIgniteUtils.load_data_with_streamer(
            self.ignite,
            self.get_client_config(),
            value_type=ModelTypes.VALUE_ACCOUNT.value,
            end_key=1000)

        nodes_before = self.ignite.get_alive_default_nodes()

        def start_piclients():
            for _ in range(0, 3):
                try:
                    PiClientIgniteUtils.load_data_with_putall(
                        self.ignite,
                        self.get_client_config(),
                        value_type=ModelTypes.VALUE_ACCOUNT.value,
                        nodes_num=24,
                        end_key=1000)
                except Exception as err:
                    print(err)

        with PiClient(self.ignite, self.get_client_config()) as load:
            log_print("Starting clients under load", color="green")

            executor = ThreadPoolExecutor()

            executor.submit(start_piclients)

            sleep(5)

            try:
                self.ignite.kill_node(1)

                for i in range(0, 4):
                    sleep(sleep_for_time)

                    i = int(load.get_ignite().cluster().forOldest().node().
                            consistentId().replace('node_1_', ''))
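                    # forOldest() picks the oldest alive node, i.e. the current
                    # coordinator; its consistentId is expected to end with the
                    # framework node index ('node_1_<index>')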

                    if self.ignite.nodes[i]['status'] in [
                            NodeStatus.KILLED, NodeStatus.KILLING
                    ]:
                        sleep(sleep_for_time)

                        i = int(load.get_ignite().cluster().forOldest().node().
                                consistentId().replace('node_1_', ''))

                    self.ignite.kill_node(i)
                    log_print("Killing node %s" % i)

            except Exception as e:
                print(e)

            sleep(120)

            self.verify_cluster(nodes_before, 0)

            executor.shutdown(wait=True)
    def test_nodes_connecting_to_dead_cluster(self):
        """
        https://ggsystems.atlassian.net/browse/IGN-13800

        Two nodes try to join the cluster while the cluster is being killed.
        They should send a join request but never receive their own NodeAdded
        event.

        """
        PiClient.read_timeout = 240

        # sleep_for_time = float(random.randrange(1, 15, 1)) / 5

        self.set_current_context('in_memory')

        self.util_copy_piclient_model_to_libs()
        self.ignite.set_activation_timeout(240)
        self.ignite.set_snapshot_timeout(240)
        self.ignite.set_node_option('*', 'jvm_options', ['-ea'])
        self.su.clear_snapshots_list()
        self.start_grid(skip_activation=True)

        last_loaded_key = 1000
        PiClientIgniteUtils.load_data_with_streamer(self.ignite,
                                                    self.get_client_config(),
                                                    end_key=last_loaded_key)

        nodes_before = self.ignite.get_alive_default_nodes()

        additional_nodes = self.ignite.add_additional_nodes(
            config=self.get_client_config(), num_nodes=2)
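        # two extra nodes that will try to join while the cluster is being
        # killed, per the scenario above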

        def start_piclients():
            try:
                self.ignite.start_additional_nodes(additional_nodes,
                                                   client_nodes=True,
                                                   skip_topology_check=True)
            except Exception as err:
                print(err)
            finally:
                sleep(10)

                self.ignite.update_started_node_status(additional_nodes)

                for add_node in additional_nodes:
                    self.ignite.kill_node(add_node)

        log_print("Starting clients under load", color="green")

        executor = ThreadPoolExecutor()

        executor.submit(start_piclients)

        try:
            time_to_sleep = round(random.uniform(3.5, 4.9), 1)
            sleep(time_to_sleep)

            log_print("Time to sleep: %s" % time_to_sleep, color='green')

            self.ignite.kill_nodes()

            sleep(30)

            self.verify_cluster(nodes_before, 0)
        except Exception as e:
            raise e
        finally:
            executor.shutdown(wait=True)

        self.ssh.killall('java')
    def test_full_cluster_blinking(self):
        """

        Enable indexes.

        Start servers with PDS, start clients, start some light transactional
        loading. In a loop, blink (stop and restart) the whole cluster at the
        same time. Since all nodes are restarted together, there should be no
        data loss.

        :return:
        """

        PiClient.read_timeout = 240

        self.set_current_context('indexed_types')

        self.util_copy_piclient_model_to_libs()
        self.ignite.set_activation_timeout(240)
        self.ignite.set_snapshot_timeout(240)
        self.ignite.set_node_option('*', 'jvm_options', ['-ea'])
        self.su.clear_snapshots_list()
        self.start_grid(skip_activation=True)

        self.ignite.cu.activate(activate_on_particular_node=1)

        PiClientIgniteUtils.load_data_with_streamer(self.ignite,
                                                    self.get_client_config(),
                                                    end_key=100000)

        nodes_before = self.ignite.get_alive_default_nodes()
        iterations = 50

        with PiClient(self.ignite, self.get_client_config()):
            with TransactionalLoading(
                    self, loading_profile=LoadingProfile(delay=1000)):
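                # light background transactional load (the LoadingProfile
                # delay throttles operations) while the whole cluster is
                # blinked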
                for i in range(0, iterations):
                    log_print('Current iteration %s of %s' % (i, iterations),
                              color='debug')

                    for node_id in nodes_before:
                        self.ignite.kill_node(node_id)
                        sleep(float(self.the_glue_timeout)
                              if self.the_glue_timeout
                              else round(random.uniform(0.1, 0.5), 1))

                    for node_id in nodes_before:
                        self.ignite.start_node(node_id,
                                               skip_topology_check=True)
                        sleep(float(self.the_glue_timeout)
                              if self.the_glue_timeout
                              else round(random.uniform(0.1, 0.5), 1))

                    self.ignite.wait_for_topology_snapshot(
                        server_num=len(nodes_before))

                    for node_id in self.ignite.get_all_default_nodes():
                        self.ignite.update_started_node_status(node_id)

                    sleep(10)

                    self.cu.control_utility('--cache validate_indexes',
                                            all_required='no issues found.')

                    self.verify_cluster(nodes_before, 0)
    def test_massive_index_rebuild(self):
        """
        1) 2 nodes, backupCnt = 1, persistenceEnabled
        2) Load data of types (A, B) into a cache that has (A, B) defined in its index config
        3) Load data of new types (C, D) into the cache
        4) Kill one node
        5) Create a new index on the alive part of the cluster
        6) Start the killed node again

        :return:
        """

        PiClient.read_timeout = 1200

        self.set_current_context('indexed_types')

        self.util_copy_piclient_model_to_libs()
        self.ignite.set_activation_timeout(240)
        self.ignite.set_snapshot_timeout(240)
        self.ignite.set_node_option('*', 'jvm_options', ['-ea'])
        self.su.clear_snapshots_list()
        self.start_grid(skip_activation=True)

        self.ignite.cu.activate(activate_on_particular_node=1)
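        # steps 2-3 of the scenario: load the indexed type first, then two
        # additional value types into the same caches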

        PiClientIgniteUtils.load_data_with_streamer(
            self.ignite,
            self.get_client_config(),
            value_type=ModelTypes.VALUE_ALL_TYPES_30_INDEX.value,
            end_key=5000)

        PiClientIgniteUtils.load_data_with_streamer(
            self.ignite,
            self.get_client_config(),
            value_type=ModelTypes.VALUE_ACCOUNT.value,
            start_key=5000,
            end_key=10000)

        PiClientIgniteUtils.load_data_with_streamer(
            self.ignite,
            self.get_client_config(),
            value_type=ModelTypes.VALUE_EXT_ALL_TYPES_30_INDEX.value,
            start_key=10000,
            end_key=15000)

        # PiClientIgniteUtils.load_data_with_streamer(self.ignite,
        #                                             self.get_client_config(),
        #                                             cache_names_patterns=
        #                                             ['cache_group_3'],
        #                                             value_type=ModelTypes.VALUE_EXT_ALL_TYPES_30_INDEX.value,
        #                                             end_key=10000)

        iterations = 50

        sqlline = Sqlline(self.ignite)

        columns = [
            'longCol',
            'doubleCol',
            'stringCol',
            'booleanCol',
            'longCol1',
            # 'doubleCol1', 'stringCol1', 'intCol', 'intCol1',  # 'booleanCol1',
            # 'index', 'longCol2', 'doubleCol2', 'stringCol2', 'booleanCol2',
            # 'longCol12', 'doubleCol12', 'stringCol12', 'intCol12', 'intCol2',
            # 'shortCol2', 'longCol3', 'doubleCol3', 'stringCol3', 'booleanCol3',
            # 'longCol13', 'doubleCol13', 'stringCol13', 'intCol13', 'intCol3', 'shortCol3'
        ]

        with PiClient(self.ignite, self.get_client_config()) as piclient:
            cache_names = piclient.get_ignite().cacheNames().toArray()

            for i in range(0, iterations):
                log_print('Current iteration %s of %s' % (i, iterations),
                          color='debug')

                update_table = []

                self.ignite.kill_node(2)

                indexed_columns = ','.join(columns)
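                # while node 2 is down, create a new index on every cache
                # (steps 4-5); node 2 will have to rebuild them when it is
                # started again (step 6)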

                for cache_name in cache_names:
                    # self.ssh.exec_on_host('REMOVE')
                    vtype = 'ALLTYPES30INDEX'  # if 'cache_group_3' not in cache_name else 'EXTALLTYPES30INDEX'

                    update_table.append(
                        f'\'CREATE INDEX IF NOT EXISTS {cache_name}_{vtype} on '
                        f'\"{cache_name}\".{vtype}({indexed_columns}) INLINE_SIZE 32 PARALLEL 28;\''
                    )

                update_table.append('!index')

                sqlline.run_sqlline(update_table)

                self.ignite.start_node(2)

                util_sleep_for_a_while(30)

                self.verify_no_assertion_errors()

                self.cu.control_utility('--cache validate_indexes',
                                        all_required='no issues found.')
    def base_test_with_all_users(self, ssl_connection):
        cache_to_test = 'cache_group_1_001'
        check_commands_read = [
            '!tables', '!index',
            '\'select count(*) from \"%s\".ALLTYPESINDEXED;\'' % cache_to_test
        ]

        check_commands_update = [
            '!tables', '!index',
            '\'update \"%s\".ALLTYPESINDEXED set LONGCOL=1;\'' % cache_to_test
        ]

        expected_read = [r'COUNT\(\*\)', '1000']
        expected_update = ['1,000 rows affected']
        expected_for_no_access_user = ['Authorization failed']

        self.set_current_context('ssl_enabled')

        self.start_grid(activate_on_particular_node=1)

        PiClientIgniteUtils.load_data_with_streamer(
            self.ignite,
            self.get_client_config('ssl_enabled'),
            value_type=ModelTypes.VALUE_ALL_TYPES_INDEXED.value,
            end_key=1000,
            allow_overwrite=True)

        users = [
            {
                'login': '******',
                'password': '******',
                'read_check': {
                    'run': check_commands_read,
                    'expected': expected_read
                },
                'update_check': {
                    'run': check_commands_update,
                    'expected': expected_update
                }
            },
            {
                'login': '******',
                'password': '******',
                'read_check': {
                    'run': check_commands_read,
                    'expected': expected_read
                },
                'update_check': {
                    'run': check_commands_update,
                    'expected': expected_update
                }
            },
            {
                'login': '******',
                'password': '******',
                'read_check': {
                    'run': check_commands_read,
                    'expected': expected_read
                },
                'update_check': {
                    'run': check_commands_update,
                    'expected': expected_for_no_access_user
                }
            },
            {
                'login': '******',
                'password': '******',
                'read_check': {
                    'run': check_commands_read,
                    'expected': expected_for_no_access_user
                },
                'update_check': {
                    'run': check_commands_update,
                    'expected': expected_for_no_access_user
                }
            },
        ]

        def check_output(user_info):
            auth_info = namedtuple('auth_info', 'user password')
            auth = auth_info(user=user_info['login'],
                             password=user_info['password'])
            sql_tool = Sqlline(self.ignite,
                               auth=auth,
                               ssl_connection=ssl_connection)

            for operation in ['read_check', 'update_check']:
                output = sql_tool.run_sqlline(user_info[operation].get('run'))
                self.su.check_content_all_required(
                    output, user_info[operation].get('expected'))

        for user in reversed(users):
            check_output(user)

        for user in users:
            check_output(user)
    def load_data_with_streamer(self, *args, **kwargs):
        PiClientIgniteUtils.load_data_with_streamer(*args, **kwargs)
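
# A minimal usage sketch (not taken from any snippet above) of the call
# pattern these examples share. `ignite` and `client_config` stand for a
# started cluster object and a client configuration path; every keyword
# argument shown appears somewhere in the examples above.
#
#   PiClientIgniteUtils.load_data_with_streamer(
#       ignite,
#       client_config,
#       start_key=0,
#       end_key=1000,
#       value_type=ModelTypes.VALUE_ACCOUNT.value,
#       allow_overwrite=True)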