Esempio n. 1
0
    def test_ignite_9398_deactivate(self):
        """
        https://ggsystems.atlassian.net/browse/IGN-11435
        https://issues.apache.org/jira/browse/IGNITE-9398

        Fixed in: 8.5.1-p14

        Verifies that custom discovery messages are noticed faster after the
        fix that offloads their processing to a separate thread: deactivates
        the cluster over JMX and measures the longest exchange duration seen
        on any node.

        :return:
        """
        self.start_grid()

        util_sleep_for_a_while(3)
        jmx_utility = JmxUtility(self.ignite)
        jmx_utility.deactivate(1)

        # Longest gap between 'Started exchange init' and 'Finish exchange
        # future' across the whole grid, in milliseconds.
        exchange_time = self._get_last_exchange_time()

        print_blue(
            "Max time diff between 'Started exchange init' and 'Finish exchange future' at all nodes: %s msec"
            % exchange_time)

        self._dump_exchange_time(exchange_time, "cluster deactivate")

        jmx_utility.kill_utility()
Esempio n. 2
0
    def test_ignite_9398_snapshot(self):
        """
        https://ggsystems.atlassian.net/browse/IGN-11435
        https://issues.apache.org/jira/browse/IGNITE-9398

        Fixed in: 8.5.1-p14

        Verifies that custom discovery messages are noticed faster after the
        fix that offloads their processing to a separate thread: takes a
        full snapshot and measures the longest exchange duration seen on any
        node.

        :return:
        """
        self.start_grid()

        snapshot_util = SnapshotUtility(self.ignite)

        snapshot_util.snapshot_utility('SNAPSHOT', '-type=FULL')

        # Longest gap between 'Started exchange init' and 'Finish exchange
        # future' across the whole grid, in milliseconds.
        exchange_time = self._get_last_exchange_time()

        print_blue(
            "Max time diff between 'Started exchange init' and 'Finish exchange future' at all nodes: %s msec"
            % exchange_time)

        self._dump_exchange_time(exchange_time, "snapshot created")
Esempio n. 3
0
    def restart_empty_grid_with_nodes_count(self, nodes_count):
        """
        Restart the grid with exactly ``nodes_count`` server nodes and a
        clean (empty) persistence store.

        Deactivates the cluster, stops all alive default nodes, wipes the
        local file storage, then restarts either a subset of the previous
        nodes (when nodes_count is smaller) or all of them plus additional
        nodes (when nodes_count is larger), and activates again.

        :param nodes_count: desired number of server nodes after restart
        """
        self.cu.deactivate()
        util_sleep_for_a_while(5)
        current_nodes = self.ignite.get_alive_default_nodes()
        self.ignite.stop_nodes()
        util_sleep_for_a_while(5)
        # Wipe local persistence so the grid starts empty.
        self.delete_lfs()
        # Positive -> nodes to add; negative -> nodes to drop from the tail.
        additional_nodes_count = nodes_count - len(current_nodes)

        if additional_nodes_count < 0:
            print_blue('Going to remove nodes %s' %
                       current_nodes[additional_nodes_count:])
            # Iterate over a slice copy so in-place removal is safe.
            for node_id in current_nodes[additional_nodes_count:]:
                current_nodes.remove(node_id)
                # if self.ignite.nodes.get(node_id):
                #     del self.ignite.nodes[node_id]

        log_print('Going to start nodes {}'.format(current_nodes))
        self.ignite.start_nodes(*current_nodes)

        if additional_nodes_count > 0:
            # NOTE(review): this recomputation yields the same value as
            # above, since current_nodes is unchanged on this branch.
            additional_nodes_count = nodes_count - len(current_nodes)
            print_blue('Starting %s additional nodes' % additional_nodes_count)
            node_id = list(
                self.ignite.add_additional_nodes(self.get_server_config(),
                                                 additional_nodes_count))
            self.ignite.start_additional_nodes(node_id)

        self.cu.activate()
Esempio n. 4
0
 def _get_new_top_after_test(self, ignite, custom_event_name=None):
     self.new_top = ignite.last_topology_snapshot()
     self.new_topVer = max([_['ver'] for _ in self.new_top])
     if custom_event_name is not None:
         print_blue("New topology version after %s: %d" %
                    (custom_event_name, self.new_topVer))
     return self.new_topVer
    def setup(self):
        """
        Prepare authentication contexts: enable auth on the default context
        (masked server credentials) and create one extra context per
        credential name, each logging in with its own name as login and
        password, then run the regular suite setup.
        """
        base_context = self.contexts['default']
        base_context.add_context_variables(
            authentication_enabled=True,
            zookeeper_enabled=is_enabled(self.config.get('zookeeper_enabled')),
            auth_login='******',
            auth_password='******',
        )

        # One dedicated context per credential set under test.
        self.auth_creds = [
            'no_access',
            'task_execute',
            'admin_ops',
            'admin_cache',
        ]

        for cred_name in self.auth_creds:
            cred_context = self.create_test_context(cred_name)
            cred_context.add_context_variables(
                authentication_enabled=True,
                zookeeper_enabled=is_enabled(
                    self.config.get('zookeeper_enabled')),
                auth_login=cred_name,
                auth_password=cred_name,
            )

        printer = PrettyPrinter()
        print_blue('Credentials: \n' + printer.pformat(self.auth_creds))

        super().setup()
Esempio n. 6
0
 def _prepare_before_test(self,
                          ignite,
                          tx_loading,
                          custom_event_name='test'):
     """
     Snapshot the current topology version and mark the metrics timeline
     with a custom event before a test action runs.

     :param ignite: app instance exposing last_topology_snapshot()
     :param tx_loading: transactional loading helper owning a metrics thread
     :param custom_event_name: label for the event on the metrics timeline
     """
     snapshot = ignite.last_topology_snapshot()
     self.last_top = snapshot
     self.last_topVer = max(node['ver'] for node in snapshot)
     print_blue("Last topology version before %s: %d" %
                (custom_event_name, self.last_topVer))
     tx_loading.metrics_thread.add_custom_event(custom_event_name)
Esempio n. 7
0
    def util_change_snapshot_src_for_remote_grid(self,
                                                 snapshot_dir,
                                                 rename_dir=True,
                                                 repair=False):
        """
        Rename snapshot files or their directories on the remote host by
        adding/removing a '_test_' prefix, to corrupt a snapshot for
        negative tests or to repair a previous corruption.

        :param snapshot_dir: directory to search for 'part-1.bin' (falls
            back to IGNITE_HOME/work/snapshot when falsy); in repair mode it
            is iterated as the list of previously renamed paths —
            TODO confirm against callers
        :param rename_dir: rename the parent directory instead of the file
        :param repair: revert a previous rename ('_test_<name>' -> '<name>')
        :return: list of source paths that were moved
        """
        host = None

        # Pick the host of the last server node seen; assumes the snapshot
        # files live there — NOTE(review): verify for multi-host grids.
        server_nodes = self.ignite.get_all_default_nodes(
        ) + self.ignite.get_all_additional_nodes()
        for node_id in self.ignite.nodes.keys():
            if node_id in server_nodes:
                host = self.ignite.nodes[node_id]['host']
                # NOTE(review): ignite_home is collected but never used here.
                ignite_home = self.ignite.nodes[node_id]['ignite_home']

        if repair:
            # Map '<dir>/_test_<name>' back to the original '<dir>/<name>'.
            f_to_rename = [('/'.join(line.split('/')[:-1]) + '/_test_' +
                            line.split('/')[-1], line)
                           for line in snapshot_dir]
        else:
            commands = dict()

            dir_to_search = '%s/work/snapshot/' % self.ignite.ignite_home
            if snapshot_dir:
                dir_to_search = snapshot_dir

            # Locate partition files belonging to the snapshot.
            commands[host] = ['find %s -name \'part-1.bin\'' % dir_to_search]
            log_print(commands)
            output = self.ignite.ssh.exec(commands)
            print_blue(output)
            files = [file for file in output[host][0].split('\n') if file]
            print_blue(files)

            if rename_dir:
                # Rename the parent directory: '<a>/<b>/f' -> '<a>/_test_<b>'.
                f_to_rename = [('/'.join(line.split('/')[:-1]),
                                '/'.join(line.split('/')[:-2]) + '/_test_' +
                                line.split('/')[-2]) for line in files]
            else:
                # Rename the file itself: '<dir>/<f>' -> '<dir>/_test_<f>'.
                f_to_rename = [(line, '/'.join(line.split('/')[:-1]) +
                                '/_test_' + line.split('/')[-1])
                               for line in files]

        # NOTE(review): a set does not preserve order, so the joined 'mv'
        # commands run in arbitrary order — fine for independent paths,
        # confirm no path depends on another being moved first.
        commands = set()
        remote_cmd = dict()
        files = []
        for src, dst in f_to_rename:
            commands.add('mv %s %s' % (src, dst))
            files.append(src)

        remote_cmd[host] = [';'.join(commands)]

        log_print(remote_cmd)
        output = self.ignite.ssh.exec(remote_cmd)
        log_print(output)
        print_red(remote_cmd)
        return files
Esempio n. 8
0
    def util_create_snapshots_chain(self, snapshots_count):
        """
        Build a chain of incremental snapshots on top of one full snapshot,
        removing random data and reloading between iterations.

        :param snapshots_count: number of incremental snapshots to create
        :return: list of (snapshot_id, checksum_dump) tuples, one per
            incremental snapshot
        """
        chain = []
        # Base full snapshot that the incremental ones build upon.
        self.run_snapshot_utility('snapshot', '-type=full')

        for iteration in range(snapshots_count):
            print_green('Iteration %s from %s' % (iteration + 1, snapshots_count))
            self._remove_random_from_caches()

            self.run_snapshot_utility('snapshot')
            checksum_dump = self.calc_checksums_on_client()
            chain.append(
                (self.get_latest_snapshot_id(), checksum_dump))
            self.load_data_with_streamer(end_key=5000, allow_overwrite=True)

        print_blue(self.su.snapshots_info())
        return chain
Esempio n. 9
0
    def setup(self):
        """
        Configure authentication contexts for the server, read_only and
        no_access credential sets, then run the regular suite setup.
        """
        # Keyed by credential prefix; '' denotes the default server creds.
        self.auth_creds = {
            '': {
                'name': 'server',
                'context': 'default',
            },
            'read_only': {
                'name': 'read_only',
                'context': 'read_only',
            },
            'no_access': {
                'name': 'no_access',
                'context': 'no_access',
            },
        }
        pp = PrettyPrinter()

        for auth_cred_name in self.auth_creds:
            # aka 'server_login' / 'server_password', etc.
            default_login = self.auth_creds[auth_cred_name]['name'] + '_user'
            default_password = self.auth_creds[auth_cred_name][
                'name'] + '_password'
            # NOTE(review): the config keys are built without a separator,
            # e.g. 'auth_read_onlylogin' — possibly a missing '_' before
            # 'login'/'password'; confirm against the config schema before
            # changing, since the '' cred relies on plain 'auth_login'.
            self.auth_creds[auth_cred_name].update({
                'login':
                self.config.get('auth_' + auth_cred_name + 'login',
                                default_login),
                'password':
                self.config.get('auth_' + auth_cred_name + 'password',
                                default_password),
            })
            context_name = self.auth_creds[auth_cred_name]['context']
            if context_name != 'default':
                context = self.create_test_context(context_name)
            else:
                context = self.contexts['default']

            context.add_context_variables(
                authentication_enabled=True,
                zookeeper_enabled=is_enabled(
                    self.config.get('zookeeper_enabled')),
                auth_login=self.auth_creds[auth_cred_name]['login'],
                auth_password=self.auth_creds[auth_cred_name]['password'],
            )

        print_blue('Credentials: \n' + pp.pformat(self.auth_creds))
        super().setup()
Esempio n. 10
0
    def _remove_random_from_caches(self, easy=False):
        """
        Remove a random range of keys from every cache in the 'cache_group'
        group via a temporary PiClient, then wait until the extra client
        leaves the topology.

        :param easy: when True remove only a handful of keys per cache;
            otherwise remove a random contiguous slice spanning the middle
            of the key space (self.max_key)
        """
        clients_num = self.ignite.get_nodes_num('client')

        with PiClient(self.ignite, self.get_client_config()):
            caches = self.ignite.get_cache_names('cache_group')

            print_blue('--- Caches ---')
            print_blue(caches)

            for cache_name in caches:
                cache = IgniteCache(cache_name)

                if easy:
                    # NOTE(review): bound derives from the number of caches,
                    # not keys, and randint(1, 0) raises ValueError when
                    # fewer than 4 caches exist — confirm intent.
                    for index in range(1, randint(1, int(len(caches) / 2))):
                        cache.remove(index, key_type='long')
                else:
                    # Random contiguous range straddling the middle key.
                    for index in range(
                            randint(1, int(self.max_key / 2)),
                            randint(int(self.max_key / 2) + 1, self.max_key)):
                        cache.remove(index, key_type='long')

        # Wait for the temporary client to disconnect (120 s timeout).
        self.wait_for_running_clients_num(clients_num, 120)
Esempio n. 11
0
    def test_shared_folder_removed(self):
        """
        Create a shared snapshot storage folder, remove it via the snapshot
        utility, and verify on a random host that the directory is gone.
        """
        created = self.ignite.su.create_shared_snapshot_storage(
            unique_folder=True, prefix='to_remove')
        print_blue(created)
        self.folders_to_remove.append(created)
        assert 'to_remove' in created

        for folder in self.folders_to_remove:
            removal_result = self.ignite.su.remove_shared_snapshot_storage(folder)
            print_blue(removal_result)

            host = self.ssh.get_random_host()
            listing = self.ssh.exec_on_host(
                host, ['ls -la %s/.. 2>/dev/null || echo "OOKK"' % folder])
            print_blue(listing)
            # The parent listing must not mention the folder any more.
            assert folder not in listing[host][0]
            assert 'OOKK' in listing[host][0]
Esempio n. 12
0
    def test_shared_unique_folder_created(self):
        """
        Create a unique shared snapshot storage folder and verify it is
        writable from one random host and visible from another, proving the
        storage is genuinely shared.
        """
        shared_folder = self.ignite.su.create_shared_snapshot_storage(
            unique_folder=True, prefix='wooop')
        print_blue(shared_folder)
        self.folders_to_remove.append(shared_folder)
        assert 'wooop' in shared_folder

        writer_host = self.ssh.get_random_host()
        write_output = self.ssh.exec_on_host(
            writer_host,
            ['touch %s/test_share.tmp && echo "OOKK"' % shared_folder])
        print_blue(write_output)
        assert 'OOKK' == write_output[writer_host][0].strip()

        reader_host = self.ssh.get_random_host()
        read_output = self.ssh.exec_on_host(
            reader_host,
            ['ls -la %s/test_share.tmp 2>&1 && echo "OOKK"' % shared_folder])
        print_blue(read_output)
        assert 'test_share.tmp' in read_output[reader_host][0]
        assert 'OOKK' in read_output[reader_host][0]
Esempio n. 13
0
    def test_cache_groups_access(self):
        """
        1. two caches in the same cache group, read only user has access to only part of caches in the group
        2. start grid
        3. populate caches with data and calc hashsums
        4. start client with read-only access to some caches and no access to other caches

        5. try to read from read only cache - must succeed
        6. try to read from inaccessible cache - must fail
        7. try to write to read only cache - must fail
        8. try to write to read-write cache - must succeed
        :return:
        """
        self.util_enable_client_security('')  # load data with 'server' access
        self.start_grid()
        self.load_data_with_streamer()
        with PiClient(self.ignite,
                      self.get_client_config('read_only'),
                      nodes_num=1) as piclient:
            ignite = piclient.get_ignite()
            cache_names = ignite.cacheNames()
            cache_number_utils = piclient.get_gateway(
            ).entry_point.getUtilsService().getNumberUtils()

            for cache_name in [
                    'cache_ro_1', 'cache_rw_1', 'cache_no_1', 'cache_ro_2',
                    'cache_rw_2', 'cache_no_2'
            ]:
                assert cache_names.contains(
                    cache_name), "Expected cache not found!"
                # Expected permissions are encoded in the cache name:
                # _rw_ = read+write, _ro_ = read only, _no_ = no access.
                expect_read_ok = False
                expect_write_ok = False
                if '_rw_' in cache_name:
                    expect_read_ok = True
                    expect_write_ok = True
                elif '_ro_' in cache_name:
                    expect_read_ok = True
                    expect_write_ok = False

                cache = ignite.cache(cache_name)
                key = 0
                val = None
                if expect_read_ok:
                    val = cache_number_utils.getTypedKey(cache, key, 'long')
                    print_blue("Read '%s' from '%s' - OK" % (val, cache_name))
                    assert val
                else:
                    # Read must be denied: only an exception from getTypedKey
                    # counts as success.  BUGFIX: the original asserted
                    # `val is None` inside the try block, but that
                    # AssertionError was swallowed by `except Exception`,
                    # masking an unexpectedly successful read; the `else`
                    # clause already fails the no-exception case.
                    try:
                        val = cache_number_utils.getTypedKey(
                            cache, key, 'long')
                    except Exception as e:
                        print_blue("NOT Read from '%s' : %s - OK" %
                                   (cache_name, str(e)))
                    else:
                        assert False, "Expected no READ access to '%s'!" % cache_name

                new_val = 12345
                if expect_write_ok:
                    cache_number_utils.putTypedKey(cache, key, new_val, 'long')
                    print_blue("Write '%s' to '%s' - OK" %
                               (new_val, cache_name))
                else:
                    try:
                        cache_number_utils.putTypedKey(cache, key, new_val,
                                                       'long')
                    except Exception as e:
                        print_blue("NOT Write '%s' to '%s' : %s - OK" %
                                   (str(new_val), cache_name, str(e)))
                        if expect_read_ok:
                            # A denied write must not have changed the value.
                            old_val = cache_number_utils.getTypedKey(
                                cache, key, 'long')
                            print_blue("Read '%s' from '%s' - OK" %
                                       (old_val, cache_name))
                            assert old_val.toString() == val.toString(
                            ), "Seems that value was overwritten anyway!"
                    else:
                        assert False, "Expected no WRITE access to '%s'!" % cache_name
Esempio n. 14
0
    def test_ignite_8657(self):
        """
        This test is based on IGNITE-8657:
        1. start grid with EXCHANGE_HISTORY_SIZE smaller than N
        2. activate
        3. start simultaneously M > N clients
        4. all client nodes should and be able to perform cache put/get operations and transactions

        NB: this test hangs with 2.5.1-p6, due to piclient wait Ignition.start() forever
        """
        self.start_grid()
        self.load_random_data_with_streamer(0, 1000, nodes_num=2)
        self.cu.set_current_topology_as_baseline()

        # Node ids present before the clients start, to detect the new ones.
        nodes_before = set(self.ignite.get_all_common_nodes())

        with PiClient(self.ignite,
                      self.get_client_config(),
                      nodes_num=10,
                      jvm_options=self.jvm_options,
                      read_timeout=300) as piclient:
            nodes_after = set(self.ignite.get_all_common_nodes())
            nodes_started = list(nodes_after - nodes_before)
            # Round-robin over the newly started client nodes.
            node_ids = deque(nodes_started)
            node_id = node_ids[0]
            node_ids.rotate()

            # 4 outer iterations, each exercising all tx modes plus streaming.
            for i in range(1, 5):
                # Warm-up: empty transaction on the current client node.
                gateway = piclient.get_gateway(node_id)
                ignite = piclient.get_ignite(node_id)
                tx = ignite.transactions().txStart()
                util_sleep_for_a_while(3)
                tx.commit()
                for concurrency in ['OPTIMISTIC', 'PESSIMISTIC']:
                    for isolation in [
                            'READ_COMMITTED', 'REPEATABLE_READ', 'SERIALIZABLE'
                    ]:
                        print_blue('Run transaction %s %s' %
                                   (concurrency, isolation))

                        # Next client node for this concurrency/isolation pair.
                        node_id = node_ids[0]
                        node_ids.rotate()

                        gateway = piclient.get_gateway(node_id)
                        ignite = piclient.get_ignite(node_id)

                        concurrency_isolation_map = self._get_tx_type_map(
                            gateway)

                        cache_names = ignite.cacheNames().toArray()

                        # Read-modify-write each cache inside one transaction.
                        tx = ignite.transactions().txStart(
                            concurrency_isolation_map.get(concurrency),
                            concurrency_isolation_map.get(isolation))

                        for cache_name in cache_names:
                            cache = ignite.getCache(cache_name)
                            val = cache.get(int(random() * 1000))
                            # log_print('got %s' % repr(val))
                            if val:
                                cache.put(int(random() * 1000), val)

                        tx.commit()
                node_id = node_ids[0]
                node_ids.rotate()

                # Stream extra data into every cache from the next client,
                # asynchronously, then wait for all streamers to finish.
                ignite = piclient.get_ignite(node_id)
                async_ops = []
                for cache_name in ignite.cacheNames().toArray():
                    _async = create_async_operation(create_streamer_operation,
                                                    cache_name, 1002, 2000)
                    _async.evaluate()
                    async_ops.append(_async)

                for async_op in async_ops:
                    async_op.getResult()
    def setup(self):
        """
        Configure SSL/authentication contexts for three credential sets
        ('' = default server, 'read_only', 'utility'), enable authentication
        on the control and snapshot utilities, then run the regular setup.
        """
        # 'description' holds the security permission set as a JSON-like
        # string.  NOTE(review): the 'read_only' value concatenates entries
        # after the opening '{' without keys or separating commas — confirm
        # this matches the expected permission-descriptor format.
        self.auth_creds = {
            '': {
                'user': '******',
                'pwd': 'qwe123',
                'context': 'default',
                'description': '{ "defaultAllow": "true" }'
            },
            'read_only': {
                'user':
                '******',
                'pwd':
                'qwe123',
                'context':
                'read_only',
                'description':
                '{'
                '{ "cache": "cache_ro*", "permissions":["CACHE_READ"] }, '
                '{ "cache": "cache_rw*", "permissions":["CACHE_READ", "CACHE_PUT", "CACHE_REMOVE"] }, '
                '{ "task": "*", "permissions":["TASK_EXECUTE"] }, '
                '{ "system":["ADMIN_CACHE", "CACHE_CREATE"] }, '
                '"defaultAllow":"false"'
                '}'
            },
            'utility': {
                'user': '******',
                'pwd': 'qwe123',
                'context': 'utility',
                'description': '{ "defaultAllow":"true" }'
            },
        }
        pp = PrettyPrinter()

        for auth_cred_name in self.auth_creds:
            context_name = self.auth_creds[auth_cred_name]['context']
            if context_name != 'default':
                context = self.create_test_context(context_name)
            else:
                context = self.contexts['default']

            # Every context shares the SSL key stores and feature toggles,
            # differing only in the login/password pair.
            context.add_context_variables(
                ssl_enabled=is_enabled(self.config.get('ssl_enabled')),
                authentication_enabled=True,
                client_key_store_file_path='%s/client.jks' %
                self.config['rt']['remote']['test_module_dir'],
                server_key_store_file_path='%s/server.jks' %
                self.config['rt']['remote']['test_module_dir'],
                trust_store_file_path='%s/trust.jks' %
                self.config['rt']['remote']['test_module_dir'],
                zookeeper_enabled=is_enabled(
                    self.config.get('zookeeper_enabled')),
                pitr_enabled=is_enabled(self.config.get('pitr_enabled')),
                rolling_updates_enabled=is_enabled(
                    self.config.get('rolling_updates_enabled')),
                auth_login=self.auth_creds[auth_cred_name]['user'],
                auth_password=self.auth_creds[auth_cred_name]['pwd'],
            )

        # Command-line utilities authenticate with the 'utility' credentials.
        self.cu.enable_authentication('utility', 'qwe123')
        self.su.enable_authentication('utility', 'qwe123')

        print_blue('Credentials: \n' + pp.pformat(self.auth_creds))
        super().setup()
Esempio n. 16
0
    def test_pme_bench_activate(self):
        """
        Perform activate and deactivate benchmarks, save results to .csv in var_dir
        """
        self.start_grid_no_activate()
        self.last_top = self.ignite.ignite_srvs.last_topology_snapshot()
        self.last_topVer = max([_['ver'] for _ in self.last_top])
        last_minorTopVer = 0
        print_blue("Topology version before activate: %d" % self.last_topVer)

        util_sleep_for_a_while(3)
        self.ignite.ignite_srvs.jmx.activate(1)

        # Poll until the exchange is reported finished (at most 5 tries,
        # 5 s apart); _get_last_exchange_time() updates exchange_finished
        # and the aggregated exchange stats as a side effect.
        n_tries = 0
        max_tries = 5
        while n_tries < max_tries:
            n_tries += 1
            max_time = self._get_last_exchange_time()
            if self.exchange_finished:
                break
            util_sleep_for_a_while(5)

        self.new_top = self.ignite.ignite_srvs.last_topology_snapshot()
        self.new_topVer = max([_['ver'] for _ in self.new_top])
        assert self.new_topVer == self.last_topVer, "ERROR: major topology changed, possibly crash during activation"

        # NOTE(review): exchange versions appear encoded as
        # major * 10000 + minor — confirm against _get_last_exchange_time().
        for exch_topVer, exch_data in self.agg_exch_x1.items():
            exch_major_topVer = int(exch_topVer / 10000)
            exch_minor_topVer = exch_topVer - exch_major_topVer * 10000
            if exch_major_topVer == self.last_topVer:
                x1_time = self.agg_exch_x1[exch_topVer]['max_duration']
                x2_time = self.agg_exch_x2[exch_topVer]['max_duration']
                self._dump_exchange_time(
                    x1_time,
                    x2_time,
                    "activate [%d, %d]" %
                    (exch_major_topVer, exch_minor_topVer),
                    num_partitions=self.num_partitions)
                last_minorTopVer = exch_minor_topVer

        self.ignite.ignite_srvs.jmx.deactivate(1)

        # Same polling loop for the deactivate exchange.
        n_tries = 0
        max_tries = 5
        while n_tries < max_tries:
            n_tries += 1
            max_time = self._get_last_exchange_time()
            if self.exchange_finished:
                break
            util_sleep_for_a_while(5)

        # Dump only exchanges newer than the last 'activate' minor version.
        for exch_topVer, exch_data in self.agg_exch_x1.items():
            exch_major_topVer = int(exch_topVer / 10000)
            exch_minor_topVer = exch_topVer - exch_major_topVer * 10000
            if exch_major_topVer == self.last_topVer and exch_minor_topVer > last_minorTopVer:
                x1_time = self.agg_exch_x1[exch_topVer]['max_duration']
                x2_time = self.agg_exch_x2[exch_topVer]['max_duration']
                self._dump_exchange_time(
                    x1_time,
                    x2_time,
                    "deactivate [%d, %d]" %
                    (exch_major_topVer, exch_minor_topVer),
                    num_partitions=self.num_partitions)