Example #1
 def test_data_replication_bootstrap_on_load(self):
     self.make_data_loading(duration=180, func_on_load='bootstrap')
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_master_app,
                                                  to_cluster=self.ignite_replica_app)
     checksum_master, checksum_replica = self.calculate_checksums()
     tiden_assert(checksum_master == checksum_replica, 'Hash sum master and replica not match')
     self.success_run = True
Example #2
    def test_util_1_8_counters_detection_during_PME_node_from_baseline(self):
        """
        Tests that PME synchronises partition counters if mismatched counters are detected.
        :return:
        """
        self.load_data_with_streamer(end_key=1000,
                                     value_type=ModelTypes.VALUE_ALL_TYPES_INDEXED.value)

        with PiClient(self.ignite, self.get_client_config(), nodes_num=1) as piclient:

            caches_before_lrt = []
            for cache_name in piclient.get_ignite().cacheNames().toArray():
                caches_before_lrt.append(cache_name)

            cache_under_test = caches_before_lrt[0]
            log_print('Cache under test: %s' % cache_under_test, color='blue')

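            # Corrupt the update counter of partition 1 in the cache under test so that
            # idle_verify reports a conflict for it below.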
            operation = create_broke_data_entry_operation(cache_under_test, 1, True, 'counter')
            operation.evaluate()

            expected = ['Conflict partition']
            self.cu.control_utility('--cache idle_verify', all_required=expected)

            output = self.cu.latest_utility_output

            grp_id, part_id = None, None
            for line in output.split('\n'):
                m = search(r'Conflict partition: (PartitionKey|PartitionKeyV2) \[grpId=(\d+),.*partId=(\d+)\]', line)
                if m:
                    grp_id = m.group(2)
                    part_id = m.group(3)

            tiden_assert(grp_id and part_id,
                         'Expecting to find conflicts in output\n{}'.format(self.cu.latest_utility_output))

            # Start one more server node and change baseline to run PME
            log_print("Going to start additional node", color='green')
            self.ignite.add_additional_nodes(self.get_server_config(), 1)
            self.ignite.start_additional_nodes(self.ignite.get_all_additional_nodes())
            self.cu.control_utility('--baseline')
            self.cu.set_current_topology_as_baseline()
            self.cu.control_utility('--baseline')
            msg_in_log = self.find_in_node_log('Partition states validation has failed for group: %s'
                                               % cache_under_test, node_id=1)
            assert msg_in_log != []

            # Check there are no conflicts after PME
            util_sleep_for_a_while(30)
            self.cu.control_utility('--cache', 'idle_verify')

            # Stop one more server node and change baseline to run PME
            self.ignite.kill_node(self.ignite.get_alive_additional_nodes()[0])
            util_sleep_for_a_while(30)
            self.cu.control_utility('--baseline')
            self.cu.set_current_topology_as_baseline()
            self.cu.control_utility('--baseline')

            # Check there are no conflicts after PME
            self.cu.control_utility('--cache', 'idle_verify')
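
The conflict parsing above (and again in test_util_1_1_idle_verify further down) keys off idle_verify output lines of the form 'Conflict partition: PartitionKeyV2 [grpId=..., partId=...]'. A standalone sketch of that extraction; the sample line is illustrative, not verbatim control.sh output:

from re import search

# Illustrative line in the shape the regex above expects (not real utility output)
sample = 'Conflict partition: PartitionKeyV2 [grpId=1338195278, grpName=cache_group_1, partId=12]'
m = search(r'Conflict partition: (PartitionKey|PartitionKeyV2) \[grpId=(\d+),.*partId=(\d+)\]', sample)
assert m is not None
assert m.group(2) == '1338195278' and m.group(3) == '12'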
Example #3
 def test_data_replication_pmi_1(self):
     """
         1. Bootstrap replication
         2. Run transactional loading on 'Master' cluster.
         3. Wait for 'Replica' cluster applies all changes.
         4. Check check sums on both clusters are the same.
     """
     self.make_data_loading(duration=180)
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_master_app,
                                                  to_cluster=self.ignite_replica_app)
     checksum_master, checksum_replica = self.calculate_checksums()
     tiden_assert(checksum_master == checksum_replica, 'Hash sum master and replica not match')
     self.success_run = True
Example #4
 def test_data_replication_pmi_2_on_load(self):
     """
         1. Bootstrap replication
         2. Run transactional loading on 'Master' cluster.
         3. Wait for 'Replica' cluster applies all changes.
         4. Check check sums on both clusters are the same.
         5. Call switch on the master cluster under load
         6. Run transactional loading on 'Replica' cluster
         7. Wait for 'Replica' cluster applies all changes.
         8. Check check sums on both clusters are the same.
     """
     self.make_data_loading(duration=180)
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_master_app,
                                                  to_cluster=self.ignite_replica_app)
     checksum_master_before, checksum_replica_before = self.calculate_checksums()
     tiden_assert(checksum_master_before == checksum_replica_before, 'Hash sum master and replica not match')
     self.make_data_loading(duration=180, func_on_load='switch')
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_replica_app,
                                                  to_cluster=self.ignite_master_app)
     checksum_master_before, checksum_replica_before = self.calculate_checksums()
     tiden_assert(checksum_master_before == checksum_replica_before, 'Hash sum master and replica not match')
     self.ignite_replica_app.ru.replication_utility('status')
     self.make_data_loading(duration=180, role='replica')
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_replica_app,
                                                  to_cluster=self.ignite_master_app)
     checksum_master_after, checksum_replica_after = self.calculate_checksums()
     tiden_assert(checksum_master_after == checksum_replica_after, 'Hash sum master and replica not match')
     tiden_assert(checksum_master_before != checksum_master_after,
                  'Master not change after load on replica with change')
     self.success_run = True
Example #5
 def test_data_replication_pmi_6_pitr(self):
     """
         1. Bootstrap replication
         2. Run transactional loading on 'Master' cluster.
         3. Wait for 'Replica' cluster applies all changes.
         4. Check check sums on both clusters are the same.
         5. Put variable in master, wait 40 seconds and check on replica
     """
     self.make_data_loading(duration=180)
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_master_app,
                                                  to_cluster=self.ignite_replica_app)
     checksum_master, checksum_replica = self.calculate_checksums()
     tiden_assert(checksum_master == checksum_replica, 'Hash sum master and replica not match')
     self.make_data_loading(duration=0, func_on_load='pitr')
     self.check_replica_value('replica')
     self.success_run = True
Example #6
 def wait_for_another_cluster_apply_last_cut(self, from_cluster=None, to_cluster=None, timeout=180):
     cut_frequency = 35
     util_sleep_for_a_while(cut_frequency)
     already_wait = cut_frequency
     last_id = self.get_last_consistent_cut_from_logs(from_cluster)
     while True:
         to_cluster.ru.replication_utility('status')
         last_applied_id = to_cluster.ru.get_last_applied_cut_from_session()
         log_print('Found last applied id - {}, waiting until it reaches {}'.format(last_applied_id, last_id))
         if int(last_applied_id) >= int(last_id):
             break
         already_wait += 10
         util_sleep_for_a_while(10)
         if already_wait >= timeout:
             tiden_assert(already_wait < timeout, 'Timed out waiting for the last cut to be applied')
     log_print('Found last cut applied on the other cluster after {} s'.format(already_wait))
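
The loop above hard-codes a 10-second poll and checks the timeout inside the loop body; the same behaviour can be expressed as a generic poll-until helper. A minimal sketch, independent of Tiden (wait_for_condition is hypothetical, not part of the framework):

import time

def wait_for_condition(check, timeout=180, poll_interval=10):
    # Poll check() until it returns True; fail if the timeout (in seconds) is exceeded.
    waited = 0
    while not check():
        if waited >= timeout:
            raise AssertionError('Timed out after {} s'.format(waited))
        time.sleep(poll_interval)
        waited += poll_interval
    return waited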
Example #7
 def diff_idle_verify_dump(self, raise_flag=False):
     dump_master = self.idle_verify_dump(role='master')
     log_print('Found path in output - {} '.format(dump_master))
     dump_replica = self.idle_verify_dump(role='replica')
     log_print('Found path in output - {} '.format(dump_replica))
     dump_diff = unified_diff(
         open(dump_master).readlines(),
         open(dump_replica).readlines(),
         fromfile='master',
         tofile='replica'
     )
     try:
         tiden_assert(''.join(dump_diff) == '',
                      "idle_verify dump files for master and replica do not match")
         log_print("IDLE_VERIFY DUMP IS EQUAL", color='green')
     except AssertionError:
         log_print("IDLE_VERIFY DUMP IS NOT EQUAL", color='red')
         if raise_flag:
             raise
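
diff_idle_verify_dump above relies on the standard-library difflib.unified_diff: when the two dump files are identical the generator yields nothing, so ''.join(...) is empty. A self-contained illustration (the dump lines are made up for the example):

from difflib import unified_diff

master_lines = ['partId=1, partHash=111\n', 'partId=2, partHash=222\n']
replica_lines = ['partId=1, partHash=111\n', 'partId=2, partHash=333\n']

# Empty output means the dumps match; any emitted lines point at a divergence.
diff = ''.join(unified_diff(master_lines, replica_lines, fromfile='master', tofile='replica'))
print(diff if diff else 'dumps match')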
Example #8
    def util_delete_snapshot_from_fs(self, snapshot_id=None, remote_dir=None):
        """
        Delete snapshot/snapshots from file system.
        If snapshot_id is not set, ALL snapshots will be deleted.
        If remote_dir is not set, the snapshot will be deleted from the local directory (work/snapshots).

        :param snapshot_id: snapshot ID. If not set, the mask '*' is used.
        :param remote_dir: remote directory if you need to delete a snapshot from the shared store.
        :return:
        """
        msg = 'Going to delete local snapshots'
        if snapshot_id:
            msg = 'Going to delete local snapshot with ID = %s' % snapshot_id
        log_print(msg)
        commands = {}
        dir_on_node = {}

        if snapshot_id:
            dir_on_node = self.util_find_snapshot_folders_on_fs(snapshot_id)

        server_nodes = self.ignite.get_all_default_nodes(
        ) + self.ignite.get_all_additional_nodes()
        for node_idx in server_nodes:
            host = self.ignite.nodes[node_idx]['host']
            ignite_home = self.ignite.nodes[node_idx]['ignite_home']

            delete_dir = dir_on_node.get(node_idx) if snapshot_id else '*'
            relative_snapshot_path = remote_dir if remote_dir else '%s/work/snapshot' % ignite_home

            if commands.get(host) is None:
                commands[host] = [
                    'rm -rf %s/%s' % (relative_snapshot_path, delete_dir)
                ]
            else:
                commands[host].append('rm -rf %s/%s' %
                                      (relative_snapshot_path, delete_dir))

        self.ssh.exec(commands)
        snapshot_folders = self.util_find_snapshot_folders_on_fs(
            snapshot_id, remote_dir=remote_dir)
        tiden_assert(snapshot_folders == {},
                     'Snapshot folders still exist: %s' % snapshot_folders)
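
For reference, the commands mapping built above has one entry per host, each holding a list of rm commands; roughly this shape (hosts and snapshot folder are illustrative):

# Illustrative shape of the per-host command map passed to self.ssh.exec()
commands = {
    '192.168.1.10': ['rm -rf /opt/ignite/work/snapshot/1619701200000'],
    '192.168.1.11': ['rm -rf /opt/ignite/work/snapshot/1619701200000'],
}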
Example #9
 def test_data_replication_pmi_3(self):
     """
         1. Bootstrap replication
         2. Run transactional loading on 'Master' cluster.
         3. Wait for 'Replica' cluster applies all changes.
         4. Check check sums on both clusters are the same.
         5. Stop one node in master
         6. Run transactional loading on 'Master' cluster
         7. Wait for 'Replica' cluster applies all changes.
         8. Return node back to master (stopped at step 5)
         9. Check check sums on both clusters are the same.
     """
     self.make_data_loading(duration=180)
     util_sleep_for_a_while(180)
     checksum_master_before, checksum_replica_before = self.calculate_checksums()
     tiden_assert(checksum_master_before == checksum_replica_before, 'Hash sum master and replica not match')
     self.ignite_master_app.kill_node(2)
     self.make_data_loading(duration=180, role='master')
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_master_app,
                                                  to_cluster=self.ignite_replica_app)
     self.ignite_replica_app.ru.replication_utility('status')
     self.ignite_replica_app.ru.replication_utility('stop')
     self.ignite_replica_app.ru.replication_utility('status')
     self.ignite_master_app.start_node(2)
     util_sleep_for_a_while(360)
     checksum_master_after, checksum_replica_after = self.calculate_checksums()
     tiden_assert(checksum_master_after == checksum_replica_after, 'Hash sum master and replica not match')
     tiden_assert(checksum_master_before != checksum_master_after,
                  'Master not change after load on replica with change')
     self.success_run = True
Example #10
    def test_util_specific_cache_filter(self, key_dump):

        limits = {"REPLICATED": 512, 'PARTITIONED': 1024}
        loaded_keys_count = 1001
        expected_groups_list, _, _, _ = self.parse_cache_xml(limits, loaded_keys_count, 'caches.xml')
        filter = {
            'ALL': None,
            'SYSTEM': None,
            'PERSISTENT': None,
            'NOT_PERSISTENT': None
        }

        test_dir = self.config['rt']['remote']['test_dir']
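        # For every supported --cache-filter value: run idle_verify, then collect the cache group
        # names reported by idle_verify --dump into the 'filter' map for the set comparisons below.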
        for filter_type in filter.keys():
            self.cu.control_utility('--cache', 'idle_verify --cache-filter {}'.format(filter_type))
        for filter_type in filter.keys():
            if not key_dump:
                self.cu.idle_verify_dump(cache_filter="--cache-filter {}".format(filter_type),
                                         copy_dir=test_dir, key_dump=key_dump)
            dump_filter = self.cu.idle_verify_dump(cache_filter="--cache-filter {}".format(filter_type),
                                                   copy_dir=test_dir)
            actual_groups_list = [partition_dump["info"]["grpName"] for partition_dump in dump_filter]
            filter[filter_type] = set(actual_groups_list)

        dump_filter = self.cu.idle_verify_dump(copy_dir=test_dir)
        actual_groups_list = [partition_dump["info"]["grpName"] for partition_dump in dump_filter]
        filter['WITHOUT_FILTER'] = set(actual_groups_list)
        tiden_assert('ignite-sys-cache' in filter['SYSTEM'] and 'ignite-sys-cache' not in filter[
            'PERSISTENT'] and 'ignite-sys-cache' not in filter['NOT_PERSISTENT'],
                     "ignite-sys-cache in SYSTEM filter and not in other specific filters")
        tiden_assert_equal(len(filter['PERSISTENT'] & filter['NOT_PERSISTENT']), 0,
                           "length of intersection PERSISTENTS and NOT_PERSISTENT sets == 0")
        tiden_assert_equal(sorted(filter['WITHOUT_FILTER'] | {'ignite-sys-cache'}), sorted(filter['ALL']),
                           "result of idle_verify filter - ALL and call without filter is equal "
                           "if add ignite-sys-cache")
        tiden_assert_equal(sorted(set(expected_groups_list) | {'ignite-sys-cache'}), sorted(filter['ALL']),
                           "caches.xml group list is equal filter['ALL'] group list"
                           "if add ignite-sys-cache")
        tiden_assert(filter['ALL'].issuperset(filter['SYSTEM'] | filter['PERSISTENT'] | filter['NOT_PERSISTENT']),
                     "Set of specific filters not equal set of all cache_group")
Example #11
    def test_util_1_1_idle_verify_dump_skip_zeros(self):
        """
        idle_verify --dump --skipZeros
        Test that the idle_verify command detects a problem if some key is corrupted (there is a difference between
        the key on the primary partition and its backup).
        """
        from pt.piclient.helper.cache_utils import IgniteCache

        cache_under_test = 'cache_group_4_118'
        test_dir = self.config['rt']['remote']['test_dir']
        dump = self.cu.idle_verify_dump(copy_dir=test_dir)
        dump_skip_zeros = self.cu.idle_verify_dump(skip_zeros=True, copy_dir=test_dir)

        tiden_assert(dump != dump_skip_zeros, "dump with --skipZeros is equal to the full dump")

        for dump_item in dump_skip_zeros:
            for instance in dump_item["instances"]:
                if int(instance["updateCntr"]) == 0 or int(instance["size"]) == 0 or int(instance["partHash"]) == 0:
                    raise TidenException("Found zeros in non zeros dump")

        if if_applicable_ignite_version(self.config, '2.5.6'):
            with PiClient(self.ignite, self.get_client_config()):
                caches = self.ignite.get_cache_names('cache_group')

                for cache_name in [cache_name for cache_name in caches if cache_name == cache_under_test]:
                    cache = IgniteCache(cache_name)

                    for i in range(1, self.max_key):
                        cache.remove(i, key_type='long')

            dump_skip_zeros = self.cu.idle_verify_dump(skip_zeros=True, copy_dir=test_dir)

            tiden_assert(dump != dump_skip_zeros, "dump with --skipZeros is equal to the full dump")

            for dump_item in dump_skip_zeros:
                for instance in dump_item["instances"]:
                    if int(instance["updateCntr"]) == 0 or int(instance["size"]) == 0 or int(instance["partHash"]) == 0:
                        raise TidenException("Found zeros in non zeros dump")
Example #12
 def test_data_replication_pmi_8(self):
     """
         1. Bootstrap replication
         2. Run transactional loading on 'Master' cluster.
         3. Wait for 'Replica' cluster applies all changes.
         4. Restart master
         5. Run transactional loading on 'Master' cluster.
         6. Check check sums on both clusters are the same.
         --------------- additional steps
         7. stop master
     """
     self.make_data_loading(duration=180)
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_master_app,
                                                  to_cluster=self.ignite_replica_app)
     checksum_master, checksum_replica = self.calculate_checksums()
     tiden_assert(checksum_master == checksum_replica, 'Hash sum master and replica not match')
     self.restart_ignite_grid()
     self.make_data_loading(duration=180)
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_master_app,
                                                  to_cluster=self.ignite_replica_app)
     checksum_master, checksum_replica = self.calculate_checksums()
     tiden_assert(checksum_master == checksum_replica, 'Hash sum master and replica not match')
     self.ignite_master_app.ru.replication_utility('stop', background=True)
     self.success_run = True
Example #13
    def test_util_1_1_idle_verify_dump(self):
        """
        The idle_verify --dump command dumps data about all current cache partitions;
        the dump should differ after caches are changed,
        and the dump taken after restore must be equal to the previous one.
        """
        with PiClient(self.ignite, self.get_client_config()):
            test_dir = self.config['rt']['remote']['test_dir']
            dump_before = self.cu.idle_verify_dump(copy_dir=test_dir)
            self.run_snapshot_utility('snapshot', '-type=full')

            self.load_data_with_streamer(end_key=5000,
                                         value_type=ModelTypes.VALUE_ALL_TYPES_INDEXED.value,
                                         allow_overwrite=True)

            dump_after = self.cu.idle_verify_dump(copy_dir=test_dir)
            self.compare_lists_of_dicts(dump_before, dump_after)
            tiden_assert(dump_before != dump_after, 'dumps before and after data filling are equals')

            self.su.snapshot_utility('restore', '-id={}'.format(self.get_snapshot_id(1)))

            restored_dump = self.cu.idle_verify_dump(copy_dir=test_dir)
            self.compare_lists_of_dicts(dump_before, restored_dump)
            tiden_assert_equal(dump_before, restored_dump, 'restored cache dump are changed')
Example #14
 def test_data_replication_pmi_5(self):
     """
         1. Bootstrap replication
         2. Run transactional loading on 'Master' cluster.
         3. Wait for 'Replica' cluster applies all changes.
         4. Check check sums on both clusters are the same.
         5. Stop replication
         6. Check replica sums not change
         7. Check master sums change
     """
     checksum_master_before, checksum_replica_before = self.calculate_checksums()
     tiden_assert(checksum_master_before == checksum_replica_before, 'Hash sum master and replica not match')
     self.ignite_replica_app.ru.replication_utility('stop')
     self.ignite_master_app.ru.replication_utility('stop')
     self.make_data_loading(duration=60)
     checksum_master_after, checksum_replica_after = self.calculate_checksums(idle_verify=False)
     tiden_assert(checksum_replica_before == checksum_replica_after, 'Replica change after stop replication')
     tiden_assert(checksum_master_before != checksum_master_after, 'Master not change after load')
     self.success_run = True
Example #15
    def test_util_1_1_idle_verify(self):
        """
        Test that the idle_verify command detects a problem if some key is corrupted (there is a difference between
        the key on the primary partition and its backup).
        """

        partitions_to_break = [1, 2]
        with PiClient(self.ignite, self.get_client_config(), nodes_num=1):
            cache_under_test = 'cache_group_1_028'
            log_print('Cache under test: %s' % cache_under_test, color='blue')

            operation = create_broke_data_entry_operation(cache_under_test, partitions_to_break[0], True,
                                                          'value', 'counter')
            log_print(operation.evaluate())
            operation = create_broke_data_entry_operation(cache_under_test, partitions_to_break[1], True, 'counter')
            log_print(operation.evaluate())

        util_sleep_for_a_while(10)

        expected = ['Conflict partition']
        self.cu.control_utility('--cache idle_verify', all_required=expected)

        log_print(self.cu.latest_utility_output)
        output = self.cu.latest_utility_output
        # m = search('See log for additional information. (.*)', self.cu.latest_utility_output)
        # if m:
        #     conflict_file = m.group(1)
        #     host = self.cu.latest_utility_host
        #     output = self.ssh.exec_on_host(host, ['cat {}'.format(conflict_file)])
        #     log_print(output, color='blue')
        # else:
        #     tiden_assert(False, 'Conflict file is not found in output:\n{}'.format(self.cu.latest_utility_output))

        grpId, partId = [], []
        for line in output.split('\n'):
            m = search(r'Conflict partition: (PartitionKey|PartitionKeyV2) \[grpId=(\d+),.*partId=(\d+)\]', line)
            if m:
                grpId.append(m.group(2))
                partId.append(int(m.group(3)))

        tiden_assert(grpId and partId, 'Could not find partition id in buffer %s' % output)
        tiden_assert(len(set(grpId)) == 1, 'Should be one group in output %s' % output)
        tiden_assert(set(partId) == set(partitions_to_break), 'Partition ids should match %s' % output)
Example #16
 def test_data_replication_pmi_4_replica_node_up(self):
     """
         1. Bootstrap replication
         2. Run transactional loading on 'Master' cluster.
         3. Wait for 'Replica' cluster applies all changes.
         4. Check check sums on both clusters are the same.
         5. Stop one node in master (delete lfs)
         6. Change master blt
         7. Run transactional loading on 'Master' cluster.
         8. Kill the same node (as in step 5) on replica
         9. Wait for 'Replica' cluster applies all changes.
         10. Return node back to master (stopped at step 5)
         11. Return node back to replica (stopped at step 5)
         12. Change master blt
         13. Run transactional loading on 'Master' cluster.
         14. Check check sums on both clusters are the same.
     """
     self.make_data_loading(duration=180)
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_master_app,
                                                  to_cluster=self.ignite_replica_app)
     checksum_master_before, checksum_replica_before = self.calculate_checksums()
     tiden_assert(checksum_master_before == checksum_replica_before, 'Hash sum master and replica not match')
     self.ignite_master_app.kill_node(2)
     self.ignite_master_app.delete_lfs(node_id=2)
     self.ignite_master_app.cu.control_utility('--baseline remove node_{} --yes'.format(2))
     self.make_data_loading(duration=180)
     self.ignite_replica_app.kill_node(2)
     self.ignite_replica_app.delete_lfs(node_id=2)
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_master_app,
                                                  to_cluster=self.ignite_replica_app)
     checksum_master_before, checksum_replica_before = self.calculate_checksums()
     tiden_assert(checksum_master_before == checksum_replica_before, 'Hash sum master and replica not match')
     self.ignite_master_app.start_node(2)
     self.ignite_replica_app.start_node(2)
     self.ignite_master_app.cu.control_utility('--baseline add node_{} --yes'.format(2))
     self.make_data_loading(duration=180)
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_master_app,
                                                  to_cluster=self.ignite_replica_app)
     checksum_master_before, checksum_replica_before = self.calculate_checksums()
     tiden_assert(checksum_master_before == checksum_replica_before, 'Hash sum master and replica not match')
     self.success_run = True
Example #17
 def test_data_replication_pmi_7(self):
     """
         1. Bootstrap replication
         2. Run transactional loading on 'Master' cluster.
         3. Wait for 'Replica' cluster applies all changes.
         4. Call pause on replica
         5. Run transactional loading on 'Master' cluster.
         6. Call resume on replica
         7. Check check sums on both clusters are the same.
     """
     checksum_master_before, checksum_replica_before = self.calculate_checksums()
     tiden_assert(checksum_master_before == checksum_replica_before, 'Hash sum master and replica not match')
     self.ignite_replica_app.ru.replication_utility('pause')
     self.make_data_loading(duration=60)
     self.ignite_replica_app.ru.replication_utility('resume')
     self.wait_for_another_cluster_apply_last_cut(from_cluster=self.ignite_master_app,
                                                  to_cluster=self.ignite_replica_app)
     checksum_master_after, checksum_replica_after = self.calculate_checksums()
     tiden_assert(checksum_replica_after == checksum_master_after, 'Hash sum master and replica not match')
     tiden_assert(checksum_master_before != checksum_master_after, 'Master not change after load')
     self.success_run = True
Example #18
    def test_util_1_3_metadata(self):
        """
        Check that the METADATA and ANALYZE commands from snapshot-utility.sh show correct information.

        For METADATA command we check that output file is generated and contains correct information about snapshot.

        To test ANALYZE command we update two keys in one cache in the same partition (for cache with 16 partitions
        keys 1 and 17 should be placed in the same partition) and check that analyze shows the correct
        update counter for this partition.
        """
        from pt.piclient.helper.cache_utils import IgniteCache

        node_under_test = 2
        self.start_grid()
        self.load_data_with_streamer(end_key=1000,
                                     value_type=ModelTypes.VALUE_ALL_TYPES_INDEXED.value,
                                     allow_overwrite=True)

        self.run_snapshot_utility('snapshot', '-type=full', snapshot_archive_enabled=self.snapshot_arch_enabled)
        snapshot_local_path = '%s/work/snapshot/' % self.ignite.nodes[node_under_test]['ignite_home']
        log_print(snapshot_local_path, color='blue')

        self.run_snapshot_utility('metadata', '-action=print -src=%s -id=%s'
                                  % (self.config['rt']['remote']['test_dir'], self.get_snapshot_id(1)), standalone=True)

        self.run_snapshot_utility('analyze', '-src=%s -id=%s -output=%s/snapshot_analyze_1.out'
                                  % (self.config['rt']['remote']['test_dir'], self.get_snapshot_id(1),
                                     self.config['rt']['remote']['test_dir']),
                                  standalone=True)

        with PiClient(self.ignite, self.get_client_config(), nodes_num=1) as piclient:
            from pt.piclient.helper.cache_utils import IgniteCacheConfig
            cache_name = 'test_cache_001'

            cache_assert = IgniteCache(cache_name)
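            # For a 16-partition cache, keys 1 and 17 land in the same partition (1 % 16 == 17 % 16),
            # so both puts below bump the update counter of a single partition (see the docstring).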
            log_print(cache_assert.get(1))
            cache_assert.put(1, 1)
            log_print(cache_assert.get(17))
            cache_assert.put(17, 1)

            cache_name = 'cache_with_empty_partitions'
            ignite = piclient.get_ignite()
            gateway = piclient.get_gateway()

            # this part is to test GG-14708
            cache_config = IgniteCacheConfig(gateway)
            cache_config.set_name(cache_name)
            cache_config.set_cache_mode('PARTITIONED')
            cache_config.set_backups(2)
            cache_config.set_atomicity_mode('transactional')
            cache_config.set_write_synchronization_mode('full_sync')
            cache_config.set_affinity(False, 1024)
            ignite.getOrCreateCache(cache_config.get_config_object())

            cache = IgniteCache(cache_name)
            cache.put(1, "One")
            cache.put(2, "Two")

        self.run_snapshot_utility('snapshot', '-type=full', snapshot_archive_enabled=self.snapshot_arch_enabled)
        self.run_snapshot_utility('analyze', '-src=%s -id=%s -output=%s/snapshot_analyze_2.out'
                                  % (self.config['rt']['remote']['test_dir'], self.get_snapshot_id(2),
                                     self.config['rt']['remote']['test_dir']),
                                  standalone=True)
        tiden_assert('Exception' not in self.su.latest_utility_output, 'Found Exception in utility output:\n{}'.
                     format(self.su.latest_utility_output))
Example #19
    def test_permissions_over_caches(self):
        """
        Based on GG-14323 (Backport GG-20998).

        The issue being checked:
        Apparently, the problem is deeper than I thought in the first place.
        1) SQL permissions have never worked correctly. If query is executed through cache API, then we only check
        permissions against this cache. It means, that if one has read permission to one cache,
        it could be used as a "window" for all other caches.

        Issue with client getting security context should be fixed in 8.5.10. You can uncomment authenticator section in
        client.xml to test this fix without getting the security context.
        :return:
        """
        from pt.piclient.piclient import PiClient
        from pt.piclient.helper.cache_utils import IgniteCacheConfig, IgniteCache
        from pt.piclient.helper.class_utils import create_all_types
        from pt import TidenException

        self.util_enable_server_security()
        self.util_enable_ssl(context_template='auth_enabled')
        self.set_current_context('ssl_enabled')

        self.start_grid(activate_on_particular_node=1)
        try:
            with PiClient(self.ignite, self.get_client_config(),
                          nodes_num=1) as piclient:
                gateway = piclient.get_gateway()
                ignite = piclient.get_ignite()

                # Configure cache1
                cache_config = IgniteCacheConfig()
                cache_config.set_name('cache1')
                cache_config.set_cache_mode('replicated')
                cache_config.set_atomicity_mode('transactional')
                cache_config.set_write_synchronization_mode('full_sync')
                cache_config.set_affinity(False, 32)

                # set query entities
                query_indices_names = gateway.jvm.java.util.ArrayList()
                query_indices_names.add("strCol")
                query_indices = gateway.jvm.java.util.ArrayList()
                query_indices.add(gateway.jvm.org.apache.ignite.cache.
                                  QueryIndex().setFieldNames(
                                      query_indices_names, True))

                query_entities = gateway.jvm.java.util.ArrayList()
                query_entities.add(
                    gateway.jvm.org.apache.ignite.cache.QueryEntity(
                        "java.lang.Integer",
                        ModelTypes.VALUE_ALL_TYPES.value).addQueryField(
                            "strCol", "java.lang.String", None).addQueryField(
                                "longCol", "java.lang.Long",
                                None).setIndexes(query_indices))

                cache_config.get_config_object().setQueryEntities(
                    query_entities)
                cache_config.get_config_object().setStatisticsEnabled(False)

                ignite.getOrCreateCache(cache_config.get_config_object())

                # Configure cache2
                cache_config2 = IgniteCacheConfig()
                cache_config2.set_name('cache2')
                cache_config2.set_cache_mode('partitioned')
                cache_config2.set_backups(3)
                cache_config2.set_atomicity_mode('transactional')
                cache_config2.set_write_synchronization_mode('full_sync')
                cache_config2.set_affinity(False, 32)

                # set query entities
                query_indices_names2 = gateway.jvm.java.util.ArrayList()
                query_indices_names2.add("strCol")
                query_indices2 = gateway.jvm.java.util.ArrayList()
                query_indices2.add(gateway.jvm.org.apache.ignite.cache.
                                   QueryIndex().setFieldNames(
                                       query_indices_names2, True))
                query_entities2 = gateway.jvm.java.util.ArrayList()
                query_entities2.add(
                    gateway.jvm.org.apache.ignite.cache.QueryEntity(
                        "java.lang.Integer",
                        ModelTypes.VALUE_ALL_TYPES.value).addQueryField(
                            "strCol", "java.lang.String", None).addQueryField(
                                "longCol", "java.lang.Long",
                                None).setIndexes(query_indices))

                cache_config2.get_config_object().setQueryEntities(
                    query_entities2)
                cache_config2.get_config_object().setStatisticsEnabled(False)

                ignite.getOrCreateCache(cache_config2.get_config_object())

            self.wait_for_running_clients_num(0, 90)
            # Restart client
            with PiClient(self.ignite, self.get_client_config()) as piclient:
                cache1 = IgniteCache('cache1')
                cache2 = IgniteCache('cache2')

                run_num = 3
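                # With the configured permissions cache1 is readable but cache2 is not, so SQL routed
                # through cache1 must not expose cache2 data (see the GG-14323 notes in the docstring).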
                for i in range(0, run_num):
                    log_print('Run %s of %s' % (str(i + 1), run_num))
                    for j in range(i * 100, i * 100 + 101):
                        cache1.put(j, create_all_types(j))
                        cache2.put(j, create_all_types(j))

                    log_print('Create sqlFieldsQueries')
                    sqlFieldsQuery1 = piclient.get_gateway().jvm.org.apache.ignite.cache.query \
                        .SqlFieldsQuery('select * from "cache1".AllTypes')
                    sqlFieldsQuery2 = piclient.get_gateway().jvm.org.apache.ignite.cache.query \
                        .SqlFieldsQuery('select * from "cache2".AllTypes')

                    log_print('Assert sqlFieldsQuery is not empty')
                    tiden_assert(
                        not cache1.cache.query(
                            sqlFieldsQuery1).getAll().isEmpty(),
                        "Value %s could be selected from cache1" % str(i + 1))

                    try:
                        # Querying cache2 through cache1 is expected to raise 'Authorization failed'
                        cache1.cache.query(sqlFieldsQuery2).getAll()
                    except Exception as e:
                        tiden_assert(
                            'Authorization failed' in str(e),
                            'Expecting "Authorization failed" error when querying from cache2 '
                            'using cache1 as a proxy')
                        log_print(str(e), color='debug')
                    else:
                        raise TidenException(
                            'Expected "Authorization failed" error but did not get one'
                        )

        except TidenException as e:
            assert "Some new problem arises during reproducing GG-14323: %s" % e
Example #20
def test_assert():
    tiden_assert(True, 'ok')
    with pytest.raises(AssertionError):
        tiden_assert(False, 'fail')