Example #1
    def check_snapshots_listing_on_all_nodes(self,
                                             snapshots,
                                             snapshot_path=None):
        path = 'work/snapshot/'
        if snapshot_path:
            path = snapshot_path
        output = self.run_on_all_nodes('ls %s' % path)

        if isinstance(snapshots, list):
            expecting_snapshots = list(snapshots)
        else:
            expecting_snapshots = [str(snapshots)]

        for node_id in output.keys():
            found = []
            snapshot_folders = [
                item for item in output[node_id].split('\n') if item
            ]

            tiden_assert_equal(len(expecting_snapshots), len(snapshot_folders),
                               'Number of folders in work/snapshot on server')

            for snapshot in expecting_snapshots:
                log_print(snapshot)
                found += [
                    folder for folder in snapshot_folders if snapshot in folder
                ]

            tiden_assert_equal(
                len(expecting_snapshots), len(found),
                'All folders in work/snapshot:\n%s\ncorrespond to expected snapshots: %s'
                % (','.join(found), ','.join(expecting_snapshots)))
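For context, a minimal usage sketch of this helper (the test name is hypothetical; snapshot_utility returning the snapshot id matches Example #7 below):

    def test_snapshot_folder_per_snapshot(self):
        # hypothetical test: one full snapshot should produce exactly one
        # matching folder under work/snapshot/ on every node
        snapshot_id = self.su.snapshot_utility('snapshot', '-type=full')
        self.check_snapshots_listing_on_all_nodes([snapshot_id])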
Example #2
    def test_util_specific_filter_exclude(self, key_dump):
        limits = {"REPLICATED": 512, "PARTITIONED": 1024}
        loaded_keys_count = 1001
        test_dir = self.config['rt']['remote']['test_dir']
        common_filter = self.cu.idle_verify_dump(copy_dir=test_dir)
        actual_groups_list = set([partition_dump["info"]["grpName"] for partition_dump in common_filter])
        _, _, _, expected_caches_names = self.parse_cache_xml(limits, loaded_keys_count, 'caches.xml')
        for cache_group in expected_caches_names.keys():
            exclude_filter = self.cu.idle_verify_dump(exclude_caches=','.join(expected_caches_names[cache_group]),
                                                      copy_dir=test_dir, key_dump=key_dump)
            if key_dump:
                exclude_groups_list = set([partition_dump["info"]["grpName"] for partition_dump in exclude_filter])
                tiden_assert_equal(sorted(actual_groups_list - {cache_group}),
                                   sorted(exclude_groups_list),
                                   'Exclude {} group from control --cache idle_verify and check result'.format(
                                       cache_group),
                                   debug=True)
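The assertion above boils down to set arithmetic on cache group names; a self-contained sketch of the invariant with made-up group names:

def exclude_filter_invariant_sketch():
    all_groups = {'grp_a', 'grp_b', 'grp_c'}      # groups reported without any filter
    excluded_group = 'grp_b'                      # group whose caches were passed to exclude_caches
    groups_after_exclude = {'grp_a', 'grp_c'}     # groups reported with the exclude filter
    assert sorted(all_groups - {excluded_group}) == sorted(groups_after_exclude)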
Example #3
    def test_basic_cache_access(self):
        """
        1. a read-only user is granted access only to `cache_ro_*` caches
        2. start the grid
        3. populate caches with data and calculate checksums
        4. calculate checksums from a client with read-only access to some caches and no access to others
        5. the checksum dump for the read-only client must not contain information
           about caches the client has no access to
        """
        self.util_enable_client_security('')  # load data with 'server' access
        self.start_grid(activate_on_particular_node=1)
        self.load_data_with_streamer()

        dump_before = self.calc_checksums_distributed()
        # self.util_save_dump('dump_all.log', dump_before)
        checksums_before = self.convert_to_dict(dump_before)
        self.util_enable_client_security('read_only')
        checksums_after = {}
        try:
            dump_after = self.calc_checksums_distributed(
                config_file=self.get_client_config('read_only'))
            checksums_after = self.convert_to_dict(dump_after)
        except Exception as e:
            log_print(
                'Exception in calc_checksums_distributed method:\n{}'.format(
                    e))

        # self.util_save_dump('dump_read_only.log', dump_before)

        for cache_name, checksum in checksums_before.items():
            if 'cache_no' in cache_name:
                tiden_assert_not_equal(
                    checksum, checksums_after.get(cache_name),
                    f"Checksums must differ due to no access to several cache {cache_name}"
                )
            else:
                tiden_assert_equal(
                    checksum, checksums_after.get(cache_name),
                    f"Checksums must be equal for cache {cache_name}")
    def test_util_1_1_idle_verify_dump(self):
        """
        The idle_verify --dump command dumps all data about the current cache partitions to disk.
        The dump should differ if the caches were changed;
        data restored from a snapshot must equal the previous data.
        """
        with PiClient(self.ignite, self.get_client_config()):
            test_dir = self.config['rt']['remote']['test_dir']
            dump_before = self.cu.idle_verify_dump(copy_dir=test_dir)
            self.run_snapshot_utility('snapshot', '-type=full')

            self.load_data_with_streamer(end_key=5000,
                                         value_type=ModelTypes.VALUE_ALL_TYPES_INDEXED.value,
                                         allow_overwrite=True)

            dump_after = self.cu.idle_verify_dump(copy_dir=test_dir)
            self.compare_lists_of_dicts(dump_before, dump_after)
            tiden_assert(dump_before != dump_after, 'dumps before and after data filling must differ')

            self.su.snapshot_utility('restore', '-id={}'.format(self.get_snapshot_id(1)))

            restored_dump = self.cu.idle_verify_dump(copy_dir=test_dir)
            self.compare_lists_of_dicts(dump_before, restored_dump)
            tiden_assert_equal(dump_before, restored_dump, 'restored cache dump must equal the dump taken before the snapshot')
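compare_lists_of_dicts comes from the test base class; a plausible minimal version (an assumption, not the actual helper) that reports element-wise differences before the assertion runs:

def compare_lists_of_dicts_sketch(list_before, list_after):
    # pair entries up by position and print any that differ;
    # assumes both dumps have the same length and stable ordering
    for i, (before, after) in enumerate(zip(list_before, list_after)):
        if before != after:
            print('partition %s differs:\nbefore: %s\nafter: %s' % (i, before, after))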
    def test_util_1_1_idle_verify_dump_correct_data(self):
        """
        idle_verify --dump must provide correct data about caches;
        check that the dump contains data for all declared cache groups
        """
        partitions = self.cu.idle_verify_dump(copy_dir=self.config['rt']['remote']['test_dir'])

        limits = {"REPLICATED": 512, 'PARTITIONED': 1024}
        loaded_keys_count = 1001
        expected_groups_list, expected_partitions_count, expected_caches_counts, _ = \
            self.parse_cache_xml(limits, loaded_keys_count, 'caches.xml')

        actual_groups_list = [partition_dump["info"]["grpName"] for partition_dump in partitions]
        actual_partitions_count = {}
        actual_caches_count = {}
        for partition in partitions:
            group_name = partition["info"]["grpName"]

            # partitions
            count = actual_partitions_count.get(group_name, 0)
            count += 1
            actual_partitions_count[group_name] = count

            # caches sizes
            nodes_cache_size = [(item["consistentId"], int(item["size"])) for item in partition["instances"]]
            group_cache_size = sum([int(i[1]) for i in nodes_cache_size])
            sizes = {
                "nodes": nodes_cache_size,
                "all": group_cache_size
            }
            new_sizes = actual_caches_count.get(group_name, {
                "caches_size": 0,
                "nodes": []
            })
            new_sizes["nodes"] += [sizes]
            new_sizes["caches_size"] += group_cache_size
            actual_caches_count[group_name] = new_sizes

        tiden_assert_equal(sorted(set(expected_groups_list)), sorted(set(actual_groups_list)), "cache groups")
        tiden_assert_equal(expected_partitions_count, actual_partitions_count, 'partitions count')
        tiden_assert_equal(expected_caches_counts,
                           {k: v["caches_size"] for k, v in actual_caches_count.items()}, "caches count")
    def test_util_specific_cache_filter(self, key_dump):

        limits = {"REPLICATED": 512, "PARTITIONED": 1024}
        loaded_keys_count = 1001
        expected_groups_list, _, _, _ = self.parse_cache_xml(limits, loaded_keys_count, 'caches.xml')
        # 'filters' avoids shadowing the built-in filter()
        filters = {
            'ALL': None,
            'SYSTEM': None,
            'PERSISTENT': None,
            'NOT_PERSISTENT': None
        }

        test_dir = self.config['rt']['remote']['test_dir']
        for filter_type in filters.keys():
            self.cu.control_utility('--cache', 'idle_verify --cache-filter {}'.format(filter_type))
        for filter_type in filters.keys():
            if not key_dump:
                self.cu.idle_verify_dump(cache_filter="--cache-filter {}".format(filter_type),
                                         copy_dir=test_dir, key_dump=key_dump)
            dump_filter = self.cu.idle_verify_dump(cache_filter="--cache-filter {}".format(filter_type),
                                                   copy_dir=test_dir)
            actual_groups_list = [partition_dump["info"]["grpName"] for partition_dump in dump_filter]
            filters[filter_type] = set(actual_groups_list)

        dump_filter = self.cu.idle_verify_dump(copy_dir=test_dir)
        actual_groups_list = [partition_dump["info"]["grpName"] for partition_dump in dump_filter]
        filters['WITHOUT_FILTER'] = set(actual_groups_list)
        tiden_assert('ignite-sys-cache' in filters['SYSTEM']
                     and 'ignite-sys-cache' not in filters['PERSISTENT']
                     and 'ignite-sys-cache' not in filters['NOT_PERSISTENT'],
                     "ignite-sys-cache is in the SYSTEM filter and not in the other specific filters")
        tiden_assert_equal(len(filters['PERSISTENT'] & filters['NOT_PERSISTENT']), 0,
                           "intersection of the PERSISTENT and NOT_PERSISTENT sets is empty")
        tiden_assert_equal(sorted(filters['WITHOUT_FILTER'] | {'ignite-sys-cache'}), sorted(filters['ALL']),
                           "idle_verify with filter ALL equals idle_verify without a filter "
                           "once ignite-sys-cache is added")
        tiden_assert_equal(sorted(set(expected_groups_list) | {'ignite-sys-cache'}), sorted(filters['ALL']),
                           "caches.xml group list equals the filters['ALL'] group list "
                           "once ignite-sys-cache is added")
        tiden_assert(filters['ALL'].issuperset(filters['SYSTEM'] | filters['PERSISTENT'] | filters['NOT_PERSISTENT']),
                     "ALL filter set is a superset of the union of the specific filter sets")
Example #7
    def test_000_sequences_debug_after_snapshot_restore_and_grid_restart(self):

        test_seq_1 = 'myseq1'
        test_seq_2 = 'myseq2'
        with PiClient(self.ignite, self.get_client_config()) as piclient:

            seq1 = self.get_or_create_atomic_sequence(piclient, test_seq_1)

            for i in range(0, 5):
                seq1.incrementAndGet()

            seq1value1 = seq1.get()
            tiden_assert_equal(5, seq1value1,
                               "sequence '%s' value" % test_seq_1)

            # create full snapshot
            full_snapshot_id = self.su.snapshot_utility(
                'snapshot', '-type=full')

            seq2 = self.get_or_create_atomic_sequence(piclient, test_seq_2)

            tiden_assert_equal(0, seq2.get(),
                               "sequence '%s' value" % test_seq_2)

            # increment sequence after snapshot
            for i in range(0, 5):
                seq1.incrementAndGet()

            seq1value2 = seq1.get()
            tiden_assert_equal(10, seq1value2,
                               "sequence '%s' value" % test_seq_1)

            # restore full snapshot
            self.su.snapshot_utility('restore', '-id=%s' % full_snapshot_id)

            seq2 = self.get_or_create_atomic_sequence(piclient,
                                                      test_seq_2,
                                                      create=False)
            tiden_assert_is_none(
                seq2,
                "after restore from snapshot, sequence '%s'" % test_seq_2)

            seq1 = self.get_or_create_atomic_sequence(piclient,
                                                      test_seq_1,
                                                      create=False)
            tiden_assert_is_not_none(
                seq1,
                "after restore from snapshot, sequence '%s'" % test_seq_1)

            value = seq1.get()
            tiden_assert_equal(
                seq1value1, value,
                "sequence '%s' value restored from snapshot" % test_seq_1)

            seq2 = self.get_or_create_atomic_sequence(piclient,
                                                      test_seq_2,
                                                      create=True)

            for i in range(0, 3):
                seq2.incrementAndGet()

            seq2value = seq2.get()
            tiden_assert_equal(
                3, seq2value,
                "sequence '%s' value before restart" % test_seq_2)

            self.restart_grid()

            seq2value2 = seq2.get()
            tiden_assert_equal(
                seq2value, seq2value2,
                "sequence '%s' value after restart" % test_seq_2)

            seq1value3 = seq1.get()
            tiden_assert_equal(
                seq1value1, seq1value3,
                "sequence '%s' value after restart" % test_seq_1)
Example #8
    def test_client_hashes_sequence_values(self):
        test_seq = 'test_seq'
        with PiClient(self.ignite, self.get_client_config()) as piclient:

            seq = self.get_or_create_atomic_sequence(piclient, test_seq)

            seq.incrementAndGet()

            tiden_assert_equal(1, seq.get(), "sequence '%s' value" % test_seq)

            for i in range(1, 5):
                seq.incrementAndGet()

            tiden_assert_equal(5, seq.get(), "sequence '%s' value" % test_seq)
            # create full snapshot
            full_snapshot_id = self.su.snapshot_utility(
                'snapshot', '-type=full')

            # increment sequence after snapshot
            for i in range(1, 5):
                seq.incrementAndGet()

            tiden_assert_equal(9, seq.get(), "sequence '%s' value" % test_seq)

            # restore full snapshot
            self.su.snapshot_utility('restore', '-id=%s' % full_snapshot_id)

        with PiClient(self.ignite, self.get_client_config()) as piclient:

            seq = self.get_or_create_atomic_sequence(piclient,
                                                     test_seq,
                                                     create=False)
            tiden_assert_is_not_none(
                seq, "after restore from snapshot, sequence '%s'" % test_seq)

            tiden_assert_equal(6, seq.get(), "sequence '%s' value" % test_seq)

            for i in range(1, 5):
                seq.incrementAndGet()

            tiden_assert_equal(10, seq.get(), "sequence '%s' value" % test_seq)

            self.restart_grid()

            seq = self.get_or_create_atomic_sequence(piclient,
                                                     test_seq,
                                                     create=False)
            tiden_assert_is_not_none(
                seq, "after restore from snapshot, sequence '%s'" % test_seq)

            tiden_assert_equal(12, seq.get(), "sequence '%s' value" % test_seq)
Example #9
    def assert_check_sums(checksum_1, checksum_2):
        UltimateUtils.util_compare_check_sums(checksum_1, checksum_2)
        tiden_assert_equal(checksum_1, checksum_2, 'Check sums assertion')
Example #10
def test_assert_equal():
    tiden_assert_equal(2, 2, '2==2')
    with pytest.raises(AssertionError):
        tiden_assert_equal(2, 1, '2==1')
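To run this last example outside the tiden framework, minimal stand-ins for the assert helpers, consistent with how they are called in the examples above (assumptions, not tiden's actual implementation):

def tiden_assert(condition, message):
    # minimal stand-in: raise AssertionError with the supplied label
    assert condition, message

def tiden_assert_equal(expected, actual, message, debug=False):
    # minimal stand-in: include both values in the failure message
    assert expected == actual, '%s: expected %r, actual %r' % (message, expected, actual)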