def test_single_write(self, servers, simple_node):
    '''
    Checks that a single access to some key makes it appear among the top
    keys reported over the HTTP interface of the monitoring service.
    '''
    session = make_session(node=simple_node,
                           test_name='TestMonitorTop.test_single_write')
    session.groups = session.routes.groups()

    test_key = 'one_key'
    session.write_data(test_key, 'some_data').get()
    session.read_latest(test_key).get()
    # the monitor reports hashed ids, not raw keys
    hashed_key = hashlib.sha512(test_key).hexdigest()

    top_keys = []
    for remote, port in zip(servers.remotes, servers.monitors):
        host = remote.split(':')[0]
        response = get_top_by_http(host, port)
        # every response must carry the required set of fields
        check_response_fields(response, servers.config_params)
        top_keys = response['top']['top_by_size']
        if has_key(hashed_key, top_keys):
            break

    # the written key must show up among the top keys
    assert has_key(hashed_key, top_keys)
    # every top-keys item must contain all required fields
    check_key_fields(top_keys)
    self.__check_key_existance_using_session_monitor(session,
                                                     servers.config_params,
                                                     hashed_key)
def test_dc_three_groups(self, scope, server, simple_node):
    '''
    Runs dc recovery without --one-node and without --backend-id against all
    three groups, then verifies that each of the three groups holds the data
    originating from the third group.
    '''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_dc_three_groups',
                           test_namespace=self.namespace)
    recovery(one_node=False,
             remotes=map(elliptics.Address.from_host_port_family, server.remotes),
             backend_id=None,
             address=scope.test_address2,
             groups=(scope.test_group, scope.test_group2, scope.test_group3),
             session=session.clone(),
             rtype=RECOVERY.DC,
             log_file='dc_three_groups.log',
             tmp_dir='dc_three_groups')
    # every group must now contain the third group's data
    for group in (scope.test_group, scope.test_group2, scope.test_group3):
        session.groups = (group,)
        check_data(scope, session, self.keys, self.datas2)
def test_single_write(self, server, simple_node):
    '''
    Checks that a single access to some key makes it appear among the top
    keys reported over the HTTP interface of the monitoring service.
    '''
    session = make_session(node=simple_node,
                           test_name='TestMonitorTop.test_single_write')
    session.groups = session.routes.groups()

    test_key = 'one_key'
    session.write_data(test_key, 'some_data').get()
    session.read_latest(test_key).get()
    # the monitor reports hashed ids, not raw keys
    hashed_key = hashlib.sha512(test_key).hexdigest()

    top_keys = []
    for remote, port in zip(server.remotes, server.monitors):
        host = remote.split(':')[0]
        response = get_top_by_http(host, port)
        # every response must carry the required set of fields
        check_response_fields(response, server.config_params)
        top_keys = response['top']['top_by_size']
        if has_key(hashed_key, top_keys):
            break

    # the written key must show up among the top keys
    assert has_key(hashed_key, top_keys)
    # every top-keys item must contain all required fields
    check_key_fields(top_keys)
    self.__check_key_existance_using_session_monitor(session,
                                                     server.config_params,
                                                     hashed_key)
def test_setup(self, server, simple_node):
    '''
    Initial test case that prepares the test cluster before recovery runs:
    1. prepares the whole test class scope - session, node, backends etc.
    2. initial cleanup - disables all backends on all nodes, removes all blobs
    3. enables the backends that the test will use
    4. runs initial actions from test_data - prepares keys on both backends
    '''
    # NOTE(review): `scope` is presumably a module-level fixture/global, not a
    # parameter of this method - confirm it is in scope at module level.
    self.scope = scope
    self.scope.session = make_session(node=simple_node, test_name='TestMerge')
    self.scope.routes = self.scope.session.routes
    self.scope.group = self.scope.routes.groups()[0]
    self.scope.session.groups = [self.scope.group]
    self.scope.address = self.scope.routes.addresses()[0]
    group_routes = self.scope.routes.filter_by_group(self.scope.group)
    # the test works on the first two backends of the chosen address
    self.scope.backends = group_routes.get_address_backends(self.scope.address)[:2]
    self.scope.timestamp = elliptics.Time.now()
    self.scope.test_data = make_test_data(
        timestamps=[self.old_ts, self.cur_ts, self.new_ts],
        dest_count=len(self.scope.backends))
    self.scope.keyshifter = KeyShifter(self.get_first_backend_key())

    # one direct session per backend
    self.test_sessions = []
    for backend in self.scope.backends:
        direct_session = self.scope.session.clone()
        direct_session.set_direct_id(self.scope.address, backend)
        self.test_sessions.append(direct_session)

    disable_backends(self.scope.session,
                     self.scope.session.routes.addresses_with_backends())
    remove_all_blobs(self.scope.session)
    enable_backends(self.scope.session,
                    [(self.scope.address, b) for b in self.scope.backends])
    self.prepare_test_data()
def test_iterate_all_node_ranges_with_timestamp(self, server, simple_node):
    '''
    Runs an iterator on the first node/backend from the route-list using all
    ranges covered by it and a timestamp window covering the last 30 seconds.
    Checks iterated keys by check_iterator_results.
    '''
    session = make_session(node=simple_node,
                           test_name='TestSession.test_iterate_all_node_ranges_with_timestamp')
    session.groups = session.routes.groups()
    node_id, node, backend = iter(session.routes.get_unique_routes()[0])
    ranges = convert_ranges(session.routes.get_address_backend_ranges(node, backend))

    end_time = elliptics.Time.now()
    # BUG FIX: the original did `begin_time = end_time` and then mutated
    # `begin_time.tsec` in place; since both names were bound to the same
    # mutable Time object, end_time shifted as well and the window collapsed
    # to a single instant. Build an independent Time so the window really
    # spans [now - 30s, now].
    begin_time = elliptics.Time(end_time.tsec - 30, end_time.tnsec)

    iterator = session.start_iterator(
        id=node_id,
        ranges=ranges,
        type=elliptics.iterator_types.network,
        flags=elliptics.iterator_flags.key_range | elliptics.iterator_flags.ts_range,
        time_begin=begin_time,
        time_end=end_time)

    check_iterator_results(node, backend, iterator, session, node_id)
def test_clone(self, server, simple_node):
    '''
    Sets every tweakable session property to a distinctive value, clones the
    session and checks that the clone carries identical settings.
    '''
    orig_s = make_session(node=simple_node, test_name='TestSession.test_clone')

    # (attribute, distinctive value) pairs to set and verify
    settings = (
        ('groups', [1, 2, 3]),
        ('timeout', 13),
        ('exceptions_policy', elliptics.exceptions_policy.throw_at_wait),
        ('cflags', elliptics.command_flags.direct),
        ('ioflags', elliptics.io_flags.overwrite),
        ('timestamp', elliptics.Time(213, 415)),
        ('trace_id', 731),
        ('user_flags', 19731),
    )
    for attr, value in settings:
        setattr(orig_s, attr, value)

    clone_s = orig_s.clone()

    for attr, value in settings:
        assert getattr(clone_s, attr) == getattr(orig_s, attr) == value
def test_prepare_write_commit(self, simple_node):
    """
    Finds an address/backend whose id-range is wide enough to overflow a
    single blob, writes enough keys to fill it, then verifies that a
    write_prepare/write_plain/write_commit sequence works for the last key.
    """
    session = make_session(node=simple_node,
                           test_name="TestSession.test_prepare_write_commit")
    session.groups = [session.routes.groups()[0]]
    routes = session.routes.filter_by_groups(session.groups)

    # find a range that can hold records_in_blob * 2 sequential keys
    pos, records, addr, back = (0, 0, None, 0)
    for route_id, address, backend in routes.get_unique_routes():
        ranges = routes.get_address_backend_ranges(address, backend)
        statistics = session.monitor_stat(
            address, elliptics.monitor_stat_categories.backend).get()[0].statistics
        records_in_blob = statistics["backends"]["{0}".format(backend)]["backend"]["config"]["records_in_blob"]
        for begin, end in ranges:
            if int(str(end), 16) - int(str(begin), 16) > records_in_blob * 2:
                pos = int(str(begin), 16)
                records = records_in_blob * 2
                addr, back = address, backend
    assert pos
    assert records

    # fill the blob with sequential keys, all landing on the chosen address
    for i in range(pos, pos + records):
        r = session.write_data(elliptics.Id(format(i, "x")), "data").get()
        assert len(r) == 1
        assert r[0].address == addr

    # `i` is the id of the last written key; reuse it as the probe key
    pos_id = elliptics.Id(format(i, "x"))
    prepare_size = 1 << 10
    data = "a" + "b" * (prepare_size - 2) + "c"
    session.write_prepare(pos_id, data[0], 0, 1 << 10).get()
    session.write_plain(pos_id, data[1:-1], 1).get()
    session.write_commit(pos_id, data[-1], prepare_size - 1, prepare_size).get()
    assert session.read_data(pos_id).get()[0].data == data
def test_prepare_plain_commit_simple(self, simple_node):
    """
    Simple write_prepare/write_plain/write_commit with checking data
    correctness and accessibility:
      * after write_prepare (no data) the key must not be accessible
      * after write_plain (with data) the key must still not be accessible
      * after write_commit (no data) the key is accessible and data is correct
    """
    session = make_session(node=simple_node,
                           test_name="TestSession.test_prepare_plain_commit_simple")

    # test data
    test_group = session.routes.groups()[0]
    test_key = "test_prepare_plain_commit_simple.key"
    test_data_size = 1024
    test_data = os.urandom(test_data_size)
    session.groups = [test_group]

    session.write_prepare(key=test_key, data="", remote_offset=0,
                          psize=test_data_size).get()
    check_key_unavailability(session, test_key)

    session.write_plain(key=test_key, data=test_data, remote_offset=0).get()
    check_key_unavailability(session, test_key)

    session.write_commit(key=test_key, data="", remote_offset=0,
                         csize=test_data_size).get()
    checked_read(session, test_key, test_data)
def test_write_and_corrupt_data(self, server, simple_node):
    '''
    Writes the same key one by one with different data and increasing
    timestamps to groups 1, 2 and 3, then corrupts the on-disk data of the
    record in group #3.
    '''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_write_and_corrupt_data',
                           test_namespace=self.namespace)
    # group #3 gets the newest timestamp: 2 hours after the first one
    timestamp3 = elliptics.Time(self.corrupted_timestamp.tsec + 7200,
                                self.corrupted_timestamp.tnsec)

    writes = ((scope.test_group, self.corrupted_timestamp, '.1'),
              (scope.test_group2, self.corrupted_timestamp2, '.2'),
              (scope.test_group3, timestamp3, '.3'))
    for group, timestamp, suffix in writes:
        session.groups = [group]
        session.timestamp = timestamp
        write_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + suffix])
        check_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + suffix], timestamp)

    # overwrite the first 3 bytes of the record (in group #3) inside the blob
    res = session.lookup(self.corrupted_key).get()[0]
    with open(res.filepath, 'r+b') as blob:
        blob.seek(res.offset, 0)
        corrupted = '123' + blob.read()[3:]
        blob.seek(res.offset, 0)
        blob.write(corrupted)
        blob.flush()
def test_iterate_one_range(self, simple_node):
    '''
    Runs an iterator on the first node/backend from the route-list using only
    its first range. Checks iterated keys by check_iterator_results.
    '''
    session = make_session(node=simple_node,
                           test_name='TestSession.test_iterate_one_range')
    session.groups = session.routes.groups()
    node_id, node, backend = iter(session.routes.get_unique_routes()[0])
    first_range = session.routes.get_address_backend_ranges(node, backend)[0]
    ranges = convert_ranges((first_range, ))

    # We could set flags=elliptics.iterator_flags.key_range, but it should be
    # set automatically whenever at least one range is given - so this test
    # also checks that behaviour.
    iterator = session.start_iterator(
        id=node_id,
        ranges=ranges,
        type=elliptics.iterator_types.network,
        flags=0,
        time_begin=elliptics.Time(0, 0),
        time_end=elliptics.Time(2**64 - 1, 2**64 - 1))

    check_iterator_results(node, backend, iterator, session, node_id)
def test_iterate_all_node_ranges_no_meta(self, simple_node):
    '''
    Runs an iterator with no_meta on the first node/backend from the
    route-list using all ranges covered by it and a timestamp window covering
    the last 30 seconds. Checks iterated keys by check_iterator_results.
    '''
    session = make_session(
        node=simple_node,
        test_name='TestSession.test_iterate_all_node_ranges_no_meta')
    session.groups = session.routes.groups()
    node_id, node, backend = iter(session.routes.get_unique_routes()[0])
    ranges = convert_ranges(
        session.routes.get_address_backend_ranges(node, backend))

    end_time = elliptics.Time.now()
    # BUG FIX: the original did `begin_time = end_time` and then mutated
    # `begin_time.tsec` in place; both names referenced the same mutable Time
    # object, so end_time shifted too and the window collapsed to one instant.
    # Build an independent Time so the window really spans [now - 30s, now].
    begin_time = elliptics.Time(end_time.tsec - 30, end_time.tnsec)

    iterator = session.start_iterator(
        id=node_id,
        ranges=ranges,
        type=elliptics.iterator_types.network,
        flags=elliptics.iterator_flags.key_range | elliptics.iterator_flags.no_meta,
        time_begin=begin_time,
        time_end=end_time)

    # final True -> verify that no metadata is returned
    check_iterator_results(node, backend, iterator, session, node_id, True)
def test_dc_corrupted_data(self, server, simple_node):
    '''
    Runs dc recovery and checks that the second version of the data is
    recovered to all groups. Group #3, whose key was corrupted, carries the
    newest timestamp, so recovery tries it first; that read fails, recovery
    switches to group #2 and recovers its data into groups #1 and #3.
    '''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_dc_corrupted_data',
                           test_namespace=self.namespace)
    recovery(one_node=False,
             remotes=map(elliptics.Address.from_host_port_family, server.remotes),
             backend_id=None,
             address=scope.test_address2,
             groups=(scope.test_group, scope.test_group2, scope.test_group3),
             session=session.clone(),
             rtype=RECOVERY.DC,
             log_file='dc_corrupted_data.log',
             tmp_dir='dc_corrupted_data')

    # all groups must now hold version '.2' of the corrupted key
    for group in (scope.test_group, scope.test_group2, scope.test_group3):
        session.groups = [group]
        check_data(scope, session, [self.corrupted_key],
                   [self.corrupted_data + '.2'], self.corrupted_timestamp2)
def test_dc_from_dump_two_groups(self, scope, server, simple_node):
    '''
    Runs dnet_recovery dc without --one-node and without --backend-id against
    both groups and with -f merge.dump.file. Checks self.keys availability
    after recovering in both groups.
    '''
    session = make_session(
        node=simple_node,
        test_name='TestRecovery.test_dc_from_dump_two_groups',
        test_namespace=self.namespace)

    # dump the transformed ids of every known key plus one unknown key
    dump_filename = 'dc.dump.file'
    with open(dump_filename, 'w') as dump_file:
        for key in self.keys + ['unknown_key']:
            dump_file.write('{0}\n'.format(str(session.transform(key))))

    recovery(one_node=False,
             remotes=map(elliptics.Address.from_host_port_family, server.remotes),
             backend_id=None,
             address=scope.test_address2,
             groups=(scope.test_group, scope.test_group2,),
             session=session.clone(),
             rtype=RECOVERY.DC,
             log_file='dc_from_dump_two_groups.log',
             tmp_dir='dc_from_dump_two_groups',
             dump_file=dump_filename)

    for group in (scope.test_group, scope.test_group2):
        session.groups = (group, )
        check_data(scope, session, self.keys, self.datas, self.timestamp)
def test_setup(self, server, simple_node):
    '''
    Initial test case that prepares the test cluster before recovery runs:
    1. prepares the whole test class scope - session, routes, groups etc.
    2. initial cleanup - disables all backends on all nodes, removes all blobs
    3. enables the backends that the test will use
    4. runs initial actions from test_data - prepares keys on both backends
    '''
    # NOTE(review): `scope` is presumably a module-level fixture/global, not a
    # parameter of this method - confirm it is in scope at module level.
    self.scope = scope
    self.scope.session = make_session(node=simple_node, test_name='TestDC')
    self.scope.keyshifter = KeyShifter(elliptics.Id(0))
    self.scope.routes = self.scope.session.routes
    # the test uses the first three groups from the route table
    self.scope.groups = self.scope.routes.groups()[:3]
    self.scope.test_data = make_test_data(
        timestamps=[self.old_ts, self.cur_ts, self.new_ts],
        dest_count=len(self.scope.groups))

    disable_backends(self.scope.session,
                     self.scope.session.routes.addresses_with_backends())
    remove_all_blobs(self.scope.session)
    enable_backends(self.scope.session,
                    self.scope.routes.addresses_with_backends())
    self.prepare_test_data()
def test_resetting_timeout(self, server, simple_node):
    """Checks that assigning 0 resets the session timeout to its default."""
    session = make_session(node=simple_node,
                           test_name="TestSession.test_resetting_timeout")
    # the default timeout is expected to be 5 seconds
    assert session.timeout == 5
    # an explicitly assigned value sticks
    session.timeout = 1
    assert session.timeout == 1
    # assigning 0 resets the timeout back to the default
    session.timeout = 0
    assert session.timeout == 5
def test_indexes_simple(self, simple_node):
    """Sets, updates and removes indexes for a key, verifying after each step."""
    session = make_session(node=simple_node,
                           test_name="TestSession.test_indexes_simple")
    session.groups = session.routes.groups()

    key = "simple_key"
    indexes = ["simple_index_1", "simple_index_2", "simple_index_3",
               "simple_index_4", "simple_index_5"]
    datas = ["key_data_1", "key_data_2", "key_data_3",
             "key_data_4", "key_data_5"]

    # initial set: all five indexes must be attached
    session.set_indexes(key, indexes, datas).wait()
    check_dict = dict(zip(indexes, datas))
    self.check_indexes(session, key, check_dict.keys(), check_dict.values())

    # update: overlap on _4/_5, add _6/_7
    indexes_2 = ["simple_index_4", "simple_index_5",
                 "simple_index_6", "simple_index_7"]
    datas_2 = ["key_data_4.2", "key_data_5.2", "key_data_6.2", "key_data_7.2"]
    session.update_indexes(key, indexes_2, datas_2).wait()
    check_dict.update(zip(indexes_2, datas_2))
    self.check_indexes(session, key, check_dict.keys(), check_dict.values())

    # remove the first three indexes
    removed_indexes = indexes[:3]
    session.remove_indexes(key, removed_indexes).wait()
    for idx in removed_indexes:
        del check_dict[idx]
    self.check_indexes(session, key, check_dict.keys(), check_dict.values())
def test_dc_from_dump_two_groups(self, server, simple_node):
    '''
    Runs dnet_recovery dc without --one-node and without --backend-id against
    both groups and with -f merge.dump.file. Checks self.keys availability
    after recovering in both groups.
    '''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_dc_from_dump_two_groups',
                           test_namespace=self.namespace)

    # dump the transformed ids of every known key plus one unknown key
    dump_filename = 'dc.dump.file'
    with open(dump_filename, 'w') as dump_file:
        for key in self.keys + ['unknown_key']:
            dump_file.write('{0}\n'.format(str(session.transform(key))))

    # FIX: pass session=session.clone() - every other recovery() call in this
    # suite supplies a cloned session; this one omitted it.
    recovery(one_node=False,
             remotes=map(elliptics.Address.from_host_port_family, server.remotes),
             backend_id=None,
             address=scope.test_address2,
             groups=(scope.test_group, scope.test_group2,),
             session=session.clone(),
             rtype=RECOVERY.DC,
             log_file='dc_from_dump_two_groups.log',
             tmp_dir='dc_from_dump_two_groups',
             dump_file=dump_filename)

    session.groups = (scope.test_group,)
    check_data(scope, session, self.keys, self.datas, self.timestamp)
    session.groups = (scope.test_group2,)
    check_data(scope, session, self.keys, self.datas, self.timestamp)
def test_merge_from_dump_3_backends(self, server, simple_node):
    '''
    Writes all keys to the dump file 'merge.dump.file', runs dnet_recovery
    merge without --one-node and without --backend-id but with
    -f merge.dump.file, then checks that all keys are available and hold
    correct data.
    '''
    session = make_session(
        node=simple_node,
        test_name='TestRecovery.test_merge_from_dump_3_backends',
        test_namespace=self.namespace)

    # dump the transformed ids of every known key plus one unknown key
    dump_filename = 'merge.dump.file'
    with open(dump_filename, 'w') as dump_file:
        for key in self.keys + ['unknown_key']:
            dump_file.write('{0}\n'.format(str(session.transform(key))))

    recovery(one_node=False,
             remotes=map(elliptics.Address.from_host_port_family, server.remotes),
             backend_id=None,
             address=scope.test_address,
             groups=(scope.test_group, ),
             session=session.clone(),
             rtype=RECOVERY.MERGE,
             log_file='merge_from_dump_3_backends.log',
             tmp_dir='merge_from_dump_3_backends',
             dump_file=dump_filename)

    session.groups = (scope.test_group, )
    check_data(scope, session, self.keys, self.datas, self.timestamp)
    check_keys_absence(scope, session, self.keys)
def test_indexes_dict(self, simple_node):
    """Sets and updates indexes given as a dict, verifying after each step."""
    session = make_session(node=simple_node,
                           test_name="TestSession.test_indexes_dict")
    session.groups = session.routes.groups()

    key = "dict_key"
    indexes = {
        "dict_index_1": "key_data_1",
        "dict_index_2": "key_data_2",
        "dict_index_3": "key_data_3",
        "dict_index_4": "key_data_4",
        "dict_index_5": "key_data_5",
    }

    # We want to count only successfully finished transactions
    set_session = session.clone()
    set_session.set_filter(elliptics.filters.positive_final)
    results = set_session.set_indexes(key, indexes).get()
    # one successful result per group
    assert len(results) == len(session.groups)
    self.check_indexes(session, key, indexes.keys(), indexes.values())

    # update: overlap on _4/_5, add _6/_7
    indexes_2 = {
        "dict_index_4": "key_data_4.2",
        "dict_index_5": "key_data_5.2",
        "dict_index_6": "key_data_6.2",
        "dict_index_7": "key_data_7.2",
    }
    session.update_indexes(key, indexes_2).wait()
    indexes.update(indexes_2)
    self.check_indexes(session, key, indexes.keys(), indexes.values())
def test_indexes_dict(self, server, simple_node):
    """Sets and updates indexes given as a dict, verifying after each step."""
    session = make_session(node=simple_node,
                           test_name='TestSession.test_indexes_dict')
    session.groups = session.routes.groups()

    key = 'dict_key'
    indexes = {
        'dict_index_1': 'key_data_1',
        'dict_index_2': 'key_data_2',
        'dict_index_3': 'key_data_3',
        'dict_index_4': 'key_data_4',
        'dict_index_5': 'key_data_5',
    }

    # We want to count only successfully finished transactions
    set_session = session.clone()
    set_session.set_filter(elliptics.filters.positive_final)
    results = set_session.set_indexes(key, indexes).get()
    # one successful result per group
    assert len(results) == len(session.groups)
    self.check_indexes(session, key, indexes.keys(), indexes.values())

    # update: overlap on _4/_5, add _6/_7
    indexes_2 = {
        'dict_index_4': 'key_data_4.2',
        'dict_index_5': 'key_data_5.2',
        'dict_index_6': 'key_data_6.2',
        'dict_index_7': 'key_data_7.2',
    }
    session.update_indexes(key, indexes_2).wait()
    indexes.update(indexes_2)
    self.check_indexes(session, key, indexes.keys(), indexes.values())
def test_indexes_simple(self, server, simple_node):
    """Sets, updates and removes indexes for a key, verifying after each step."""
    session = make_session(node=simple_node,
                           test_name='TestSession.test_indexes_simple')
    session.groups = session.routes.groups()

    key = 'simple_key'
    indexes = ['simple_index_1', 'simple_index_2', 'simple_index_3',
               'simple_index_4', 'simple_index_5']
    datas = ['key_data_1', 'key_data_2', 'key_data_3',
             'key_data_4', 'key_data_5']

    # initial set: all five indexes must be attached
    session.set_indexes(key, indexes, datas).wait()
    check_dict = dict(zip(indexes, datas))
    self.check_indexes(session, key, check_dict.keys(), check_dict.values())

    # update: overlap on _4/_5, add _6/_7
    indexes_2 = ['simple_index_4', 'simple_index_5',
                 'simple_index_6', 'simple_index_7']
    datas_2 = ['key_data_4.2', 'key_data_5.2', 'key_data_6.2', 'key_data_7.2']
    session.update_indexes(key, indexes_2, datas_2).wait()
    check_dict.update(zip(indexes_2, datas_2))
    self.check_indexes(session, key, check_dict.keys(), check_dict.values())

    # remove the first three indexes
    removed_indexes = indexes[:3]
    session.remove_indexes(key, removed_indexes).wait()
    for idx in removed_indexes:
        del check_dict[idx]
    self.check_indexes(session, key, check_dict.keys(), check_dict.values())
def test_dc_three_groups(self, scope, server, simple_node):
    '''
    Runs dc recovery without --one-node and without --backend-id against all
    three groups, then verifies that each of the three groups holds the data
    originating from the third group.
    '''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_dc_three_groups',
                           test_namespace=self.namespace)
    recovery(one_node=False,
             remotes=map(elliptics.Address.from_host_port_family, server.remotes),
             backend_id=None,
             address=scope.test_address2,
             groups=(scope.test_group, scope.test_group2, scope.test_group3),
             session=session.clone(),
             rtype=RECOVERY.DC,
             log_file='dc_three_groups.log',
             tmp_dir='dc_three_groups')

    # every group must now contain the third group's data
    for group in (scope.test_group, scope.test_group2, scope.test_group3):
        session.groups = (group, )
        check_data(scope, session, self.keys, self.datas2)
def test_monitor_categories_and_backends(self, simple_node, backends_combination, categories):
    '''Requests all possible combination of categories and backends one by one and checks statistics'''
    session = make_session(
        node=simple_node,
        test_name='TestSession.test_monitor_categories_and_backends')
    address = session.routes.addresses()[0]

    start = datetime.now()
    entry = session.monitor_stat(address,
                                 categories=categories,
                                 backends=backends_combination).get()[0]
    try:
        assert type(entry.address) is elliptics.Address
        json_stat = entry.statistics
    except Exception:
        # dump the raw statistics for post-mortem analysis before failing
        with open("monitor.stat.json", "w") as f:
            f.write(entry.__statistics__)
        # FIX: bare `raise` re-raises with the original traceback;
        # `raise e` (as before) rebuilt the exception and lost it.
        raise
    end = datetime.now()

    checker = MonitorStatsChecker(
        json_statistics=json_stat,
        time_period=(start, end),
        categories=categories,
        address=address,
        routes=session.routes,
        backends_combination=backends_combination)
    checker.check_json_stat()
def test_merge_from_dump_3_backends(self, server, simple_node):
    '''
    Writes all keys to the dump file 'merge.dump.file', runs dnet_recovery
    merge without --one-node and without --backend-id but with
    -f merge.dump.file, then checks that all keys are available and hold
    correct data.
    '''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_merge_from_dump_3_backends',
                           test_namespace=self.namespace)

    # dump the transformed ids of every known key plus one unknown key
    dump_filename = 'merge.dump.file'
    with open(dump_filename, 'w') as dump_file:
        for key in self.keys + ['unknown_key']:
            dump_file.write('{0}\n'.format(str(session.transform(key))))

    # FIX: pass session=session.clone() - every other recovery() call in this
    # suite supplies a cloned session; this one omitted it.
    recovery(one_node=False,
             remotes=map(elliptics.Address.from_host_port_family, server.remotes),
             backend_id=None,
             address=scope.test_address,
             groups=(scope.test_group,),
             session=session.clone(),
             rtype=RECOVERY.MERGE,
             log_file='merge_from_dump_3_backends.log',
             tmp_dir='merge_from_dump_3_backends',
             dump_file=dump_filename)

    session.groups = (scope.test_group,)
    check_data(scope, session, self.keys, self.datas, self.timestamp)
    check_keys_absence(scope, session, self.keys)
def test_indexes_dict(self, server, simple_node):
    """Sets and updates indexes given as a dict, verifying after each step."""
    session = make_session(node=simple_node,
                           test_name='TestSession.test_indexes_dict')
    session.groups = session.routes.groups()

    key = 'dict_key'
    indexes = {'dict_index_1': 'key_data_1',
               'dict_index_2': 'key_data_2',
               'dict_index_3': 'key_data_3',
               'dict_index_4': 'key_data_4',
               'dict_index_5': 'key_data_5'}

    # We want to count only successfully finished transactions
    set_session = session.clone()
    set_session.set_filter(elliptics.filters.positive_final)
    results = set_session.set_indexes(key, indexes).get()
    # one successful result per group
    assert len(results) == len(session.groups)
    self.check_indexes(session, key, indexes.keys(), indexes.values())

    # update: overlap on _4/_5, add _6/_7
    indexes_2 = {'dict_index_4': 'key_data_4.2',
                 'dict_index_5': 'key_data_5.2',
                 'dict_index_6': 'key_data_6.2',
                 'dict_index_7': 'key_data_7.2'}
    session.update_indexes(key, indexes_2).wait()
    indexes.update(indexes_2)
    self.check_indexes(session, key, indexes.keys(), indexes.values())
def test_dc_two_groups(self, scope, server, simple_node):
    '''
    Runs dnet_recovery dc without --one-node and without --backend-id against
    both groups. Checks self.keys availability after recovering in both
    groups.
    '''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_dc_two_groups',
                           test_namespace=self.namespace)
    recovery(one_node=False,
             remotes=map(elliptics.Address.from_host_port_family, server.remotes),
             backend_id=None,
             address=scope.test_address2,
             groups=(scope.test_group, scope.test_group2,),
             session=session.clone(),
             rtype=RECOVERY.DC,
             log_file='dc_two_groups.log',
             tmp_dir='dc_two_groups')

    for group in (scope.test_group, scope.test_group2):
        session.groups = (group, )
        check_data(scope, session, self.keys, self.datas)
def test_dc_one_backend_and_one_group(self, server, simple_node):
    '''
    Runs dnet_recovery dc with --one-node=scope.test_address2 and
    --backend-id=scope.test_backend2 against both groups. Checks self.keys
    availability after recovering in both groups.
    '''
    session = make_session(
        node=simple_node,
        test_name='TestRecovery.test_dc_one_backend_and_one_group',
        test_namespace=self.namespace)
    recovery(one_node=True,
             remotes=map(elliptics.Address.from_host_port_family, server.remotes),
             backend_id=scope.test_backend2,
             address=scope.test_address2,
             groups=(scope.test_group, scope.test_group2,),
             session=session.clone(),
             rtype=RECOVERY.DC,
             log_file='dc_one_backend.log',
             tmp_dir='dc_one_backend',
             no_meta=True)

    session.groups = (scope.test_group2, )
    check_data(scope, session, self.keys, self.datas, self.timestamp)
def test_merge_two_backends(self, server, simple_node):
    '''
    Runs dnet_recovery merge with --one-node=scope.test_address and
    --backend-id=scope.test_backend. Checks self.keys availability after
    recovering.
    '''
    session = make_session(
        node=simple_node,
        test_name='TestRecovery.test_merge_two_backends',
        test_namespace=self.namespace)
    recovery(one_node=True,
             remotes=map(elliptics.Address.from_host_port_family, server.remotes),
             backend_id=scope.test_backend,
             address=scope.test_address,
             groups=(scope.test_group, ),
             session=session.clone(),
             rtype=RECOVERY.MERGE,
             no_meta=True,
             log_file='merge_2_backends.log',
             tmp_dir='merge_2_backends')

    session.groups = (scope.test_group, )
    check_data(scope, session, self.keys, self.datas, self.timestamp)
    check_keys_absence(scope, session, self.keys)
def test_monitor_categories(self, server, simple_node):
    """Requests every known statistics category from the first address."""
    session = make_session(node=simple_node,
                           test_name='TestSession.test_monitor_categories')
    first_address = session.routes.addresses()[0]
    # each category must yield a non-empty statistics entry
    for category in elliptics.monitor_stat_categories.values.values():
        entry = session.monitor_stat(first_address, categories=category).get()[0]
        assert entry
def test_monitor_stat(self, simple_node):
    """Simply get all statistics from all nodes and check that statistics is valid dict"""
    session = make_session(node=simple_node,
                           test_name="TestSession.test_monitor_stat")
    for addr in session.routes.addresses():
        stat = session.monitor_stat(addr).get()[0]
        # the request must succeed without an error
        assert stat.error.code == 0
        assert stat.error.message == ""
        # FIX: isinstance is the idiomatic type check
        # (`type(x) == dict` would also reject dict subclasses)
        assert isinstance(stat.statistics, dict)
def test_monitor_stat(self, server, simple_node):
    """Simply gets all statistics from all nodes and checks that statistics is a valid dict."""
    session = make_session(node=simple_node,
                           test_name='TestSession.test_monitor_stat')
    for addr in session.routes.addresses():
        stat = session.monitor_stat(addr).get()[0]
        # the request must succeed without an error
        assert stat.error.code == 0
        assert stat.error.message == ''
        # FIX: isinstance is the idiomatic type check
        # (`type(x) == dict` would also reject dict subclasses)
        assert isinstance(stat.statistics, dict)
def test_enable_all_third_group_backends(self, server, simple_node):
    '''Enables all backends on every node from the third group.'''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_enable_all_third_group_backends',
                           test_namespace=self.namespace)
    enable_group(scope, session, scope.test_group3)
def test_checks_all_enabled(self, server, simple_node):
    '''Checks statuses of all backends from all nodes and groups.'''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_checks_all_enabled',
                           test_namespace=self.namespace)
    # every address known at initialization must be visible in current routes
    assert set(scope.init_routes.addresses()) == set(session.routes.addresses())
def test_resetting_timeout(self, server, simple_node):
    '''Checks that assigning 0 resets the session timeout to its default.'''
    session = make_session(node=simple_node,
                           test_name='TestSession.test_resetting_timeout')
    # the default timeout is expected to be 5 seconds
    assert session.timeout == 5
    # an explicitly assigned value sticks
    session.timeout = 1
    assert session.timeout == 1
    # assigning 0 resets the timeout back to the default
    session.timeout = 0
    assert session.timeout == 5
def test_prepare_plain_commit_with_restarting_backend(self, simple_node):
    """
    write_plain/write_commit must still work if the corresponding backend was
    restarted after write_prepare:
      * write_prepare test_key with test_data; key must not be accessible
      * restart the backend (disable & enable)
      * write_plain test_key with test_data2; key must not be accessible
      * restart the backend again
      * write_commit test_key with test_data3; key becomes accessible and
        holds the correct data
    """
    session = make_session(
        node=simple_node,
        test_name="TestSession.test_prepare_plain_commit_with_restarting_backend"
    )

    # test data
    test_group = session.routes.groups()[0]
    test_key = "test_prepare_plain_commit_with_restarting_backend.key"
    test_data_size = 1024
    test_data = os.urandom(test_data_size)
    test_data2 = os.urandom(test_data_size)
    test_data3 = os.urandom(test_data_size)
    session.groups = [test_group]

    def restart_backend(address, backend_id):
        # a disable/enable cycle emulates a backend restart
        session.disable_backend(address, backend_id).get()
        session.enable_backend(address, backend_id).get()

    results = session.write_prepare(key=test_key, data=test_data,
                                    remote_offset=0, psize=test_data_size).get()
    # remember where the key was prepared - all later writes must land there
    test_address = results[0].address
    test_backend = results[0].backend_id
    check_key_unavailability(session, test_key)

    restart_backend(test_address, test_backend)
    check_key_unavailability(session, test_key)

    results = session.write_plain(key=test_key, data=test_data2,
                                  remote_offset=0).get()
    assert results[0].address == test_address
    assert results[0].backend_id == test_backend
    check_key_unavailability(session, test_key)

    restart_backend(test_address, test_backend)
    check_key_unavailability(session, test_key)

    results = session.write_commit(key=test_key, data=test_data3,
                                   remote_offset=0, csize=test_data_size).get()
    assert results[0].address == test_address
    assert results[0].backend_id == test_backend
    checked_read(session, test_key, test_data3)
def test_monitor_categories(self, simple_node, categories):
    '''Fetches statistics for the given category combination and validates them.'''
    session = make_session(node=simple_node,
                           test_name='TestSession.test_monitor_categories')
    first_address = session.routes.addresses()[0]
    MonitorStatsChecker(address=first_address,
                        session=session,
                        categories=categories).get_and_check()
def test_enable_rest_backends(self, server, simple_node):
    '''Brings every remaining group back: enables all of its nodes and backends.'''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_enable_rest_backends',
                           test_namespace=self.namespace)
    for group in scope.test_other_groups:
        enable_group(scope, session, group)
def test_enable_all_third_group_backends(self, server, simple_node):
    '''Turns on every backend on every node belonging to the third group.'''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_enable_all_third_group_backends',
                           test_namespace=self.namespace)
    enable_group(scope, session, scope.test_group3)
def test_monitor_categories(self, server, simple_node, categories):
    '''Requests one combination of monitor categories and checks the statistics.'''
    session = make_session(node=simple_node,
                           test_name='TestSession.test_monitor_categories')
    checker = MonitorStatsChecker(
        address=session.routes.addresses()[0],
        session=session,
        categories=categories)
    checker.get_and_check()
def test_monitor_stat(self, simple_node):
    '''
    Simply gets all statistics from all nodes and checks that each reply
    succeeds and carries a dict of statistics.
    '''
    session = make_session(node=simple_node,
                           test_name='TestSession.test_monitor_stat')
    for addr in session.routes.addresses():
        stat = session.monitor_stat(addr).get()[0]
        # a successful reply carries no error
        assert stat.error.code == 0
        assert stat.error.message == ''
        # isinstance instead of `type(...) == dict`: the idiomatic type
        # check, and it also accepts dict subclasses
        assert isinstance(stat.statistics, dict)
def test_write_to_all_groups(self, simple_node, key, data, exception):
    '''Writes `key` to all known groups; expects `exception` when one is given.'''
    session = make_session(node=simple_node,
                           test_name="TestSession.test_write_to_all_groups")
    session.groups = session.routes.groups()
    if not exception:
        checked_write(session, key, data)
    else:
        with pytest.raises(exception):
            checked_write(session, key, data)
def test_checks_all_enabled(self, server, simple_node):
    '''Checks statuses of all backends from all nodes and groups.'''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_checks_all_enabled',
                           test_namespace=self.namespace)
    # the current route table must cover exactly the initial set of addresses
    expected_addresses = set(scope.init_routes.addresses())
    assert expected_addresses == set(session.routes.addresses())
def test_enable_rest_backends(self, server, simple_node):
    '''Re-enables every node and backend of all remaining groups.'''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_enable_rest_backends',
                           test_namespace=self.namespace)
    for other_group in scope.test_other_groups:
        enable_group(scope, session, other_group)
def test_prepare_data(self, scope, server, simple_node):
    '''Writes self.keys to the chosen group and checks their availability.'''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_prepare_data',
                           test_namespace=self.namespace)
    session.groups = [scope.test_group]
    keys, datas = self.keys, self.datas
    write_data(scope, session, keys, datas)
    check_data(scope, session, keys, datas)
def test_write_to_all_groups(self, simple_node, key, data, exception):
    '''Writes `key` to all groups, asserting the parametrized failure if any.'''
    session = make_session(node=simple_node,
                           test_name='TestSession.test_write_to_all_groups')
    session.groups = session.routes.groups()
    if exception:
        # parametrized failure case: the write must raise
        with pytest.raises(exception):
            checked_write(session, key, data)
        return
    checked_write(session, key, data)
def test_write_without_groups(self, server, simple_node, key, data):
    '''
    Writing with no groups set must fail with "insufficient results count"
    (-6, No such device or address).
    '''
    session = make_session(node=simple_node,
                           test_name='TestSession.test_write_without_groups')
    # pytest.raises replaces the manual try/except/else + pytest.fail dance
    with pytest.raises(elliptics.Error) as excinfo:
        session.write_data(key, data).get()
    assert excinfo.value.message.message == 'insufficient results count due to'\
        ' checker: 0 of 1 (1): No such device or address: -6'
def test_write_data_to_third_group(self, scope, server, simple_node):
    '''Writes different data by self.key in third group.'''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_write_data_to_third_group',
                           test_namespace=self.namespace)
    third_group = scope.test_group3
    session.groups = [third_group]
    keys, datas = self.keys, self.datas2
    write_data(scope, session, keys, datas)
    check_data(scope, session, keys, datas)
def test_write_data_to_third_group(self, server, simple_node):
    '''Overwrites self.keys in the third group with newer data and timestamp.'''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_write_data_to_third_group',
                           test_namespace=self.namespace)
    session.groups = [scope.test_group3]
    # pin a distinct timestamp so recovery can tell the versions apart
    session.timestamp = self.timestamp2
    keys, datas, stamp = self.keys, self.datas2, self.timestamp2
    write_data(scope, session, keys, datas)
    check_data(scope, session, keys, datas, stamp)
def test_enable_another_one_backend(self, server, simple_node):
    '''Enables another one backend from the same group.'''
    # the second-to-last disabled backend must still belong to the test group
    assert scope.disabled_backends[-2][0] == scope.test_group
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_enable_another_one_backend',
                           test_namespace=self.namespace)
    group, address, backend = scope.disabled_backends[-1]
    result = enable_backend(scope, session, group, address, backend)
    check_backend_status(result.get(), backend, state=1)
    wait_backends_in_route(session, ((address, backend),))
def test_write_without_groups(self, server, simple_node, key, data):
    '''
    A write issued by a session with an empty group list must fail with the
    "insufficient results count" error (-6, No such device or address).
    '''
    session = make_session(node=simple_node,
                           test_name='TestSession.test_write_without_groups')
    # idiomatic pytest: let pytest.raises enforce that the error occurs
    # instead of the hand-rolled try/except/else + pytest.fail pattern
    with pytest.raises(elliptics.Error) as excinfo:
        session.write_data(key, data).get()
    assert excinfo.value.message.message == 'insufficient results count due to'\
        ' checker: 0 of 1 (1): No such device or address: -6'
def test_teardown(self):
    """Cleanup backends.

    * disable all backends
    * remove all blobs
    * enable all backends on all nodes
    """
    session = make_session(node=scope.node,
                           test_name='TestIsolatedRecovery.test_teardown',
                           test_namespace=self.namespace)
    routed_backends = session.routes.addresses_with_backends()
    # same collection twice: clean and re-enable every routed backend
    cleanup_backends(session, routed_backends, routed_backends)
def test_enable_another_one_backend(self, server, simple_node):
    '''Enables one more disabled backend belonging to the same group.'''
    # sanity: the previous disabled entry is from the first test group
    assert scope.disabled_backends[-2][0] == scope.test_group
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_enable_another_one_backend',
                           test_namespace=self.namespace)
    group, address, backend = scope.disabled_backends[-1]
    status_result = enable_backend(scope, session, group, address, backend)
    check_backend_status(status_result.get(), backend, state=1)
    wait_backends_in_route(session, ((address, backend), ))
def test_write_to_one_group(self, simple_node):
    '''
    Writes a key to a single group and verifies it is not readable from
    any other group.
    '''
    data = "some data"
    session = make_session(node=simple_node,
                           test_name="TestSession.test_write_to_one_group")
    for group in session.routes.groups():
        tmp_key = "one_groups_key_" + str(group)
        session.groups = [group]
        checked_write(session, tmp_key, data)

        other_groups = list(session.routes.groups())
        other_groups.remove(group)
        session.groups = other_groups
        with pytest.raises(elliptics.NotFoundError):
            # the read must raise NotFoundError; the old trailing
            # `assert results == []` after this call was unreachable
            # dead code and has been removed
            session.read_data(tmp_key).get()
def test_enable_second_group_one_backend(self, server, simple_node):
    '''Enables one backend from one node from second group.'''
    session = make_session(node=simple_node,
                           test_name='TestRecovery.test_enable_second_group_one_backend',
                           test_namespace=self.namespace)
    # pick the first disabled backend that belongs to the second group
    second_group_backends = ((g, a, b)
                             for g, a, b in scope.disabled_backends
                             if g == scope.test_group2)
    group, address, backend = next(second_group_backends)
    # remember this pair for later recovery tests
    scope.test_address2 = address
    scope.test_backend2 = backend
    status_result = enable_backend(scope, session, group, address, backend)
    check_backend_status(status_result.get(), backend, state=1)
    wait_backends_in_route(session, ((address, backend), ))
def test_bulk_write_read(self, simple_node):
    '''Bulk-writes 100 keys, bulk-reads them back, then repeats in another namespace.'''
    session = make_session(node=simple_node,
                           test_name="TestSession.test_bulk_write_read")
    session.groups = session.routes.groups()
    payload = "data"
    keys = ["bulk key " + str(i) for i in xrange(100)]
    # first pass: key/data pairs as a list of tuples
    checked_bulk_write(session, [(k, payload) for k in keys], payload)
    checked_bulk_read(session, keys, payload)
    # second pass: same keys, dict form, in a separate namespace
    session.set_namespace("bulk additional namespace")
    checked_bulk_write(session, dict.fromkeys(keys, "data"), payload)
    checked_bulk_read(session, keys, payload)
def test_write_to_one_group(self, server, simple_node):
    '''
    For every group: writes a key only to that group, then checks the key
    cannot be read from the remaining groups.
    '''
    data = 'some data'
    session = make_session(node=simple_node,
                           test_name='TestSession.test_write_to_one_group')
    for group in session.routes.groups():
        tmp_key = 'one_groups_key_' + str(group)
        session.groups = [group]
        checked_write(session, tmp_key, data)

        other_groups = list(session.routes.groups())
        other_groups.remove(group)
        session.groups = other_groups
        with pytest.raises(elliptics.NotFoundError):
            # this call must raise, so the original follow-up
            # `assert results == []` could never execute (dead code);
            # it has been dropped
            session.read_data(tmp_key).get()
def test_bulk_write_read(self, server, simple_node):
    '''Round-trips 100 keys through bulk write/read in two namespaces.'''
    session = make_session(node=simple_node,
                           test_name='TestSession.test_bulk_write_read')
    session.groups = session.routes.groups()
    data = 'data'
    keys = ['bulk key ' + str(i) for i in xrange(100)]
    # default namespace: pairs supplied as a list of (key, data) tuples
    pairs = [(key, data) for key in keys]
    checked_bulk_write(session, pairs, data)
    checked_bulk_read(session, keys, data)
    # extra namespace: same keys supplied as a dict
    session.set_namespace('bulk additional namespace')
    checked_bulk_write(session, dict.fromkeys(keys, 'data'), data)
    checked_bulk_read(session, keys, data)