import ctypes
import json
import os
import sys
import traceback

import avocado

# NOTE: the DAOS-specific names used below (Test, skipForTicket, DaosContext,
# DaosPool, DaosContainer, IORequest, DaosApiError, plus the AgentUtils,
# server_utils, write_host_file, ServerUtils and WriteHostFile helpers) are
# provided by the DAOS test framework; their exact import paths differ
# between DAOS releases and are not reproduced here.


class CreateManyDkeys(Test):
    """
    Test Class Description:
        Tests that create large numbers of keys in objects/containers and
        then destroy the containers and verify the space has been reclaimed.

    :avocado: recursive
    """

    def setUp(self):
        super(CreateManyDkeys, self).setUp()
        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(1 << 1)

    def tearDown(self):
        try:
            if self.pool:
                self.pool.destroy(1)
        finally:
            super(CreateManyDkeys, self).tearDown()

    def write_a_bunch_of_values(self, how_many):
        """
        Write data to an object, each value with its own dkey and akey.  The
        how_many parameter determines how many key:value pairs are written.
        """
        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_epoch()
        c_epoch = ctypes.c_uint64(epoch)

        print("Started Writing the Dataset-----------\n")
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            c_value = ctypes.create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey, c_akey, c_value, c_size, c_epoch)

            # print progress roughly every `inc` keys
            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        self.container.commit_epoch(c_epoch)

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(
                key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data) + 1,
                                     c_epoch)

            if the_data != (repr(val.value)[1:-1]):
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n".format(
                              "dkey {0}".format(key),
                              "akey {0}".format(key),
                              the_data,
                              repr(val.value)[1:-1]))

            if key > last_key:
                print("verified: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("starting destroy")
        self.container.close()
        self.container.destroy()
        print("destroy complete")

    @avocado.fail_on(DaosApiError)
    @skipForTicket("DAOS-1721")
    def test_many_dkeys(self):
        """
        Test ID: DAOS-1701
        Test Description: Test many dkeys in the same object.
        Use Cases:
            1. large key counts
            2. space reclamation after destroy

        :avocado: tags=object,vm,many_dkeys
        """
        no_of_dkeys = self.params.get("number_of_dkeys", '/run/dkeys/')

        # write a lot of individual data items, verify them, then destroy
        self.write_a_bunch_of_values(no_of_dkeys)

        # do it again, which should verify the first container
        # was truly destroyed because a second round won't fit otherwise
        self.write_a_bunch_of_values(no_of_dkeys)
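
# Illustrative only: a minimal sketch of the avocado parameter YAML these
# tests read through self.params.get().  The paths mirror the calls above;
# the concrete values (host names, pool mode/size, dkey count) are
# assumptions for illustration, not the project's actual test data.
#
#   hosts:
#       test_machines:
#           - vm1
#   pool:
#       createmode: 511
#       createsize: 1073741824
#       createset: daos_server
#   dkeys:
#       number_of_dkeys: 100000
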
class CreateManyDkeys(Test):
    """
    Test Class Description:
        Tests that create large numbers of keys in objects/containers and
        then destroy the containers and verify the space has been reclaimed.
    """

    def setUp(self):
        self.agent_sessions = None
        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.container = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(1 << 1)

    def tearDown(self):
        try:
            if self.hostfile is not None:
                os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def write_a_bunch_of_values(self, how_many):
        """
        Write data to an object, each value with its own dkey and akey.  The
        how_many parameter determines how many key:value pairs are written.
        """
        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_epoch()
        c_epoch = ctypes.c_uint64(epoch)

        print("Started Writing the Dataset-----------\n")
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            c_value = ctypes.create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey, c_akey, c_value, c_size, c_epoch)

            # print progress roughly every `inc` keys
            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        self.container.commit_epoch(c_epoch)

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(
                key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data) + 1,
                                     c_epoch)

            if the_data != (repr(val.value)[1:-1]):
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n".format(
                              "dkey {0}".format(key),
                              "akey {0}".format(key),
                              the_data,
                              repr(val.value)[1:-1]))

            if key > last_key:
                print("verified: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("starting destroy")
        self.container.close()
        self.container.destroy()
        print("destroy complete")

    @avocado.fail_on(DaosApiError)
    @avocado.skip("Skipping until DAOS-1721 is fixed.")
    def test_many_dkeys(self):
        """
        Test ID: DAOS-1701
        Test Description: Test many dkeys in the same object.
        Use Cases:
            1. large key counts
            2. space reclamation after destroy

        :avocado: tags=object,vm,many_dkeys
        """
        no_of_dkeys = self.params.get("number_of_dkeys", '/run/dkeys/')

        # write a lot of individual data items, verify them, then destroy
        self.write_a_bunch_of_values(no_of_dkeys)

        # do it again, which should verify the first container
        # was truly destroyed because a second round won't fit otherwise
        self.write_a_bunch_of_values(no_of_dkeys)
class CreateMillionDkeys(Test):
    """
    Test class that creates millions of dkeys in the same object.
    """

    def setUp(self):
        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        tmp = build_paths['PREFIX'] + '/tmp'
        server_group = self.params.get("server_group",
                                       '/server/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, tmp)
        ServerUtils.runServer(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(1 << 1)

        poh = self.pool.handle
        self.container = DaosContainer(self.context)
        self.container.create(poh)
        self.container.open()

    def tearDown(self):
        if self.hostfile is not None:
            os.remove(self.hostfile)
        self.pool.destroy(1)
        ServerUtils.stopServer()
        ServerUtils.killServer(self.hostlist)

    def test_million_dkeys(self):
        """
        Test millions of dkeys in the same object.

        :avocado: tags=dkeys,regression,vm,large
        """
        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_epoch()
        c_epoch = ctypes.c_uint64(epoch)
        no_of_dkeys = self.params.get("number_of_dkeys", '/run/dkeys/')
        try:
            print("Started Writing the Dataset-----------\n")
            for key in range(no_of_dkeys):
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
                c_akey = ctypes.create_string_buffer("akey {0}".format(key))
                c_value = ctypes.create_string_buffer("data {0}".format(key))
                c_size = ctypes.c_size_t(len("data {0}".format(key)) + 1)
                # pass the ctypes epoch, as in the other test classes
                ioreq.single_insert(c_dkey, c_akey, c_value, c_size, c_epoch)

            self.container.commit_epoch(c_epoch)

            print("Started Verification of the Dataset-----------\n")
            for key in range(no_of_dkeys):
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
                c_akey = ctypes.create_string_buffer("akey {0}".format(key))
                val = ioreq.single_fetch(c_dkey,
                                         c_akey,
                                         len("data {0}".format(key)) + 1,
                                         c_epoch)
                original_data = "data {0}".format(key)
                if original_data != (repr(val.value)[1:-1]):
                    self.fail(
                        "ERROR: Data mismatch for dkey = {0}, akey={1}, "
                        "Expected Value={2} and Received Value={3}\n".format(
                            "dkey {0}".format(key),
                            "akey {0}".format(key),
                            original_data,
                            repr(val.value)[1:-1]))
        except ValueError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
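
# Illustrative only: these classes are written for the avocado test framework
# and are normally launched through the avocado runner rather than executed
# directly.  The file name below is a placeholder, not the project's actual
# path, and the parameter YAML is supplied through avocado's multiplexer
# (the exact option name varies between avocado releases):
#
#   avocado run ./create_many_dkeys.py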