class CreateManyDkeys(Test):
    """
    Test Class Description:
        Tests that create large numbers of keys in objects/containers
        and then destroy the containers and verify the space has been
        reclaimed.
    """

    def setUp(self):
        # agent_sessions is set first so tearDown can run safely even if
        # setUp fails partway through
        self.agent_sessions = None
        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.container = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        # create and connect to a pool sized by the yaml parameters;
        # 1 << 1 is DAOS_PC_RW (read-write connection)
        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(1 << 1)

    def tearDown(self):
        # the finally block guarantees agent/server shutdown even when
        # hostfile removal or pool destroy raises
        try:
            if self.hostfile is not None:
                os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def write_a_bunch_of_values(self, how_many):
        """
        Write data to an object, each with a dkey and akey.  The how_many
        parameter determines how many key:value pairs are written.

        After writing, every key:value pair is read back and verified, then
        the container is destroyed so the space can be reclaimed.
        """
        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()
        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_epoch()
        c_epoch = ctypes.c_uint64(epoch)

        print("Started Writing the Dataset-----------\n")
        # progress is reported roughly every `inc` keys
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            c_value = ctypes.create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey,
                                c_akey,
                                c_value,
                                c_size,
                                c_epoch)
            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        self.container.commit_epoch(c_epoch)

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data)+1,
                                     c_epoch)
            # repr(...)[1:-1] strips the surrounding quotes from the raw
            # ctypes buffer representation before comparing
            if the_data != (repr(val.value)[1:-1]):
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n"
                          .format("dkey {0}".format(key),
                                  "akey {0}".format(key),
                                  the_data,
                                  repr(val.value)[1:-1]))
            if key > last_key:
                # BUGFIX: progress message said "veried" instead of "verified"
                print("verified: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("starting destroy")
        self.container.close()
        self.container.destroy()
        print("destroy complete")

    @avocado.fail_on(DaosApiError)
    @avocado.skip("Skipping until DAOS-1721 is fixed.")
    def test_many_dkeys(self):
        """
        Test ID: DAOS-1701
        Test Description: Test many of dkeys in same object.
        Use Cases: 1. large key counts
                   2. space reclamation after destroy
        :avocado: tags=object,vm,many_dkeys
        """
        no_of_dkeys = self.params.get("number_of_dkeys", '/run/dkeys/')

        # write a lot of individual data items, verify them, then destroy
        self.write_a_bunch_of_values(no_of_dkeys)

        # do it again, which should verify the first container
        # was truly destroyed because a second round won't fit otherwise
        self.write_a_bunch_of_values(no_of_dkeys)
class ObjectDataValidation(avocado.Test):
    """
    Test Class Description:
        Tests that create Different length records,
        Disconnect the pool/container and reconnect,
        validate the data after reconnect.

    Also exercises transaction (tx) commit/close/abort negative paths.
    """
    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        # Pre-initialize all attributes so tearDown can run safely if
        # setUp fails partway through.
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None
        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(
            self.hostlist, self.workdir)
        # yaml lists: [0] is the value, remaining entries are metadata
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')
        self.agent_sessions = agent_utils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)
        # create/connect pool (mode 2 == DAOS_PC_RW), then container,
        # then the object and I/O request helper used by all tests below
        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)
        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()
        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)

    def tearDown(self):
        # finally block guarantees agent/server shutdown even when
        # container/pool cleanup raises
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                agent_utils.stop_agent(self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def reconnect(self):
        '''
        Function to reconnect the pool/container and reopen the Object
        for read verification.
        '''
        #Close the Obj/Container, Disconnect the Pool.
        self.obj.close()
        self.container.close()
        self.pool.disconnect()
        # brief pause to let the disconnect settle server-side
        time.sleep(5)
        #Connect Pool, Open Container and Object
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        # IORequest must be rebuilt against the reopened handles
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)

    @avocado.fail_on(DaosApiError)
    def test_invalid_tx_commit_close(self):
        """
        Test ID:
            (1)DAOS-1346: Verify commit tx bad parameter behavior.
            (2)DAOS-1343: Verify tx_close bad parameter behavior.
            (3)DAOS-1342: Verify tx_close through daos_api.
            (4)DAOS-1338: Add and verify tx_abort through daos_api.
            (5)DAOS-1339: Verify tx_abort bad parameter behavior.
        Test Description:
            Write Avocado Test to verify commit tx and close tx
            bad parameter behavior.
        :avocado: tags=all,object,full_regression,small,invalid_tx
        """
        self.d_log.info("==Writing the Single Dataset for negative test...")
        record_index = 0
        # all invalid-handle operations are expected to fail with this RC
        expected_error = "RC: -1002"
        dkey = 0
        akey = 0
        indata = ("{0}".format(str(akey)[0]) *
                  self.record_length[record_index])
        c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
        c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
        c_value = ctypes.create_string_buffer(indata)
        c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
        try:
            new_transaction = self.container.get_new_tx()
        except DaosApiError as excep:
            #initial container get_new_tx failed, skip rest of the test
            self.fail("##container get_new_tx failed: {}".format(excep))
        # derive a handle that is guaranteed not to be a live transaction
        invalid_transaction = new_transaction + random.randint(1000, 383838)
        self.log.info("==new_transaction= %s", new_transaction)
        self.log.info("==invalid_transaction= %s", invalid_transaction)
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                 new_transaction)
        #(1) commit_tx with an invalid handle must fail with RC: -1002
        try:
            self.container.commit_tx(invalid_transaction)
            self.fail("##(1.1)Container.commit_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(1)Expecting failure: invalid Container.commit_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(1.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        #(2) close_tx with an invalid handle must fail with RC: -1002
        try:
            self.container.close_tx(invalid_transaction)
            self.fail("##(2.1)Container.close_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(2)Expecting failure: invalid Container.commit_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(2.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        #(3) close_tx with the valid handle must succeed
        try:
            self.container.close_tx(new_transaction)
            self.log.info("==(3)container.close_tx test passed.")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.fail("##(3)Failed on close_tx.")
        #(4) abort_tx with an invalid handle must fail with RC: -1002
        try:
            self.container.abort_tx(invalid_transaction)
            self.fail("##(4.1)Container.abort_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(4)Expecting failure: invalid Container.abort_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(4.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        #(5) Try to abort the transaction which already closed.
        try:
            self.container.abort_tx(new_transaction)
            self.fail("##(5.1)Container.abort_tx passing with a closed handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(5)Expecting failure: Container.abort_tx closed handle.")
            if expected_error not in str(excep):
                self.fail(
                    "##(5.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        #(6) open another transaction for abort test
        try:
            new_transaction2 = self.container.get_new_tx()
        except DaosApiError as excep:
            self.fail("##(6.1)container get_new_tx failed: {}".format(excep))
        self.log.info("==new_transaction2= %s", new_transaction2)
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                 new_transaction2)
        try:
            self.container.abort_tx(new_transaction2)
            self.log.info("==(6)container.abort_tx test passed.")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.fail("##(6.2)Failed on abort_tx.")

    @avocado.fail_on(DaosApiError)
    @skipForTicket("DAOS-3208")
    def test_single_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,object,full_regression,small,single_object
        """
        self.d_log.info("Writing the Single Dataset")
        record_index = 0
        transaction = []
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # record value repeats the first char of the akey index to
                # the configured record length (cycled via record_index)
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_value = ctypes.create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
                new_transaction = self.container.get_new_tx()
                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                         new_transaction)
                self.container.commit_tx(new_transaction)
                transaction.append(new_transaction)
                record_index = record_index + 1
                # cycle through the configured record lengths
                if record_index == len(self.record_length):
                    record_index = 0

        # drop and re-establish all handles before verification
        self.reconnect()

        self.d_log.info("Single Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey, c_akey,
                                              len(indata) + 1)
                # repr(...)[1:-1] strips the quotes from the raw buffer repr
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail(
                        "ERROR: Data mismatch for dkey = {0}, akey={1}".format(
                            "dkey {0}".format(dkey),
                            "akey {0}".format(akey)))
                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

    @avocado.fail_on(DaosApiError)
    @skipForTicket("DAOS-3208")
    def test_array_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify Array data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,object,full_regression,small,array_object
        """
        self.d_log.info("Writing the Array Dataset")
        record_index = 0
        transaction = []
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                c_values = []
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                # build an array of identical records of self.array_size
                for item in range(self.array_size):
                    c_values.append(
                        (ctypes.create_string_buffer(value),
                         len(value) + 1))
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                new_transaction = self.container.get_new_tx()
                self.ioreq.insert_array(c_dkey, c_akey, c_values,
                                        new_transaction)
                self.container.commit_tx(new_transaction)
                transaction.append(new_transaction)
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        # drop and re-establish all handles before verification
        self.reconnect()

        self.d_log.info("Array Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = []
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                for item in range(self.array_size):
                    indata.append(value)
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_rec_count = ctypes.c_uint(len(indata))
                c_rec_size = ctypes.c_size_t(len(indata[0]) + 1)
                outdata = self.ioreq.fetch_array(c_dkey,
                                                 c_akey,
                                                 c_rec_count,
                                                 c_rec_size)
                # [:-1] drops the trailing NUL the fetch includes
                for item in enumerate(indata):
                    if indata[item[0]] != outdata[item[0]][:-1]:
                        self.d_log.error("ERROR:Data mismatch for "
                                         "dkey = {0}, "
                                         "akey = {1}".format(
                                             "dkey {0}".format(dkey),
                                             "akey {0}".format(akey)))
                        self.fail(
                            "ERROR:Data mismatch for dkey = {0}, akey={1}".
                            format("dkey {0}".format(dkey),
                                   "akey {0}".format(akey)))
                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0
class DeleteContainerTest(Test):
    """
    Tests DAOS container delete and close.

    Each parameter combination from the yaml (container UUID validity,
    pool handle validity, open state, force flag) carries its own
    expected PASS/FAIL outcome.
    """

    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        # parameters used in pool create
        self.createmode = self.params.get("mode",
                                          '/run/createtests/createmode/')
        self.createuid = os.geteuid()
        self.creategid = os.getegid()
        self.createsetid = self.params.get("setname",
                                           '/run/createtests/createset/')
        self.createsize = self.params.get("size",
                                          '/run/createtests/createsize/')

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.POOL = None
        self.CONTAINER = None
        self.hostfile = None

        hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(hostlist, self.tmp)

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        ServerUtils.stopServer()
        if self.hostfile is not None:
            os.remove(self.hostfile)

    def test_container_delete(self):
        """
        Test basic container delete

        :avocado: tags=regression,cont,vm
        """
        # each yaml entry is [value, expected_result]; the overall
        # expectation is FAIL if any single parameter expects FAIL
        expected_for_param = []
        uuidlist = self.params.get("uuid",
                                   '/run/createtests/ContainerUUIDS/*/')
        contUUID = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/PoolHandles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        openlist = self.params.get("opened",
                                   "/run/createtests/ConnectionOpened/*/")
        opened = openlist[0]
        expected_for_param.append(openlist[1])

        forcelist = self.params.get("force",
                                    "/run/createtests/ForceDestroy/*/")
        force = forcelist[0]
        expected_for_param.append(forcelist[1])

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        # special expected result for a specific case:
        # valid pool handle + valid container UUID, container opened,
        # force destroy > 0 — force overrides the open state, so it passes
        if opened and force > 0 and \
           not contUUID == 'INVALID' and poh == 'VALID':
            expected_result = 'PASS'

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(self.createmode, self.createuid, self.creategid,
                             self.createsize, self.createsetid, None)

            # need a connection to create container
            self.POOL.connect(1 << 1)
            self.CONTAINER = DaosContainer(self.Context)

            # create should always work (testing destroy)
            if not contUUID == 'INVALID':
                contUUID = uuid.UUID(uuidlist[0])
                self.CONTAINER.create(self.POOL.handle, contUUID)
            else:
                self.CONTAINER.create(self.POOL.handle)

            # Opens the container if required
            if opened:
                self.CONTAINER.open(self.POOL.handle)

            # wait a few seconds and then attempts to destroy container
            time.sleep(5)
            if poh == 'VALID':
                poh = self.POOL.handle

            # if container is INVALID, overwrite with non existing UUID
            if contUUID == 'INVALID':
                contUUID = uuid.uuid4()

            self.CONTAINER.destroy(force, poh, contUUID)
            self.CONTAINER = None

            # cleanup the pool
            #self.POOL.disconnect()
            self.POOL.destroy(1)
            self.POOL = None

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except Exception as e:
            # BUGFIX: use print() function form for consistency with the
            # rest of the file (works identically under Python 2)
            print(e)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
class Snapshot(TestWithServers):
    """
    Epic: DAOS-2249 Create system level tests that cover basic snapshot
        functionality.
    Testcase:
        DAOS-1370 Basic snapshot test
        DAOS-1386 Test container SnapShot information
        DAOS-1371 Test list snapshots
        DAOS-1395 Test snapshot destroy
        DAOS-1402 Test creating multiple snapshots

    Test Class Description:
        Start DAOS servers, set up the pool and container for the above
        snapshot Epic and Testcases, including snapshot basic, container
        information, list, creation and destroy.

    :avocado: recursive
    """

    def setUp(self):
        """
        set up method
        """
        super(Snapshot, self).setUp()
        # get parameters from yaml file
        createmode = self.params.get("mode", '/run/poolparams/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/poolparams/createset/')
        createsize = self.params.get("size", '/run/poolparams/createsize/')
        self.log.info("==In setUp, self.context= %s", self.context)

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to the pool with rw permission
            #   DAOS_PC_RO = int(1 << 0)
            #   DAOS_PC_RW = int(1 << 1)
            #   DAOS_PC_EX = int(1 << 2)
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

        except DaosApiError as error:
            self.log.info("Error detected in DAOS pool container setup: %s",
                          str(error))
            self.log.info(traceback.format_exc())
            self.fail("##Test failed on setUp, before snapshot taken")

        # now open it
        self.container.open()

        # do a query and compare the UUID returned from create with
        # that returned by query
        self.container.query()
        if self.container.get_uuid_str() != c_uuid_to_str(
                self.container.info.ci_uuid):
            self.fail("##Container UUID did not match the one in info.")

    def tearDown(self):
        """
        tear down method
        """
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        except DaosApiError as excep:
            self.log.info(excep)
            self.log.info(traceback.format_exc())
            self.fail("##Snapshot test failed on cleanUp.")
        finally:
            super(Snapshot, self).tearDown()

    def display_snapshot(self, snapshot):
        """
        To display the snapshot information.

        Args:
            snapshot: snapshot handle to be displayed.
        Return:
            none.
        """
        self.log.info("==display_snapshot================")
        self.log.info("snapshot=                 %s", snapshot)
        self.log.info("snapshot.context=         %s", snapshot.context)
        self.log.info("snapshot.context.libdaos= %s",
                      snapshot.context.libdaos)
        self.log.info("snapshot.context.libtest= %s",
                      snapshot.context.libtest)
        self.log.info("snapshot.context.ftable=  %s", snapshot.context.ftable)
        self.log.info("snapshot.context.ftable[list-attr]= %s",
                      snapshot.context.ftable["list-attr"])
        self.log.info("snapshot.context.ftable[test-event]=%s",
                      snapshot.context.ftable["test-event"])
        self.log.info("snapshot.name=            %s", snapshot.name)
        self.log.info("snapshot.epoch=           %s", snapshot.epoch)
        self.log.info("==================================")

    def take_snapshot(self, container, epoch):
        """
        To take a snapshot on the container with current epoch.

        Args:
            container: container for the snapshot
            epoch: the container epoch for the snapshot
        Return:
            An object representing the snapshot
        """
        self.log.info("==Taking snapshot for:")
        self.log.info("    coh=   %s", container.coh)
        self.log.info("    epoch= %s", epoch)
        snapshot = DaosSnapshot(self.context)
        snapshot.create(container.coh, epoch)
        self.display_snapshot(snapshot)
        return snapshot

    def invalid_snapshot_test(self, coh, epoch):
        """
        Negative snapshot test with invalid container handle or epoch.

        Args:
            coh: container handle for the snapshot (may be invalid/None)
            epoch: the container epoch for the snapshot (may be invalid)
        Return:
            0: Failed
            1: Passed (expected failure detected)
        """
        status = 0
        try:
            snapshot = DaosSnapshot(self.context)
            snapshot.create(coh, epoch)
        except Exception as error:
            self.log.info("==>Negative test, expected error: %s", str(error))
            status = 1
        return status

    def test_snapshot_negativecases(self):
        """
        Test ID: DAOS-1390 Verify snap_create bad parameter behavior.
            DAOS-1322 Create a new container, verify snapshot state.
                      as expected for a brand new container.
            DAOS-1392 Verify snap_destroy bad parameter behavior.
            DAOS-1388 Verify snap_list bad parameter behavior.
        Test Description:
            (0)Take a snapshot of the newly created container.
            (1)Create an object, write random data into it, and take
               a snapshot.
            (2)Verify the snapshot is working properly.
            (3)Test snapshot with an invalid container handle.
            (4)Test snapshot with a NULL container handle.
            (5)Test snapshot with an invalid epoch.
            (6)Verify snap_destroy with a bad parameter.
            (7)Verify snap_list bad parameter behavior.
        Use Cases:
            Combinations with minimun 1 client and 1 server.
        :avocado: tags=snap,snapshot_negative,snapshotcreate_negative
        """

        #DAOS-1322 Create a new container, verify snapshot state as expected
        # for a brand new container.
        # NOTE: self.fail() takes a single message argument; all calls below
        # pre-format the message instead of passing printf-style arguments
        # (which would raise TypeError rather than report the failure).
        try:
            self.log.info(
                "==(0)Take a snapshot of the newly created container.")
            snapshot = DaosSnapshot(self.context)
            snapshot.create(self.container.coh, 0)
            self.display_snapshot(snapshot)
        except Exception as error:
            self.fail("##(0)Error on a snapshot on a new container "
                      "{}".format(str(error)))

        #(1)Create an object, write some data into it, and take a snapshot
        obj_cls = self.params.get("obj_class", '/run/object_class/*')
        akey = self.params.get("akey", '/run/snapshot/*', default="akey")
        dkey = self.params.get("dkey", '/run/snapshot/*', default="dkey")
        data_size = self.params.get("test_datasize", '/run/snapshot/*',
                                    default=150)
        rand_str = lambda n: ''.join(
            [random.choice(string.lowercase) for i in xrange(n)])
        thedata = "--->>>Happy Daos Snapshot-Create Negative Testing " + \
                  "<<<---" + rand_str(random.randint(1, data_size))
        try:
            obj, epoch = self.container.write_an_obj(thedata,
                                                     len(thedata) + 1,
                                                     dkey, akey,
                                                     obj_cls=obj_cls)
        except DaosApiError as error:
            self.fail("##(1)Test failed during the initial object write: "
                      "{}".format(str(error)))
        obj.close()
        ##Take a snapshot of the container
        snapshot = self.take_snapshot(self.container, epoch)
        self.log.info("==(1)Container epoch= %s", epoch)
        self.log.info(" snapshot.epoch= %s", snapshot.epoch)

        #(2)Verify the snapshot is working properly.
        try:
            obj.open()
            snap_handle = snapshot.open(self.container.coh, snapshot.epoch)
            thedata2 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, snap_handle.value)
        except Exception as error:
            self.fail("##(2)Error when retrieving the snapshot data: "
                      "{}".format(str(error)))
        self.log.info("==(2)snapshot_list[ind]=%s", snapshot)
        self.log.info("==snapshot.epoch= %s", snapshot.epoch)
        self.log.info("==written thedata=%s", thedata)
        self.log.info("==thedata2.value= %s", thedata2.value)
        if thedata2.value != thedata:
            raise Exception("##(2)The data in the snapshot is not the "
                            "same as the original data")
        self.log.info("==Snapshot data matches the data originally "
                      "written.")

        #(3)Test snapshot with an invalid container handle
        self.log.info("==(3)Snapshot with an invalid container handle.")
        if self.invalid_snapshot_test(self.container, epoch):
            self.log.info(
                "==>Negative test 1, expecting failed on taking "
                "snapshot with an invalid container.coh: %s",
                self.container)
        else:
            self.fail("##(3)Negative test 1 passing, expecting failed on"
                      " taking snapshot with an invalid container.coh: "
                      "{}".format(self.container))

        #(4)Test snapshot with a NULL container handle
        self.log.info("==(4)Snapshot with a NULL container handle.")
        if self.invalid_snapshot_test(None, epoch):
            self.log.info("==>Negative test 2, expecting failed on taking "
                          "snapshot on a NULL container.coh.")
        else:
            self.fail("##(4)Negative test 2 passing, expecting failed on "
                      "taking snapshot with a NULL container.coh.")

        #(5)Test snapshot with an invalid epoch
        self.log.info("==(5)Snapshot with a NULL epoch.")
        if self.invalid_snapshot_test(self.container.coh, None):
            self.log.info("==>Negative test 3, expecting failed on taking "
                          "snapshot with a NULL epoch.")
        else:
            self.fail("##(5)Negative test 3 passing, expecting failed on "
                      "taking snapshot with a NULL epoch.")

        #(6)DAOS-1392 destroy snapshot with an invalid handle
        self.log.info(
            "==(6)DAOS-1392 destroy snapshot with an invalid handle.")
        try:
            snapshot.destroy(None, epoch)
            self.fail("##(6)Negative test destroy snapshot with an "
                      "invalid coh handle, expected fail, shown Passing##")
        except Exception as error:
            self.log.info(
                "==>Negative test, destroy snapshot with an invalid handle.")
            self.log.info(" Expected Error: %s", str(error))
            expected_error = "RC: -1002"
            if expected_error not in str(error):
                self.fail("##(6.1)Expecting error RC: -1002 did not show.")

        #(7)DAOS-1388 Verify snap_list bad parameter behavior
        self.log.info(
            "==(7)DAOS-1388 Verify snap_list bad parameter behavior.")
        try:
            snapshot.list(None, 0)
            self.fail("##(7)Negative test snapshot list with an "
                      "invalid coh and epoch, expected fail, shown Passing##")
        except Exception as error:
            self.log.info(
                "==>Negative test, snapshot list with an invalid coh.")
            self.log.info(" Expected Error: %s", str(error))
            expected_error = "RC: -1002"
            if expected_error not in str(error):
                self.fail("##(7.1)Expecting error RC: -1002 did not show.")

    def test_snapshots(self):
        """
        Test ID: DAOS-1386 Test container SnapShot information
            DAOS-1371 Test list snapshots
            DAOS-1395 Test snapshot destroy
            DAOS-1402 Test creating multiple snapshots
        Test Description:
            (1)Create an object, write random data into it, and take
               a snapshot.
            (2)Make changes to the data object. The write_an_obj function
               does a commit when the update is complete.
            (3)Verify the data in the snapshot is the original data.
               Get a handle for the snapshot and read the object at dkey,
               akey. Compare it to the originally written data.
            (4)List the snapshot and make sure it reflects the original
               epoch.
               ==>Repeat step(1) to step(4) for multiple snapshot tests.
            (5)Verify the snapshots data.
            (6)Destroy the snapshot.
            (7)Check if still able to Open the destroyed snapshot and
               Verify the snapshot removed from the snapshot list.
        Use Cases:
            Require 1 client and 1 server to run snapshot test.
            1 pool and 1 container is used, num_of_snapshot defined in the
            snapshot.yaml will be performed and verified.
        :avocado: tags=snap,snapshots
        """

        coh_list = []
        container_epoch_list = []
        snapshot_list = []
        test_data = []
        snapshot_index = 0
        obj_cls = self.params.get("obj_class", '/run/object_class/*')
        akey = self.params.get("akey", '/run/snapshot/*', default="akey")
        dkey = self.params.get("dkey", '/run/snapshot/*', default="dkey")
        data_size = self.params.get("test_datasize", '/run/snapshot/*',
                                    default=150)
        snapshot_loop = self.params.get("num_of_snapshot", '/run/snapshot/*',
                                        default=10)
        rand_str = lambda n: ''.join(
            [random.choice(string.lowercase) for i in xrange(n)])
        #
        #Test loop for creat, modify and snapshot object in the DAOS
        #container.
        #
        # NOTE: self.fail() takes a single message argument; all calls below
        # pre-format the message instead of passing printf-style arguments
        # (which would raise TypeError rather than report the failure).
        while snapshot_index < snapshot_loop:
            #(1)Create an object, write some data into it, and take a
            #   snapshot
            #size = random.randint(1, 100) + 1
            snapshot_index += 1
            thedata = "--->>>Happy Daos Snapshot Testing " + \
                      str(snapshot_index) + \
                      "<<<---" + rand_str(random.randint(1, data_size))
            datasize = len(thedata) + 1
            try:
                obj, epoch = self.container.write_an_obj(thedata,
                                                         datasize,
                                                         dkey, akey,
                                                         obj_cls=obj_cls)
                obj.close()
            except DaosApiError as error:
                self.fail("##Test failed during the initial object write: "
                          "{}".format(str(error)))
            #Take a snapshot of the container
            snapshot = DaosSnapshot(self.context)
            snapshot.create(self.container.coh, epoch)
            self.log.info("==Wrote an object and created a snapshot")

            #Display snapshot
            substep = "1." + str(snapshot_index)
            self.log.info("==(1)Test step %s", substep)
            self.log.info("==self.container epoch= %s", epoch)
            self.log.info("==snapshot.epoch= %s", snapshot.epoch)
            self.display_snapshot(snapshot)

            #Save snapshot test data
            coh_list.append(self.container.coh)
            container_epoch_list.append(epoch)
            snapshot_list.append(snapshot)
            test_data.append(thedata)

            #(2)Make changes to the data object. The write_an_obj function
            #   does a commit when the update is complete
            more_transactions = 100
            self.log.info("==(2)Committing %d additional transactions to "
                          "the same KV.", more_transactions)
            while more_transactions:
                size = random.randint(1, 250) + 1
                new_data = rand_str(size)
                try:
                    new_obj, _ = self.container.write_an_obj(
                        new_data, size, dkey, akey, obj_cls=obj_cls)
                    new_obj.close()
                except Exception as error:
                    self.fail("##Test failed during the write of "
                              "multi-objects: {}".format(str(error)))
                more_transactions -= 1

            #(3)Verify the data in the snapshot is the original data.
            #   Get a handle for the snapshot and read the object at
            #   dkey, akey. Compare it to the originally written data.
            self.log.info("==(3)snapshot test loop: %s", snapshot_index)
            try:
                obj.open()
                snap_handle = snapshot.open(self.container.coh,
                                            snapshot.epoch)
                thedata3 = self.container.read_an_obj(datasize, dkey, akey,
                                                      obj, snap_handle.value)
            except Exception as error:
                self.fail("##Error when retrieving the snapshot data: "
                          "{}".format(str(error)))
            self.log.info("==container_epoch= %s", epoch)
            self.log.info("==snapshot_list[ind]=%s", snapshot)
            self.log.info("==snapshot.epoch= %s", snapshot.epoch)
            self.log.info("==written thedata size= %s", len(thedata) + 1)
            self.log.info("==written thedata=%s", thedata)
            self.log.info("==thedata3.value= %s", thedata3.value)
            if thedata3.value != thedata:
                raise Exception("##The data in the snapshot is not the "
                                "same as the original data")
            self.log.info("==The snapshot data matches the data originally"
                          " written.")

            #(4)List the snapshot and make sure it reflects the original
            #   epoch
            try:
                reported_epoch = snapshot.list(self.container.coh, epoch)
            except Exception as error:
                self.fail("##Test was unable to list the snapshot: "
                          "{}".format(str(error)))
            self.log.info("==(4)List snapshot reported_epoch=%s",
                          reported_epoch)
            self.log.info(" snapshot.epoch=%s", snapshot.epoch)
            ##self.log.info("tickets already assigned DAOS-2390 DAOS-2392")
            #if snapshot.epoch != reported_epoch:
            #    raise Exception("##The snapshot epoch returned from "
            #                    "snapshot list is not the same as the "
            #                    "original epoch snapshotted.")

        self.log.info("==After 10 additional commits the snapshot is "
                      "still available")

        #(5)Verify the snapshots data
        for ind in range(0, len(container_epoch_list)):
            epoch = container_epoch_list[ind]
            current_ss = snapshot_list[ind]
            datasize = len(test_data[ind]) + 1
            try:
                obj.open()
                snap_handle = snapshot.open(self.container.coh,
                                            current_ss.epoch)
            except Exception as error:
                self.fail("##Error when retrieving the snapshot data: "
                          "{}".format(str(error)))
            ##self.log.info("tickets already assigned DAOS-2484 and "
            ##              "DAOS-2557")
            #thedata3 = self.container.read_an_obj(datasize, dkey, akey, obj,
            #                                      snap_handle.value)
            #self.log.info("==(5)snapshot test list %s:".format(ind+1))
            #self.log.info("==container_epoch_list[ind]=%s".format(epoch))
            #self.log.info("==snapshot_list[ind]=%s"
            #              .format(snapshot_list[ind]))
            #self.log.info("==snapshot_list[ind].epoch=%s"
            #              .format(current_ss.epoch))
            #self.log.info("==test_data_size= %s".format(datasize))
            #self.log.info("==thedata3.value= %s".format(thedata3.value))
            #self.log.info("==test_data[ind]= %s".format(test_data[ind]))
            #if thedata3.value != test_data[ind]:
            #    raise Exception("##The data in the snapshot is not "
            #                    "same as the original data")
            #self.log.info("The snapshot data matches the data originally "
            #              "written.")

        #(6)Destroy the snapshot
        self.log.info("==(6)Destroy snapshot epoch: %s", epoch)
        try:
            snapshot.destroy(self.container.coh, epoch)
            self.log.info("==Snapshot successfully destroyed")
        except Exception as error:
            self.fail("##Error on snapshot.destroy: {}".format(str(error)))

        #(7)Check if still able to Open the destroyed snapshot and
        #   Verify the snapshot removed from the snapshot list
        try:
            obj.open()
            snap_handle3 = snapshot.open(self.container.coh, snapshot.epoch)
            thedata3 = self.container.read_an_obj(datasize, dkey, akey,
                                                  obj, snap_handle3.value)
        except Exception as error:
            self.fail("##(7)Error when retrieving the 2nd snapshot data: "
                      "{}".format(str(error)))
        self.log.info("-->thedata_after_snapshot.destroyed.value= %s",
                      thedata3.value)
        self.log.info("==>snapshot_epoch= %s", snapshot.epoch)
        self.log.info("-->snapshot.list(self.container.coh, epoch)=%s",
                      snapshot.list(self.container.coh, epoch))
        #self.cancel("tickets already assigned DAOS-2390 DAOS-2392")
        #Still able to open the snapshot and read data after destroyed.
        self.log.info("==(7)DAOS container SnapshotInfo test passed")

        # Now destroy the snapshot
        try:
            snapshot.destroy(self.container.coh)
            self.log.info("==Snapshot successfully destroyed")
        except Exception as error:
            self.fail("##Error on snapshot.destroy: {}".format(str(error)))
class PunchTest(TestWithServers):
    """
    Simple test to verify the 3 different punch calls.
    :avocado: recursive
    """

    def setUp(self):
        """Start the servers (via TestWithServers), then create, connect and
        open one pool and one container for the test to punch against.

        Any DaosApiError here fails the test immediately.
        """
        try:
            super(PunchTest, self).setUp()

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            # 1 << 1: read-write pool connection flag
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()
        except DaosApiError as excpn:
            print(excpn)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")

    def tearDown(self):
        """Close and destroy the container, then disconnect and destroy the
        pool; the superclass teardown (server shutdown) runs in the finally
        block so it always happens, even if cleanup raises.
        """
        try:
            if self.container:
                self.container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            if self.container:
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        except DaosApiError as excpn:
            print(excpn)
            print(traceback.format_exc())
            self.fail("Test failed during teardown.\n")
        finally:
            super(PunchTest, self).tearDown()

    def test_dkey_punch(self):
        """
        The most basic test of the dkey punch function: punching committed
        data inside its transaction must fail; punching with tx 0 must work.

        :avocado: tags=object,punch,dkeypunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, txn = self.container.write_an_obj(thedata, len(thedata) + 1,
                                                   dkey, akey, obj_cls=1)

            # read the data back and make sure its correct
            thedata2 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, txn)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # now punch this data, should fail, can't punch committed data
            obj.punch_dkeys(txn, [dkey])

            # expecting punch of commit data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as dummy_e:
            pass

        try:
            # now punch this data (tx 0 punches outside the committed tx)
            obj.punch_dkeys(0, [dkey])

        # this one should work so error if exception occurs
        except DaosApiError as dummy_e:
            self.fail("Punch should have worked.\n")

        # there are a bunch of other cases to test here,
        #    --test punching the same updating and punching the same data in
        #    the same tx, should fail
        #    --test non updated data in an open tx, should work

    def test_akey_punch(self):
        """
        The most basic test of the akey punch function: punching one akey of
        committed data inside its transaction must fail; with tx 0 it works.

        :avocado: tags=object,punch,akeypunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            dkey = "this is the dkey"
            data1 = [("this is akey 1", "this is data value 1"),
                     ("this is akey 2", "this is data value 2"),
                     ("this is akey 3", "this is data value 3")]
            obj, txn = self.container.write_multi_akeys(dkey, data1, obj_cls=1)

            # read back the 1st epoch's data and check 1 value just to make
            # sure everything is on the up and up
            readbuf = [(data1[0][0], len(data1[0][1]) + 1),
                       (data1[1][0], len(data1[1][1]) + 1),
                       (data1[2][0], len(data1[2][1]) + 1)]
            retrieved_data = self.container.read_multi_akeys(
                dkey, readbuf, obj, txn)
            if retrieved_data[data1[1][0]] != data1[1][1]:
                print("middle akey: {}".format(retrieved_data[data1[1][0]]))
                self.fail("data retrieval failure")

            # now punch one akey from this data
            obj.punch_akeys(txn, dkey, [data1[1][0]])

            # expecting punch of commit data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as excep:
            print(excep)

        try:
            # now punch the object without a tx
            obj.punch_akeys(0, dkey, [data1[1][0]])

        # expecting it to work this time so error
        except DaosApiError as excep:
            self.fail("Punch should have worked.\n")

    def test_obj_punch(self):
        """
        The most basic test of the object punch function.  Really similar
        to above except the whole object is deleted.

        :avocado: tags=object,punch,objpunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, txn = self.container.write_an_obj(thedata, len(thedata) + 1,
                                                   dkey, akey, obj_cls=1)

            # read the data back and make sure its correct
            thedata2 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, txn)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # now punch the object, commited so not expecting it to work
            obj.punch(txn)

            # expecting punch of commit data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as excep:
            print(excep)

        try:
            # with tx 0 the punch should succeed
            obj.punch(0)

        # expecting it to work without a tx
        except DaosApiError as excep:
            print(excep)
            self.fail("Punch should have worked.\n")
def test_bad_handle(self):
    """
    Test ID: DAOS-1376

    Test Description: Pass a bogus object handle, should return bad handle.

    The object's handle is saved, overwritten with a garbage value, and a
    second update is attempted; the library is expected to reject it with
    rc -1002 (bad handle).

    :avocado: tags=object,objupdate,objbadhand,regression,vm,small
    """
    try:
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/conttests/createmode/')
        createsetid = self.params.get("setname",
                                      '/run/conttests/createset/')
        createsize = self.params.get("size", '/run/conttests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid, createsize,
                    createsetid, None)
        self.plog.info("Pool %s created.", pool.get_uuid_str())

        # need a connection to create container
        pool.connect(1 << 1)

        # create a container
        container = DaosContainer(self.context)
        container.create(pool.handle)
        self.plog.info("Container %s created.", container.get_uuid_str())

        # now open it
        container.open()

        # create an object and write some data into it
        thedata = "a string that I want to stuff into an object"
        thedatasize = len(thedata) + 1
        dkey = "this is the dkey"
        akey = "this is the akey"
        obj, dummy_tx = container.write_an_obj(thedata, thedatasize,
                                               dkey, akey, None, None, 2)

        # corrupt the object handle, then retry the update; this is
        # expected to raise with rc -1002
        saved_oh = obj.obj_handle
        obj.obj_handle = 99999

        obj, dummy_tx = container.write_an_obj(thedata, thedatasize,
                                               dkey, akey, obj, None, 2)

        # BUGFIX: restore the saved handle onto the object it was taken
        # from; it was previously assigned to container.oh, which left the
        # object still holding the bogus handle during cleanup
        obj.obj_handle = saved_oh

        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)
        self.fail("Test was expected to return a -1002 but it has not.\n")

    except DaosApiError as excep:
        # the corrupted-handle update raised as expected; restore the real
        # handle (BUGFIX: onto the object, not container.oh) so that the
        # cleanup below operates on valid handles
        obj.obj_handle = saved_oh
        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)
        self.plog.info("Test Complete")

        if '-1002' not in str(excep):
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to get -1002 but it has not.\n")
class ObjectDataValidation(avocado.Test):
    """
    Test Class Description:
        Tests that create Different length records,
        Disconnect the pool/container and reconnect,
        validate the data after reconnect.
    """
    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        """Start a server, create/connect a pool, and open a container and
        object plus an IORequest used by the data-validation tests.

        All attributes are pre-set to None so tearDown can run safely even
        if setUp fails part-way through.
        """
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("server_group",
                                       '/server/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist,
                                                    self.workdir)
        # [0]: yaml values are lists; only the first entry is used
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        # NOTE(review): this path has no '/run' prefix unlike the other
        # params — confirm it matches the yaml layout
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        ServerUtils.runServer(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)
        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()
        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    def tearDown(self):
        """Tear down the container and pool; the server is always stopped
        via the finally block, even if the DAOS cleanup raises."""
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist)

    def reconnect(self):
        '''
        Function to reconnect the pool/container and reopen the Object
        for read verification.
        '''
        # Close the Obj/Container, Disconnect the Pool.
        self.obj.close()
        self.container.close()
        self.pool.disconnect()
        time.sleep(5)
        # Connect Pool, Open Container and Object
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        # a fresh IORequest is needed after reopening the object
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    @avocado.fail_on(DaosApiError)
    def test_single_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=single_object,data_verification,medium,vm
        """
        self.d_log.info("Writing the Single Dataset")
        record_index = 0
        transaction = []
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # record value is the akey's first digit repeated to the
                # configured record length, cycling through record_length
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_value = ctypes.create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))

                # one transaction per record, committed immediately
                new_transaction = self.container.get_new_tx()
                self.ioreq.single_insert(c_dkey,
                                         c_akey,
                                         c_value,
                                         c_size,
                                         new_transaction)
                self.container.commit_tx(new_transaction)
                transaction.append(new_transaction)
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        self.reconnect()

        self.d_log.info("Single Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # regenerate the expected value the same way it was written
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey,
                                              c_akey,
                                              len(indata) + 1)
                # repr()[1:-1] strips the quotes from the fetched buffer
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error(
                        "ERROR:Data mismatch for dkey = {0}, akey={1}".format(
                            "dkey {0}".format(dkey),
                            "akey {0}".format(akey)))
                    self.fail(
                        "ERROR: Data mismatch for dkey = {0}, akey={1}".format(
                            "dkey {0}".format(dkey),
                            "akey {0}".format(akey)))
                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

    @avocado.fail_on(DaosApiError)
    def test_array_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify Array data after
                          pool/container disconnect/reconnect.
        :avocado: tags=array_object,data_verification,array,medium,vm
        """
        self.d_log.info("Writing the Array Dataset")
        record_index = 0
        transaction = []
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                c_values = []
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                # build array_size identical records for this akey
                for item in range(self.array_size):
                    c_values.append((ctypes.create_string_buffer(value),
                                     len(value) + 1))
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))

                new_transaction = self.container.get_new_tx()
                self.ioreq.insert_array(c_dkey,
                                        c_akey,
                                        c_values,
                                        new_transaction)
                self.container.commit_tx(new_transaction)
                transaction.append(new_transaction)
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        self.reconnect()

        self.d_log.info("Array Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = []
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                for item in range(self.array_size):
                    indata.append(value)
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_rec_count = ctypes.c_uint(len(indata))
                # +1 accounts for the NUL terminator stored with each record
                c_rec_size = ctypes.c_size_t(len(indata[0]) + 1)

                outdata = self.ioreq.fetch_array(c_dkey,
                                                 c_akey,
                                                 c_rec_count,
                                                 c_rec_size)

                # [:-1] drops the trailing NUL from each fetched record
                for item in enumerate(indata):
                    if indata[item[0]] != outdata[item[0]][:-1]:
                        self.d_log.error(
                            "ERROR:Data mismatch for dkey = {0}, akey={1}".
                            format("dkey {0}".format(dkey),
                                   "akey {0}".format(akey)))
                        self.fail(
                            "ERROR:Data mismatch for dkey = {0}, akey={1}".
                            format("dkey {0}".format(dkey),
                                   "akey {0}".format(akey)))
                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0
class ContainerAsync(Test):
    """
    Tests DAOS pool connect permissions (non existing pool handle, bad uuid)
    and close.

    Each test drives a DaosContainer API asynchronously: cb_func (defined at
    module level) sets GLOB_RC and GLOB_SIGNAL, and the test waits on the
    event before checking the return code.

    :avocado: tags=container,containercreate2,connectpermission
    """

    def setUp(self):
        """Read build paths, start a DAOS server, and wait for it to come up."""
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.POOL = None

        hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(hostlist, self.tmp)
        print("Host file is: {}".format(self.hostfile))

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)
        # give the server time to start before the test begins
        time.sleep(10)

    def tearDown(self):
        """Remove the host file, destroy any leftover pool, stop the server."""
        if self.hostfile is not None:
            os.remove(self.hostfile)
        if self.POOL is not None and self.POOL.attached:
            self.POOL.destroy(1)
        ServerUtils.stopServer()

    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=container,containerasync,createasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(createmode, createuid, creategid, createsize,
                             createsetid, None)
            poh = self.POOL.handle
            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)

            GLOB_SIGNAL = threading.Event()
            self.Container1.create(poh, None, cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful Container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.POOL.destroy(1)
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000  # sentinel, overwritten by cb_func
            self.Container2.create(poh, None, cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != -1005:
                self.fail("RC not as expected in async test")
            print("RC after Container create failed:", GLOB_RC)

            # cleanup the Pool and Container
            self.POOL = None

        except ValueError as e:
            # FIX: Python 2 print statements converted to print() calls
            # NOTE(review): other tests in this file catch DaosApiError
            # here — confirm ValueError is the intended exception
            print(e)
            print(traceback.format_exc())

    def test_destroyasync(self):
        """
        Test container destroy for asynchronous mode.

        :avocado: tags=container,containerasync,destroyasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(createmode, createuid, creategid, createsize,
                             createsetid, None)
            poh = self.POOL.handle
            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)

            self.Container1.create(poh)

            GLOB_SIGNAL = threading.Event()
            self.Container1.destroy(1, poh, None, cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful Container create: ", GLOB_RC)

            # Try to destroy container again, this should fail, as
            # non-existent. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000  # sentinel, overwritten by cb_func
            self.Container2.destroy(1, poh, None, cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != -1003:
                self.fail("RC not as expected in async test")
            print("RC after Container destroy failed:", GLOB_RC)

            # cleanup the Pool and Container
            self.POOL.disconnect()
            self.POOL.destroy(1)
            self.POOL = None

        except ValueError as e:
            # FIX: Python 2 print statements converted to print() calls
            print(e)
            print(traceback.format_exc())

    def test_openasync(self):
        """
        Test container open for asynchronous mode.

        :avocado: tags=container,containerasync,openasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(createmode, createuid, creategid, createsize,
                             createsetid, None)
            poh = self.POOL.handle
            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)

            self.Container1.create(poh)

            str_cuuid = self.Container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            coh = self.Container1.coh

            GLOB_SIGNAL = threading.Event()
            self.Container1.open(poh, cuuid, 2, coh, cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful Container create: ", GLOB_RC)

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000  # sentinel, overwritten by cb_func
            self.Container2.open(None, None, None, None, cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != -1003:
                self.fail("RC not as expected in async test")
            print("RC after Container destroy failed:", GLOB_RC)

            # cleanup the Pool and Container
            self.Container1.close()
            self.Container1.destroy()
            self.POOL.disconnect()
            self.POOL.destroy(1)
            self.POOL = None

        except ValueError as e:
            # FIX: Python 2 print statements converted to print() calls
            print(e)
            print(traceback.format_exc())

    def test_closeasync(self):
        """
        Test container close for asynchronous mode.

        :avocado: tags=container,containerasync,closeasync
        """
        # FIX: tag separator was a '.' ("containerasync.closeasync"), which
        # made avocado see one bogus tag instead of two
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(createmode, createuid, creategid, createsize,
                             createsetid, None)
            poh = self.POOL.handle
            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)

            self.Container1.create(poh)

            str_cuuid = self.Container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            coh = self.Container1.coh

            self.Container1.open(poh, cuuid, 2, coh)

            GLOB_SIGNAL = threading.Event()
            self.Container1.close(coh, cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail(
                    "RC not as expected in async test:{0}".format(GLOB_RC))
            print("RC after successful Container create: ", GLOB_RC)

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000  # sentinel, overwritten by cb_func
            self.Container2.close(coh, cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != -1002:
                self.fail(
                    "RC not as expected in async test:{0}".format(GLOB_RC))
            print("RC after Container destroy failed:", GLOB_RC)

            # cleanup the Pool and Container
            self.Container1.destroy()
            self.POOL.disconnect()
            self.POOL.destroy(1)
            self.POOL = None

        except ValueError as e:
            # FIX: Python 2 print statements converted to print() calls
            print(e)
            print(traceback.format_exc())

    def test_queryasync(self):
        """
        Test container query for asynchronous mode.

        :avocado: tags=container,containerasync,queryasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(createmode, createuid, creategid, createsize,
                             createsetid, None)
            poh = self.POOL.handle
            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)

            self.Container1.create(poh)

            str_cuuid = self.Container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            coh = self.Container1.coh

            # Open Container
            self.Container1.open(poh, None, 2, None, coh)

            GLOB_SIGNAL = threading.Event()
            self.Container1.query(coh, cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail(
                    "RC not as expected in async test:{0}".format(GLOB_RC))
            print("RC after successful Container create: ", GLOB_RC)

            # Close opened Container
            self.Container1.close(coh)

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000  # sentinel, overwritten by cb_func
            self.Container2.query(coh, cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != -1002:
                self.fail(
                    "RC not as expected in async test:{0}".format(GLOB_RC))
            print("RC after Container destroy failed:", GLOB_RC)

            # cleanup the Pool and Container
            self.Container1.destroy()
            self.POOL.disconnect()
            self.POOL.destroy(1)
            self.POOL = None

        except ValueError as e:
            # FIX: Python 2 print statements converted to print() calls
            print(e)
            print(traceback.format_exc())
class TestContainer(TestDaosApiBase):
    """A class for functional testing of DaosContainer objects."""

    def __init__(self, pool, cb_handler=None):
        """Create a TestContainer object.

        Args:
            pool (TestPool): the test pool in which to create the container
            cb_handler (CallbackHandler, optional): callback object to use
                with the API methods. Defaults to None.
        """
        super(TestContainer, self).__init__(cb_handler)
        self.pool = pool
        self.log = self.pool.log

        # yaml-driven parameters controlling how much data write_objects
        # puts in the container
        self.object_qty = TestParameter(None)
        self.record_qty = TestParameter(None)
        self.akey_size = TestParameter(None)
        self.dkey_size = TestParameter(None)
        self.data_size = TestParameter(None)

        self.container = None      # underlying DaosContainer once created
        self.uuid = None           # container uuid string once created
        self.opened = False        # tracks open/close state
        self.written_data = []     # data written by write_objects

    def get_params(self, test, path="/run/container/*"):
        """Get the container parameters from the yaml file.

        Args:
            test (Test): avocado Test object
            path (str, optional): yaml namespace. Defaults to
                "/run/container/*".
        """
        super(TestContainer, self).get_params(test, path)

    @fail_on(DaosApiError)
    def create(self, uuid=None):
        """Create a container.

        Destroys any container previously owned by this object first.

        Args:
            uuid (str, optional): container uuid. Defaults to None.
        """
        self.destroy()
        self.log.info("Creating a container")
        self.container = DaosContainer(self.pool.context)
        self.container.create(self.pool.pool.handle, uuid)
        self.uuid = self.container.get_uuid_str()

    @fail_on(DaosApiError)
    def open(self):
        """Open the container.

        Returns:
            bool: True if the container has been opened; False if the
                container is already opened.
        """
        if not self.opened:
            self.log.info("Opening container %s", self.uuid)
            self.container.open()
            self.opened = True
            return True
        return False

    @fail_on(DaosApiError)
    def close(self):
        """Close the container.

        Returns:
            bool: True if the container has been closed; False if the
                container is already closed.
        """
        if self.opened:
            self.log.info("Closing container %s", self.uuid)
            self.container.close()
            self.opened = False
            return True
        return False

    @fail_on(DaosApiError)
    def destroy(self, force=1):
        """Destroy the container.

        Closes the container first if it is open, and clears the record of
        previously written data.

        Args:
            force (int, optional): force flag. Defaults to 1.

        Returns:
            bool: True if the container has been destroyed; False if the
                container does not exist.
        """
        if self.container:
            self.close()
            self.log.info("Destroying container %s", self.uuid)
            self.container.destroy(force)
            self.container = None
            self.written_data = []
            return True
        return False

    @fail_on(DaosTestError)
    def write_objects(self, rank=None, obj_class=None):
        """Write objects to the container.

        Writes object_qty objects, each sized by the record_qty, akey_size,
        dkey_size and data_size parameters, and records what was written in
        self.written_data for later verification by read_objects().

        Args:
            rank (int, optional): server rank. Defaults to None.
            obj_class (int, optional): daos object class. Defaults to None.
        """
        self.open()
        self.log.info("Writing objects in container %s", self.uuid)
        for _ in range(self.object_qty.value):
            self.written_data.append(TestContainerData())
            self.written_data[-1].write_object(
                self, self.record_qty.value, self.akey_size.value,
                self.dkey_size.value, self.data_size.value, rank, obj_class)

    @fail_on(DaosTestError)
    def read_objects(self):
        """Read the objects from the container and verify they match.

        Returns:
            bool: True if all previously written data reads back correctly;
                False if any object mismatches or if no data has been
                written yet.
        """
        self.open()
        self.log.info("Reading objects in container %s", self.uuid)
        # start False when nothing was written, then AND in each result
        status = len(self.written_data) > 0
        for data in self.written_data:
            status &= data.read_object(self)
        return status
class PunchTest(Test):
    """
    Simple test to verify the 3 different punch calls.
    """
    # NOTE(review): this looks like an older duplicate of the
    # TestWithServers-based PunchTest earlier in this file — if both live in
    # one module the later definition shadows the earlier one; they likely
    # originate from different files.

    def setUp(self):
        """Start a DAOS server directly (no TestWithServers base), then
        create/connect a pool and create/open a container for the test."""
        try:
            # get paths from the build_vars generated by build
            with open('../../../.build_vars.json') as f:
                build_paths = json.load(f)
            self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

            self.server_group = self.params.get("server_group", '/server/',
                                                'daos_server')

            # setup the DAOS python API
            self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')

            self.hostlist = self.params.get("test_machines", '/run/hosts/*')
            self.hostfile = WriteHostFile.WriteHostFile(
                self.hostlist, self.workdir)

            ServerUtils.runServer(self.hostfile, self.server_group,
                                  self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.Context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            # 1 << 1: read-write pool connection flag
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.Context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()
        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")

    def tearDown(self):
        """Close/destroy the container, disconnect/destroy the pool and
        remove the host file; the server is always stopped in the finally
        block."""
        try:
            if self.container:
                self.container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            if self.container:
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)

            if self.hostfile is not None:
                os.remove(self.hostfile)
        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test failed during teardown.\n")
        finally:
            ServerUtils.stopServer(hosts=self.hostlist)

    def test_dkey_punch(self):
        """
        The most basic test of the dkey punch function: punching committed
        data inside its transaction must fail; punching with tx 0 works.

        :avocado: tags=object,punch,dkeypunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, tx = self.container.write_an_obj(thedata, len(thedata) + 1,
                                                  dkey, akey, obj_cls=1)

            # read the data back and make sure its correct
            thedata2 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, tx)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # now punch this data, should fail, can't punch committed data
            obj.punch_dkeys(tx, [dkey])

            # expecting punch of commit data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as e:
            pass

        try:
            # now punch this data (tx 0 punches outside the committed tx)
            obj.punch_dkeys(0, [dkey])

        # this one should work so error if exception occurs
        except DaosApiError as e:
            self.fail("Punch should have worked.\n")

        # there are a bunch of other cases to test here,
        #    --test punching the same updating and punching the same data in
        #    the same tx, should fail
        #    --test non updated data in an open tx, should work

    def test_akey_punch(self):
        """
        The most basic test of the akey punch function: punching one akey of
        committed data inside its transaction must fail; with tx 0 it works.

        :avocado: tags=object,punch,akeypunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            dkey = "this is the dkey"
            data1 = [("this is akey 1", "this is data value 1"),
                     ("this is akey 2", "this is data value 2"),
                     ("this is akey 3", "this is data value 3")]
            obj, tx = self.container.write_multi_akeys(dkey, data1, obj_cls=1)

            # read back the 1st epoch's data and check 1 value just to make
            # sure everything is on the up and up
            readbuf = [(data1[0][0], len(data1[0][1]) + 1),
                       (data1[1][0], len(data1[1][1]) + 1),
                       (data1[2][0], len(data1[2][1]) + 1)]
            retrieved_data = self.container.read_multi_akeys(
                dkey, readbuf, obj, tx)
            if retrieved_data[data1[1][0]] != data1[1][1]:
                print("middle akey: {}".format(retrieved_data[data1[1][0]]))
                self.fail("data retrieval failure")

            # now punch one akey from this data
            obj.punch_akeys(tx, dkey, [data1[1][0]])

            # expecting punch of commit data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as e:
            print(e)
            pass

        try:
            # now punch the object without a tx
            obj.punch_akeys(0, dkey, [data1[1][0]])

        # expecting it to work this time so error
        except DaosApiError as e:
            self.fail("Punch should have worked.\n")

    def test_obj_punch(self):
        """
        The most basic test of the object punch function.  Really similar
        to above except the whole object is deleted.

        :avocado: tags=object,punch,objpunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, tx = self.container.write_an_obj(thedata, len(thedata) + 1,
                                                  dkey, akey, obj_cls=1)

            # read the data back and make sure its correct
            thedata2 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, tx)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # now punch the object, commited so not expecting it to work
            obj.punch(tx)

            # expecting punch of commit data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as e:
            print(e)
            pass

        try:
            # with tx 0 the punch should succeed
            obj.punch(0)

        # expecting it to work without a tx
        except DaosApiError as e:
            print(e)
            self.fail("Punch should have worked.\n")
class OpenContainerTest(TestWithServers):
    """
    Tests DAOS container bad create (non existing pool handle, bad uuid)
    and close.

    Creates two pools and one container, then attempts a container open
    whose pool handle and UUID are driven by yaml parameters; each
    parameter carries its own PASS/FAIL expectation.

    :avocado: recursive
    """
    def __init__(self, *args, **kwargs):
        # Pre-set every resource to None so tearDown() can run safely
        # even if setUp() or the test aborts partway through.
        super(OpenContainerTest, self).__init__(*args, **kwargs)
        self.pool1 = None
        self.pool2 = None
        self.container1 = None
        self.container2 = None

    def setUp(self):
        super(OpenContainerTest, self).setUp()

        # common parameters used in pool create
        self.createmode = self.params.get("mode",
                                          '/run/createtests/createmode/')
        self.createsetid = self.params.get("setname",
                                           '/run/createtests/createset/')
        self.createsize = self.params.get("size",
                                          '/run/createtests/createsize/')

        # pool 1 UID GID
        self.createuid1 = self.params.get("uid",
                                          '/run/createtests/createuid1/')
        self.creategid1 = self.params.get("gid",
                                          '/run/createtests/creategid1/')

        # pool 2 UID GID
        self.createuid2 = self.params.get("uid",
                                          '/run/createtests/createuid2/')
        self.creategid2 = self.params.get("gid",
                                          '/run/createtests/creategid2/')

    def tearDown(self):
        # Destroy whatever the test left behind; the None checks cover
        # early setUp/test failures.
        try:
            if self.container1 is not None:
                self.container1.destroy()
            if self.container2 is not None:
                self.container2.destroy()
            if self.pool1 is not None and self.pool1.attached:
                self.pool1.destroy(1)
            if self.pool2 is not None and self.pool2.attached:
                self.pool2.destroy(1)
        finally:
            super(OpenContainerTest, self).tearDown()

    def test_container_open(self):
        """
        Test basic container bad create.

        :avocado: tags=container,containeropen
        """
        container_uuid = None
        expected_for_param = []
        # each yaml entry is a [value, expectation] pair
        uuidlist = self.params.get("uuid", '/run/createtests/uuids/*/')
        container_uuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/handles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        # overall expectation: FAIL if any single parameter expects FAIL
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # create two pools and try to create containers in these pools
            self.pool1 = DaosPool(self.context)
            self.pool1.create(self.createmode, self.createuid1,
                              self.creategid1, self.createsize,
                              self.createsetid, None)

            self.pool2 = DaosPool(self.context)
            self.pool2.create(self.createmode, self.createuid2,
                              self.creategid2, self.createsize, None, None)

            # Connect to the pools
            self.pool1.connect(1 << 1)
            self.pool2.connect(1 << 1)

            # defines pool handle for container open
            if pohlist[0] == 'pool1':
                poh = self.pool1.handle
            else:
                poh = self.pool2.handle

            # Create a container in pool1
            self.container1 = DaosContainer(self.context)
            self.container1.create(self.pool1.handle)

            # defines test UUID for container open
            if uuidlist[0] == 'pool1':
                struuid = self.container1.get_uuid_str()
                container_uuid = uuid.UUID(struuid)
            else:
                if uuidlist[0] == 'MFUUID':
                    # deliberately malformed UUID string
                    container_uuid = "misformed-uuid-0000"
                else:
                    container_uuid = uuid.uuid4()  # random uuid

            # tries to open the container1
            # open should be ok only if poh = pool1.handle &&
            # containerUUID = container1.uuid
            self.container1.open(poh, container_uuid)

            # wait a few seconds and then destroy containers
            time.sleep(5)
            self.container1.close()
            self.container1.destroy()
            self.container1 = None

            # cleanup the pools
            self.pool1.disconnect()
            self.pool1.destroy(1)
            self.pool1 = None
            self.pool2.disconnect()
            self.pool2.destroy(1)
            self.pool2 = None

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            # NOTE(review): hostfile removal here looks redundant with the
            # TestWithServers teardown — confirm against the base class.
            if self.hostfile is not None:
                os.remove(self.hostfile)
class PunchTest(Test):
    """
    Simple test to verify the 3 different punch calls.

    Each test writes committed data, verifies that punching it under the
    committing transaction fails, then verifies that a punch with tx 0
    succeeds.
    """
    def setUp(self):
        self.agent_sessions = None
        try:
            # get paths from the build_vars generated by build
            with open('../../../.build_vars.json') as build_file:
                build_paths = json.load(build_file)
            self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

            self.server_group = self.params.get("name", '/server_config/',
                                                'daos_server')

            # setup the DAOS python API
            self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

            self.hostlist = self.params.get("test_machines", '/run/hosts/*')
            self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                            self.workdir)
            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(self.hostfile, self.server_group,
                                    self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()
        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
            # wait a few seconds and then destroy
            time.sleep(5)
            if self.container:
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)

            if self.hostfile is not None:
                os.remove(self.hostfile)
        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during teardown.\n")
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_dkey_punch(self):
        """
        The most basic test of the dkey punch function.

        :avocado: tags=object,punch,dkeypunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, txn = self.container.write_an_obj(thedata, len(thedata)+1,
                                                   dkey, akey, obj_cls=1)

            # read the data back and make sure its correct
            thedata2 = self.container.read_an_obj(len(thedata)+1, dkey, akey,
                                                  obj, txn)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # now punch this data, should fail, can't punch committed data
            obj.punch_dkeys(txn, [dkey])

            # expecting punch of commit data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as dummy_e:
            pass

        try:
            # now punch this data
            obj.punch_dkeys(0, [dkey])

        # this one should work so error if exception occurs
        except DaosApiError as dummy_e:
            self.fail("Punch should have worked.\n")

        # there are a bunch of other cases to test here,
        #    --test punching the same updating and punching the same data in
        #      the same tx, should fail
        #    --test non updated data in an open tx, should work

    def test_akey_punch(self):
        """
        The most basic test of the akey punch function.

        :avocado: tags=object,punch,akeypunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            dkey = "this is the dkey"
            data1 = [("this is akey 1", "this is data value 1"),
                     ("this is akey 2", "this is data value 2"),
                     ("this is akey 3", "this is data value 3")]
            obj, txn = self.container.write_multi_akeys(dkey, data1, obj_cls=1)

            # read back the 1st epoch's data and check 1 value just to make
            # sure everything is on the up and up
            readbuf = [(data1[0][0], len(data1[0][1]) + 1),
                       (data1[1][0], len(data1[1][1]) + 1),
                       (data1[2][0], len(data1[2][1]) + 1)]
            retrieved_data = self.container.read_multi_akeys(dkey, readbuf,
                                                             obj, txn)
            if retrieved_data[data1[1][0]] != data1[1][1]:
                print("middle akey: {}".format(retrieved_data[data1[1][0]]))
                self.fail("data retrieval failure")

            # now punch one akey from this data
            obj.punch_akeys(txn, dkey, [data1[1][0]])

            # expecting punch of commit data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as excep:
            print(excep)

        try:
            # now punch the object without a tx
            obj.punch_akeys(0, dkey, [data1[1][0]])

        # expecting it to work this time so error
        except DaosApiError as excep:
            self.fail("Punch should have worked.\n")

    def test_obj_punch(self):
        """
        The most basic test of the object punch function.  Really similar
        to above except the whole object is deleted.

        :avocado: tags=object,punch,objpunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, txn = self.container.write_an_obj(thedata, len(thedata)+1,
                                                   dkey, akey, obj_cls=1)

            # read the data back and make sure its correct
            thedata2 = self.container.read_an_obj(len(thedata)+1, dkey, akey,
                                                  obj, txn)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # now punch the object, commited so not expecting it to work
            obj.punch(txn)

            # expecting punch of commit data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as excep:
            print(excep)

        try:
            obj.punch(0)

        # expecting it to work without a tx
        except DaosApiError as excep:
            print(excep)
            self.fail("Punch should have worked.\n")
class ObjOpenBadParam(Test):
    """
    Test Class Description:
    Pass an assortment of bad parameters to the daos_obj_open function.

    setUp creates a pool/container/object; each test corrupts one field of
    the object (or its container), expects open() to fail with a specific
    DER code, and restores the field in a finally block.
    """
    def __init__(self, *args, **kwargs):
        """
        Initialize values for variables that are used in tearDown() such
        that if setUp() fails for any reason, tearDown() will avoid
        throwing an AttributeError exception.
        """
        super(ObjOpenBadParam, self).__init__(*args, **kwargs)
        self.container = None
        self.pool = None

    def setUp(self):
        self.agent_sessions = None
        self.hostlist = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                self.basepath)

        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            self.datasize = len(thedata) + 1
            self.dkey = "this is the dkey"
            self.akey = "this is the akey"
            self.obj, self.epoch = self.container.write_an_obj(thedata,
                                                               self.datasize,
                                                               self.dkey,
                                                               self.akey,
                                                               obj_cls=1)

            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj,
                                                  self.epoch)
            if thedata not in thedata2.value:
                print(thedata)
                print(thedata2.value)
                err_str = "Error reading back data, test failed during the " \
                          "initial setup."
                self.d_log.error(err_str)
                self.fail(err_str)

            # setup leaves object in open state, so closing to start clean
            self.obj.close()
        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during the initial setup.")

    def tearDown(self):
        try:
            # FIX: __init__ pre-sets these to None precisely so a failed
            # setUp() doesn't break teardown, but the original code
            # dereferenced them unconditionally and raised AttributeError.
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_bad_obj_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open a garbage object handle.

        :avocado: tags=object,objopen,objopenbadhand,regression,vm,small
        """
        saved_handle = self.obj.obj_handle
        self.obj.obj_handle = 8675309
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if '-1002' not in str(excep):
                self.d_log.error("test expected a -1002 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1002 but did not get it")
        finally:
            # always restore the real handle for subsequent tests
            self.obj.obj_handle = saved_handle

    def test_invalid_container_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object with a garbage container
        handle.

        :avocado: tags=object,objopen,objopenbadconthand,regression,vm,small
        """
        saved_coh = self.container.coh
        self.container.coh = 8675309
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if '-1002' not in str(excep):
                self.d_log.error("test expected a -1002 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1002 but did not get it")
        finally:
            self.container.coh = saved_coh

    def test_closed_container_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
        a closed handle.

        :avocado: tags=object,objopen,objopenclosedcont,regression,vm,small
        """
        self.container.close()
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if '-1002' not in str(excep):
                self.d_log.error("test expected a -1002 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1002 but did not get it")
        finally:
            # reopen so tearDown and later tests see a usable container
            self.container.open()

    def test_pool_handle_as_obj_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Adding this test by request, this test attempts
        to open an object that's had its handle set to be the same as a
        valid pool handle.

        :avocado: tags=object,objopen,objopenpoolhandle,regression,vm,small
        """
        saved_oh = self.obj.obj_handle
        self.obj.obj_handle = self.pool.handle
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if '-1002' not in str(excep):
                self.d_log.error("test expected a -1002 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1002 but did not get it")
        finally:
            self.obj.obj_handle = saved_oh

    def test_null_ranklist(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
        an empty ranklist.

        :avocado: tags=object,objopen,objopennullrl,regression,vm,small
        """
        # null rl
        saved_rl = self.obj.tgt_rank_list
        self.obj.tgt_rank_list = None
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if '-1003' not in str(excep):
                self.d_log.error("test expected a -1003 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1003 but did not get it")
        finally:
            self.obj.tgt_rank_list = saved_rl

    def test_null_oid(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
        null object id.

        :avocado: tags=object,objopen,objopennulloid,regression,vm,small
        """
        # null oid
        saved_oid = self.obj.c_oid
        self.obj.c_oid = DaosObjId(0, 0)
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if '-1003' not in str(excep):
                self.d_log.error("Test expected a -1003 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1003 but did not get it")
        finally:
            self.obj.c_oid = saved_oid

    def test_null_tgts(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
        null tgt.

        :avocado: tags=object,objopen,objopennulltgts,regression,vm,small
        """
        # null tgts
        saved_ctgts = self.obj.c_tgts
        self.obj.c_tgts = 0
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if '-1003' not in str(excep):
                self.d_log.error("Test expected a -1003 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1003 but did not get it")
        finally:
            self.obj.c_tgts = saved_ctgts

    def test_null_attrs(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
        null object attributes.

        :avocado: tags=object,objopen,objopennullattr,regression,vm,small
        """
        # null attr
        saved_attr = self.obj.attr
        self.obj.attr = 0
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if '-1003' not in str(excep):
                self.d_log.error("test expected a -1003 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1003 but did not get it")
        finally:
            self.obj.attr = saved_attr
class ObjFetchBadParam(TestWithServers):
    """
    Test Class Description:
    Pass an assortment of bad parameters to the daos_obj_fetch function.

    :avocado: recursive
    """
    def setUp(self):
        super(ObjFetchBadParam, self).setUp()
        time.sleep(5)
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            self.datasize = len(thedata) + 1
            self.dkey = "this is the dkey"
            self.akey = "this is the akey"
            self.obj, self.epoch = self.container.write_an_obj(
                thedata, self.datasize, self.dkey, self.akey, None, None, 2)

            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj,
                                                  self.epoch)
            if thedata not in thedata2.value:
                print(thedata)
                print(thedata2.value)
                self.fail("Error reading back data, test failed during"\
                          " the initial setup.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during the initial setup.\n")

    def test_bad_handle(self):
        """
        Test ID: DAOS-1377

        Test Description: Pass a bogus object handle, should return bad
        handle.

        :avocado: tags=all,object,full_regression,small,objbadhandle
        """
        try:
            # trash the handle and read again
            saved_oh = self.obj.obj_handle
            self.obj.obj_handle = 99999

            # expecting this to fail with -1002
            dummy_thedata2 = self.container.read_an_obj(
                self.datasize, self.dkey, self.akey, self.obj, self.epoch)

            # FIX: restore the field that was trashed; the original code
            # wrote the saved value into self.container.oh, leaving the
            # object handle corrupted for any later use.
            self.obj.obj_handle = saved_oh
            self.fail("Test was expected to return a -1002 but it has not.\n")

        except DaosApiError as excep:
            self.obj.obj_handle = saved_oh
            if '-1002' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1002 but it has not.\n")

    def test_null_ptrs(self):
        """
        Test ID: DAOS-1377

        Test Description: Pass null pointers for various fetch parameters.

        :avocado: tags=all,object,full_regression,small,objfetchnull
        """
        try:
            # now try it with a bad dkey, expecting this to fail with -1003
            dummy_thedata2 = self.container.read_an_obj(
                self.datasize, None, self.akey, self.obj, self.epoch)

            self.container.close()
            self.container.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # now try it with a null sgl (iod_size is not set)
            # expecting this to fail with -2013
            test_hints = ['sglnull']
            dummy_thedata2 = self.container.read_an_obj(
                self.datasize, self.dkey, self.akey, self.obj, self.epoch,
                test_hints)

            # behavior not as expect so commented out for now
            # when DAOS-1448 is complete, uncomment and retest
            self.fail("Test was expected to return a -2013 but it has not.\n")

        except DaosApiError as excep:
            if '-2013' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -2013 but it has not.\n")

        try:
            # when DAOS-1449 is complete, uncomment and retest
            # now try it with a null iod, expecting this to fail with -1003
            #test_hints = ['iodnull']
            #thedata2 = self.container.read_an_obj(self.datasize, dkey, akey,
            #                                 self.obj, self.epoch, test_hints)
            pass
            #self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")
class BasicSnapshot(Test):
    """
    DAOS-1370 Basic snapshot test

    Test Class Description:
    Test that a snapshot taken of a container remains unchaged even after an
    object in the container has been updated 500 times.
    Create the container.
    Write an object to the container.
    Take a snapshot.
    Write 500 changes to the KV pair of the object.
    Check that the snapshot is still there.
    Confirm that the data in the snapshot is unchanged.
    Destroy the snapshot
    """

    def __init__(self, *args, **kwargs):
        # pre-set so tearDown/test code can test "is a snapshot live?"
        super(BasicSnapshot, self).__init__(*args, **kwargs)
        self.snapshot = None

    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as finput:
            build_paths = json.load(finput)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(self.hostlist,
                                                   self.workdir)

        server_utils.run_server(hostfile, server_group, basepath)

        # Set up the pool and container.
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/*')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as error:
            print(error)
            print(traceback.format_exc())
            self.fail("Test failed before snapshot taken")

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            server_utils.stop_server()

    def test_basic_snapshot(self):
        """
        Test ID: DAOS-1370

        Test Description:
        Create a pool, container in the pool, object in the container, add
        one key:value to the object.
        Commit the transaction. Perform a snapshot create on the container.
        Create 500 additional transactions with a small change to the object
        in each and commit each after the object update is done.
        Verify the snapshot is still available and the contents remain in
        their original state.

        :avocado: tags=snapshot,basicsnap
        """
        try:
            # create an object and write some data into it
            obj_cls = self.params.get("obj_class", '/run/object_class/*')
            thedata = "Now is the winter of our discontent made glorious"
            datasize = len(thedata) + 1
            dkey = "dkey"
            akey = "akey"
            obj, epoch = self.container.write_an_obj(thedata, datasize,
                                                     dkey, akey,
                                                     obj_cls=obj_cls)
            obj.close()

            # Take a snapshot of the container
            self.snapshot = DaosSnapshot(self.context)
            self.snapshot.create(self.container.coh, epoch)
            print("Wrote an object and created a snapshot")

        except DaosApiError as error:
            self.fail("Test failed during the initial object write.\n{0}"
                      .format(error))

        # Make 500 changes to the data object. The write_an_obj function does
        # a commit when the update is complete
        try:
            def rand_str(length):
                # FIX: original used string.lowercase and xrange, which do
                # not exist on Python 3; ascii_lowercase/range behave
                # identically on both Python 2 and 3.
                return ''.join(random.choice(string.ascii_lowercase)
                               for _ in range(length))

            print("Committing 500 additional transactions to the same KV")
            more_transactions = 500
            while more_transactions:
                size = random.randint(1, 250) + 1
                new_data = rand_str(size)
                new_obj, _ = self.container.write_an_obj(new_data, size,
                                                         dkey, akey,
                                                         obj_cls=obj_cls)
                new_obj.close()
                more_transactions -= 1
        except Exception as error:
            self.fail("Test failed during the write of 500 objects.\n{0}"
                      .format(error))

        # List the snapshot and make sure it reflects the original epoch
        try:
            reported_epoch = self.snapshot.list(self.container.coh)
            if self.snapshot.epoch != reported_epoch:
                raise Exception("The snapshot epoch returned from snapshot "
                                "list is not the same as the original epoch "
                                "snapshotted.")
            print("After 500 additional commits the snapshot is still "
                  "available")
        except Exception as error:
            self.fail("Test was unable to list the snapshot\n{0}"
                      .format(error))

        # Make sure the data in the snapshot is the original data.
        # Get a handle for the snapshot and read the object at dkey, akey.
        # Compare it to the originally written data.
        try:
            obj.open()
            snap_handle = self.snapshot.open(self.container.coh)
            thedata2 = self.container.read_an_obj(datasize, dkey, akey, obj,
                                                  snap_handle.value)
            if thedata2.value != thedata:
                raise Exception("The data in the snapshot is not the same as "
                                "the original data")
            print("The snapshot data matches the data originally written.")
        except Exception as error:
            self.fail("Error when retrieving the snapshot data.\n{0}"
                      .format(error))

        # Now destroy the snapshot
        try:
            self.snapshot.destroy(self.container.coh)
            print("Snapshot successfully destroyed")
        except Exception as error:
            self.fail("{0}".format(error))
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it back
        and verifies it.

        :avocado: tags=object,arrayobj,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode",
                                         '/run/pool_params/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/pool_params/createset/')
            createsize = self.params.get("size",
                                         '/run/pool_params/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = []
            thedata.append("data string one")
            thedata.append("data string two")
            thedata.append("data string tre")
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.plog.info("writing array to dkey >%s< akey >%s<.",
                           dkey, akey)
            oid, epoch = container.write_an_array_value(thedata, dkey, akey,
                                                        obj_cls=3)

            # read the data back and make sure its correct
            # NOTE(review): the comparisons below deliberately slice off the
            # final character (0:length-1) — presumably to skip the NUL
            # terminator; confirm against read_an_array's return format.
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length+1,
                                               dkey, akey, oid, epoch)
            if thedata[0][0:length-1] != thedata2[0][0:length-1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[0])
                self.plog.error("Read: >%s<", thedata2[0])
                self.fail("Write data, read it back, didn't match\n")

            if thedata[2][0:length-1] != thedata2[2][0:length-1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[2])
                self.plog.error("Read: >%s<", thedata2[2])
                self.fail("Write data, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

            # cleanup the pool
            pool.disconnect()
            pool.destroy(1)
            self.plog.info("Test Complete")

        except DaosApiError as excep:
            self.plog.error("Test Failed, exception was thrown.")
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
    def test_tx_basics(self):
        """
        Perform I/O to an object in a container in 2 different transactions,
        verifying basic I/O and transactions in particular.

        NOTE: this was an epoch test and all I did was get it working with tx
        Not a good test at this point, need to redesign when tx is fully
        working.

        :avocado: tags=container,tx,basictx
        """
        # pool kept in a local so the finally block can clean up even if
        # creation fails partway through
        pool = None
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = os.geteuid()
            creategid = os.getegid()
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            thedatasize = 45
            dkey = "this is the dkey"
            akey = "this is the akey"

            oid, txn = container.write_an_obj(thedata, thedatasize,
                                              dkey, akey, None, None, 2)

            # read the data back and make sure its correct
            thedata2 = container.read_an_obj(thedatasize, dkey, akey,
                                             oid, txn)
            if thedata != thedata2.value:
                print("thedata>" + thedata)
                print("thedata2>" + thedata2.value)
                self.fail("Write data 1, read it back, didn't match\n")

            # repeat above, but know that the write_an_obj call is advancing
            # the epoch so the original copy remains and the new copy is in
            # a new epoch.
            thedata3 = "a different string"
            thedatasize2 = 19
            # note using the same keys so writing to the same spot
            dkey = "this is the dkey"
            akey = "this is the akey"

            oid, tx2 = container.write_an_obj(thedata3, thedatasize2,
                                              dkey, akey, oid, None, 2)

            # read the data back and make sure its correct
            thedata4 = container.read_an_obj(thedatasize2, dkey, akey,
                                             oid, tx2)
            if thedata3 != thedata4.value:
                self.fail("Write data 2, read it back, didn't match\n")

            # transactions generally don't work this way but need to explore
            # an alternative to below code once model is complete, maybe
            # read from a snapshot or read from TX_NONE etc.

            # the original data should still be there too
            #thedata5 = container.read_an_obj(thedatasize, dkey, akey,
            #                                 oid, transaction)
            #if thedata != thedata5.value:
            #    self.fail("Write data 3, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
        finally:
            # cleanup the pool
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
class SameKeyDifferentValue(TestWithServers):
    """
    Test Description: Test to verify different type of values
    passed to same akey and dkey.
    :avocado: recursive
    """

    def setUp(self):
        """Create a pool and an open container for each test."""
        try:
            super(SameKeyDifferentValue, self).setUp()

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            self.pool.connect(1 << 1)

            # create a container and open it for the tests
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)
            self.container.open()

        except DaosApiError as excpn:
            print(excpn)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")

    def tearDown(self):
        """Destroy the container and pool created in setUp."""
        try:
            if self.container:
                self.container.close()
                # wait a few seconds and then destroy
                time.sleep(5)
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)

        except DaosApiError as excpn:
            print(excpn)
            print(traceback.format_exc())
            self.fail("Test failed during teardown.\n")

        finally:
            super(SameKeyDifferentValue, self).tearDown()

    def test_single_to_array_value(self):
        """
        Jira ID: DAOS-2218
        Test Description: Test to verify different type of
        values passed (i.e. single to array value) to the same akey and dkey.
        Case1: Insert akey,dkey with single value
               Insert same akey,dkey with array value
               Result: should return -1001 ERR.
        Case2: Insert akey,dkey with single value
               Punch the keys
               Insert same akey,dkey under same object with array value
               Result: should either pass or return -1001 ERR
        Case3: Insert akey,dkey with single value
               Punch the keys
               Trigger aggregation
               Insert same akey,dkey under same object with array value
               Result: should either pass or return -1001 ERR
        :avocado: tags=object,samekeydifferentvalue,singletoarray,vm,small
        """

        # define akey,dkey, single value data and array value data
        single_value_data = "a string that I want to stuff into an object"
        array_value_data = []
        array_value_data.append("data string one")
        array_value_data.append("data string two")
        array_value_data.append("data string tre")

        dkey = "this is the dkey"
        akey = "this is the akey"

        aggregation = False

        for i in range(3):
            try:
                # create an object and write single value data into it
                obj, txn = self.container.write_an_obj(single_value_data,
                                                       len(single_value_data)
                                                       + 1, dkey, akey,
                                                       obj_cls=1)

                # read the data back and make sure its correct
                read_back_data = self.container.read_an_obj(
                    len(single_value_data) + 1, dkey, akey, obj, txn)
                if single_value_data != read_back_data.value:
                    print("data I wrote:" + single_value_data)
                    print("data I read back" + read_back_data.value)
                    self.fail("Write data, read it back, didn't match\n")

                # test case 1
                if i == 0:
                    try:
                        # write array value data to same keys, expected to fail
                        self.container.write_an_array_value(array_value_data,
                                                            dkey, akey, obj,
                                                            obj_cls=1)
                        # above line is expected to return an error,
                        # if not fail the test
                        self.fail("Array value write to existing single value"
                                  + " key should have failed\n")
                    # should fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error"
                                      + " message, but it did not\n")

                # test case 2 and 3
                # BUG FIX: was `elif i == 1 or 2:` which is always true
                # because the bare `2` is truthy; test membership instead
                elif i in (1, 2):
                    try:
                        # punch the keys
                        obj.punch_akeys(0, dkey, [akey])
                        obj.punch_dkeys(0, [dkey])

                        if aggregation is True:
                            # trigger aggregation
                            self.container.aggregate(self.container.coh, 0)

                        # write to the same set of keys under same object
                        # with array value type
                        self.container.write_an_array_value(array_value_data,
                                                            dkey, akey, obj,
                                                            obj_cls=1)
                    # above write of array value should either succeed
                    # or fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error"
                                      + " message or the write should have"
                                      + " been successful, but it did not\n")

                # change the value of aggregation to test Test Case 3
                aggregation = True

                # close the object handle before the next iteration
                obj.close()
            # catch the exception if test fails to write to an object
            # or fails to punch the written object
            except DaosApiError as excp:
                # log the underlying error before failing (was silently
                # discarded, which hid the real cause)
                print(excp)
                print(traceback.format_exc())
                self.fail("Failed to write to akey/dkey or punch the object")

    def test_array_to_single_value(self):
        """
        Jira ID: DAOS-2218
        Test Description: Test to verify different type of
        values passed (i.e array to single value) to the same akey and dkey.
        Case1: Insert akey,dkey with array value
               Insert same akey,dkey with single value
               Result: should return -1001 ERR.
        Case2: Insert akey,dkey with array value
               Punch the keys
               Insert same akey,dkey under same object with single value
               Result: should either pass or return -1001 ERR
        Case3: Insert akey,dkey with array value
               Punch the keys
               Trigger aggregation
               Insert same akey,dkey under same object with single value
               Result: should either pass or return -1001 ERR
        :avocado: tags=object,samekeydifferentvalue,arraytosingle,vm,small
        """

        # define akey,dkey, single value data and array value data
        single_value_data = "a string that I want to stuff into an object"
        array_value_data = []
        array_value_data.append("data string one")
        array_value_data.append("data string two")
        array_value_data.append("data string tre")

        dkey = "this is the dkey"
        akey = "this is the akey"

        aggregation = False

        for i in range(3):
            try:
                # create an object and write array value data into it
                obj, txn = self.container.write_an_array_value(
                    array_value_data, dkey, akey, obj_cls=1)

                # read the data back and make sure its correct
                length = len(array_value_data[0])
                read_back_data = self.container.read_an_array(
                    len(array_value_data), length + 1, dkey, akey, obj, txn)

                for j in range(3):
                    if (array_value_data[j][0:length - 1] !=
                            read_back_data[j][0:length - 1]):
                        print("Written Data: {}".format(array_value_data[j]))
                        print("Read Data: {}".format(read_back_data[j]))
                        self.fail("Data mismatch\n")

                # test case 1
                if i == 0:
                    try:
                        # write single value data to same keys, expected to fail
                        self.container.write_an_obj(single_value_data,
                                                    len(single_value_data) + 1,
                                                    dkey, akey, obj, obj_cls=1)
                        # above line is expected to return an error,
                        # if not fail the test
                        self.fail("Single value write to existing array value"
                                  + " key should have failed\n")
                    # should fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error"
                                      + " message, but it did not\n")

                # test case 2 and 3
                # BUG FIX: was `elif i == 1 or 2:` which is always true
                # because the bare `2` is truthy; test membership instead
                elif i in (1, 2):
                    try:
                        # punch the keys
                        obj.punch_akeys(0, dkey, [akey])
                        obj.punch_dkeys(0, [dkey])

                        if aggregation is True:
                            # trigger aggregation
                            self.container.aggregate(self.container.coh, 0)

                        # write to the same set of keys under same object
                        # with single value type
                        self.container.write_an_obj(single_value_data,
                                                    len(single_value_data) + 1,
                                                    dkey, akey, obj, obj_cls=1)
                    # above write of single value should either succeed
                    # or fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error"
                                      + " message or the write should have"
                                      + " been successful, but it did not\n")

                # change the value of aggregation to test Test Case 3
                aggregation = True

                # close the object handle before the next iteration
                obj.close()
            # catch the exception if test fails to write to an object
            # or fails to punch the written object
            except DaosApiError as excp:
                # log the underlying error before failing (was silently
                # discarded, which hid the real cause)
                print(excp)
                print(traceback.format_exc())
                self.fail("Failed to write to akey/dkey or punch the object")
class ObjectDataValidation(avocado.Test):
    """
    Test Class Description:
        Tests that create Different length records,
        Disconnect the pool/container and reconnect,
        validate the data after reconnect.
    """
    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        """Start servers, create a pool/container and an open object."""
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        # NOTE(review): path lacks the usual '/run/' prefix -- confirm
        # against the test yaml
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)

    def tearDown(self):
        """Destroy the container/pool and stop the agent and servers."""
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def reconnect(self):
        '''
        Function to reconnect the pool/container and reopen the Object
        for read verification.
        '''
        # Close the Obj/Container, Disconnect the Pool.
        self.obj.close()
        self.container.close()
        self.pool.disconnect()
        time.sleep(5)
        # Connect Pool, Open Container and Object
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    @avocado.fail_on(DaosApiError)
    def test_single_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=single_object,data_verification,medium,vm
        """
        self.d_log.info("Writing the Single Dataset")
        record_index = 0
        transaction = []
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # record value is the first char of the akey number repeated
                # to the configured record length
                indata = ("{0}".format(str(akey)[0])
                          * self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_value = ctypes.create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))

                # each record goes into its own committed transaction
                new_transaction = self.container.get_new_tx()
                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                         new_transaction)
                self.container.commit_tx(new_transaction)
                transaction.append(new_transaction)

                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        self.reconnect()

        self.d_log.info("Single Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0])
                          * self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey, c_akey, len(indata)+1)
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}"
                              .format("dkey {0}".format(dkey),
                                      "akey {0}".format(akey)))
                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

    @avocado.fail_on(DaosApiError)
    def test_array_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify Array data after
                          pool/container disconnect/reconnect.
        :avocado: tags=array_object,data_verification,array,medium,vm
        """
        self.d_log.info("Writing the Array Dataset")
        record_index = 0
        transaction = []
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                c_values = []
                value = ("{0}".format(str(akey)[0])
                         * self.record_length[record_index])
                # build array_size identical records for this akey
                # (loop variable unused; was `item`)
                for _ in range(self.array_size):
                    c_values.append((ctypes.create_string_buffer(value),
                                     len(value)+1))
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))

                new_transaction = self.container.get_new_tx()
                self.ioreq.insert_array(c_dkey, c_akey, c_values,
                                        new_transaction)
                self.container.commit_tx(new_transaction)
                transaction.append(new_transaction)

                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        self.reconnect()

        self.d_log.info("Array Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = []
                value = ("{0}".format(str(akey)[0])
                         * self.record_length[record_index])
                for _ in range(self.array_size):
                    indata.append(value)
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_rec_count = ctypes.c_uint(len(indata))
                c_rec_size = ctypes.c_size_t(len(indata[0]) + 1)

                outdata = self.ioreq.fetch_array(c_dkey,
                                                 c_akey,
                                                 c_rec_count,
                                                 c_rec_size)

                # IDIOM FIX: was `for item in enumerate(indata):` with
                # `indata[item[0]]` indexing; unpack the pair directly.
                # Fetched records carry a trailing NUL, hence the [:-1].
                for idx, expected in enumerate(indata):
                    if expected != outdata[idx][:-1]:
                        self.d_log.error("ERROR:Data mismatch for "
                                         "dkey = {0}, "
                                         "akey = {1}".format(
                                             "dkey {0}".format(dkey),
                                             "akey {0}".format(akey)))
                        self.fail("ERROR:Data mismatch for dkey = {0}, akey={1}"
                                  .format("dkey {0}".format(dkey),
                                          "akey {0}".format(akey)))
                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0
def test_array_obj(self):
    """
    Test ID: DAOS-961

    Test Description: Writes an array to an object and then reads it back
    and verifies it.

    :avocado: tags=object,arrayobj,regression,vm,small
    """
    try:
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/conttests/createmode/')
        createsetid = self.params.get("setname",
                                      '/run/conttests/createset/')
        createsize = self.params.get("size", '/run/conttests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        print("uid is {} gid is {}".format(createuid, creategid))

        # initialize a python pool object then create the underlying
        # daos storage
        # NOTE(review): `self.Context`/`self.pl` come from the enclosing
        # class (not visible here); sibling tests use lowercase
        # `self.context` -- confirm the attribute names against setUp.
        pool = DaosPool(self.Context)
        pool.create(createmode, createuid, creategid,
                    createsize, createsetid, None)
        self.pl.info("Pool %s created.", pool.get_uuid_str())

        # need a connection to create container
        pool.connect(1 << 1)

        # create a container
        container = DaosContainer(self.Context)
        container.create(pool.handle)
        self.pl.info("Container %s created.", container.get_uuid_str())

        # now open it
        container.open()

        # do a query and compare the UUID returned from create with
        # that returned by query
        container.query()
        if container.get_uuid_str() != c_uuid_to_str(
                container.info.ci_uuid):
            self.fail("Container UUID did not match the one in info\n")

        # create an object and write some data into it
        thedata = []
        thedata.append("data string one")
        thedata.append("data string two")
        thedata.append("data string tre")
        dkey = "this is the dkey"
        akey = "this is the akey"

        self.pl.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
        oid, epoch = container.write_an_array_value(thedata, dkey, akey)

        # read the data back and make sure its correct
        length = len(thedata[0])
        thedata2 = container.read_an_array(len(thedata), length + 1,
                                           dkey, akey, oid, epoch)
        if thedata[0][0:length - 1] != thedata2[0][0:length - 1]:
            self.pl.error("Data mismatch")
            # BUG FIX: was `self.pl.error("Wrote: >%s<" (thedata[0]))` --
            # the missing ','/'%' calls the string as a function and
            # raises TypeError; pass lazy logger args instead.
            self.pl.error("Wrote: >%s<", thedata[0])
            self.pl.error("Read: >%s<", thedata2[0])
            self.fail("Write data, read it back, didn't match\n")

        if thedata[2][0:length - 1] != thedata2[2][0:length - 1]:
            self.pl.error("Data mismatch")
            self.pl.error("Wrote: >%s<", thedata[2])
            self.pl.error("Read: >%s<", thedata2[2])
            self.fail("Write data, read it back, didn't match\n")

        container.close()

        # wait a few seconds and then destroy
        time.sleep(5)
        container.destroy()

        # cleanup the pool
        pool.disconnect()
        pool.destroy(1)
        self.pl.info("Test Complete")

    # BUG FIX: was `except ValueError` with Python-2 `print e` statements;
    # the DAOS bindings raise DaosApiError and the file is Python 3.
    except DaosApiError as excep:
        self.pl.error("Test Failed, exception was thrown.")
        print(excep)
        print(traceback.format_exc())
        self.fail("Test was expected to pass but it failed.\n")
class ObjOpenBadParam(TestWithServers):
    """
    Test Class Description:
    Pass an assortment of bad parameters to the daos_obj_open function.

    :avocado: recursive
    """

    def setUp(self):
        """Create a pool/container and a closed object with known data."""
        super(ObjOpenBadParam, self).setUp()
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            self.datasize = len(thedata) + 1
            self.dkey = "this is the dkey"
            self.akey = "this is the akey"
            self.obj, self.epoch = self.container.write_an_obj(
                thedata, self.datasize, self.dkey, self.akey, obj_cls=1)

            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj,
                                                  self.epoch)
            if thedata not in thedata2.value:
                print(thedata)
                print(thedata2.value)
                err_str = "Error reading back data, test failed during the " \
                          "initial setup."
                self.d_log.error(err_str)
                self.fail(err_str)

            # setup leaves object in open state, so closing to start clean
            self.obj.close()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during the initial setup.")

    def tearDown(self):
        """Destroy the container and pool created in setUp."""
        try:
            self.container.close()
            self.container.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
        finally:
            super(ObjOpenBadParam, self).tearDown()

    def _verify_open_error(self, expected_err):
        """Attempt self.obj.open() and verify it fails with expected_err.

        Fails the test when open succeeds (the original tests silently
        passed in that case) or when a different DAOS error is raised.
        """
        try:
            self.obj.open()
        except DaosApiError as excep:
            if expected_err not in str(excep):
                self.d_log.error("test expected a %s but did not get it",
                                 expected_err)
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a {0} but did not get it"
                          .format(expected_err))
        else:
            # BUG FIX: originally a successful open was ignored and the
            # negative test passed without checking anything
            self.fail("object open was expected to fail with {0} but it "
                      "succeeded".format(expected_err))

    def test_bad_obj_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open a garbage object handle.

        :avocado: tags=object,objopen,objopenbadhand,regression,vm,small
        """
        saved_handle = self.obj.obj_handle
        self.obj.obj_handle = 8675309
        try:
            self._verify_open_error('-1002')
        finally:
            self.obj.obj_handle = saved_handle

    def test_invalid_container_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object with a garbage container
                          handle.

        :avocado: tags=object,objopen,objopenbadconthand,regression,vm,small
        """
        saved_coh = self.container.coh
        self.container.coh = 8675309
        try:
            self._verify_open_error('-1002')
        finally:
            self.container.coh = saved_coh

    def test_closed_container_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          a closed handle.

        :avocado: tags=object,objopen,objopenclosedcont,regression,vm,small
        """
        self.container.close()
        try:
            self._verify_open_error('-1002')
        finally:
            self.container.open()

    def test_pool_handle_as_obj_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Adding this test by request, this test attempts
                          to open an object that's had its handle set to
                          be the same as a valid pool handle.

        :avocado: tags=object,objopen,objopenpoolhandle,regression,vm,small
        """
        saved_oh = self.obj.obj_handle
        self.obj.obj_handle = self.pool.handle
        try:
            self._verify_open_error('-1002')
        finally:
            self.obj.obj_handle = saved_oh

    def test_null_ranklist(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          an empty ranklist.

        :avocado: tags=object,objopen,objopennullrl,regression,vm,small
        """
        # null rl
        saved_rl = self.obj.tgt_rank_list
        self.obj.tgt_rank_list = None
        try:
            self._verify_open_error('-1003')
        finally:
            self.obj.tgt_rank_list = saved_rl

    def test_null_oid(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          null object id.

        :avocado: tags=object,objopen,objopennulloid,regression,vm,small
        """
        # null oid
        saved_oid = self.obj.c_oid
        self.obj.c_oid = DaosObjId(0, 0)
        try:
            self._verify_open_error('-1003')
        finally:
            self.obj.c_oid = saved_oid

    def test_null_tgts(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          null tgt.

        :avocado: tags=object,objopen,objopennulltgts,regression,vm,small
        """
        # null tgts
        saved_ctgts = self.obj.c_tgts
        self.obj.c_tgts = 0
        try:
            self._verify_open_error('-1003')
        finally:
            self.obj.c_tgts = saved_ctgts

    def test_null_attrs(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          null object attributes.

        :avocado: tags=object,objopen,objopennullattr,regression,vm,small
        """
        # null attr
        saved_attr = self.obj.attr
        self.obj.attr = 0
        try:
            self._verify_open_error('-1003')
        finally:
            self.obj.attr = saved_attr
class DeleteContainerTest(Test):
    """
    Tests DAOS container delete and close.
    """

    def setUp(self):
        """Read test parameters and start the agent and servers."""
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        self.hostlist = None
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name",
                                            '/server_config/',
                                            'daos_server')

        # parameters used in pool create
        self.createmode = self.params.get("mode",
                                          '/run/createtests/createmode/')
        self.createuid = os.geteuid()
        self.creategid = os.getegid()
        self.createsetid = self.params.get("setname",
                                           '/run/createtests/createset/')
        self.createsize = self.params.get("size",
                                          '/run/createtests/createsize/')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None
        self.container = None

        # hostlist and logging
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                self.basepath)

    def tearDown(self):
        """Stop the agent and the servers."""
        if self.agent_sessions:
            AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
        server_utils.stop_server(hosts=self.hostlist)

    def test_container_delete(self):
        """
        Test basic container delete

        :avocado: tags=regression,cont,vm,contdelete
        """
        # each parameter row is (value, expected_outcome); collect the
        # outcomes to decide whether this combination should PASS or FAIL
        expected_for_param = []
        uuidlist = self.params.get("uuid",
                                   '/run/createtests/ContainerUUIDS/*/')
        cont_uuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/PoolHandles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        openlist = self.params.get("opened",
                                   "/run/createtests/ConnectionOpened/*/")
        opened = openlist[0]
        expected_for_param.append(openlist[1])

        forcelist = self.params.get("force",
                                    "/run/createtests/ForceDestroy/*/")
        force = forcelist[0]
        expected_for_param.append(forcelist[1])

        if force >= 1:
            self.cancel("Force >= 1 blocked by issue described in "
                        "https://jira.hpdd.intel.com/browse/DAOS-689")

        if force == 0:
            self.cancel("Force = 0 blocked by "
                        "https://jira.hpdd.intel.com/browse/DAOS-1935")

        # the combination fails if any single parameter is expected to fail
        # (IDIOM FIX: replaces the manual loop with break)
        expected_result = 'FAIL' if 'FAIL' in expected_for_param else 'PASS'

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(self.createmode, self.createuid,
                             self.creategid, self.createsize,
                             self.createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)
            self.container = DaosContainer(self.context)

            # create should always work (testing destroy)
            # (IDIOM FIX: was `if not cont_uuid == 'INVALID':`)
            if cont_uuid != 'INVALID':
                cont_uuid = uuid.UUID(uuidlist[0])
                self.container.create(self.pool.handle, cont_uuid)
            else:
                self.container.create(self.pool.handle)

            # Opens the container if required
            if opened:
                self.container.open(self.pool.handle)

            # wait a few seconds and then attempts to destroy container
            time.sleep(5)
            if poh == 'VALID':
                poh = self.pool.handle

            # if container is INVALID, overwrite with non existing UUID
            if cont_uuid == 'INVALID':
                cont_uuid = uuid.uuid4()

            self.container.destroy(force=force, poh=poh, con_uuid=cont_uuid)
            self.container = None

            if expected_result == 'FAIL':
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            self.d_log.error(excep)
            self.d_log.error(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

        finally:
            # clean up the pool
            if self.pool is not None:
                self.pool.destroy(1)
                self.pool = None
def test_null_values(self):
    """
    Test ID: DAOS-1376

    Test Description: Pass a dkey and an akey that is null.

    :avocado: tags=all,object,full_regression,small,objupdatenull
    """
    try:
        # pull the pool parameters from the test yaml
        mode = self.params.get("mode", '/run/conttests/createmode/')
        set_id = self.params.get("setname", '/run/conttests/createset/')
        size = self.params.get("size", '/run/conttests/createsize/')
        uid = os.geteuid()
        gid = os.getegid()

        # create the underlying daos storage pool and connect to it
        pool = DaosPool(self.context)
        pool.create(mode, uid, gid, size, set_id, None)
        self.plog.info("Pool %s created.", pool.get_uuid_str())
        pool.connect(1 << 1)

        # create a container inside the pool and open it
        container = DaosContainer(self.context)
        container.create(pool.handle)
        self.plog.info("Container %s created.", container.get_uuid_str())
        container.open()

        # payload reused by the writes below
        thedata = "a string that I want to stuff into an object"
        thedatasize = len(thedata) + 1

    except DaosApiError as err:
        print(err)
        print(traceback.format_exc())
        self.fail("Test failed during setup .\n")

    try:
        # a null dkey must be rejected with -1003
        dkey = None
        akey = "this is the akey"

        container.write_an_obj(thedata, thedatasize, dkey, akey,
                               None, None, 2)
        # reaching here means the invalid dkey was accepted
        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)
        self.plog.error("Didn't get expected return code.")
        self.fail("Test was expected to return a -1003 but it has not.\n")

    except DaosApiError as err:
        if '-1003' not in str(err):
            # a different error came back: clean up and report it
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.plog.error("Didn't get expected return code.")
            print(err)
            print(traceback.format_exc())
            self.fail("Test was expected to get -1003 but it has not.\n")

    try:
        # a null akey/io descriptor must likewise be rejected with -1003
        dkey = "this is the dkey"
        akey = None

        container.write_an_obj(thedata, thedatasize, dkey, akey,
                               None, None, 2)
        self.fail("Test was expected to return a -1003 but it has not.\n")

    except DaosApiError as err:
        if '-1003' not in str(err):
            self.plog.error("Didn't get expected return code.")
            print(err)
            print(traceback.format_exc())
            self.fail("Test was expected to get -1003 but it has not.\n")

    try:
        # lastly, writing no data at all is a legal no-op update
        thedata = None
        thedatasize = 0
        dkey = "this is the dkey"
        akey = "this is the akey"

        container.write_an_obj(thedata, thedatasize, dkey, akey,
                               None, None, 2)
        self.plog.info("Update with no data worked")

    except DaosApiError as err:
        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)
        print(err)
        print(traceback.format_exc())
        self.plog.error("Update with no data failed")
        self.fail("Update with no data failed.\n")

    # normal cleanup when every sub-case behaved as expected
    container.close()
    container.destroy()
    pool.disconnect()
    pool.destroy(1)
    self.plog.info("Test Complete")
class BasicSnapshot(Test):
    """
    DAOS-1370 Basic snapshot test

    Test Class Description:
    Test that a snapshot taken of a container remains unchaged even after an
    object in the container has been updated 500 times.
    Create the container.
    Write an object to the container.
    Take a snapshot.
    Write 500 changes to the KV pair of the object.
    Check that the snapshot is still there.
    Confirm that the data in the snapshot is unchanged.
    Destroy the snapshot
    """

    def __init__(self, *args, **kwargs):
        super(BasicSnapshot, self).__init__(*args, **kwargs)
        # DaosSnapshot handle created during the test; kept on self so it
        # is reachable across the try blocks in test_basic_snapshot
        self.snapshot = None

    def setUp(self):
        """Start the servers and create a connected pool and open container."""
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as finput:
            build_paths = json.load(finput)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(self.hostlist,
                                                   self.workdir)

        server_utils.run_server(hostfile, server_group, basepath)

        # Set up the pool and container.
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/*')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as error:
            print(error)
            print(traceback.format_exc())
            self.fail("Test failed before snapshot taken")

    def tearDown(self):
        """Release container and pool, then always stop the servers."""
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            server_utils.stop_server()

    def test_basic_snapshot(self):
        """
        Test ID: DAOS-1370

        Test Description:
        Create a pool, container in the pool, object in the container, add
        one key:value to the object.
        Commit the transaction. Perform a snapshot create on the container.
        Create 500 additional transactions with a small change to the object
        in each and commit each after the object update is done.
        Verify the snapshot is still available and the contents remain in
        their original state.

        :avocado: tags=snapshot,basicsnap
        """
        try:
            # create an object and write some data into it
            obj_cls = self.params.get("obj_class", '/run/object_class/*')
            thedata = "Now is the winter of our discontent made glorious"
            datasize = len(thedata) + 1
            dkey = "dkey"
            akey = "akey"
            obj, epoch = self.container.write_an_obj(thedata,
                                                     datasize,
                                                     dkey,
                                                     akey,
                                                     obj_cls=obj_cls)
            obj.close()

            # Take a snapshot of the container at the epoch just committed
            self.snapshot = DaosSnapshot(self.context)
            self.snapshot.create(self.container.coh, epoch)
            print("Wrote an object and created a snapshot")

        except DaosApiError as error:
            self.fail(
                "Test failed during the initial object write.\n{0}".format(
                    error))

        # Make 500 changes to the data object. The write_an_obj function
        # does a commit when the update is complete
        try:
            # NOTE(review): string.lowercase and xrange are Python 2 only
            rand_str = lambda n: ''.join(
                [random.choice(string.lowercase) for i in xrange(n)])
            print("Committing 500 additional transactions to the same KV")
            more_transactions = 500
            while more_transactions:
                size = random.randint(1, 250) + 1
                new_data = rand_str(size)
                new_obj, _ = self.container.write_an_obj(new_data,
                                                         size,
                                                         dkey,
                                                         akey,
                                                         obj_cls=obj_cls)
                new_obj.close()
                more_transactions -= 1
        except Exception as error:
            self.fail(
                "Test failed during the write of 500 objects.\n{0}".format(
                    error))

        # List the snapshot and make sure it reflects the original epoch
        try:
            reported_epoch = self.snapshot.list(self.container.coh)
            if self.snapshot.epoch != reported_epoch:
                raise Exception("The snapshot epoch returned from snapshot "
                                "list is not the same as the original epoch "
                                "snapshotted.")
            print("After 500 additional commits the snapshot is still "
                  "available")
        except Exception as error:
            self.fail(
                "Test was unable to list the snapshot\n{0}".format(error))

        # Make sure the data in the snapshot is the original data.
        # Get a handle for the snapshot and read the object at dkey, akey.
        # Compare it to the originally written data.
        try:
            obj.open()
            snap_handle = self.snapshot.open(self.container.coh)
            thedata2 = self.container.read_an_obj(datasize, dkey, akey,
                                                  obj, snap_handle.value)
            if thedata2.value != thedata:
                raise Exception("The data in the snapshot is not the same "
                                "as the original data")
            print("The snapshot data matches the data originally written.")
        except Exception as error:
            self.fail(
                "Error when retrieving the snapshot data.\n{0}".format(
                    error))

        # Now destroy the snapshot
        try:
            self.snapshot.destroy(self.container.coh)
            print("Snapshot successfully destroyed")
        except Exception as error:
            self.fail("{0}".format(error))
def test_bad_handle(self):
    """
    Test ID: DAOS-1376

    Test Description: Pass a bogus object handle, should return bad handle.

    Writes an object normally, corrupts the object handle, then attempts a
    second write which must fail with -1002 (bad handle). The saved handle
    is restored before cleanup in both the success and failure paths.

    :avocado: tags=all,object,full_regression,small,objbadhand
    """
    try:
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/conttests/createmode/')
        createsetid = self.params.get("setname",
                                      '/run/conttests/createset/')
        createsize = self.params.get("size", '/run/conttests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid,
                    createsize, createsetid, None)
        self.plog.info("Pool %s created.", pool.get_uuid_str())

        # need a connection to create container
        pool.connect(1 << 1)

        # create a container
        container = DaosContainer(self.context)
        container.create(pool.handle)
        self.plog.info("Container %s created.", container.get_uuid_str())

        # now open it
        container.open()

        # create an object and write some data into it
        thedata = "a string that I want to stuff into an object"
        thedatasize = len(thedata) + 1
        dkey = "this is the dkey"
        akey = "this is the akey"
        obj, dummy_tx = container.write_an_obj(thedata, thedatasize, dkey,
                                               akey, None, None, 2)

        # remember the good handle, then clobber it so the next update
        # is made with a bogus object handle
        saved_oh = obj.obj_handle
        obj.obj_handle = 99999

        obj, dummy_tx = container.write_an_obj(thedata, thedatasize, dkey,
                                               akey, obj, None, 2)

        # BUGFIX: restore the saved handle to obj.obj_handle; the original
        # code assigned it to container.oh, corrupting the container
        # handle instead of repairing the object handle
        obj.obj_handle = saved_oh

        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)
        self.fail("Test was expected to return a -1002 but it has not.\n")

    except DaosApiError as excep:
        # restore the good object handle so cleanup operates on valid state
        obj.obj_handle = saved_oh
        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)
        self.plog.info("Test Complete")
        if '-1002' not in str(excep):
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to get -1002 but it has not.\n")
class CreateManyDkeys(Test):
    """
    Test Class Description:
        Tests that create large numbers of keys in objects/containers and
        then destroy the containers and verify the space has been reclaimed.

    :avocado: recursive
    """
    def setUp(self):
        """Create and connect to a pool on top of the base class setup."""
        super(CreateManyDkeys, self).setUp()
        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname",
                                         '/run/pool/createset/*'),
                         None)
        self.pool.connect(1 << 1)

    def tearDown(self):
        """Destroy the pool, then always run the base class teardown."""
        try:
            if self.pool:
                self.pool.destroy(1)
        finally:
            super(CreateManyDkeys, self).tearDown()

    def write_a_bunch_of_values(self, how_many):
        """
        Write data to an object, each with a dkey and akey.  The how_many
        parameter determines how many key:value pairs are written.

        Each value is written in a single transaction, verified by reading
        it back, and the container is destroyed at the end so space can be
        reclaimed.
        """
        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()
        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_tx()
        c_epoch = ctypes.c_uint64(epoch)

        print("Started Writing the Dataset-----------\n")
        # print a progress message roughly every 'inc' keys
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            c_value = ctypes.create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey,
                                c_akey,
                                c_value,
                                c_size,
                                c_epoch)
            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        self.container.commit_tx(c_epoch)

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(
                key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data) + 1,
                                     c_epoch)

            if the_data != (repr(val.value)[1:-1]):
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n"
                          .format("dkey {0}".format(key),
                                  "akey {0}".format(key),
                                  the_data,
                                  repr(val.value)[1:-1]))
            if key > last_key:
                # BUGFIX: progress message previously misspelled "veried"
                print("verified: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("starting destroy")
        self.container.close()
        self.container.destroy()
        print("destroy complete")

    @avocado.fail_on(DaosApiError)
    @skipForTicket("DAOS-1721")
    def test_many_dkeys(self):
        """
        Test ID: DAOS-1701
        Test Description:
           Test many of dkeys in same object.
           Use Cases:
               1. large key counts
               2. space reclaimation after destroy
        :avocado: tags=all,full,small,object,many_dkeys
        """
        no_of_dkeys = self.params.get("number_of_dkeys", '/run/dkeys/')

        # write a lot of individual data items, verify them, then destroy
        self.write_a_bunch_of_values(no_of_dkeys)

        # do it again, which should verify the first container
        # was truly destroyed because a second round won't fit otherwise
        self.write_a_bunch_of_values(no_of_dkeys)
class ContainerAsync(Test):
    """
    Tests DAOS pool connect permissions (non existing pool handle, bad
    uuid) and close.

    All test methods use the module-level GLOB_SIGNAL/GLOB_RC pair: the
    async callback (cb_func) sets GLOB_RC and fires GLOB_SIGNAL, and the
    test waits on the event before checking the return code.
    """
    def __init__(self, *args, **kwargs):
        super(ContainerAsync, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None

    def setUp(self):
        """Start agent and servers; initialize the DAOS API context."""
        # initialized first so tearDown can run even if setUp fails midway
        self.agent_sessions = None
        self.hostlist = None
        self.pool = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        print("Host file is: {}".format(self.hostfile))

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                self.basepath)
        # give the servers time to come up before tests start issuing RPCs
        time.sleep(10)

    def tearDown(self):
        """Destroy the pool if still attached, then stop agent and servers."""
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            time.sleep(5)
            server_utils.stop_server(hosts=self.hostlist)

    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=container,containerasync,createasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            GLOB_SIGNAL = threading.Event()
            self.container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.pool.destroy(1)
            GLOB_SIGNAL = threading.Event()
            # sentinel value so a callback that never fires is detectable
            GLOB_RC = -9900000
            self.container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after unsuccessful container create: ", GLOB_RC)

            # cleanup the pool and container
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_destroyasync(self):
        """
        Test container destroy for asynchronous mode.

        :avocado: tags=container,containerasync,contdestroyasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            GLOB_SIGNAL = threading.Event()
            self.container1.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to destroy container again, this should fail, as
            # non-existent. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            # destroying a never-created container must report -1003
            if GLOB_RC != -1003:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_openasync(self):
        """
        Test container open for asynchronous mode.

        :avocado: tags=container,containerasync,openasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            GLOB_SIGNAL = threading.Event()
            self.container1.open(poh, cuuid, 2, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.open(None, None, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.close()
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_closeasync(self):
        """
        Test container close for asynchronous mode.

        :avocado: tags=container,containerasync,closeasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            # open synchronously; only the close is under test here
            self.container1.open(poh, cuuid, 2)

            GLOB_SIGNAL = threading.Event()
            self.container1.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_queryasync(self):
        """
        Test container query for asynchronous mode.

        :avocado: tags=container,containerasync,queryasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)
            dummy_str_cuuid = self.container1.get_uuid_str()

            # Open container
            self.container1.open(poh, None, 2, None)

            GLOB_SIGNAL = threading.Event()
            self.container1.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Close opened container
            self.container1.close()

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
class OpenClose(Test):
    """
    Tests DAOS container open/close function with handle parameter.
    """
    def __init__(self, *args, **kwargs):
        super(OpenClose, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None

    def setUp(self):
        """Start agent and servers; initialize the DAOS API context."""
        # these are first since they are referenced in teardown
        self.pool = None
        self.hostlist = None
        self.hostlist = self.params.get("test_servers", '/run/hosts/')

        # get paths from the build_vars generated by build
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '../../../../.build_vars.json')) \
                as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                self.basepath)

    def tearDown(self):
        """Destroy the pool, stop agent/servers; tolerate server failures."""
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            try:
                if self.agent_sessions:
                    AgentUtils.stop_agent(self.hostlist,
                                          self.agent_sessions)
                server_utils.stop_server(hosts=self.hostlist)
            except server_utils.ServerFailed:
                # teardown is best-effort; a server stop failure should not
                # mask the test result
                pass

    def test_closehandle(self):
        """
        Test container close function with container handle paramter.

        The yaml 'coh' parameter is a (GOOD|BAD, PASS|FAIL) pair selecting
        whether a valid or an invalidated handle is passed to close and
        whether the close is expected to succeed.

        :avocado: tags=container,openclose,closehandle
        """
        saved_coh = None

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        coh_params = self.params.get("coh",
                                     '/run/container/container_handle/*/')

        expected_result = coh_params[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            self.container1.open(poh, cuuid, 2, None)

            # Defining 'good' and 'bad' container handles
            saved_coh = self.container1.coh
            if coh_params[0] == 'GOOD':
                coh = self.container1.coh
            else:
                # create a second container, open to get a handle
                # then close & destroy so handle is invalid
                self.container2 = DaosContainer(self.context)
                self.container2.create(poh)
                self.container2.open(poh, cuuid, 2, None)
                coh = self.container2.coh
                self.container2.close()
                self.container2.destroy()

            # close container with either good or bad handle
            self.container1.close(coh)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            if expected_result == 'PASS':
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to pass but it failed.\n")

            # close above failed so close for real with the right coh
            if saved_coh is not None:
                self.container1.close(saved_coh)

        finally:
            self.container1.destroy(1)
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None
class OpenClose(Test):
    """
    Tests DAOS container open/close function with handle parameter.

    NOTE(review): this is a legacy Python 2 variant (print statements,
    CamelCase helpers) duplicating the newer OpenClose class elsewhere in
    the tree.
    """
    def setUp(self):
        """Start servers and initialize the DAOS API context."""
        # get paths from the build_vars generated by build
        with open(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             '../../../../.build_vars.json')) as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.POOL = None

        hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(hostlist, self.tmp)
        print("Host file is: {}".format(self.hostfile))

        ServerUtils.runServer(self.hostfile, self.server_group,
                              self.basepath)
        # give the servers time to come up
        time.sleep(5)

    def tearDown(self):
        """Remove the host file, destroy the pool, and stop the servers."""
        if self.hostfile is not None:
            os.remove(self.hostfile)
        if self.POOL is not None and self.POOL.attached:
            self.POOL.destroy(1)
        ServerUtils.stopServer()

    def test_openhandle(self):
        """
        Test container open with container handle parameter.

        :avocado: tags=container,openclose,openhandle
        """
        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        coh = self.params.get("coh", '/run/createtests/container_handle/*/')
        expected_result = coh[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.POOL.handle
            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)
            self.Container1.create(poh)
            self.Container2.create(poh)
            str_cuuid = self.Container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            coh_bad = self.Container2.coh

            # Destroying Container2 and use it's handle
            self.Container2.destroy()

            # Defining 'good' and 'bad' container handles
            if coh[0] == 'GOOD':
                coh[0] = self.Container1.coh
            else:
                coh[0] = coh_bad

            # opening Container handle.
            # Once with right coh and once with wrong coh
            self.Container1.open(poh, cuuid, 2, None, coh[0])
            self.Container1.close(coh[0])

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        # NOTE(review): catches ValueError only; other API errors from the
        # DAOS bindings would propagate uncaught -- confirm intended
        except ValueError as e:
            print e
            print traceback.format_exc()
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

        finally:
            # cleanup the Pool and Container
            self.Container1.destroy()
            self.POOL.disconnect()
            self.POOL.destroy(1)
            self.POOL = None

    def test_closehandle(self):
        """
        Test container close function with container handle paramter.

        :avocado: tags=container,openclose,closehandle
        """
        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        coh = self.params.get("coh", '/run/createtests/container_handle/*/')
        expected_result = coh[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.POOL.handle
            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)
            self.Container1.create(poh)
            self.Container2.create(poh)
            str_cuuid = self.Container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            coh_bad = self.Container2.coh

            # Destroying Container2 and use it's handle
            self.Container2.destroy()

            # Defining 'good' and 'bad' container handles
            if coh[0] == 'GOOD':
                coh[0] = self.Container1.coh
            else:
                coh[0] = coh_bad

            # opening Container handle.
            # Once with right coh and once with wrong coh
            self.Container1.open(poh, cuuid, 2, None, self.Container1.coh)
            self.Container1.close(coh[0])

            self.Container1.destroy()
            self.POOL.disconnect()
            self.POOL.destroy(1)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except ValueError as e:
            print e
            print traceback.format_exc()
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

        finally:
            self.POOL = None
class ContainerAsync(TestWithServers):
    """
    Tests DAOS pool connect permissions (non existing pool handle, bad
    uuid) and close.

    All test methods use the module-level GLOB_SIGNAL/GLOB_RC pair: the
    async callback (cb_func) sets GLOB_RC and fires GLOB_SIGNAL, and the
    test waits on the event before checking the return code.

    :avocado: recursive
    """
    def __init__(self, *args, **kwargs):
        super(ContainerAsync, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None
        self.pool = None

    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,createasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            GLOB_SIGNAL = threading.Event()
            self.container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.pool.destroy(1)
            GLOB_SIGNAL = threading.Event()
            # sentinel value so a callback that never fires is detectable
            GLOB_RC = -9900000
            self.container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after unsuccessful container create: ", GLOB_RC)

            # cleanup the pool and container
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_destroyasync(self):
        """
        Test container destroy for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,contdestroyasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            GLOB_SIGNAL = threading.Event()
            self.container1.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to destroy container again, this should fail, as
            # non-existent. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            # destroying a never-created container must report -1003
            if GLOB_RC != -1003:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_openasync(self):
        """
        Test container open for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,openasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            GLOB_SIGNAL = threading.Event()
            self.container1.open(poh, cuuid, 2, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.open(None, None, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.close()
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_closeasync(self):
        """
        Test container close for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,closeasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            # open synchronously; only the close is under test here
            self.container1.open(poh, cuuid, 2)

            GLOB_SIGNAL = threading.Event()
            self.container1.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_queryasync(self):
        """
        Test container query for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,queryasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)
            dummy_str_cuuid = self.container1.get_uuid_str()

            # Open container
            self.container1.open(poh, None, 2, None)

            GLOB_SIGNAL = threading.Event()
            self.container1.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Close opened container
            self.container1.close()

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
class DeleteContainerTest(TestWithServers):
    """
    Tests DAOS container delete and close.

    :avocado: recursive
    """

    def setUp(self):
        """Cache the pool-create parameters from the test yaml."""
        super(DeleteContainerTest, self).setUp()

        # parameters used in pool create
        self.createmode = self.params.get("mode",
                                          '/run/createtests/createmode/')
        self.createuid = os.geteuid()
        self.creategid = os.getegid()
        self.createsetid = self.params.get("setname",
                                           '/run/createtests/createset/')
        self.createsize = self.params.get("size",
                                          '/run/createtests/createsize/')

    def test_container_delete(self):
        """
        Test basic container delete.

        Each yaml parameter is a (value, 'PASS'|'FAIL') pair; the test is
        expected to fail when any selected parameter is marked 'FAIL'.

        :avocado: tags=all,container,tiny,smoke,pr,contdelete
        """
        expected_for_param = []
        uuidlist = self.params.get("uuid",
                                   '/run/createtests/ContainerUUIDS/*/')
        cont_uuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/PoolHandles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        openlist = self.params.get("opened",
                                   "/run/createtests/ConnectionOpened/*/")
        opened = openlist[0]
        expected_for_param.append(openlist[1])

        forcelist = self.params.get("force",
                                    "/run/createtests/ForceDestroy/*/")
        force = forcelist[0]
        expected_for_param.append(forcelist[1])

        # NOTE(review): with both branches below cancelling, every force
        # value is currently skipped pending the referenced JIRA tickets.
        if force >= 1:
            self.cancel("Force >= 1 blocked by issue described in "
                        "https://jira.hpdd.intel.com/browse/DAOS-689")

        if force == 0:
            self.cancel("Force = 0 blocked by "
                        "https://jira.hpdd.intel.com/browse/DAOS-1935")

        # overall expectation: FAIL if any single parameter expects FAIL
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(self.createmode, self.createuid, self.creategid,
                             self.createsize, self.createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)
            self.container = DaosContainer(self.context)

            # create should always work (testing destroy)
            if not cont_uuid == 'INVALID':
                cont_uuid = uuid.UUID(uuidlist[0])
                self.container.create(self.pool.handle, cont_uuid)
            else:
                self.container.create(self.pool.handle)

            # Opens the container if required
            if opened:
                self.container.open(self.pool.handle)

            # wait a few seconds and then attempts to destroy container
            time.sleep(5)
            if poh == 'VALID':
                poh = self.pool.handle

            # if container is INVALID, overwrite with non existing UUID
            if cont_uuid == 'INVALID':
                cont_uuid = uuid.uuid4()

            self.container.destroy(force=force, poh=poh, con_uuid=cont_uuid)
            self.container = None

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            self.d_log.error(excep)
            self.d_log.error(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

        finally:
            # clean up the pool
            if self.pool is not None:
                self.pool.destroy(1)
                self.pool = None
class OpenClose(TestWithServers):
    """
    Tests DAOS container open/close function with handle parameter.

    :avocado: recursive
    """
    def __init__(self, *args, **kwargs):
        super(OpenClose, self).__init__(*args, **kwargs)
        # created lazily inside the test; None until then so cleanup can
        # tell whether they exist
        self.container1 = None
        self.container2 = None

    def tearDown(self):
        """Destroy the pool, then let the base class stop the servers."""
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            try:
                super(OpenClose, self).tearDown()
            except server_utils.ServerFailed:
                # server teardown failures must not hide test results
                pass

    def test_closehandle(self):
        """
        Test container close function with container handle parameter.

        The yaml 'coh' parameter selects either a GOOD handle (the open
        container's own) or a BAD one (a handle from a container that has
        already been closed and destroyed), with an expected PASS/FAIL.

        :avocado: tags=container,openclose,closehandle
        """
        saved_coh = None

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        coh_params = self.params.get("coh",
                                     '/run/container/container_handle/*/')

        expected_result = coh_params[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            self.container1.open(poh, cuuid, 2, None)

            # Defining 'good' and 'bad' container handles
            saved_coh = self.container1.coh
            if coh_params[0] == 'GOOD':
                coh = self.container1.coh
            else:
                # create a second container, open to get a handle
                # then close & destroy so handle is invalid
                # NOTE(review): this open passes container1's uuid — looks
                # intentional (any valid-then-stale handle works), but confirm.
                self.container2 = DaosContainer(self.context)
                self.container2.create(poh)
                self.container2.open(poh, cuuid, 2, None)
                coh = self.container2.coh
                self.container2.close()
                self.container2.destroy()

            # close container with either good or bad handle
            self.container1.close(coh)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            if expected_result == 'PASS':
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to pass but it failed.\n")

            # close above failed so close for real with the right coh
            if saved_coh is not None:
                self.container1.close(saved_coh)

        finally:
            # Guard each cleanup step: if setup failed before the container
            # or pool existed, the original unguarded calls raised
            # AttributeError here and masked the real test failure.
            if self.container1 is not None:
                self.container1.destroy(1)
            if self.pool is not None:
                self.pool.disconnect()
                self.pool.destroy(1)
                self.pool = None
class OpenContainerTest(Test):
    """
    Tests DAOS container bad create (non existing pool handle, bad uuid)
    and close.

    :avocado: tags=container,containeropen
    """
    def setUp(self):
        """Start agent/server and cache pool-create parameters."""
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        # None until created so tearDown can tell what needs cleanup
        self.pool1 = None
        self.pool2 = None
        self.container1 = None
        self.container2 = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # common parameters used in pool create
        self.createmode = self.params.get("mode",
                                          '/run/createtests/createmode/')
        self.createsetid = self.params.get("setname",
                                           '/run/createtests/createset/')
        self.createsize = self.params.get("size",
                                          '/run/createtests/createsize/')

        # pool 1 UID GID
        self.createuid1 = self.params.get("uid",
                                          '/run/createtests/createuid1/')
        self.creategid1 = self.params.get("gid",
                                          '/run/createtests/creategid1/')

        # pool 2 UID GID
        self.createuid2 = self.params.get("uid",
                                          '/run/createtests/createuid2/')
        self.creategid2 = self.params.get("gid",
                                          '/run/createtests/creategid2/')

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                self.basepath)

    def tearDown(self):
        """Destroy any leftover containers/pools, then stop agent/server."""
        try:
            if self.container1 is not None:
                self.container1.destroy()
            if self.container2 is not None:
                self.container2.destroy()
            if self.pool1 is not None and self.pool1.attached:
                self.pool1.destroy(1)
            if self.pool2 is not None and self.pool2.attached:
                self.pool2.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_container_open(self):
        """
        Test basic container bad create.

        Open should succeed only when both the pool handle and the UUID
        refer to the container actually created in pool1; the yaml params
        select good/bad combinations with an expected PASS/FAIL.

        :avocado: tags=container,containeropen
        """
        container_uuid = None
        expected_for_param = []
        uuidlist = self.params.get("uuid", '/run/createtests/uuids/*/')
        container_uuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/handles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        # overall expectation: FAIL if any single parameter expects FAIL
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # create two pools and try to create containers in these pools
            self.pool1 = DaosPool(self.context)
            self.pool1.create(self.createmode, self.createuid1,
                              self.creategid1, self.createsize,
                              self.createsetid, None)

            self.pool2 = DaosPool(self.context)
            self.pool2.create(self.createmode, self.createuid2,
                              self.creategid2, self.createsize,
                              None, None)

            # Connect to the pools
            self.pool1.connect(1 << 1)
            self.pool2.connect(1 << 1)

            # defines pool handle for container open
            if pohlist[0] == 'pool1':
                poh = self.pool1.handle
            else:
                poh = self.pool2.handle

            # Create a container in pool1
            self.container1 = DaosContainer(self.context)
            self.container1.create(self.pool1.handle)

            # defines test UUID for container open
            if uuidlist[0] == 'pool1':
                struuid = self.container1.get_uuid_str()
                container_uuid = uuid.UUID(struuid)
            else:
                if uuidlist[0] == 'MFUUID':
                    container_uuid = "misformed-uuid-0000"
                else:
                    container_uuid = uuid.uuid4()  # random uuid

            # tries to open the container1
            # open should be ok only if poh = pool1.handle &&
            # containerUUID = container1.uuid
            self.container1.open(poh, container_uuid)

            # wait a few seconds and then destroy containers
            time.sleep(5)
            self.container1.close()
            self.container1.destroy()
            self.container1 = None

            # cleanup the pools
            self.pool1.disconnect()
            self.pool1.destroy(1)
            self.pool1 = None
            self.pool2.disconnect()
            self.pool2.destroy(1)
            self.pool2 = None

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

        finally:
            if self.hostfile is not None:
                os.remove(self.hostfile)
class PunchTest(Test):
    """
    Simple test to verify the 3 different punch calls.
    """
    def setUp(self):
        """Start a server, create/connect a pool, create/open a container."""
        try:
            # get paths from the build_vars generated by build
            with open('../../../.build_vars.json') as f:
                build_paths = json.load(f)
            self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
            self.tmp = build_paths['PREFIX'] + '/tmp'

            self.server_group = self.params.get("server_group", '/server/',
                                                'daos_server')

            # setup the DAOS python API
            self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')

            self.hostlist = self.params.get("test_machines", '/run/hosts/*')
            self.hostfile = WriteHostFile.WriteHostFile(
                self.hostlist, self.tmp)

            ServerUtils.runServer(self.hostfile, self.server_group,
                                  self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.Context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.Context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")

    def tearDown(self):
        """Close/destroy the container and pool, then stop the server."""
        try:
            if self.container:
                self.container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            if self.container:
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)

            if self.hostfile is not None:
                os.remove(self.hostfile)

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test failed during teardown.\n")

        finally:
            ServerUtils.stopServer(hosts=self.hostlist)

    def test_dkey_punch(self):
        """
        The most basic test of the dkey punch function.

        Writes the same dkey/akey in three successive epochs, punches the
        dkey in the middle epoch, then verifies it is gone from the middle
        and later epochs but intact in the first.

        :avocado: tags=object,punch,dkeypunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, epoch = self.container.write_an_obj(thedata,
                                                     len(thedata) + 1,
                                                     dkey, akey)

            # read the data back and make sure its correct
            thedata2 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, epoch)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # repeat above, but know that the write_an_obj call is advancing
            # the epoch so the original copy remains and the new copy is in
            # a new epoch.
            thedata3 = "a different string"
            # note using the same keys so writing to the same spot
            obj, epoch2 = self.container.write_an_obj(thedata3,
                                                      len(thedata3) + 1,
                                                      dkey, akey, obj)

            # read the data back and make sure its correct
            thedata4 = self.container.read_an_obj(
                len(thedata3) + 1, dkey, akey, obj, epoch2)
            if thedata3 != thedata4.value:
                print("data I wrote:" + thedata3)
                print("data I read back" + thedata4.value)
                self.fail("wrote in new epoch, read it back, didn't match\n")

            # the original data should still be there too
            thedata5 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, epoch)
            if thedata != thedata5.value:
                self.fail("original data isn't there any more\n")

            # repeat, so there will be 3 epochs
            thedata6 = "a really different string"
            # note using the same keys so writing to the same spot
            obj, epoch3 = self.container.write_an_obj(thedata6,
                                                      len(thedata6) + 1,
                                                      dkey, akey, obj)

            # read the data back and make sure its correct
            thedata7 = self.container.read_an_obj(
                len(thedata6) + 1, dkey, akey, obj, epoch3)
            if thedata6 != thedata7.value:
                print("data I wrote:" + thedata6)
                print("data I read back" + thedata7.value)
                self.fail("wrote in new epoch, read it back, didn't match\n")

            # now punch the data from the middle epoch
            obj.punch_dkeys(epoch2, [dkey])

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test failed.\n")

        try:
            # read the data from the middle epoch
            thedata8 = self.container.read_an_obj(
                len(thedata3) + 1, dkey, akey, obj, epoch2)

            # BUGFIX: was `len(...) is not 0` — identity comparison with an
            # int literal only works by CPython small-int caching; use !=
            if len(thedata8.value) != 0:
                print("data8: {} {}".format(thedata8.value,
                                            len(thedata8.value)))
                self.fail("punch from middle epoch didn't work")

            # read the data from the last epoch
            thedata9 = self.container.read_an_obj(
                len(thedata6) + 1, dkey, akey, obj, epoch3)
            if len(thedata9.value) != 0:
                print("data9: {} {}".format(thedata9.value,
                                            len(thedata9.value)))
                self.fail("after punch data in the last epoch should be gone")

            # lastly check the first epoch
            thedata10 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, epoch)
            if thedata != thedata10.value:
                self.fail(
                    "Epoch preceeding the punch should still have data\n")

        except DaosApiError as e:
            print(e)
            self.fail("Test failed.\n")

    def test_akey_punch(self):
        """
        The most basic test of the akey punch function.

        Writes three akeys under one dkey across three epochs, punches one
        akey in the middle epoch, and verifies it is gone from that epoch.

        :avocado: tags=object,punch,akeypunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            dkey = "this is the dkey"
            data1 = [("this is akey 1", "this is data value 1"),
                     ("this is akey 2", "this is data value 2"),
                     ("this is akey 3", "this is data value 3")]
            obj, epoch1 = self.container.write_multi_akeys(dkey, data1)

            # do this again, note that the epoch has been advanced by
            # the write_multi_akeys function
            data2 = [("this is akey 1", "this is data value 4"),
                     ("this is akey 2", "this is data value 5"),
                     ("this is akey 3", "this is data value 6")]
            obj, epoch2 = self.container.write_multi_akeys(dkey, data2, obj)

            # do this again, note that the epoch has been advanced by
            # the write_multi_akeys function
            data3 = [("this is akey 1", "this is data value 7"),
                     ("this is akey 2", "this is data value 8"),
                     ("this is akey 3", "this is data value 9")]
            obj, epoch3 = self.container.write_multi_akeys(dkey, data3, obj)

            # read back the 1st epoch's data and check 1 value just to make
            # sure everything is on the up and up
            readbuf = [(data1[0][0], len(data1[0][1]) + 1),
                       (data1[1][0], len(data1[1][1]) + 1),
                       (data1[2][0], len(data1[2][1]) + 1)]
            retrieved_data = self.container.read_multi_akeys(
                dkey, readbuf, obj, epoch1)
            if retrieved_data[data1[1][0]] != data1[1][1]:
                print("middle akey, 1st epoch {}".format(
                    retrieved_data[data1[1][0]]))
                self.fail("data retrieval failure")

            # now punch one akey from the middle epoch
            print("punching: {}".format([data2[1][0]]))
            obj.punch_akeys(epoch2, dkey, [data2[1][0]])

            # verify its gone from the epoch where it was punched
            readbuf = [(data2[1][0], len(data2[1][1]) + 1)]
            retrieved_data = self.container.read_multi_akeys(
                dkey, readbuf, obj, epoch2)
            if len(retrieved_data[data2[1][0]]) != 0:
                print("retrieved: {}".format(retrieved_data))
                print("retrieved punched data but it was still there")
                self.fail("punched data still present")

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test failed.\n")

    @avocado.skip('Currently this test fails')
    def test_obj_punch(self):
        """
        The most basic test of the object punch function.  Really similar
        to above except the whole object is deleted.

        :avocado: tags=object,punch,objpunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, epoch = self.container.write_an_obj(thedata,
                                                     len(thedata) + 1,
                                                     dkey, akey)

            # read the data back and make sure its correct
            thedata2 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, epoch)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # repeat above, but know that the write_an_obj call is advancing
            # the epoch so the original copy remains and the new copy is in
            # a new epoch.
            thedata3 = "a different string"
            # note using the same keys so writing to the same spot
            obj, epoch2 = self.container.write_an_obj(thedata3,
                                                      len(thedata3) + 1,
                                                      dkey, akey, obj)

            # read the data back and make sure its correct
            thedata4 = self.container.read_an_obj(
                len(thedata3) + 1, dkey, akey, obj, epoch2)
            if thedata3 != thedata4.value:
                print("data I wrote:" + thedata3)
                print("data I read back" + thedata4.value)
                self.fail("wrote in new epoch, read it back, didn't match\n")

            # the original data should still be there too
            thedata5 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, epoch)
            if thedata != thedata5.value:
                self.fail("original data isn't there any more\n")

            # repeat, so there will be 3 epochs
            thedata6 = "a really different string"
            # note using the same keys so writing to the same spot
            obj, epoch3 = self.container.write_an_obj(thedata6,
                                                      len(thedata6) + 1,
                                                      dkey, akey, obj)

            # read the data back and make sure its correct
            thedata7 = self.container.read_an_obj(
                len(thedata6) + 1, dkey, akey, obj, epoch3)
            if thedata6 != thedata7.value:
                print("data I wrote:" + thedata6)
                print("data I read back" + thedata7.value)
                self.fail("wrote in new epoch, read it back, didn't match\n")

            # now punch the object from the middle epoch
            obj.punch(epoch2)

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test failed.\n")

        try:
            # read the data from the middle epoch, should be gone
            thedata8 = self.container.read_an_obj(
                len(thedata3) + 1, dkey, akey, obj, epoch2)

            # BUGFIX: was `is not 0`; also the print never applied .format()
            if len(thedata8.value) != 0:
                print("data8: {} {}".format(thedata8.value,
                                            len(thedata8.value)))
                self.fail("punch from middle epoch didn't work")

        except DaosApiError as e:
            print(e)
            self.fail("READ FROM DELETED OBJECT FAILED.\n")

        try:
            # read the data from the last epoch
            thedata9 = self.container.read_an_obj(
                len(thedata6) + 1, dkey, akey, obj, epoch3)

            # BUGFIX: original printed thedata8 here (copy-paste error)
            if len(thedata9.value) != 0:
                print("data9: {} {}".format(thedata9.value,
                                            len(thedata9.value)))
                self.fail("after punch data in the last epoch should be gone")

        except DaosApiError as e:
            print(e)
            self.fail("READ FROM DELETED OBJECT FAILED.\n")

        try:
            # lastly check the first epoch, this one should still be there
            thedata10 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, epoch)
            if thedata != thedata10.value:
                self.fail(
                    "Epoch preceeding the punch should still have data\n")

        except DaosApiError as e:
            print(e)
            self.fail("Test failed.\n")
class ObjFetchBadParam(Test):
    """
    Test Class Description:
    Pass an assortment of bad parameters to the daos_obj_fetch function.
    """
    def setUp(self):
        """Start a server, create pool/container, and seed one object."""
        self.pl = logging.getLogger("progress")

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'

        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)
        time.sleep(5)

        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.Context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.Context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

            # create an object and write some data into it; the tests below
            # re-read this object with deliberately bad parameters
            thedata = "a string that I want to stuff into an object"
            self.datasize = len(thedata) + 1
            self.dkey = "this is the dkey"
            self.akey = "this is the akey"
            self.obj, self.epoch = self.container.write_an_obj(
                thedata, self.datasize, self.dkey, self.akey)

            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj,
                                                  self.epoch)
            if thedata not in thedata2.value:
                print(thedata)
                print(thedata2.value)
                self.fail("Error reading back data, test failed during"
                          " the initial setup.\n")

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test failed during the initial setup.\n")

    def tearDown(self):
        """Close/destroy container and pool, then stop the server."""
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
            if self.hostfile is not None:
                os.remove(self.hostfile)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist)

    def test_bad_handle(self):
        """
        Test ID: DAOS-1377

        Test Description: Pass a bogus object handle, should return bad
        handle.

        :avocado: tags=object,objfetch,objfetchbadhand,regression,vm,small
        """
        try:
            # trash the handle and read again
            saved_oh = self.obj.oh
            self.obj.oh = 99999

            # expecting this to fail with -1002
            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj,
                                                  self.epoch)

            # BUGFIX: the original restored onto self.container.oh, leaving
            # the object's handle trashed; restore the object's handle
            self.obj.oh = saved_oh
            self.fail("Test was expected to return a -1002 but it has not.\n")

        except DaosApiError as e:
            self.obj.oh = saved_oh
            if not '-1002' in str(e):
                print(e)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1002 but it has not.\n")

    def test_null_ptrs(self):
        """
        Test ID: DAOS-1377

        Test Description: Pass null pointers for various fetch parameters.

        :avocado: tags=object,objfetch,objfetchnull,regression,vm,small
        """
        try:
            # now try it with a bad dkey, expecting this to fail with -1003
            thedata2 = self.container.read_an_obj(self.datasize, None,
                                                  self.akey, self.obj,
                                                  self.epoch)

            # unexpected success: clean up before failing
            self.container.close()
            self.container.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as e:
            if not '-1003' in str(e):
                print(e)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # now try it with a null sgl, expecting this to fail with -1003
            test_hints = ['sglnull']
            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj,
                                                  self.epoch, test_hints)

            # behavior not as expect so commented out for now
            # when DAOS-1448 is complete, uncomment and retest
            #self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as e:
            if not '-1003' in str(e):
                print(e)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # when DAOS-1449 is complete, uncomment and retest
            # now try it with a null iod, expecting this to fail with -1003
            #test_hints = ['iodnull']
            #thedata2 = self.container.read_an_obj(self.datasize, dkey, akey,
            #                                      self.obj, self.epoch,
            #                                      test_hints)
            pass
            #self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as e:
            if not '-1003' in str(e):
                print(e)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")
class OpenClose(Test):
    """
    Tests DAOS container open/close function with handle parameter.
    """
    # NOTE(review): legacy duplicate of the TestWithServers-based OpenClose
    # class elsewhere in this file, using the older CamelCase utility modules.

    def setUp(self):
        """Start a server and prepare the DAOS context."""
        # these are first since they are referenced in teardown
        self.pool = None
        self.hostlist = None

        self.hostlist = self.params.get("test_servers", '/run/hosts/')

        # get paths from the build_vars generated by build
        with open(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             '../../../../.build_vars.json')) as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist,
                                                    self.workdir)

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)
        time.sleep(5)

    def tearDown(self):
        """Destroy the pool (if any) and always stop the server."""
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            try:
                ServerUtils.stopServer(hosts=self.hostlist)
            except ServerFailed as e:
                # server stop failures must not hide test results
                pass

    def test_closehandle(self):
        """
        Test container close function with container handle paramter.

        :avocado: tags=container,openclose,closehandle
        """
        saved_coh = None

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        coh_params = self.params.get("coh",
                                     '/run/container/container_handle/*/')

        # (value, 'PASS'|'FAIL') pair from the yaml
        expected_result = coh_params[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.Context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container1.create(poh)
            str_cuuid = self.Container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            self.Container1.open(poh, cuuid, 2, None)

            # Defining 'good' and 'bad' container handles
            saved_coh = self.Container1.coh
            if coh_params[0] == 'GOOD':
                coh = self.Container1.coh
            else:
                # create a second container, open to get a handle
                # then close & destroy so handle is invalid
                # NOTE(review): this open passes Container1's cuuid —
                # presumably any valid-then-stale handle suffices; confirm.
                self.Container2 = DaosContainer(self.Context)
                self.Container2.create(poh)
                self.Container2.open(poh, cuuid, 2, None)
                coh = self.Container2.coh
                self.Container2.close()
                self.Container2.destroy()

            # close container with either good or bad handle
            self.Container1.close(coh)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as e:
            if expected_result == 'PASS':
                print(e)
                print(traceback.format_exc())
                self.fail("Test was expected to pass but it failed.\n")

            # close above failed so close for real with the right coh
            if saved_coh is not None:
                self.Container1.close(saved_coh)

        finally:
            # NOTE(review): if setup raised before Container1/pool existed,
            # these unguarded calls raise here and mask the original error —
            # TODO guard each step as done in the TestWithServers variant.
            self.Container1.destroy(1)
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None
def test_epoch_basics(self):
    """Perform I/O to an object in a container in 2 different epochs,
    verifying basic I/O and epochs in particular.

    Writes a value, overwrites the same dkey/akey in a later epoch, then
    verifies both versions are readable at their respective epochs.

    :avocado: tags=container,epoch,basicepoch
    """
    try:
        # parameters used in pool create
        createmode = self.params.get("mode",'/run/conttests/createmode/')
        createuid = self.params.get("uid",'/run/conttests/createuid/')
        creategid = self.params.get("gid",'/run/conttests/creategid/')
        createsetid = self.params.get("setname",'/run/conttests/createset/')
        createsize = self.params.get("size",'/run/conttests/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage
        POOL = DaosPool(self.Context)
        POOL.create(createmode, createuid, creategid,
                    createsize, createsetid, None)

        # need a connection to create container
        POOL.connect(1 << 1)

        # create a container
        CONTAINER = DaosContainer(self.Context)
        CONTAINER.create(POOL.handle)

        # now open it
        CONTAINER.open()

        # do a query and compare the UUID returned from create with
        # that returned by query
        CONTAINER.query()

        if CONTAINER.get_uuid_str() != c_uuid_to_str(
                CONTAINER.info.ci_uuid):
            self.fail("Container UUID did not match the one in info\n")

        # create an object and write some data into it
        thedata = "a string that I want to stuff into an object"
        # 45 == len(thedata) + 1 (includes the trailing NUL)
        thedatasize = 45
        dkey = "this is the dkey"
        akey = "this is the akey"
        oid, epoch = CONTAINER.write_an_obj(thedata, thedatasize,
                                            dkey, akey)

        # read the data back and make sure its correct
        thedata2 = CONTAINER.read_an_obj(thedatasize, dkey, akey,
                                         oid, epoch)
        if thedata != thedata2.value:
            print("thedata>" + thedata)
            print("thedata2>" + thedata2.value)
            self.fail("Write data 1, read it back, didn't match\n")

        # repeat above, but know that the write_an_obj call is advancing
        # the epoch so the original copy remains and the new copy is in
        # a new epoch.
        thedata3 = "a different string"
        # 19 == len(thedata3) + 1
        thedatasize2 = 19
        # note using the same keys so writing to the same spot
        dkey = "this is the dkey"
        akey = "this is the akey"
        oid, epoch2 = CONTAINER.write_an_obj(thedata3, thedatasize2,
                                             dkey, akey, oid)

        # read the data back and make sure its correct
        thedata4 = CONTAINER.read_an_obj(thedatasize2, dkey, akey,
                                         oid, epoch2)
        if thedata3 != thedata4.value:
            self.fail("Write data 2, read it back, didn't match\n")

        # the original data should still be there too
        thedata5 = CONTAINER.read_an_obj(thedatasize, dkey, akey,
                                         oid, epoch)
        if thedata != thedata5.value:
            self.fail("Write data 3, read it back, didn't match\n")

        CONTAINER.close()

        # wait a few seconds and then destroy
        time.sleep(5)
        CONTAINER.destroy()

        # cleanup the pool
        POOL.disconnect()
        POOL.destroy(1)

    except DaosApiError as e:
        print(e)
        print(traceback.format_exc())
        self.fail("Test was expected to pass but it failed.\n")
def test_container_basics(self):
    """Test basic container create/destroy/open/close/query.

    Nothing fancy, just making sure they work at a rudimentary level.
    Starts agent + server, creates a pool and a container, verifies the
    container uuid via query, then tears everything down.

    :avocado: tags=container,containercreate,containerdestroy,basecont
    """
    pool = None
    hostlist = None
    try:
        hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(hostlist, self.workdir)
        self.agent_sessions = agent_utils.run_agent(self.basepath, hostlist)
        server_utils.run_server(hostfile, self.server_group, self.basepath)

        # give it time to start
        time.sleep(2)

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/conttests/createmode/')
        createuid = self.params.get("uid", '/run/conttests/createuid/')
        creategid = self.params.get("gid", '/run/conttests/creategid/')
        createsetid = self.params.get("setname", '/run/conttests/createset/')
        createsize = self.params.get("size", '/run/conttests/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid, createsize,
                    createsetid, None)

        # need a connection to create container
        pool.connect(1 << 1)

        # create a container
        container = DaosContainer(self.context)
        container.create(pool.handle)

        # now open it
        container.open()

        # do a query and compare the UUID returned from create with
        # that returned by query
        container.query()

        if container.get_uuid_str() != c_uuid_to_str(
                container.info.ci_uuid):
            # BUGFIX: the message previously ended with a literal "'n"
            # instead of the intended newline escape.
            self.fail("Container UUID did not match the one in info\n")

        container.close()

        # wait a few seconds and then destroy
        time.sleep(5)
        container.destroy()

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        self.fail("Test was expected to pass but it failed.\n")
    except Exception as excep:
        # broad catch kept deliberately: a segfault in the C bindings can
        # surface as an arbitrary exception type
        self.fail("Daos code segfaulted most likely, error: %s" % excep)
    finally:
        # cleanup the pool
        if pool is not None:
            pool.disconnect()
            pool.destroy(1)
        if self.agent_sessions:
            agent_utils.stop_agent(self.agent_sessions)
        server_utils.stop_server(hosts=hostlist)
def test_container_basics(self): """ Test basic container create/destroy/open/close/query. Nothing fancy just making sure they work at a rudimentary level :avocado: tags=container,containercreate,containerdestroy,basecont """ hostfile = None try: self.hostlist = self.params.get("test_machines",'/run/hosts/*') hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp) ServerUtils.runServer(hostfile, self.server_group, self.basepath) # give it time to start time.sleep(2) # parameters used in pool create createmode = self.params.get("mode",'/run/conttests/createmode/') createuid = self.params.get("uid",'/run/conttests/createuid/') creategid = self.params.get("gid",'/run/conttests/creategid/') createsetid = self.params.get("setname",'/run/conttests/createset/') createsize = self.params.get("size",'/run/conttests/createsize/') # initialize a python pool object then create the underlying # daos storage POOL = DaosPool(self.Context) POOL.create(createmode, createuid, creategid, createsize, createsetid, None) # need a connection to create container POOL.connect(1 << 1) # create a container CONTAINER = DaosContainer(self.Context) CONTAINER.create(POOL.handle) # now open it CONTAINER.open() # do a query and compare the UUID returned from create with # that returned by query CONTAINER.query() if CONTAINER.get_uuid_str() != c_uuid_to_str( CONTAINER.info.ci_uuid): self.fail("Container UUID did not match the one in info'n") CONTAINER.close() # wait a few seconds and then destroy time.sleep(5) CONTAINER.destroy() # cleanup the pool POOL.disconnect() POOL.destroy(1) except DaosApiError as e: print(e) print(traceback.format_exc()) self.fail("Test was expected to pass but it failed.\n") except Exception as e: self.fail("Daos code segfaulted most likely, error: %s" % e) finally: try: if hostfile is not None: os.remove(hostfile) finally: ServerUtils.stopServer(hosts=self.hostlist)
class OpenContainerTest(Test):
    """Tests DAOS container bad create (non existing pool handle, bad uuid)
    and close.

    The pool-handle / uuid combinations and their expected outcomes come
    from the /run/createtests/ yaml parameters.

    :avocado: tags=container,containeropen
    """

    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'
        self.server_group = self.params.get("server_group",'/server/',
                                            'daos_server')

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')

        # initialized to None so tearDown can run safely even if setUp
        # or the test aborts part-way through
        self.POOL1 = None
        self.POOL2 = None
        self.CONTAINER1 = None
        self.CONTAINER2 = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines",'/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)

        # common parameters used in pool create
        self.createmode = self.params.get("mode",
                                          '/run/createtests/createmode/')
        self.createsetid = self.params.get("setname",
                                           '/run/createtests/createset/')
        self.createsize = self.params.get("size",
                                          '/run/createtests/createsize/')

        # POOL 1 UID GID
        self.createuid1 = self.params.get("uid",
                                          '/run/createtests/createuid1/')
        self.creategid1 = self.params.get("gid",
                                          '/run/createtests/creategid1/')

        # POOL 2 UID GID
        self.createuid2 = self.params.get("uid",
                                          '/run/createtests/createuid2/')
        self.creategid2 = self.params.get("gid",
                                          '/run/createtests/creategid2/')

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        # destroy whatever the test left behind; each attribute is only
        # non-None if the test got far enough to create it
        try:
            if self.CONTAINER1 is not None:
                self.CONTAINER1.destroy()
            if self.CONTAINER2 is not None:
                self.CONTAINER2.destroy()
            if self.POOL1 is not None and self.POOL1.attached:
                self.POOL1.destroy(1)
            if self.POOL2 is not None and self.POOL2.attached:
                self.POOL2.destroy(1)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist)

    def test_container_open(self):
        """Test basic container bad create.

        Opens a container with a good/bad pool handle and a good/bad uuid
        (both parameterized) and checks the combined expected outcome.

        :avocado: tags=container,containeropen
        """
        expected_for_param = []
        # uuidlist / pohlist are [descriptor, expected-result] pairs
        uuidlist = self.params.get("uuid",'/run/createtests/uuids/*/')
        containerUUID = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh",'/run/createtests/handles/*/')
        # NOTE(review): both of these initial assignments are overwritten
        # below before use; they appear to be dead stores.
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        # test fails overall if any single parameter expects failure
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # create two pools and try to create containers in these pools
            self.POOL1 = DaosPool(self.Context)
            self.POOL1.create(self.createmode, self.createuid1,
                              self.creategid1, self.createsize,
                              self.createsetid, None)

            self.POOL2 = DaosPool(self.Context)
            self.POOL2.create(self.createmode, self.createuid2,
                              self.creategid2, self.createsize, None, None)

            # Connect to the pools
            self.POOL1.connect(1 << 1)
            self.POOL2.connect(1 << 1)

            # defines pool handle for container open
            if pohlist[0] == 'POOL1':
                poh = self.POOL1.handle
            else:
                poh = self.POOL2.handle

            # Create a container in POOL1
            self.CONTAINER1 = DaosContainer(self.Context)
            self.CONTAINER1.create(self.POOL1.handle)

            # defines test UUID for container open
            if uuidlist[0] == 'POOL1':
                struuid = self.CONTAINER1.get_uuid_str()
                containerUUID = uuid.UUID(struuid)
            else:
                if uuidlist[0] == 'MFUUID':
                    # deliberately malformed uuid string
                    containerUUID = "misformed-uuid-0000"
                else:
                    containerUUID = uuid.uuid4() # random uuid

            # tries to open the container1
            # open should be ok only if poh = POOL1.handle &&
            # containerUUID = CONTAINER1.uuid
            self.CONTAINER1.open(poh, containerUUID)

            # wait a few seconds and then destroy containers
            time.sleep(5)
            self.CONTAINER1.close()
            self.CONTAINER1.destroy()
            self.CONTAINER1 = None

            # cleanup the pools
            self.POOL1.disconnect()
            self.POOL1.destroy(1)
            self.POOL1 = None
            self.POOL2.disconnect()
            self.POOL2.destroy(1)
            self.POOL2 = None

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if self.hostfile is not None:
                os.remove(self.hostfile)
class ObjOpenBadParam(Test):
    """
    Test Class Description:
    Pass an assortment of bad parameters to the daos_obj_open function.

    setUp creates a pool/container and writes one object; each test then
    corrupts a single field (handle, uuid, rank list, ...), attempts the
    open, checks for the expected DAOS error code, and restores the field.
    """

    def __init__(self, *args, **kwargs):
        """
        Initialize values for variables that are used in tearDown() such
        that if setUp() fails for any reason, tearDown() will avoid
        throwing an AttributeError exception.
        """
        super(ObjOpenBadParam, self).__init__(*args, **kwargs)
        self.container = None
        self.pool = None

    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("server_group",
                                            '/server/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist,
                                                    self.workdir)
        ServerUtils.runServer(self.hostfile, self.server_group,
                              self.basepath)

        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            self.datasize = len(thedata) + 1
            self.dkey = "this is the dkey"
            self.akey = "this is the akey"
            self.obj, self.epoch = self.container.write_an_obj(
                thedata, self.datasize, self.dkey, self.akey, obj_cls=1)

            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj,
                                                  self.epoch)
            if thedata not in thedata2.value:
                print(thedata)
                print(thedata2.value)
                err_str = "Error reading back data, test failed during the " \
                          "initial setup."
                self.d_log.error(err_str)
                self.fail(err_str)

            # setup leaves object in open state, so closing to start clean
            self.obj.close()
        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test failed during the initial setup.")

    def tearDown(self):
        try:
            self.container.close()
            self.container.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
        finally:
            ServerUtils.stopServer()
            ServerUtils.killServer(self.hostlist)

    def test_bad_obj_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open a garbage object handle.

        :avocado: tags=object,objopen,objopenbadhand,regression,vm,small
        """
        saved_handle = self.obj.oh
        # 8675309 is just an arbitrary garbage handle value
        self.obj.oh = 8675309
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            # -1002 is the DAOS error code this test expects for a bad
            # handle (per the assertions below)
            if not "-1002" in str(excep):
                self.d_log.error("test expected a -1002 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1002 but did not get it")
        finally:
            # always restore the real handle so later tests are unaffected
            self.obj.oh = saved_handle

    def test_invalid_container_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object with a garbage container
                          handle.

        :avocado: tags=object,objopen,objopenbadconthand,regression,vm,small
        """
        saved_coh = self.container.coh
        # arbitrary garbage container handle
        self.container.coh = 8675309
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if not "-1002" in str(excep):
                self.d_log.error("test expected a -1002 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1002 but did not get it")
        finally:
            self.container.coh = saved_coh

    def test_closed_container_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          a closed handle.

        :avocado: tags=object,objopen,objopenclosedcont,regression,vm,small
        """
        self.container.close()
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if not "-1002" in str(excep):
                self.d_log.error("test expected a -1002 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1002 but did not get it")
        finally:
            # reopen so tearDown's close() succeeds
            self.container.open()

    def test_pool_handle_as_obj_handle(self):
        """
        Test ID: DAOS-1320
        Test Description: Adding this test by request, this test attempts
                          to open an object that's had its handle set to
                          be the same as a valid pool handle.

        :avocado: tags=object,objopen,objopenpoolhandle,regression,vm,small
        """
        saved_oh = self.obj.oh
        self.obj.oh = self.pool.handle
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if not "-1002" in str(excep):
                self.d_log.error("test expected a -1002 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1002 but did not get it")
        finally:
            self.obj.oh = saved_oh

    def test_null_ranklist(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          an empty ranklist.

        :avocado: tags=object,objopen,objopennullrl,regression,vm,small
        """
        # null rl
        saved_rl = self.obj.tgt_rank_list
        self.obj.tgt_rank_list = None
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            # -1003 is the DAOS error code expected for an invalid
            # parameter (per the assertions below)
            if not "-1003" in str(excep):
                self.d_log.error("test expected a -1003 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1003 but did not get it")
        finally:
            self.obj.tgt_rank_list = saved_rl

    def test_null_oid(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          null object id.

        :avocado: tags=object,objopen,objopennulloid,regression,vm,small
        """
        # null oid
        saved_oid = self.obj.c_oid
        self.obj.c_oid = DaosObjId(0, 0)
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if not "-1003" in str(excep):
                self.d_log.error("Test expected a -1003 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1003 but did not get it")
        finally:
            self.obj.c_oid = saved_oid

    def test_null_tgts(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          null tgt.

        :avocado: tags=object,objopen,objopennulltgts,regression,vm,small
        """
        # null tgts
        saved_ctgts = self.obj.c_tgts
        self.obj.c_tgts = 0
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if not "-1003" in str(excep):
                self.d_log.error("Test expected a -1003 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1003 but did not get it")
        finally:
            self.obj.c_tgts = saved_ctgts

    def test_null_attrs(self):
        """
        Test ID: DAOS-1320
        Test Description: Attempt to open an object in a container with
                          null object attributes.

        :avocado: tags=object,objopen,objopennullattr,regression,vm,small
        """
        # null attr
        saved_attr = self.obj.attr
        self.obj.attr = 0
        try:
            dummy_obj = self.obj.open()
        except DaosApiError as excep:
            if not "-1003" in str(excep):
                self.d_log.error("test expected a -1003 but did not get it")
                self.d_log.error(traceback.format_exc())
                self.fail("test expected a -1003 but did not get it")
        finally:
            self.obj.attr = saved_attr
def test_container_basics(self):
    """Test basic container create/destroy/open/close/query.

    Nothing fancy, just making sure they work at a rudimentary level.
    Starts agent + server, creates a pool and a container, verifies the
    container uuid via query, then tears everything down.

    :avocado: tags=container,containercreate,containerdestroy,basecont
    """
    pool = None
    hostlist = None
    try:
        hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(hostlist, self.workdir)
        self.agent_sessions = AgentUtils.run_agent(self.basepath, hostlist)
        server_utils.run_server(hostfile, self.server_group, self.basepath)

        # give it time to start
        time.sleep(2)

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/conttests/createmode/')
        createuid = self.params.get("uid", '/run/conttests/createuid/')
        creategid = self.params.get("gid", '/run/conttests/creategid/')
        createsetid = self.params.get("setname", '/run/conttests/createset/')
        createsize = self.params.get("size", '/run/conttests/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid, createsize,
                    createsetid, None)

        # need a connection to create container
        pool.connect(1 << 1)

        # create a container
        container = DaosContainer(self.context)
        container.create(pool.handle)

        # now open it
        container.open()

        # do a query and compare the UUID returned from create with
        # that returned by query
        container.query()

        if container.get_uuid_str() != c_uuid_to_str(
                container.info.ci_uuid):
            # BUGFIX: the message previously ended with a literal "'n"
            # instead of the intended newline escape.
            self.fail("Container UUID did not match the one in info\n")

        container.close()

        # wait a few seconds and then destroy
        time.sleep(5)
        container.destroy()

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        self.fail("Test was expected to pass but it failed.\n")
    except Exception as excep:
        # broad catch kept deliberately: a segfault in the C bindings can
        # surface as an arbitrary exception type
        self.fail("Daos code segfaulted most likely, error: %s" % excep)
    finally:
        # cleanup the pool
        if pool is not None:
            pool.disconnect()
            pool.destroy(1)
        if self.agent_sessions:
            AgentUtils.stop_agent(hostlist, self.agent_sessions)
        server_utils.stop_server(hosts=hostlist)
def test_null_values(self):
    """
    Test ID: DAOS-1376

    Test Description: Pass a dkey and an akey that is null.

    Verifies that writes with a null dkey or null akey fail with DAOS
    error -1003 (the code this test asserts for an invalid parameter),
    while a write with no data succeeds.

    :avocado: tags=object,objupdate,objupdatenull,regression,vm,small
    """

    def _cleanup(pool, container):
        # Tear down the container/pool built during setup; extracted
        # because the original repeated this 4-call sequence four times.
        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)

    try:
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/conttests/createmode/')
        createsetid = self.params.get("setname", '/run/conttests/createset/')
        createsize = self.params.get("size", '/run/conttests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid,
                    createsize, createsetid, None)
        self.plog.info("Pool %s created.", pool.get_uuid_str())

        # need a connection to create container
        pool.connect(1 << 1)

        # create a container
        container = DaosContainer(self.context)
        container.create(pool.handle)
        self.plog.info("Container %s created.", container.get_uuid_str())

        # now open it
        container.open()

        # data used in the test
        thedata = "a string that I want to stuff into an object"
        thedatasize = len(thedata) + 1

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        self.fail("Test failed during setup .\n")

    try:
        # try using a null dkey
        dkey = None
        akey = "this is the akey"

        container.write_an_obj(thedata, thedatasize, dkey, akey,
                               None, None, 2)
        # write unexpectedly succeeded: clean up, then fail the test
        _cleanup(pool, container)
        self.plog.error("Didn't get expected return code.")
        self.fail("Test was expected to return a -1003 but it has not.\n")

    except DaosApiError as excep:
        if '-1003' not in str(excep):
            _cleanup(pool, container)
            self.plog.error("Didn't get expected return code.")
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to get -1003 but it has not.\n")

    try:
        # try using a null akey/io descriptor
        dkey = "this is the dkey"
        akey = None
        container.write_an_obj(thedata, thedatasize, dkey, akey,
                               None, None, 2)
        self.fail("Test was expected to return a -1003 but it has not.\n")

    except DaosApiError as excep:
        if '-1003' not in str(excep):
            self.plog.error("Didn't get expected return code.")
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to get -1003 but it has not.\n")

    try:
        # lastly try passing no data
        thedata = None
        thedatasize = 0
        dkey = "this is the dkey"
        akey = "this is the akey"

        container.write_an_obj(thedata, thedatasize, dkey, akey,
                               None, None, 2)
        self.plog.info("Update with no data worked")

    except DaosApiError as excep:
        _cleanup(pool, container)
        print(excep)
        print(traceback.format_exc())
        self.plog.error("Update with no data failed")
        self.fail("Update with no data failed.\n")

    _cleanup(pool, container)
    self.plog.info("Test Complete")