def check_handle(self, pool_glob_handle, uuidstr, cont_glob_handle, rank):
    """Verify that the global handles can be turned into local handles.

    This gets run in a child process and verifies the global handles can
    be turned into local handles in another process.

    Args:
        pool_glob_handle (sharedctypes.RawValue): pool handle
        uuidstr (sharedctypes.RawArray): pool uuid
        cont_glob_handle (sharedctypes.RawValue): container handle
        rank (int): pool svc rank

    Raises:
        DaosApiError: if there was an error converting the pool handle or
            using the local pool handle to create a container.

    """
    def iov_to_bytearray(glob_handle):
        # Reinterpret the raw iov buffer of a global handle as a c_byte
        # array of the advertised length and copy it into a bytearray.
        raw = ctypes.cast(
            glob_handle.iov_buf,
            ctypes.POINTER(ctypes.c_byte * glob_handle.iov_buf_len))
        data = bytearray()
        data.extend(raw.contents)
        return data

    # set up the pool and convert the global handle into a local one
    pool = DaosPool(self.context)
    pool.uuid = uuidstr
    pool.set_svc(rank)
    pool.group = "daos_server"
    pool_handle = pool.global2local(
        self.context, pool_glob_handle.iov_len,
        pool_glob_handle.iov_buf_len, iov_to_bytearray(pool_glob_handle))

    # perform an operation that will use the new handle, if it
    # doesn't throw an exception, then all is well.
    pool.pool_query()

    # set up the container and convert its global handle the same way
    container = DaosContainer(self.context)
    container.poh = pool_handle
    container.global2local(
        self.context, cont_glob_handle.iov_len,
        cont_glob_handle.iov_buf_len, iov_to_bytearray(cont_glob_handle))

    # just try one thing to make sure handle is good
    container.query()
def check_handle(self, pool_glob_handle, uuidstr, cont_glob_handle, rank): """ This gets run in a child process and verifyes the global handles can be turned into local handles in another process. """ # setup the pool and connect using global handle pool = DaosPool(self.context) pool.uuid = uuidstr pool.set_svc(rank) pool.group = "daos_server" buf = ctypes.cast(pool_glob_handle.iov_buf, ctypes.POINTER(ctypes.c_byte * pool_glob_handle.iov_buf_len)) buf2 = bytearray() buf2.extend(buf.contents) pool_handle = pool.global2local(self.context, pool_glob_handle.iov_len, pool_glob_handle.iov_buf_len, buf2) # perform an operation that will use the new handle, if it # doesn't throw an exception, then all is well. pool.pool_query() # setup the container and then connect using the global handle container = DaosContainer(self.context) container.poh = pool_handle buf = ctypes.cast(cont_glob_handle.iov_buf, ctypes.POINTER(ctypes.c_byte * cont_glob_handle.iov_buf_len)) buf2 = bytearray() buf2.extend(buf.contents) dummy_cont_handle = container.global2local( self.context, cont_glob_handle.iov_len, cont_glob_handle.iov_buf_len, buf2) # just try one thing to make sure handle is good container.query()
def test_tx_basics(self):
    """Perform I/O to an object in a container in 2 different transactions.

    Verifies basic I/O and transactions in particular.

    NOTE: this was an epoch test and all I did was get it working with tx
    Not a good test at this point, need to redesign when tx is fully
    working.

    :avocado: tags=all,container,tx,small,smoke,pr,basictx
    """
    self.pool = None

    try:
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/poolparams/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname",
                                      '/run/poolparams/createset/')
        createsize = self.params.get("size", '/run/poolparams/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid,
                         createsize, createsetid, None)

        # need a connection to create container
        self.pool.connect(1 << 1)

        # create a container
        container = DaosContainer(self.context)
        container.create(self.pool.handle)

        # now open it
        container.open()

        # do a query and compare the UUID returned from create with
        # that returned by query
        container.query()
        if container.get_uuid_str() != c_uuid_to_str(
                container.info.ci_uuid):
            self.fail("Container UUID did not match the one in info\n")

        # create an object and write some data into it; derive the size
        # from the data (+1 for the trailing NUL) instead of hard-coding
        # it, so the payload can be changed without desyncing the size
        thedata = "a string that I want to stuff into an object"
        thedatasize = len(thedata) + 1
        dkey = "this is the dkey"
        akey = "this is the akey"
        oid, txn = container.write_an_obj(thedata, thedatasize,
                                          dkey, akey, None, None, 2)

        # read the data back and make sure its correct
        thedata2 = container.read_an_obj(thedatasize, dkey, akey,
                                         oid, txn)
        if thedata != thedata2.value:
            print("thedata>" + thedata)
            print("thedata2>" + thedata2.value)
            self.fail("Write data 1, read it back, didn't match\n")

        # repeat above, but know that the write_an_obj call is advancing
        # the epoch so the original copy remains and the new copy is in
        # a new epoch.
        thedata3 = "a different string"
        thedatasize2 = len(thedata3) + 1
        # note using the same keys so writing to the same spot
        dkey = "this is the dkey"
        akey = "this is the akey"
        oid, tx2 = container.write_an_obj(thedata3, thedatasize2,
                                          dkey, akey, oid, None, 2)

        # read the data back and make sure its correct
        thedata4 = container.read_an_obj(thedatasize2, dkey, akey,
                                         oid, tx2)
        if thedata3 != thedata4.value:
            self.fail("Write data 2, read it back, didn't match\n")

        # transactions generally don't work this way but need to explore
        # an alternative to below code once model is complete, maybe
        # read from a snapshot or read from TX_NONE etc.

        # the original data should still be there too
        #thedata5 = container.read_an_obj(thedatasize, dkey, akey,
        #                                 oid, transaction)
        #if thedata != thedata5.value:
        #    self.fail("Write data 3, read it back, didn't match\n")

        container.close()

        # wait a few seconds and then destroy
        time.sleep(5)
        container.destroy()

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        self.fail("Test was expected to pass but it failed.\n")
def test_array_obj(self):
    """
    Test ID: DAOS-961

    Test Description: Writes an array to an object and then reads it back
    and verifies it.

    :avocado: tags=all,smoke,pr,object,tiny,basicobject
    """
    self.prepare_pool()

    try:
        # create and open a container on the prepared pool
        container = DaosContainer(self.context)
        container.create(self.pool.pool.handle)
        self.plog.info("Container %s created.", container.get_uuid_str())
        container.open()

        # the UUID reported by query must match the one from create
        container.query()
        if container.get_uuid_str() != c_uuid_to_str(
                container.info.ci_uuid):
            self.fail("Container UUID did not match the one in info\n")

        # write an array of strings under a single dkey/akey
        thedata = ["data string one", "data string two", "data string tre"]
        dkey = "this is the dkey"
        akey = "this is the akey"
        self.plog.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
        oid = container.write_an_array_value(thedata, dkey, akey, obj_cls=3)

        # read the array back and verify entries 0 and 2 against what was
        # written (comparison drops the final character, as before)
        length = len(thedata[0])
        thedata2 = container.read_an_array(len(thedata), length + 1,
                                           dkey, akey, oid)
        for idx in (0, 2):
            if thedata[idx][0:length - 1] != thedata2[idx][0:length - 1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[idx])
                self.plog.error("Read: >%s<", thedata2[idx])
                self.fail("Write data, read it back, didn't match\n")

        container.close()

        # wait a few seconds and then destroy
        time.sleep(5)
        container.destroy()
        self.plog.info("Test Complete")

    except DaosApiError as excep:
        self.plog.error("Test Failed, exception was thrown.")
        print(excep)
        print(traceback.format_exc())
        self.fail("Test was expected to pass but it failed.\n")
class Snapshot(TestWithServers):
    """
    Epic: DAOS-2249 Create system level tests that cover basic snapshot
          functionality.
    Testcase:
          DAOS-1370 Basic snapshot test
          DAOS-1386 Test container SnapShot information
          DAOS-1371 Test list snapshots
          DAOS-1395 Test snapshot destroy
          DAOS-1402 Test creating multiple snapshots

    Test Class Description:
          Start DAOS servers, set up the pool and container for the above
          snapshot Epic and Testcases, including snapshot basic, container
          information, list, creation and destroy.
    :avocado: recursive
    """

    def setUp(self):
        """Create and open the pool and container used by every test."""
        super(Snapshot, self).setUp()
        self.log.info("==In setUp, self.context= %s", self.context)

        # initialize a python pool object then create the underlying
        # daos storage and connect to it
        self.prepare_pool()

        try:
            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.pool.handle)
        except DaosApiError as error:
            self.log.info("Error detected in DAOS pool container setup: %s",
                          str(error))
            self.log.info(traceback.format_exc())
            self.fail("##Test failed on setUp, before snapshot taken")

        # now open it
        self.container.open()

        # do a query and compare the UUID returned from create with
        # that returned by query
        self.container.query()
        if self.container.get_uuid_str() != c_uuid_to_str(
                self.container.info.ci_uuid):
            self.fail("##Container UUID did not match the one in info.")

    @staticmethod
    def _rand_str(length):
        """Return a random lowercase-ascii string of the given length.

        string.ascii_lowercase is the python3-compatible (and
        locale-independent) spelling of the old string.lowercase.
        """
        return ''.join(
            random.choice(string.ascii_lowercase) for _ in range(length))

    def display_snapshot(self, snapshot):
        """Display the snapshot information.

        Args:
            snapshot: snapshot handle to be displayed.
        Return:
            none.
        """
        self.log.info("==display_snapshot================")
        self.log.info("snapshot= %s", snapshot)
        self.log.info("snapshot.context= %s", snapshot.context)
        self.log.info("snapshot.context.libdaos= %s",
                      snapshot.context.libdaos)
        self.log.info("snapshot.context.libtest= %s",
                      snapshot.context.libtest)
        self.log.info("snapshot.context.ftable= %s",
                      snapshot.context.ftable)
        self.log.info("snapshot.context.ftable[list-attr]= %s",
                      snapshot.context.ftable["list-attr"])
        self.log.info("snapshot.context.ftable[test-event]=%s",
                      snapshot.context.ftable["test-event"])
        self.log.info("snapshot.name= %s", snapshot.name)
        self.log.info("snapshot.epoch= %s", snapshot.epoch)
        self.log.info("==================================")

    def take_snapshot(self, container):
        """Take a snapshot of the container at the current epoch.

        Args:
            container: container for the snapshot
        Return:
            An object representing the snapshot
        """
        self.log.info("==Taking snapshot for:")
        self.log.info(" coh= %s", container.coh)
        snapshot = DaosSnapshot(self.context)
        snapshot.create(container.coh)
        self.display_snapshot(snapshot)
        return snapshot

    def invalid_snapshot_test(self, coh):
        """Negative snapshot test with invalid container handle.

        Args:
            coh: (invalid) container handle to attempt the snapshot on
        Return:
            0: Failed
            1: Passed (expected failure detected)
        """
        status = 0
        try:
            snapshot = DaosSnapshot(self.context)
            snapshot.create(coh)
        except Exception as error:
            self.log.info("==>Negative test, expected error: %s", str(error))
            status = 1
        return status

    def test_snapshot_negativecases(self):
        # pylint: disable=no-member
        """
        Test ID: DAOS-1390 Verify snap_create bad parameter behavior.
                 DAOS-1322 Create a new container, verify snapshot state.
                           as expected for a brand new container.
                 DAOS-1392 Verify snap_destroy bad parameter behavior.
                 DAOS-1388 Verify snap_list bad parameter behavior.
        Test Description:
                (0)Take a snapshot of the newly created container.
                (1)Create an object, write random data into it, and take
                   a snapshot.
                (2)Verify the snapshot is working properly.
                (3)Test snapshot with an invalid container handle.
                (4)Test snapshot with a NULL container handle.
                (5)Verify snap_destroy with a bad parameter.
                (6)Verify snap_list bad parameter behavior.
        Use Cases: Combinations with minimum 1 client and 1 server.
        :avocado: tags=all,small,smoke,daily_regression,snap,snapshot_negative,
        :avocado: tags=snapshotcreate_negative
        """

        #DAOS-1322 Create a new container, verify snapshot state as expected
        # for a brand new container.
        try:
            self.log.info(
                "==(0)Take a snapshot of the newly created container.")
            snapshot = DaosSnapshot(self.context)
            snapshot.create(self.container.coh)
            self.display_snapshot(snapshot)
        except Exception as error:
            # format the message ourselves: self.fail() takes a single
            # message argument, not printf-style args
            self.fail("##(0)Error on a snapshot on a new container "
                      "{}".format(str(error)))

        #(1)Create an object, write some data into it, and take a snapshot
        obj_cls = self.params.get("obj_class", '/run/object_class/*')
        akey = self.params.get("akey", '/run/snapshot/*', default="akey")
        dkey = self.params.get("dkey", '/run/snapshot/*', default="dkey")
        data_size = self.params.get("test_datasize", '/run/snapshot/*',
                                    default=150)
        thedata = "--->>>Happy Daos Snapshot-Create Negative Testing " + \
                  "<<<---" + self._rand_str(random.randint(1, data_size))
        try:
            obj = self.container.write_an_obj(thedata, len(thedata) + 1,
                                              dkey, akey, obj_cls=obj_cls)
        except DaosApiError as error:
            self.fail("##(1)Test failed during the initial object write: "
                      "{}".format(str(error)))
        obj.close()
        ##Take a snapshot of the container
        snapshot = self.take_snapshot(self.container)
        self.log.info("==(1)snapshot.epoch= %s", snapshot.epoch)

        #(2)Verify the snapshot is working properly.
        try:
            obj.open()
            snap_handle = snapshot.open(self.container.coh, snapshot.epoch)
            thedata2 = self.container.read_an_obj(len(thedata) + 1, dkey,
                                                  akey, obj,
                                                  txn=snap_handle.value)
        except Exception as error:
            self.fail("##(2)Error when retrieving the snapshot data: "
                      "{}".format(str(error)))
        self.log.info("==(2)snapshot_list[ind]=%s", snapshot)
        self.log.info("==snapshot.epoch= %s", snapshot.epoch)
        self.log.info("==written thedata=%s", thedata)
        self.log.info("==thedata2.value= %s", thedata2.value)
        if thedata2.value != thedata:
            raise Exception("##(2)The data in the snapshot is not the "
                            "same as the original data")
        self.log.info("==Snapshot data matches the data originally "
                      "written.")

        #(3)Test snapshot with an invalid container handle
        self.log.info("==(3)Snapshot with an invalid container handle.")
        if self.invalid_snapshot_test(self.container):
            self.log.info(
                "==>Negative test 1, expecting failed on taking "
                "snapshot with an invalid container.coh: %s", self.container)
        else:
            self.fail(
                "##(3)Negative test 1 passing, expecting failed on"
                " taking snapshot with an invalid container.coh: "
                "{}".format(self.container))

        #(4)Test snapshot with a NULL container handle
        self.log.info("==(4)Snapshot with a NULL container handle.")
        if self.invalid_snapshot_test(None):
            self.log.info("==>Negative test 2, expecting failed on taking "
                          "snapshot on a NULL container.coh.")
        else:
            self.fail("##(4)Negative test 2 passing, expecting failed on "
                      "taking snapshot with a NULL container.coh.")

        #(5)DAOS-1392 destroy snapshot with an invalid handle
        self.log.info(
            "==(6)DAOS-1392 destroy snapshot with an invalid handle.")
        try:
            snapshot.destroy(None, snapshot.epoch)
            self.fail("##(6)Negative test destroy snapshot with an "
                      "invalid coh handle, expected fail, shown Passing##")
        except Exception as error:
            self.log.info(
                "==>Negative test, destroy snapshot with an invalid handle.")
            self.log.info(" Expected Error: %s", str(error))
            expected_error = "RC: -1002"
            if expected_error not in str(error):
                self.fail("##(6.1)Expecting error RC: -1002 did not show.")

        #(6)DAOS-1388 Verify snap_list bad parameter behavior
        self.log.info(
            "==(7)DAOS-1388 Verify snap_list bad parameter behavior.")
        try:
            snapshot.list(None, 0)
            self.fail("##(7)Negative test snapshot list with an "
                      "invalid coh and epoch, expected fail, shown Passing##")
        except Exception as error:
            self.log.info(
                "==>Negative test, snapshot list with an invalid coh.")
            self.log.info(" Expected Error: %s", str(error))
            expected_error = "RC: -1002"
            if expected_error not in str(error):
                self.fail("##(7.1)Expecting error RC: -1002 did not show.")

    def display_snapshot_test_data(self, test_data, ss_index):
        """Display the snapshot test data.

        Args:
            test_data: list of snapshot testdata dictionary keys:
                coh: container handle
                snapshot: snapshot handle
                tst_obj: test object
                tst_data: test data
            ss_index: snapshot-list index to be displayed (1-based).
        """
        # guard is len < ss_index (not ss_index - 1): entry ss_index lives
        # at list index ss_index - 1, so an index equal to len + 1 would
        # otherwise raise IndexError below
        if len(test_data) < ss_index:
            self.log.info("##Under to display test_data info, "
                          "index out of range.")
        else:
            ind = ss_index - 1
            self.log.info(" =Snapshot number : %s", ss_index)
            self.log.info(" ==container_coh =%s", test_data[ind]["coh"])
            self.log.info(" ==snapshot =%s", test_data[ind]["snapshot"])
            self.log.info(" ==snapshot.epoch =%s",
                          test_data[ind]["snapshot"].epoch)
            self.log.info(" ==data obj =%s", test_data[ind]["tst_obj"])
            self.log.info(" ==snapshot tst_data_size= %s",
                          len(test_data[ind]["tst_data"]) + 1)
            self.log.info(" ==original tst_data =%s",
                          test_data[ind]["tst_data"])

    def test_snapshots(self):
        # pylint: disable=no-member,too-many-locals
        """
        Test ID: DAOS-1386 Test container SnapShot information
                 DAOS-1371 Test list snapshots
                 DAOS-1395 Test snapshot destroy
                 DAOS-1402 Test creating multiple snapshots
        Test Description:
                (1)Create an object, write random data into it, and take
                   a snapshot.
                (2)Make changes to the data object. The write_an_obj function
                   does a commit when the update is complete.
                (3)Verify the data in the snapshot is the original data.
                   Get a handle for the snapshot and read the object at dkey,
                   akey. Compare it to the originally written data.
                (4)List the snapshot and make sure it reflects the original
                   epoch.
                ==>Repeat step(1) to step(4) for multiple snapshot tests.
                (5)Verify the snapshots data.
                (6)Destroy the snapshot individually.
                (7)Check if still able to Open the destroyed snapshot and
                   Verify the snapshot removed from the snapshot list.
                (8)Destroy the container snapshot.
        Use Cases: Require 1 client and 1 server to run snapshot test.
                   1 pool and 1 container is used, num_of_snapshot defined
                   in the snapshot.yaml will be performed and verified.
        :avocado: tags=all,small,smoke,snap,snapshots,full_regression
        """
        test_data = []
        ss_number = 0
        obj_cls = self.params.get("obj_class", '/run/object_class/*')
        akey = self.params.get("akey", '/run/snapshot/*', default="akey")
        dkey = self.params.get("dkey", '/run/snapshot/*', default="dkey")
        data_size = self.params.get("test_datasize", '/run/snapshot/*',
                                    default=150)
        snapshot_loop = self.params.get("num_of_snapshot", '/run/snapshot/*',
                                        default=3)
        #
        #Test loop for creat, modify and snapshot object in the DAOS
        # container.
        #
        while ss_number < snapshot_loop:
            #(1)Create an object, write some data into it, and take a
            #   snapshot
            ss_number += 1
            thedata = "--->>>Happy Daos Snapshot Testing " + \
                      str(ss_number) + \
                      "<<<---" + self._rand_str(random.randint(1, data_size))
            datasize = len(thedata) + 1
            try:
                obj = self.container.write_an_obj(thedata, datasize,
                                                  dkey, akey,
                                                  obj_cls=obj_cls)
                obj.close()
            except DaosApiError as error:
                self.fail("##(1)Test failed during the initial object "
                          "write: {}".format(str(error)))
            #Take a snapshot of the container
            snapshot = DaosSnapshot(self.context)
            snapshot.create(self.container.coh)
            self.log.info("==Wrote an object and created a snapshot")

            #Display snapshot
            self.log.info("=(1.%s)snapshot test loop: %s", ss_number,
                          ss_number)
            self.log.info(" ==snapshot.epoch= %s", snapshot.epoch)
            self.display_snapshot(snapshot)

            #Save snapshot test data
            test_data.append({
                "coh": self.container.coh,
                "tst_obj": obj,
                "snapshot": snapshot,
                "tst_data": thedata})

            #(2)Make changes to the data object. The write_an_obj function
            #   does a commit when the update is complete
            num_transactions = more_transactions = 200
            self.log.info("=(2.%s)Committing %d additional transactions to "
                          "the same KV.", ss_number, more_transactions)
            while more_transactions:
                size = random.randint(1, 250) + 1
                new_data = self._rand_str(size)
                try:
                    new_obj = self.container.write_an_obj(
                        new_data, size, dkey, akey, obj_cls=obj_cls)
                    new_obj.close()
                except Exception as error:
                    self.fail("##(2)Test failed during the write of "
                              "multi-objects: {}".format(str(error)))
                more_transactions -= 1

            #(3)Verify the data in the snapshot is the original data.
            #   Get a handle for the snapshot and read the object at
            #   dkey, akey. Compare it to the originally written data.
            self.log.info("=(3.%s)snapshot test loop: %s", ss_number,
                          ss_number)
            try:
                obj.open()
                snap_handle = snapshot.open(self.container.coh,
                                            snapshot.epoch)
                thedata3 = self.container.read_an_obj(
                    datasize, dkey, akey, obj, txn=snap_handle.value)
                obj.close()
            except Exception as error:
                self.fail("##(3.1)Error when retrieving the snapshot data: "
                          "{}".format(str(error)))
            self.display_snapshot_test_data(test_data, ss_number)
            self.log.info(" ==thedata3.value= %s", thedata3.value)
            if thedata3.value != thedata:
                raise Exception("##(3.2)The data in the snapshot is not the "
                                "same as the original data")
            self.log.info(" ==The snapshot data matches the data originally"
                          " written.")

            #(4)List the snapshot and make sure it reflects the original
            #   epoch
            try:
                ss_list = snapshot.list(self.container.coh, snapshot.epoch)
                self.log.info("=(4.%s)snapshot.list(self.container.coh)= %s",
                              ss_number, ss_list)
                self.log.info(" ==snapshot.epoch= %s", snapshot.epoch)
            except Exception as error:
                self.fail("##(4)Test was unable to list the snapshot: "
                          "{}".format(str(error)))
            self.log.info(" ==After %s additional commits the snapshot is "
                          "still available", num_transactions)

        #(5)Verify the snapshots data
        for ind, _ in enumerate(test_data):
            ss_number = ind + 1
            self.log.info("=(5.%s)Verify the snapshot number %s:",
                          ss_number, ss_number)
            self.display_snapshot_test_data(test_data, ss_number)
            coh = test_data[ind]["coh"]
            current_ss = test_data[ind]["snapshot"]
            obj = test_data[ind]["tst_obj"]
            tst_data = test_data[ind]["tst_data"]
            datasize = len(tst_data) + 1
            try:
                obj.open()
                snap_handle5 = snapshot.open(coh, current_ss.epoch)
                thedata5 = self.container.read_an_obj(
                    datasize, dkey, akey, obj, txn=snap_handle5.value)
                obj.close()
            except Exception as error:
                self.fail("##(5.1)Error when retrieving the snapshot data: "
                          "{}".format(str(error)))
            self.log.info(" ==snapshot tst_data =%s", thedata5.value)
            if thedata5.value != tst_data:
                # format the message ourselves: Exception() does not
                # interpolate printf-style arguments
                raise Exception(
                    "##(5.2)Snapshot #{}, test data Mis-matches "
                    "the original data written.".format(ss_number))
            self.log.info(" snapshot test number %s, test data matches"
                          " the original data written.", ss_number)

            #(6)Destroy the individual snapshot
            self.log.info("=(6.%s)Destroy the snapshot epoch: %s",
                          ss_number, snapshot.epoch)
            try:
                snapshot.destroy(coh, snapshot.epoch)
                self.log.info(" ==snapshot.epoch %s successfully destroyed",
                              snapshot.epoch)
            except Exception as error:
                self.fail("##(6)Error on snapshot.destroy: {}".format(
                    str(error)))

        #(7)Check if still able to Open the destroyed snapshot and
        #   Verify the snapshot removed from the snapshot list
        try:
            obj.open()
            snap_handle7 = snapshot.open(coh, snapshot.epoch)
            thedata7 = self.container.read_an_obj(datasize, dkey, akey,
                                                  obj,
                                                  txn=snap_handle7.value)
            obj.close()
        except Exception as error:
            self.fail("##(7)Error when retrieving the snapshot data: "
                      "{}".format(str(error)))
        self.log.info("=(7)=>thedata_after_snapshot.destroyed.value= %s",
                      thedata7.value)
        self.log.info(" ==>snapshot.epoch= %s", snapshot.epoch)

        #Still able to open the snapshot and read data after destroyed.
        try:
            ss_list = snapshot.list(coh, snapshot.epoch)
            self.log.info(" -->snapshot.list(coh, snapshot.epoch)= %s",
                          ss_list)
        except Exception as error:
            self.fail("##(7)Error when calling the snapshot list: "
                      "{}".format(str(error)))

        #(8)Destroy the snapshot on the container
        try:
            snapshot.destroy(coh)
            self.log.info("=(8)Container snapshot destroyed successfully.")
        except Exception as error:
            self.fail("##(8)Error on snapshot.destroy. {}".format(
                str(error)))
        self.log.info("===DAOS container Multiple snapshots test passed.")
def test_array_obj(self):
    """
    Test ID: DAOS-961

    Test Description: Writes an array to an object and then reads it back
    and verifies it.

    :avocado: tags=all,smoke,pr,object,tiny,basicobject
    """
    try:
        # pool-create parameters from the test yaml
        createmode = self.params.get("mode",
                                     '/run/pool_params/createmode/')
        createsetid = self.params.get("setname",
                                      '/run/pool_params/createset/')
        createsize = self.params.get("size",
                                     '/run/pool_params/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        # create the underlying daos storage and connect to it
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid, createsize,
                    createsetid, None)
        self.plog.info("Pool %s created.", pool.get_uuid_str())
        pool.connect(1 << 1)

        # create and open a container on the new pool
        container = DaosContainer(self.context)
        container.create(pool.handle)
        self.plog.info("Container %s created.", container.get_uuid_str())
        container.open()

        # the UUID reported by query must match the one from create
        container.query()
        if container.get_uuid_str() != c_uuid_to_str(
                container.info.ci_uuid):
            self.fail("Container UUID did not match the one in info\n")

        # write an array of strings under a single dkey/akey
        thedata = ["data string one", "data string two", "data string tre"]
        dkey = "this is the dkey"
        akey = "this is the akey"
        self.plog.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
        oid, epoch = container.write_an_array_value(thedata, dkey, akey,
                                                    obj_cls=3)

        # read the array back and verify entries 0 and 2 against what was
        # written (comparison drops the final character, as before)
        length = len(thedata[0])
        thedata2 = container.read_an_array(len(thedata), length + 1,
                                           dkey, akey, oid, epoch)
        for idx in (0, 2):
            if thedata[idx][0:length - 1] != thedata2[idx][0:length - 1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[idx])
                self.plog.error("Read: >%s<", thedata2[idx])
                self.fail("Write data, read it back, didn't match\n")

        container.close()

        # wait a few seconds and then destroy
        time.sleep(5)
        container.destroy()

        # cleanup the pool
        pool.disconnect()
        pool.destroy(1)
        self.plog.info("Test Complete")

    except DaosApiError as excep:
        self.plog.error("Test Failed, exception was thrown.")
        print(excep)
        print(traceback.format_exc())
        self.fail("Test was expected to pass but it failed.\n")
class ContainerAsync(TestWithServers):
    """
    Tests asynchronous container operations (create, destroy, open, close,
    query), including expected failures such as a destroyed pool handle or
    a non-existent container.
    :avocado: recursive
    """

    def __init__(self, *args, **kwargs):
        super(ContainerAsync, self).__init__(*args, **kwargs)
        # container1 holds the valid container; container2 is used for the
        # expected-failure (negative) half of each test
        self.container1 = None
        self.container2 = None
        self.pool = None

    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,createasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            GLOB_SIGNAL = threading.Event()
            self.container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.pool.disconnect()
            self.pool.destroy(1)
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after unsuccessful container create: ", GLOB_RC)

            # cleanup the pool and container
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_destroyasync(self):
        """
        Test container destroy for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,contdestroyasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            GLOB_SIGNAL = threading.Event()
            self.container1.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            # message fixed: this reports the destroy, not the create
            print("RC after successful container destroy: ", GLOB_RC)

            # Try to destroy container again, this should fail, as
            # non-existent. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != -1003:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_openasync(self):
        """
        Test container open for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,openasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            GLOB_SIGNAL = threading.Event()
            self.container1.open(poh, cuuid, 2, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            # message fixed: this reports the open, not the create
            print("RC after successful container open: ", GLOB_RC)

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.open(None, None, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            # message fixed: this reports the failed open, not a destroy
            print("RC after container open failed:", GLOB_RC)

            # cleanup the container
            self.container1.close()
            self.container1.destroy()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_closeasync(self):
        """
        Test container close for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,closeasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            self.container1.open(poh, cuuid, 2)

            GLOB_SIGNAL = threading.Event()
            self.container1.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            # message fixed: this reports the close, not the create
            print("RC after successful container close: ", GLOB_RC)

            # Try to close container2, this should fail, as it was
            # never opened. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            # message fixed: this reports the failed close, not a destroy
            print("RC after container close failed:", GLOB_RC)

            # cleanup the container
            self.container1.destroy()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_queryasync(self):
        """
        Test container query for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,queryasync
        """
        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",
                                     '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            dummy_str_cuuid = self.container1.get_uuid_str()

            # Open container
            self.container1.open(poh, None, 2, None)

            GLOB_SIGNAL = threading.Event()
            self.container1.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            # message fixed: this reports the query, not the create
            print("RC after successful container query: ", GLOB_RC)

            # Close opened container
            self.container1.close()

            # Try to query container2, this should fail, as it was
            # never opened. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            # message fixed: this reports the failed query, not a destroy
            print("RC after container query failed:", GLOB_RC)

            # cleanup the container
            self.container1.destroy()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())