def test_exclude(self):
    """
    Pass bad parameters to pool exclude

    :avocado: tags=all,pool,full_regression,tiny,badexclude
    """
    # parameters used in pool create
    createmode = self.params.get("mode", '/run/pool/createmode/')
    createsetid = self.params.get("setname", '/run/pool/createset/')
    createsize = self.params.get("size", '/run/pool/createsize/')
    createuid = os.geteuid()
    creategid = os.getegid()

    # Accumulate a list of pass/fail indicators representing what is
    # expected for each parameter then "and" them to determine the
    # expected result of the test
    expected_for_param = []

    tgtlist = self.params.get("ranklist", '/run/testparams/tgtlist/*/')
    targets = []

    if tgtlist[0] == "NULLPTR":
        targets = None
        self.cancel("skipping null pointer test until DAOS-1929 is fixed")
    else:
        targets.append(tgtlist[0])
    expected_for_param.append(tgtlist[1])

    svclist = self.params.get("ranklist", '/run/testparams/svrlist/*/')
    svc = svclist[0]
    expected_for_param.append(svclist[1])

    setlist = self.params.get("setname",
                              '/run/testparams/connectsetnames/*/')
    connectset = setlist[0]
    expected_for_param.append(setlist[1])

    uuidlist = self.params.get("uuid", '/run/testparams/UUID/*/')
    excludeuuid = uuidlist[0]
    expected_for_param.append(uuidlist[1])

    # if any parameter is FAIL then the test should FAIL; in this test
    # virtually every case should FAIL since we are testing bad parameters
    expected_result = 'PASS'
    for result in expected_for_param:
        if result == 'FAIL':
            expected_result = 'FAIL'
            break

    saved_svc = None
    saved_grp = None
    saved_uuid = None
    pool = None
    try:
        # setup the DAOS python API
        with open('../../../.build_vars.json') as build_file:
            data = json.load(build_file)
        context = DaosContext(data['PREFIX'] + '/lib64/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(context)
        pool.create(createmode, createuid, creategid,
                    createsize, createsetid, None)

        # trash the pool service rank list
        if not svc == 'VALID':
            self.cancel("skipping this test until DAOS-1931 is fixed")
            saved_svc = RankList(pool.svc.rl_ranks, pool.svc.rl_nr)
            pool.svc = None

        # trash the pool group value
        if connectset == 'NULLPTR':
            saved_grp = pool.group
            pool.group = None

        # trash the UUID value in various ways
        if excludeuuid == 'NULLPTR':
            self.cancel("skipping this test until DAOS-1932 is fixed")
            ctypes.memmove(saved_uuid, pool.uuid, 16)
            pool.uuid = 0
        if excludeuuid == 'CRAP':
            self.cancel("skipping this test until DAOS-1932 is fixed")
            ctypes.memmove(saved_uuid, pool.uuid, 16)
            pool.uuid[4] = 244

        pool.exclude(targets)

        if expected_result in ['FAIL']:
            self.fail("Test was expected to fail but it passed.\n")

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        if expected_result in ['PASS']:
            self.fail("Test was expected to pass but it failed.\n")

    finally:
        if pool is not None:
            if saved_svc is not None:
                pool.svc = saved_svc
            if saved_grp is not None:
                pool.group = saved_grp
            if saved_uuid is not None:
                ctypes.memmove(pool.uuid, saved_uuid, 16)
            pool.destroy(1)
def test_container_delete(self):
    """
    Test basic container delete

    :avocado: tags=all,container,tiny,smoke,pr,contdelete
    """
    expected_for_param = []

    uuidlist = self.params.get("uuid", '/run/createtests/ContainerUUIDS/*/')
    cont_uuid = uuidlist[0]
    expected_for_param.append(uuidlist[1])

    pohlist = self.params.get("poh", '/run/createtests/PoolHandles/*/')
    poh = pohlist[0]
    expected_for_param.append(pohlist[1])

    openlist = self.params.get("opened",
                               "/run/createtests/ConnectionOpened/*/")
    opened = openlist[0]
    expected_for_param.append(openlist[1])

    forcelist = self.params.get("force", "/run/createtests/ForceDestroy/*/")
    force = forcelist[0]
    expected_for_param.append(forcelist[1])

    if force >= 1:
        self.cancel("Force >= 1 blocked by issue described in "
                    "https://jira.hpdd.intel.com/browse/DAOS-689")

    if force == 0:
        self.cancel("Force = 0 blocked by "
                    "https://jira.hpdd.intel.com/browse/DAOS-1935")

    expected_result = 'PASS'
    for result in expected_for_param:
        if result == 'FAIL':
            expected_result = 'FAIL'
            break

    try:
        # initialize a python pool object then create the underlying
        # daos storage
        self.pool = DaosPool(self.context)
        self.pool.create(self.createmode, self.createuid, self.creategid,
                         self.createsize, self.createsetid, None)

        # need a connection to create container
        self.pool.connect(1 << 1)
        self.container = DaosContainer(self.context)

        # create should always work (testing destroy)
        if not cont_uuid == 'INVALID':
            cont_uuid = uuid.UUID(uuidlist[0])
            self.container.create(self.pool.handle, cont_uuid)
        else:
            self.container.create(self.pool.handle)

        # open the container if required
        if opened:
            self.container.open(self.pool.handle)

        # wait a few seconds and then attempt to destroy the container
        time.sleep(5)
        if poh == 'VALID':
            poh = self.pool.handle

        # if the container is INVALID, overwrite with a non-existent UUID
        if cont_uuid == 'INVALID':
            cont_uuid = uuid.uuid4()

        self.container.destroy(force=force, poh=poh, con_uuid=cont_uuid)
        self.container = None

        if expected_result in ['FAIL']:
            self.fail("Test was expected to fail but it passed.\n")

    except DaosApiError as excep:
        self.d_log.error(excep)
        self.d_log.error(traceback.format_exc())
        if expected_result == 'PASS':
            self.fail("Test was expected to pass but it failed.\n")

    finally:
        # clean up the pool
        if self.pool is not None:
            self.pool.destroy(1)
            self.pool = None
def test_create(self):
    """Test ID: DAOS-???.

    Test Description: Pass bad parameters to pool create.

    :avocado: tags=all,pool,full_regression,tiny,badcreate
    """
    # Accumulate a list of pass/fail indicators representing what is
    # expected for each parameter then "and" them to determine the
    # expected result of the test
    pool = None
    expected_for_param = []

    modelist = self.params.get("mode", '/run/createtests/modes/*')
    mode = modelist[0]
    expected_for_param.append(modelist[1])

    uidlist = self.params.get("uid", '/run/createtests/uids/*')
    uid = uidlist[0]
    if uid == 'VALID':
        uid = os.geteuid()
    expected_for_param.append(uidlist[1])

    gidlist = self.params.get("gid", '/run/createtests/gids/*')
    gid = gidlist[0]
    if gid == 'VALID':
        gid = os.getegid()
    expected_for_param.append(gidlist[1])

    setidlist = self.params.get("setname", '/run/createtests/setnames/*')
    if setidlist[0] == 'NULLPTR':
        group = None
        self.cancel("skipping this test until DAOS-1991 is fixed")
    else:
        group = setidlist[0]
    expected_for_param.append(setidlist[1])

    targetlist = self.params.get("rankptr", '/run/createtests/target/*')
    if targetlist[0] == 'NULL':
        targetptr = None
    else:
        targetptr = [0]
    expected_for_param.append(targetlist[1])

    # not ready for this yet
    # devicelist = self.params.get("devptr", '/run/createtests/device/*')
    # if devicelist[0] == 'NULL':
    #     devptr = None
    # else:
    #     devptr = devicelist[0]
    # expected_for_param.append(devicelist[1])

    sizelist = self.params.get("size", '/run/createtests/psize/*')
    size = sizelist[0]
    expected_for_param.append(sizelist[1])

    # parameter not presently supported
    # svclist = self.params.get("rankptr", '/run/createtests/svc/*')
    # if svclist[0] == 'NULL':
    #     svc = None
    # else:
    #     svc = None
    # expected_for_param.append(devicelist[1])

    # if any parameter is FAIL then the test should FAIL; in this test
    # virtually every case should FAIL since we are testing bad parameters
    expected_result = 'PASS'
    for result in expected_for_param:
        if result == 'FAIL':
            expected_result = 'FAIL'
            break

    try:
        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(mode, uid, gid, size, group, targetptr)

        if expected_result in ['FAIL']:
            self.fail("Test was expected to fail but it passed.\n")

    except DaosApiError as excep:
        self.log.error(str(excep))
        self.log.error(traceback.format_exc())
        if expected_result == 'PASS':
            self.fail("Test was expected to pass but it failed.\n")

    finally:
        self.destroy_pools(pool)
class ContainerAsync(TestWithServers): """ Tests DAOS pool connect permissions (non existing pool handle, bad uuid) and close. :avocado: recursive """ def __init__(self, *args, **kwargs): super(ContainerAsync, self).__init__(*args, **kwargs) self.container1 = None self.container2 = None self.pool = None def test_createasync(self): """ Test container create for asynchronous mode. :avocado: tags=all,small,full_regression,container,createasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) GLOB_SIGNAL = threading.Event() self.container1.create(poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print("RC after successful container create: ", GLOB_RC) # Try to recreate container after destroying pool, # this should fail. Checking rc after failure. self.pool.destroy(1) GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.create(poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test") print("RC after unsuccessful container create: ", GLOB_RC) # cleanup the pool and container self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_destroyasync(self): """ Test container destroy for asynchronous mode. :avocado: tags=all,small,full_regression,container,contdestroyasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) GLOB_SIGNAL = threading.Event() self.container1.destroy(1, poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print("RC after successful container create: ", GLOB_RC) # Try to destroy container again, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.destroy(1, poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != -1003: self.fail("RC not as expected in async test") print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_openasync(self): """ Test container open for asynchronous mode. 
:avocado: tags=all,small,full_regression,container,openasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) str_cuuid = self.container1.get_uuid_str() cuuid = uuid.UUID(str_cuuid) GLOB_SIGNAL = threading.Event() self.container1.open(poh, cuuid, 2, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print("RC after successful container create: ", GLOB_RC) # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.open(None, None, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test") print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.container1.close() self.container1.destroy() self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_closeasync(self): """ Test container close for asynchronous mode. :avocado: tags=all,small,full_regression,container,closeasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) str_cuuid = self.container1.get_uuid_str() cuuid = uuid.UUID(str_cuuid) self.container1.open(poh, cuuid, 2) GLOB_SIGNAL = threading.Event() self.container1.close(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after successful container create: ", GLOB_RC) # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.close(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.container1.destroy() self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_queryasync(self): """ Test container query for asynchronous mode. 
:avocado: tags=all,small,full_regression,container,queryasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) dummy_str_cuuid = self.container1.get_uuid_str() # Open container self.container1.open(poh, None, 2, None) GLOB_SIGNAL = threading.Event() self.container1.query(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after successful container create: ", GLOB_RC) # Close opened container self.container1.close() # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.query(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.container1.destroy() self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc())
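# The asynchronous container tests above wait on a module-level callback and
# the GLOB_SIGNAL / GLOB_RC globals that are defined elsewhere in this test
# module.  A minimal sketch of that pattern is shown below; the attribute
# carrying the return code (event.event.ev_error) is an assumption about the
# DAOS event object and may differ from the real helper.

GLOB_SIGNAL = None
GLOB_RC = -99000000


def cb_func(event):
    """Record the async call's return code and wake the waiting test."""
    global GLOB_RC
    GLOB_RC = event.event.ev_error  # assumed location of the return code
    GLOB_SIGNAL.set()               # unblocks GLOB_SIGNAL.wait() in the test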
def test_basic_snapshot(self):
    """Test ID: DAOS-1370.

    Test Description:
        Create a pool, a container in the pool, an object in the container,
        and add one key:value to the object. Commit the transaction. Perform
        a snapshot create on the container. Create 500 additional
        transactions with a small change to the object in each and commit
        each after the object update is done. Verify the snapshot is still
        available and the contents remain in their original state.

    :avocado: tags=snap,basicsnap
    """
    # Set up the pool and container.
    try:
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/*')
        createuid = os.geteuid()
        creategid = os.getegid()

        # initialize a pool object then create the underlying
        # daos storage
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid,
                         createsize, createsetid, None)

        # need a connection to create container
        self.pool.connect(1 << 1)

        # create a container
        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)

        # now open it
        self.container.open()

    except DaosApiError as error:
        self.log.error(str(error))
        self.fail("Test failed before snapshot taken")

    try:
        # create an object and write some data into it
        obj_cls = self.params.get("obj_class", '/run/object_class/*')
        thedata = "Now is the winter of our discontent made glorious"
        datasize = len(thedata) + 1
        dkey = "dkey"
        akey = "akey"
        tx_handle = self.container.get_new_tx()
        obj = self.container.write_an_obj(thedata, datasize, dkey, akey,
                                          obj_cls=obj_cls, txn=tx_handle)
        self.container.commit_tx(tx_handle)
        obj.close()

        # Take a snapshot of the container
        self.snapshot = DaosSnapshot(self.context)
        self.snapshot.create(self.container.coh, tx_handle)
        self.log.info("Wrote an object and created a snapshot")

    except DaosApiError as error:
        self.fail(
            "Test failed during the initial object write.\n{0}".format(
                error))

    # Make 500 changes to the data object. The write_an_obj function does a
    # commit when the update is complete.
    try:
        self.log.info(
            "Committing 500 additional transactions to the same KV")
        more_transactions = 500
        while more_transactions:
            size = random.randint(1, 250) + 1
            new_data = get_random_string(size)
            new_obj = self.container.write_an_obj(new_data, size, dkey,
                                                  akey, obj_cls=obj_cls)
            new_obj.close()
            more_transactions -= 1
    except DaosApiError as error:
        self.fail(
            "Test failed during the write of 500 objects.\n{0}".format(
                error))

    # List the snapshot
    try:
        reported_epoch = self.snapshot.list(self.container.coh)
    except DaosApiError as error:
        self.fail(
            "Test was unable to list the snapshot\n{0}".format(error))

    # Make sure the snapshot reflects the original epoch
    if self.snapshot.epoch != reported_epoch:
        self.fail(
            "The snapshot epoch returned from snapshot list is not the "
            "same as the original epoch snapshotted.")

    self.log.info(
        "After 500 additional commits the snapshot is still available")

    # Make sure the data in the snapshot is the original data.
    # Get a handle for the snapshot and read the object at dkey, akey.
    try:
        obj.open()
        snap_handle = self.snapshot.open(self.container.coh)
        thedata2 = self.container.read_an_obj(datasize, dkey, akey, obj,
                                              txn=snap_handle.value)
    except DaosApiError as error:
        self.fail(
            "Error when retrieving the snapshot data.\n{0}".format(error))

    # Compare the snapshot to the originally written data.
    if thedata2.value != thedata:
        self.fail(
            "The data in the snapshot is not the same as the original data")
    self.log.info("The snapshot data matches the data originally written.")

    # Now destroy the snapshot
    try:
        self.snapshot.destroy(self.container.coh)
        self.log.info("Snapshot successfully destroyed")
    except DaosApiError as error:
        self.fail(str(error))
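# The 500-transaction loop above calls get_random_string, a helper that is
# imported from the test utilities rather than defined here.  A stand-in with
# the same observable behavior might look like the following; the character
# set is an assumption, since only the requested length matters to the test.

import random
import string


def get_random_string(length):
    """Return a random printable string of the requested length."""
    return "".join(random.choice(string.ascii_letters + string.digits)
                   for _ in range(length))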
def create(self):
    """Create a pool with dmg.

    To use dmg, the test needs to set dmg_command through the constructor.
    For example,

        self.pool = TestPool(self.context, DmgCommand(self.bin))

    To use the --nsvc option, set svcn.value; otherwise 1 is used. To use
    --group, set groupname.value. To use --user, set username.value. To add
    other options, set them directly on self.dmg.action_command. Refer to
    the dmg_utils.py pool_create method for more details.

    To test the negative case on create, the test needs to catch
    CommandFailure, which requires more than a one-line modification to the
    test for this purpose alone. Currently, pool_svc is the only test that
    needs this change.
    """
    self.destroy()
    if self.target_list.value is not None:
        self.log.info(
            "Creating a pool on targets %s", self.target_list.value)
    else:
        self.log.info("Creating a pool")

    self.pool = DaosPool(self.context)
    kwargs = {
        "uid": self.uid,
        "gid": self.gid,
        "scm_size": self.scm_size.value,
        "group": self.name.value}
    for key in ("target_list", "svcn", "nvme_size"):
        value = getattr(self, key).value
        if value is not None:
            kwargs[key] = value

    if self.control_method.value == self.USE_API:
        raise CommandFailure(
            "Error: control method {} not supported for create()".format(
                self.control_method.value))

    elif self.control_method.value == self.USE_DMG and self.dmg:
        # Create a pool with the dmg command and store its CmdResult
        self._log_method("dmg.pool_create", kwargs)
        data = self.dmg.pool_create(**kwargs)
        if self.dmg.result.exit_status == 0:
            # Populate the empty DaosPool object with the properties of the
            # pool created with dmg pool create.
            if self.name.value:
                self.pool.group = ctypes.create_string_buffer(
                    self.name.value)

            # Convert the string of service replicas from the dmg command
            # output into a ctype array for the DaosPool object using the
            # same technique used in DaosPool.create().
            service_replicas = [
                int(value) for value in data["svc"].split(",")]
            rank_t = ctypes.c_uint * len(service_replicas)
            rank = rank_t(*list([svc for svc in service_replicas]))
            rl_ranks = ctypes.POINTER(ctypes.c_uint)(rank)
            self.pool.svc = daos_cref.RankList(
                rl_ranks, len(service_replicas))

            # Set the UUID and mark the DaosPool object as attached
            self.pool.set_uuid_str(data["uuid"])
            self.pool.attached = 1

    elif self.control_method.value == self.USE_DMG:
        self.log.error("Error: Undefined dmg command")

    else:
        self.log.error(
            "Error: Undefined control_method: %s",
            self.control_method.value)

    # Set the TestPool attributes for the created pool
    if self.pool.attached:
        self.svc_ranks = [
            int(self.pool.svc.rl_ranks[index])
            for index in range(self.pool.svc.rl_nr)]
        self.uuid = self.pool.get_uuid_str()
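# A minimal usage sketch of the dmg-backed create() described in the docstring
# above.  The surrounding avocado test fixture, self.bin, and get_params() are
# assumptions about the test environment, not part of this method.

self.pool = TestPool(self.context, DmgCommand(self.bin))
self.pool.get_params(self)       # pick up scm_size, svcn, etc. from the yaml
try:
    self.pool.create()           # issues "dmg pool create" when USE_DMG is set
except CommandFailure as error:  # negative create cases surface here
    self.fail("Unexpected pool create failure: {}".format(error))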
class PunchTest(TestWithServers): """ Simple test to verify the 3 different punch calls. :avocado: recursive """ def setUp(self): try: super(PunchTest, self).setUp() # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/') createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') createuid = os.geteuid() creategid = os.getegid() # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) self.pool.connect(1 << 1) # create a container self.container = DaosContainer(self.context) self.container.create(self.pool.handle) # now open it self.container.open() except DaosApiError as excpn: print(excpn) print(traceback.format_exc()) self.fail("Test failed during setup.\n") def test_dkey_punch(self): """ The most basic test of the dkey punch function. :avocado: tags=all,object,pr,small,dkeypunch """ try: # create an object and write some data into it thedata = "a string that I want to stuff into an object" dkey = "this is the dkey" akey = "this is the akey" tx_handle = self.container.get_new_tx() obj = self.container.write_an_obj(thedata, len(thedata)+1, dkey, akey, obj_cls=1, txn=tx_handle) self.container.commit_tx(tx_handle) # read the data back and make sure its correct thedata2 = self.container.read_an_obj(len(thedata)+1, dkey, akey, obj, txn=tx_handle) if thedata != thedata2.value: print("data I wrote:" + thedata) print("data I read back" + thedata2.value) self.fail("Wrote data, read it back, didn't match\n") # now punch this data, should fail, can't punch committed data obj.punch_dkeys(tx_handle, [dkey]) # expecting punch of commit data above to fail self.fail("Punch should have failed but it didn't.\n") # expecting an exception so do nothing except DaosApiError as dummy_e: pass try: # now punch this data obj.punch_dkeys(0, [dkey]) # this one should work so error if exception occurs except DaosApiError as dummy_e: self.fail("Punch should have worked.\n") # there are a bunch of other cases to test here, # --test punching the same updating and punching the same data in # the same tx, should fail # --test non updated data in an open tx, should work def test_akey_punch(self): """ The most basic test of the akey punch function. 
:avocado: tags=all,object,pr,small,akeypunch """ try: # create an object and write some data into it dkey = "this is the dkey" data1 = [("this is akey 1", "this is data value 1"), ("this is akey 2", "this is data value 2"), ("this is akey 3", "this is data value 3")] tx_handle = self.container.get_new_tx() obj = self.container.write_multi_akeys(dkey, data1, obj_cls=1, txn=tx_handle) self.container.commit_tx(tx_handle) # read back the 1st epoch's data and check 1 value just to make sure # everything is on the up and up readbuf = [(data1[0][0], len(data1[0][1]) + 1), (data1[1][0], len(data1[1][1]) + 1), (data1[2][0], len(data1[2][1]) + 1)] retrieved_data = self.container.read_multi_akeys(dkey, readbuf, obj, txn=tx_handle) if retrieved_data[data1[1][0]] != data1[1][1]: print("middle akey: {}".format(retrieved_data[data1[1][0]])) self.fail("data retrieval failure") # now punch one akey from this data obj.punch_akeys(tx_handle, dkey, [data1[1][0]]) # expecting punch of commit data above to fail self.fail("Punch should have failed but it didn't.\n") # expecting an exception so do nothing except DaosApiError as excep: print(excep) try: # now punch the object without a tx obj.punch_akeys(0, dkey, [data1[1][0]]) # expecting it to work this time so error except DaosApiError as excep: self.fail("Punch should have worked: {}\n".format(excep)) def test_obj_punch(self): """ The most basic test of the object punch function. Really similar to above except the whole object is deleted. :avocado: tags=all,object,pr,small,objpunch """ try: # create an object and write some data into it thedata = "a string that I want to stuff into an object" dkey = "this is the dkey" akey = "this is the akey" tx_handle = self.container.get_new_tx() obj = self.container.write_an_obj(thedata, len(thedata)+1, dkey, akey, obj_cls=1, txn=tx_handle) self.container.commit_tx(tx_handle) # read the data back and make sure its correct thedata2 = self.container.read_an_obj(len(thedata)+1, dkey, akey, obj, txn=tx_handle) if thedata != thedata2.value: print("data I wrote:" + thedata) print("data I read back" + thedata2.value) self.fail("Wrote data, read it back, didn't match\n") # now punch the object, commited so not expecting it to work obj.punch(tx_handle) # expecting punch of commit data above to fail self.fail("Punch should have failed but it didn't.\n") # expecting an exception so do nothing except DaosApiError as excep: print(excep) try: obj.punch(0) # expecting it to work without a tx except DaosApiError as excep: print(excep) self.fail("Punch should have worked.\n")
class TestPool(TestDaosApiBase): """A class for functional testing of DaosPools objects.""" def __init__(self, context, log=None, cb_handler=None): # pylint: disable=unused-argument """Initialize a TestPool object. Note: 'log' is now a defunct argument and will be removed in the future Args: context (DaosContext): [description] log (logging): logging object used to report the pool status cb_handler (CallbackHandler, optional): callback object to use with the API methods. Defaults to None. """ super(TestPool, self).__init__("/run/pool/*", cb_handler) self.context = context self.uid = os.geteuid() self.gid = os.getegid() self.mode = BasicParameter(None) self.name = BasicParameter(None) # server group name self.svcn = BasicParameter(None) self.target_list = BasicParameter(None) self.scm_size = BasicParameter(None) self.nvme_size = BasicParameter(None) self.pool = None self.uuid = None self.info = None self.svc_ranks = None self.connected = False @fail_on(DaosApiError) def create(self): """Create a pool. Destroys an existing pool if defined and assigns self.pool and self.uuid. """ self.destroy() if self.target_list.value is not None: self.log.info( "Creating a pool on targets %s", self.target_list.value) else: self.log.info("Creating a pool") self.pool = DaosPool(self.context) kwargs = { "mode": self.mode.value, "uid": self.uid, "gid": self.gid, "scm_size": self.scm_size.value, "group": self.name.value} for key in ("target_list", "svcn", "nvme_size"): value = getattr(self, key).value if value: kwargs[key] = value self._call_method(self.pool.create, kwargs) self.uuid = self.pool.get_uuid_str() self.svc_ranks = [ int(self.pool.svc.rl_ranks[index]) for index in range(self.pool.svc.rl_nr)] self.log.info( " Pool created with uuid %s and svc ranks %s", self.uuid, self.svc_ranks) @fail_on(DaosApiError) def connect(self, permission=1): """Connect to the pool. Args: permission (int, optional): connect permission. Defaults to 1. Returns: bool: True if the pool has been connected; False if the pool was already connected or the pool is not defined. """ if self.pool and not self.connected: kwargs = {"flags": 1 << permission} self.log.info( "Connecting to pool %s with permission %s (flag: %s)", self.uuid, permission, kwargs["flags"]) self._call_method(self.pool.connect, kwargs) self.connected = True return True return False @fail_on(DaosApiError) def disconnect(self): """Disconnect from connected pool. Returns: bool: True if the pool has been disconnected; False if the pool was already disconnected or the pool is not defined. """ if self.pool and self.connected: self.log.info("Disonnecting from pool %s", self.uuid) self._call_method(self.pool.disconnect, {}) self.connected = False return True return False @fail_on(DaosApiError) def destroy(self, force=1): """Destroy the pool. Args: force (int, optional): force flag. Defaults to 1. Returns: bool: True if the pool has been destroyed; False if the pool is not defined. """ if self.pool: self.disconnect() self.log.info("Destroying pool %s", self.uuid) if self.pool.attached: self._call_method(self.pool.destroy, {"force": force}) self.pool = None self.uuid = None self.info = None self.svc_ranks = None return True return False @fail_on(DaosApiError) def get_info(self): """Query the pool for information. Sets the self.info attribute. 
""" if self.pool: self.connect() self._call_method(self.pool.pool_query, {}) self.info = self.pool.pool_info def check_pool_info(self, pi_uuid=None, pi_ntargets=None, pi_nnodes=None, pi_ndisabled=None, pi_map_ver=None, pi_leader=None, pi_bits=None): # pylint: disable=unused-argument """Check the pool info attributes. Note: Arguments may also be provided as a string with a number preceeded by '<', '<=', '>', or '>=' for other comparisions besides the default '=='. Args: pi_uuid (str, optional): pool uuid. Defaults to None. pi_ntargets (int, optional): number of targets. Defaults to None. pi_nnodes (int, optional): number of nodes. Defaults to None. pi_ndisabled (int, optional): number of disabled. Defaults to None. pi_map_ver (int, optional): pool map version. Defaults to None. pi_leader (int, optional): pool leader. Defaults to None. pi_bits (int, optional): pool bits. Defaults to None. Note: Arguments may also be provided as a string with a number preceeded by '<', '<=', '>', or '>=' for other comparisions besides the default '=='. Returns: bool: True if at least one expected value is specified and all the specified values match; False otherwise """ self.get_info() checks = [ (key, c_uuid_to_str(getattr(self.info, key)) if key == "pi_uuid" else getattr(self.info, key), val) for key, val in locals().items() if key != "self" and val is not None] return self._check_info(checks) def check_pool_space(self, ps_free_min=None, ps_free_max=None, ps_free_mean=None, ps_ntargets=None, ps_padding=None): # pylint: disable=unused-argument """Check the pool info space attributes. Note: Arguments may also be provided as a string with a number preceeded by '<', '<=', '>', or '>=' for other comparisions besides the default '=='. Args: ps_free_min (list, optional): minimum free space per device. Defaults to None. ps_free_max (list, optional): maximum free space per device. Defaults to None. ps_free_mean (list, optional): mean free space per device. Defaults to None. ps_ntargets (int, optional): number of targets. Defaults to None. ps_padding (int, optional): space padding. Defaults to None. Note: Arguments may also be provided as a string with a number preceeded by '<', '<=', '>', or '>=' for other comparisions besides the default '=='. Returns: bool: True if at least one expected value is specified and all the specified values match; False otherwise """ self.get_info() checks = [] for key in ("ps_free_min", "ps_free_max", "ps_free_mean"): val = locals()[key] if isinstance(val, list): for index, item in val: checks.append(( "{}[{}]".format(key, index), getattr(self.info.pi_space, key)[index], item)) for key in ("ps_ntargets", "ps_padding"): val = locals()[key] if val is not None: checks.append(key, getattr(self.info.pi_space, key), val) return self._check_info(checks) def check_pool_daos_space(self, s_total=None, s_free=None): # pylint: disable=unused-argument """Check the pool info daos space attributes. Note: Arguments may also be provided as a string with a number preceeded by '<', '<=', '>', or '>=' for other comparisions besides the default '=='. Args: s_total (list, optional): total space per device. Defaults to None. s_free (list, optional): free space per device. Defaults to None. Note: Arguments may also be provided as a string with a number preceeded by '<', '<=', '>', or '>=' for other comparisions besides the default '=='. 
Returns: bool: True if at least one expected value is specified and all the specified values match; False otherwise """ self.get_info() checks = [ ("{}_{}".format(key, index), getattr(self.info.pi_space.ps_space, key)[index], item) for key, val in locals().items() if key != "self" and val is not None for index, item in enumerate(val)] return self._check_info(checks) def check_rebuild_status(self, rs_version=None, rs_seconds=None, rs_errno=None, rs_done=None, rs_padding32=None, rs_fail_rank=None, rs_toberb_obj_nr=None, rs_obj_nr=None, rs_rec_nr=None, rs_size=None): # pylint: disable=unused-argument # pylint: disable=too-many-arguments """Check the pool info rebuild attributes. Note: Arguments may also be provided as a string with a number preceeded by '<', '<=', '>', or '>=' for other comparisions besides the default '=='. Args: rs_version (int, optional): rebuild version. Defaults to None. rs_seconds (int, optional): rebuild seconds. Defaults to None. rs_errno (int, optional): rebuild error number. Defaults to None. rs_done (int, optional): rebuild done flag. Defaults to None. rs_padding32 (int, optional): padding. Defaults to None. rs_fail_rank (int, optional): rebuild fail target. Defaults to None. rs_toberb_obj_nr (int, optional): number of objects to be rebuilt. Defaults to None. rs_obj_nr (int, optional): number of rebuilt objects. Defaults to None. rs_rec_nr (int, optional): number of rebuilt records. Defaults to None. rs_size (int, optional): size of all rebuilt records. Note: Arguments may also be provided as a string with a number preceeded by '<', '<=', '>', or '>=' for other comparisions besides the default '=='. Returns: bool: True if at least one expected value is specified and all the specified values match; False otherwise """ self.get_info() checks = [ (key, getattr(self.info.pi_rebuild_st, key), val) for key, val in locals().items() if key != "self" and val is not None] return self._check_info(checks) def _check_info(self, check_list): """Verify each pool info attribute value matches an expected value. Args: check_list (list): a list of tuples containing the name of the pool information attribute to check, the current value of the attribute, and the expected value of the attribute. If the expected value is specified as a string with a number preceeded by '<', '<=', '>', or '>=' then this comparision will be used instead of the defult '=='. Returns: bool: True if at least one check has been specified and all the actual and expected values match; False otherwise. 
""" check_status = len(check_list) > 0 for check, actual, expect in check_list: # Determine which comparision to utilize for this check compare = ("==", lambda x, y: x == y, "does not match") if isinstance(expect, str): comparisions = { "<": (lambda x, y: x < y, "is too large"), ">": (lambda x, y: x > y, "is too small"), "<=": ( lambda x, y: x <= y, "is too large or does not match"), ">=": ( lambda x, y: x >= y, "is too small or does not match"), } for key, val in comparisions.items(): # If the expected value is preceeded by one of the known # comparision keys, use the comparision and remove the key # from the expected value if expect[:len(key)] == key: compare = (key, val[0], val[1]) expect = expect[len(key):] try: expect = int(expect) except ValueError: # Allow strings to be strings pass break self.log.info( "Verifying the pool %s: %s %s %s", check, actual, compare[0], expect) if not compare[1](actual, expect): msg = " The {} {}: actual={}, expected={}".format( check, compare[2], actual, expect) self.log.error(msg) check_status = False return check_status def rebuild_complete(self): """Determine if the pool rebuild is complete. Returns: bool: True if pool rebuild is complete; False otherwise """ self.display_pool_rebuild_status() return self.info.pi_rebuild_st.rs_done == 1 def wait_for_rebuild(self, to_start, interval=1): """Wait for the rebuild to start or end. Args: to_start (bool): whether to wait for rebuild to start or end interval (int): number of seconds to wait in between rebuild completion checks """ self.log.info( "Waiting for rebuild to %s ...", "start" if to_start else "complete") while self.rebuild_complete() == to_start: self.log.info( " Rebuild %s ...", "has not yet started" if to_start else "in progress") sleep(interval) self.log.info( "Rebuild %s detected", "start" if to_start else "completion") @fail_on(DaosApiError) def start_rebuild(self, ranks, daos_log): """Kill the specific server ranks using this pool. Args: ranks (list): a list of daos server ranks (int) to kill daos_log (DaosLog): object for logging messages Returns: bool: True if the server ranks have been killed and the ranks have been excluded from the pool; False if the pool is undefined """ msg = "Killing DAOS ranks {} from server group {}".format( ranks, self.name.value) self.log.info(msg) daos_log.info(msg) for rank in ranks: server = DaosServer(self.context, self.name.value, rank) self._call_method(server.kill, {"force": 1}) return self.exclude(ranks, daos_log) @fail_on(DaosApiError) def exclude(self, ranks, daos_log): """Manually exclude a rank from this pool. Args: ranks (list): a list daos server ranks (int) to exclude daos_log (DaosLog): object for logging messages Returns: bool: True if the ranks were excluded from the pool; False if the pool is undefined """ if self.pool: msg = "Excluding server ranks {} from pool {}".format( ranks, self.uuid) self.log.info(msg) daos_log.info(msg) self._call_method(self.pool.exclude, {"rank_list": ranks}) return True return False def check_files(self, hosts): """Check if pool files exist on the specified list of hosts. Args: hosts (list): list of hosts Returns: bool: True if the files for this pool exist on each host; False otherwise """ return check_pool_files(self.log, hosts, self.uuid.lower()) def write_file(self, orterun, processes, hostfile, size, timeout=60): """Write a file to the pool. 
Args: orterun (str): full path to the orterun command processes (int): number of processes to launch hosts (list): list of clients from which to write the file size (int): size of the file to create in bytes timeout (int, optional): number of seconds before timing out the command. Defaults to 60 seconds. Returns: process.CmdResult: command execution result """ self.log.info("Writing %s bytes to pool %s", size, self.uuid) env = { "DAOS_POOL": self.uuid, "DAOS_SVCL": "1", "DAOS_SINGLETON_CLI": "1", "PYTHONPATH": os.getenv("PYTHONPATH", ""), } current_path = os.path.dirname(os.path.abspath(__file__)) command = "{} --np {} --hostfile {} {} {} testfile".format( orterun, processes, hostfile, os.path.join(current_path, "write_some_data.py"), size) return process.run(command, timeout, True, False, "both", True, env) def get_pool_daos_space(self): """Get the pool info daos space attributes as a dictionary. Returns: dict: a dictionary of lists of the daos space attributes """ self.get_info() keys = ("s_total", "s_free") return {key: getattr(self.info.pi_space.ps_space, key) for key in keys} def display_pool_daos_space(self, msg=None): """Display the pool info daos space attributes. Args: msg (str, optional): optional text to include in the output. Defaults to None. """ daos_space = self.get_pool_daos_space() sizes = [ "{}[{}]={}".format(key, index, item) for key in sorted(daos_space.keys()) for index, item in enumerate(daos_space[key])] self.log.info( "Pool %s space%s:\n %s", self.uuid, " " + msg if isinstance(msg, str) else "", "\n ".join(sizes)) def get_pool_rebuild_status(self): """Get the pool info rebuild status attributes as a dictionary. Returns: dict: a dictionary of lists of the rebuild status attributes """ self.get_info() keys = ( "rs_version", "rs_pad_32", "rs_errno", "rs_done", "rs_toberb_obj_nr", "rs_obj_nr", "rs_rec_nr") return {key: getattr(self.info.pi_rebuild_st, key) for key in keys} def display_pool_rebuild_status(self): """Display the pool info rebuild status attributes.""" status = self.get_pool_rebuild_status() self.log.info( "Pool rebuild status: %s", ", ".join( ["{}={}".format(key, status[key]) for key in sorted(status)])) def read_data_during_rebuild(self, container): """Read data from the container while rebuild is active. Args: container (TestContainer): container from which to read data Returns: bool: True if all the data is read sucessfully befoire rebuild completes; False otherwise """ container.open() self.log.info( "Reading objects in container %s during rebuild", self.uuid) # Attempt to read all of the data from the container during rebuild index = 0 status = read_incomplete = index < len(container.written_data) while not self.rebuild_complete() and read_incomplete: try: status &= container.written_data[index].read_object(container) except DaosTestError as error: self.log.error(str(error)) status = False index += 1 read_incomplete = index < len(container.written_data) # Verify that all of the container data was read successfully if read_incomplete: self.log.error( "Rebuild completed before all the written data could be read") status = False elif not status: self.log.error("Errors detected reading data during rebuild") return status
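# Taken together, the TestPool methods above support a create/connect/verify/
# destroy flow.  A minimal sketch of how a test might drive it, assuming the
# usual avocado test fixture and yaml-provided pool parameters under
# /run/pool/*:

self.pool = TestPool(self.context)
self.pool.get_params(self)      # mode, scm_size, etc. from /run/pool/*
self.pool.create()              # populates self.pool.uuid and svc_ranks
self.pool.connect()

# the check_* methods accept "<", "<=", ">", ">=" prefixes on expected values
if not self.pool.check_pool_info(pi_ndisabled="<=1"):
    self.fail("Unexpected number of disabled targets")
self.pool.display_pool_daos_space("after create")

self.pool.destroy()             # disconnects first, then destroys the pool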
def test_connect(self):
    """
    Pass bad parameters to pool connect

    :avocado: tags=all,pool,full_regression,tiny,badconnect
    """
    # parameters used in pool create
    createmode = self.params.get("mode", '/run/connecttests/createmode/')
    createuid = self.params.get("uid", '/run/connecttests/uids/createuid/')
    creategid = self.params.get("gid", '/run/connecttests/gids/creategid/')
    createsetid = self.params.get("setname",
                                  '/run/connecttests/setnames/createset/')
    createsize = self.params.get("size",
                                 '/run/connecttests/psize/createsize/')

    # Accumulate a list of pass/fail indicators representing what is
    # expected for each parameter then "and" them to determine the
    # expected result of the test
    expected_for_param = []

    modelist = self.params.get("mode", '/run/connecttests/connectmode/*/')
    connectmode = modelist[0]
    expected_for_param.append(modelist[1])

    svclist = self.params.get("ranklist", '/run/connecttests/svrlist/*/')
    svc = svclist[0]
    expected_for_param.append(svclist[1])

    setlist = self.params.get("setname",
                              '/run/connecttests/connectsetnames/*/')
    connectset = setlist[0]
    expected_for_param.append(setlist[1])

    uuidlist = self.params.get("uuid", '/run/connecttests/UUID/*/')
    connectuuid = uuidlist[0]
    expected_for_param.append(uuidlist[1])

    # if any parameter is FAIL then the test should FAIL; in this test
    # virtually every case should FAIL since we are testing bad parameters
    expected_result = 'PASS'
    for result in expected_for_param:
        if result == 'FAIL':
            expected_result = 'FAIL'
            break

    puuid = (ctypes.c_ubyte * 16)()
    psvc = RankList()
    pgroup = ctypes.create_string_buffer(0)
    pool = None
    try:
        # setup the DAOS python API
        with open('../../.build_vars.json') as build_file:
            data = json.load(build_file)
        context = DaosContext(data['PREFIX'] + '/lib64/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(context)
        pool.create(createmode, createuid, creategid,
                    createsize, createsetid, None)
        # save this uuid since we might trash it as part of the test
        ctypes.memmove(puuid, pool.uuid, 16)

        # trash the pool service rank list
        psvc.rl_ranks = pool.svc.rl_ranks
        psvc.rl_nr = pool.svc.rl_nr
        if not svc == 'VALID':
            rl_ranks = ctypes.POINTER(ctypes.c_uint)()
            pool.svc = RankList(rl_ranks, 1)

        # trash the pool group value
        pgroup = pool.group
        if connectset == 'NULLPTR':
            pool.group = None

        # trash the UUID value in various ways
        if connectuuid == 'NULLPTR':
            pool.uuid = None
        if connectuuid == 'JUNK':
            pool.uuid[4] = 244

        pool.connect(connectmode)

        if expected_result in ['FAIL']:
            self.fail("Test was expected to fail but it passed.\n")

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        if expected_result in ['PASS']:
            self.fail("Test was expected to pass but it failed.\n")

    # cleanup the pool
    finally:
        if pool is not None and pool.attached == 1:
            # restore values in case we trashed them during the test
            pool.svc.rl_ranks = psvc.rl_ranks
            pool.svc.rl_nr = psvc.rl_nr
            pool.group = pgroup
            ctypes.memmove(pool.uuid, puuid, 16)
            print("pool uuid after restore {}".format(pool.get_uuid_str()))
            pool.destroy(1)
def test_tx_basics(self): """ Perform I/O to an object in a container in 2 different transactions, verifying basic I/O and transactions in particular. NOTE: this was an epoch test and all I did was get it working with tx Not a good test at this point, need to redesign when tx is fully working. :avocado: tags=all,container,tx,small,smoke,pr,basictx """ pool = None try: # parameters used in pool create createmode = self.params.get("mode", '/run/poolparams/createmode/') createuid = os.geteuid() creategid = os.getegid() createsetid = self.params.get("setname", '/run/poolparams/createset/') createsize = self.params.get("size", '/run/poolparams/createsize/') # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) # need a connection to create container pool.connect(1 << 1) # create a container container = DaosContainer(self.context) container.create(pool.handle) # now open it container.open() # do a query and compare the UUID returned from create with # that returned by query container.query() if container.get_uuid_str() != c_uuid_to_str( container.info.ci_uuid): self.fail("Container UUID did not match the one in info\n") # create an object and write some data into it thedata = "a string that I want to stuff into an object" thedatasize = 45 dkey = "this is the dkey" akey = "this is the akey" oid, txn = container.write_an_obj(thedata, thedatasize, dkey, akey, None, None, 2) # read the data back and make sure its correct thedata2 = container.read_an_obj(thedatasize, dkey, akey, oid, txn) if thedata != thedata2.value: print("thedata>" + thedata) print("thedata2>" + thedata2.value) self.fail("Write data 1, read it back, didn't match\n") # repeat above, but know that the write_an_obj call is advancing # the epoch so the original copy remains and the new copy is in # a new epoch. thedata3 = "a different string" thedatasize2 = 19 # note using the same keys so writing to the same spot dkey = "this is the dkey" akey = "this is the akey" oid, tx2 = container.write_an_obj(thedata3, thedatasize2, dkey, akey, oid, None, 2) # read the data back and make sure its correct thedata4 = container.read_an_obj(thedatasize2, dkey, akey, oid, tx2) if thedata3 != thedata4.value: self.fail("Write data 2, read it back, didn't match\n") # transactions generally don't work this way but need to explore # an alternative to below code once model is complete, maybe # read from a snapshot or read from TX_NONE etc. # the original data should still be there too #thedata5 = container.read_an_obj(thedatasize, dkey, akey, # oid, transaction) #if thedata != thedata5.value: # self.fail("Write data 3, read it back, didn't match\n") container.close() # wait a few seconds and then destroy time.sleep(5) container.destroy() except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Test was expected to pass but it failed.\n") finally: # cleanup the pool if pool is not None: pool.disconnect() pool.destroy(1)
class ObjFetchBadParam(TestWithServers): """ Test Class Description: Pass an assortment of bad parameters to the daos_obj_fetch function. :avocado: recursive """ def setUp(self): super(ObjFetchBadParam, self).setUp() time.sleep(5) try: # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/') createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') createuid = os.geteuid() creategid = os.getegid() # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) # need a connection to create container self.pool.connect(1 << 1) # create a container self.container = DaosContainer(self.context) self.container.create(self.pool.handle) # now open it self.container.open() # create an object and write some data into it thedata = "a string that I want to stuff into an object" self.datasize = len(thedata) + 1 self.dkey = "this is the dkey" self.akey = "this is the akey" self.obj, self.epoch = self.container.write_an_obj( thedata, self.datasize, self.dkey, self.akey, None, None, 2) thedata2 = self.container.read_an_obj(self.datasize, self.dkey, self.akey, self.obj, self.epoch) if thedata not in thedata2.value: print(thedata) print(thedata2.value) self.fail("Error reading back data, test failed during"\ " the initial setup.\n") except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Test failed during the initial setup.\n") def test_bad_handle(self): """ Test ID: DAOS-1377 Test Description: Pass a bogus object handle, should return bad handle. :avocado: tags=all,object,full_regression,small,objbadhandle """ try: # trash the handle and read again saved_oh = self.obj.obj_handle self.obj.obj_handle = 99999 # expecting this to fail with -1002 dummy_thedata2 = self.container.read_an_obj( self.datasize, self.dkey, self.akey, self.obj, self.epoch) self.container.oh = saved_oh self.fail("Test was expected to return a -1002 but it has not.\n") except DaosApiError as excep: self.container.oh = saved_oh if '-1002' not in str(excep): print(excep) print(traceback.format_exc()) self.fail("Test was expected to get -1002 but it has not.\n") def test_null_ptrs(self): """ Test ID: DAOS-1377 Test Description: Pass null pointers for various fetch parameters. 
:avocado: tags=all,object,full_regression,small,objfetchnull """ try: # now try it with a bad dkey, expecting this to fail with -1003 dummy_thedata2 = self.container.read_an_obj( self.datasize, None, self.akey, self.obj, self.epoch) self.container.close() self.container.destroy() self.pool.disconnect() self.pool.destroy(1) self.fail("Test was expected to return a -1003 but it has not.\n") except DaosApiError as excep: if '-1003' not in str(excep): print(excep) print(traceback.format_exc()) self.fail("Test was expected to get -1003 but it has not.\n") try: # now try it with a null sgl (iod_size is not set) # expecting this to fail with -2013 test_hints = ['sglnull'] dummy_thedata2 = self.container.read_an_obj( self.datasize, self.dkey, self.akey, self.obj, self.epoch, test_hints) # behavior not as expect so commented out for now # when DAOS-1448 is complete, uncomment and retest self.fail("Test was expected to return a -2013 but it has not.\n") except DaosApiError as excep: if '-2013' not in str(excep): print(excep) print(traceback.format_exc()) self.fail("Test was expected to get -2013 but it has not.\n") try: # when DAOS-1449 is complete, uncomment and retest # now try it with a null iod, expecting this to fail with -1003 #test_hints = ['iodnull'] #thedata2 = self.container.read_an_obj(self.datasize, dkey, akey, # self.obj, self.epoch, test_hints) pass #self.fail("Test was expected to return a -1003 but it has not.\n") except DaosApiError as excep: if '-1003' not in str(excep): print(excep) print(traceback.format_exc()) self.fail("Test was expected to get -1003 but it has not.\n")
def test_poolsvc(self):
    """
    Test svc arg during pool create.

    :avocado: tags=all,pool,pr,medium,svc
    """
    # parameters used in pool create
    createmode = self.params.get("mode", '/run/createtests/createmode/*/')
    createuid = os.geteuid()
    creategid = os.getegid()
    createsetid = self.params.get("setname", '/run/createtests/createset/')
    createsize = self.params.get("size", '/run/createtests/createsize/')
    createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')

    expected_result = createsvc[1]

    try:
        # initialize a python pool object then create the underlying
        # daos storage
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid, None, None, createsvc[0])
        self.pool.connect(1 << 1)

        # check the returned rank list when more than one pool service
        # member was requested
        iterator = 0
        while (
                int(self.pool.svc.rl_ranks[iterator]) > 0 and
                int(self.pool.svc.rl_ranks[iterator]) <= createsvc[0] and
                int(self.pool.svc.rl_ranks[iterator]) != 999999
        ):
            iterator += 1
        if iterator != createsvc[0]:
            self.fail("Length of Returned Rank list is not equal to "
                      "the number of Pool Service members.\n")

        rank_list = []
        for iterator in range(createsvc[0]):
            rank_list.append(int(self.pool.svc.rl_ranks[iterator]))
        if len(rank_list) != len(set(rank_list)):
            self.fail("Duplicate values in returned rank list")

        self.pool.pool_query()
        leader = self.pool.pool_info.pi_leader
        if createsvc[0] == 3:
            # kill the pool leader and exclude it
            self.pool.pool_svc_stop()
            self.pool.exclude([leader])

            # perform pool disconnect, try connect again and disconnect
            self.pool.disconnect()
            self.pool.connect(1 << 1)
            self.pool.disconnect()

            # kill another server which is not a leader and exclude it
            server = DaosServer(self.context, self.server_group, 3)
            server.kill(1)
            self.pool.exclude([3])

            # perform pool connect
            self.pool.connect(1 << 1)

        if expected_result in ['FAIL']:
            self.fail("Test was expected to fail but it passed.\n")

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        if expected_result == 'PASS':
            self.fail("Test was expected to pass but it failed.\n")
class SameKeyDifferentValue(TestWithServers): """ Test Description: Test to verify different type of values passed to same akey and dkey. :avocado: recursive """ def setUp(self): try: super(SameKeyDifferentValue, self).setUp() # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/') createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') createuid = os.geteuid() creategid = os.getegid() # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) self.pool.connect(1 << 1) # create a container self.container = DaosContainer(self.context) self.container.create(self.pool.handle) # now open it self.container.open() except DaosApiError as excpn: print(excpn) print(traceback.format_exc()) self.fail("Test failed during setup.\n") def test_single_to_array_value(self): """ Jira ID: DAOS-2218 Test Description: Test to verify different type of values passed (i.e. single to array value) to the same akey and dkey. Case1: Insert akey,dkey with single value Insert same akey,dkey with array value Result: should return -1001 ERR. Case2: Insert akey,dkey with single value Punch the keys Insert same akey,dkey under same object with array value Result: should either pass or return -1001 ERR Case3: Insert akey,dkey with single value Punch the keys Trigger aggregation Insert same akey,dkey under same object with array value Result: should either pass or return -1001 ERR :avocado: tags=object,samekeydifferentvalue,singletoarray,vm,small """ # define akey,dkey, single value data and array value data single_value_data = "a string that I want to stuff into an object" array_value_data = [] array_value_data.append("data string one") array_value_data.append("data string two") array_value_data.append("data string tre") dkey = "this is the dkey" akey = "this is the akey" aggregation = False for i in range(3): try: # create an object and write single value data into it obj = self.container.write_an_obj(single_value_data, len(single_value_data) + 1, dkey, akey, obj_cls=1) # read the data back and make sure its correct read_back_data = self.container.read_an_obj( len(single_value_data) + 1, dkey, akey, obj) if single_value_data != read_back_data.value: print("data I wrote:" + single_value_data) print("data I read back" + read_back_data.value) self.fail("Write data, read it back, didn't match\n") # test case 1 if i == 0: try: # write array value data to same keys, expected to fail self.container.write_an_array_value(array_value_data, dkey, akey, obj, obj_cls=1) # above line is expected to return an error, # if not fail the test self.fail( "Array value write to existing single value" + " key should have failed\n") # should fail with -1001 ERR except DaosApiError as excp: if "-1001" not in str(excp): print(excp) self.fail("Should have failed with -1001 error" + " message, but it did not\n") # test case 2 and 3 elif i == 1 or 2: try: # punch the keys obj.punch_akeys(0, dkey, [akey]) obj.punch_dkeys(0, [dkey]) if aggregation is True: # trigger aggregation self.container.aggregate(self.container.coh, 0) # write to the same set of keys under same object # with array value type self.container.write_an_array_value(array_value_data, dkey, akey, obj, obj_cls=1) # above write of array value should either succeed # or fail with -1001 ERR except DaosApiError as excp: if "-1001" not in str(excp): 
print(excp) self.fail("Should have failed with -1001 error" + " message or the write should have" + " been successful, but it did not\n") # change the value of aggregation to test Test Case 3 aggregation = True # punch the entire object after each iteration obj.close() # catch the exception if test fails to write to an object # or fails to punch the written object except DaosApiError as excp: self.fail("Failed to write to akey/dkey or punch the object") def test_array_to_single_value(self): """ Jira ID: DAOS-2218 Test Description: Test to verify different type of values passed (i.e array to single value) to the same akey and dkey. Case1: Insert akey,dkey with array value Insert same akey,dkey with single value Result: should return -1001 ERR. Case2: Insert akey,dkey with array value Punch the keys Insert same akey,dkey under same object with single value Result: should either pass or return -1001 ERR Case3: Insert akey,dkey with array value Punch the keys Trigger aggregation Insert same akey,dkey under same object with single value Result: should either pass or return -1001 ERR :avocado: tags=object,samekeydifferentvalue,arraytosingle,vm,small """ # define akey,dkey, single value data and array value data single_value_data = "a string that I want to stuff into an object" array_value_data = [] array_value_data.append("data string one") array_value_data.append("data string two") array_value_data.append("data string tre") dkey = "this is the dkey" akey = "this is the akey" aggregation = False for i in range(3): try: # create an object and write array value data into it obj = self.container.write_an_array_value(array_value_data, dkey, akey, obj_cls=1) # read the data back and make sure its correct length = len(array_value_data[0]) read_back_data = self.container.read_an_array( len(array_value_data), length + 1, dkey, akey, obj) for j in range(3): if (array_value_data[j][0:length - 1] != read_back_data[j][0:length - 1]): print("Written Data: {}".format(array_value_data[j])) print("Read Data: {}".format(read_back_data[j])) self.fail("Data mismatch\n") # test case 1 if i == 0: try: # write single value data to same keys, expected to fail self.container.write_an_obj(single_value_data, len(single_value_data) + 1, dkey, akey, obj, obj_cls=1) # above line is expected to return an error, # if not fail the test self.fail( "Single value write to existing array value" + " key should have failed\n") # should fail with -1001 ERR except DaosApiError as excp: if "-1001" not in str(excp): print(excp) self.fail("Should have failed with -1001 error" + " message, but it did not\n") # test case 2 and 3 elif i == 1 or 2: try: # punch the keys obj.punch_akeys(0, dkey, [akey]) obj.punch_dkeys(0, [dkey]) if aggregation is True: # trigger aggregation self.container.aggregate(self.container.coh, 0) # write to the same set of keys under same object # with single value type self.container.write_an_obj(single_value_data, len(single_value_data) + 1, dkey, akey, obj, obj_cls=1) # above write of array value should either succeed # or fail with -1001 ERR except DaosApiError as excp: if "-1001" not in str(excp): print(excp) self.fail("Should have failed with -1001 error" + " message or the write should have" + " been successful, but it did not\n") # change the value of aggregation to test Test Case 3 aggregation = True # punch the entire object after each iteration obj.close() # catch the exception if test fails to write to an object # or fails to punch the written object except DaosApiError as excp: self.fail("Failed to write 
to akey/dkey or punch the object")
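The "-1001" handling repeated in both tests above could be centralized. A minimal sketch follows, assuming DaosApiError is imported as elsewhere in this module; expect_daos_error() is a hypothetical helper, not part of the DAOS API.

def expect_daos_error(call, error_code="-1001", allow_success=False):
    """Run call() and check that it raises a DaosApiError carrying error_code.

    Returns True when the expected error was raised, or False when the call
    succeeded and allow_success is set (cases 2 and 3 above); any other outcome
    raises AssertionError so the surrounding test can fail.
    DaosApiError is assumed to be imported as in the tests above.
    """
    try:
        call()
    except DaosApiError as excp:
        if error_code not in str(excp):
            raise AssertionError("expected {0}, got: {1}".format(error_code, excp))
        return True
    if not allow_success:
        raise AssertionError("call succeeded but {0} was expected".format(error_code))
    return False

A case could then read, for example, expect_daos_error(lambda: self.container.write_an_array_value(array_value_data, dkey, akey, obj, obj_cls=1), allow_success=(i > 0)).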
def create(self): """Create a pool with either API or dmg. To use dmg, the test needs to set control_method.value to USE_DMG prior to calling this method. The recommended way is to specify the pool block in yaml. For example, pool: control_method: dmg This tells this method to use dmg. The test also needs to set dmg_bin_path through the constructor if dmg is used. For example, self.pool = TestPool( self.context, dmg_bin_path=self.basepath + '/install/bin') If it wants to use --nsvc option, it needs to set the value to svcn.value. Otherwise, 1 is used. If it wants to use --group, it needs to set groupname.value. If it wants to use --user, it needs to set username.value. If it wants to add other options, directly set it to self.dmg.action_command. Refer dmg_utils.py pool_create method for more details. To test the negative case on create, the test needs to catch CommandFailure for dmg and DaosApiError for API. Thus, we need to make more than one line modification to the test only for this purpose. Currently, pool_svc is the only test that needs this change. """ self.destroy() if self.target_list.value is not None: self.log.info("Creating a pool on targets %s", self.target_list.value) else: self.log.info("Creating a pool") self.pool = DaosPool(self.context) kwargs = { "uid": self.uid, "gid": self.gid, "scm_size": self.scm_size.value, "group": self.name.value } for key in ("target_list", "svcn", "nvme_size"): value = getattr(self, key).value if value is not None: kwargs[key] = value if self.control_method.value == self.USE_API: # Create a pool with the API method kwargs["mode"] = self.mode.value self._call_method(self.pool.create, kwargs) elif self.control_method.value == self.USE_DMG and self.dmg: # Create a pool with the dmg command self._log_method("dmg.pool_create", kwargs) result = self.dmg.pool_create(**kwargs) # self.cmd_output to keep the actual stdout of dmg command for # checking the negative/warning message. self.cmd_output = result.stdout uuid, svc = get_pool_uuid_service_replicas_from_stdout( result.stdout) # Populte the empty DaosPool object with the properties of the pool # created with dmg pool create. if self.name.value: self.pool.group = ctypes.create_string_buffer(self.name.value) # Convert the string of service replicas from the dmg command output # into an ctype array for the DaosPool object using the same # technique used in DaosPool.create(). service_replicas = [int(value) for value in svc.split(",")] rank_t = ctypes.c_uint * len(service_replicas) rank = rank_t(*list([svc for svc in service_replicas])) rl_ranks = ctypes.POINTER(ctypes.c_uint)(rank) self.pool.svc = daos_cref.RankList(rl_ranks, len(service_replicas)) # Set UUID and attached to the DaosPool object self.pool.set_uuid_str(uuid) self.pool.attached = 1 elif self.control_method.value == self.USE_DMG: self.log.error("Error: Undefined dmg command") else: self.log.error("Error: Undefined control_method: %s", self.control_method.value) # Set the TestPool attributes for the created pool self.svc_ranks = [ int(self.pool.svc.rl_ranks[index]) for index in range(self.pool.svc.rl_nr) ] self.uuid = self.pool.get_uuid_str()
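A minimal usage sketch for this create() variant, assuming a test derived from TestWithServers whose yaml carries the pool block shown in the docstring; the get_params() call and the connect step follow the framework's usual pattern and are illustrative, not prescriptive.

# Illustrative only: the paths and keyword come from the docstring above.
self.pool = TestPool(
    self.context, dmg_bin_path=self.basepath + '/install/bin')
self.pool.get_params(self)   # reads /run/pool/*, including control_method
self.pool.create()           # dispatches to dmg or the API per control_method
self.pool.connect()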
class CreateManyDkeys(Test): """ Test Class Description: Tests that create large numbers of keys in objects/containers and then destroy the containers and verify the space has been reclaimed. :avocado: recursive """ def setUp(self): super(CreateManyDkeys, self).setUp() self.pool = DaosPool(self.context) self.pool.create(self.params.get("mode", '/run/pool/createmode/*'), os.geteuid(), os.getegid(), self.params.get("size", '/run/pool/createsize/*'), self.params.get("setname", '/run/pool/createset/*'), None) self.pool.connect(1 << 1) def tearDown(self): try: if self.pool: self.pool.disconnect() self.pool.destroy(1) finally: super(CreateManyDkeys, self).tearDown() def write_a_bunch_of_values(self, how_many): """ Write data to an object, each with a dkey and akey. The how_many parameter determines how many key:value pairs are written. """ self.container = DaosContainer(self.context) self.container.create(self.pool.handle) self.container.open() ioreq = IORequest(self.context, self.container, None) epoch = self.container.get_new_tx() c_epoch = ctypes.c_uint64(epoch) print("Started Writing the Dataset-----------\n") inc = 50000 last_key = inc for key in range(how_many): c_dkey = ctypes.create_string_buffer("dkey {0}".format(key)) c_akey = ctypes.create_string_buffer("akey {0}".format(key)) c_value = ctypes.create_string_buffer( "some data that gets stored with the key {0}".format(key)) c_size = ctypes.c_size_t(ctypes.sizeof(c_value)) ioreq.single_insert(c_dkey, c_akey, c_value, c_size, c_epoch) if key > last_key: print("written: {}".format(key)) sys.stdout.flush() last_key = key + inc self.container.commit_tx(c_epoch) print("Started Verification of the Dataset-----------\n") last_key = inc for key in range(how_many): c_dkey = ctypes.create_string_buffer("dkey {0}".format(key)) c_akey = ctypes.create_string_buffer("akey {0}".format(key)) the_data = "some data that gets stored with the key {0}".format( key) val = ioreq.single_fetch(c_dkey, c_akey, len(the_data) + 1, c_epoch) if the_data != (repr(val.value)[1:-1]): self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, " "Expected Value={2} and Received Value={3}\n".format( "dkey {0}".format(key), "akey {0}".format(key), the_data, repr(val.value)[1:-1])) if key > last_key: print("verified: {}".format(key)) sys.stdout.flush() last_key = key + inc print("starting destroy") self.container.close() self.container.destroy() print("destroy complete") @avocado.fail_on(DaosApiError) @skipForTicket("DAOS-1721") def test_many_dkeys(self): """ Test ID: DAOS-1701 Test Description: Test many dkeys in the same object. Use Cases: 1. large key counts 2. space reclamation after destroy :avocado: tags=all,full,small,object,many_dkeys """ no_of_dkeys = self.params.get("number_of_dkeys", '/run/dkeys/') # write a lot of individual data items, verify them, then destroy self.write_a_bunch_of_values(no_of_dkeys) # do it again, which should verify the first container # was truly destroyed because a second round won't fit otherwise self.write_a_bunch_of_values(no_of_dkeys)
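The per-key round trip in write_a_bunch_of_values() could be distilled into a helper for reuse; a minimal sketch, assuming an open IORequest and the transaction handle created above (verify_single_value() is hypothetical).

import ctypes

def verify_single_value(ioreq, key, c_epoch):
    """Fetch one dkey/akey written by the loop above and compare the payload."""
    c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
    c_akey = ctypes.create_string_buffer("akey {0}".format(key))
    expected = "some data that gets stored with the key {0}".format(key)
    val = ioreq.single_fetch(c_dkey, c_akey, len(expected) + 1, c_epoch)
    return expected == repr(val.value)[1:-1]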
class LlnlMpi4pyHdf5(TestWithServers): """ Runs LLNL, MPI4PY and HDF5 test suites. :avocado: recursive """ def setUp(self): super(LlnlMpi4pyHdf5, self).setUp() # initialising variables self.mpio = None self.hostfile_clients = None # setting client variables self.hostfile_clients = write_host_file.write_host_file( self.hostlist_clients, self.workdir, None) try: # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/*/') createuid = os.geteuid() creategid = os.getegid() createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') self.createsvc = self.params.get("svcn", '/run/pool/createsvc/') # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None, None, self.createsvc) except (DaosApiError) as excep: self.fail("<Test Failed at pool create> \n{0}".format(excep)) def run_test(self, test_repo, test_name): """ Executable function to be used by test functions below test_repo --location of test repository test_name --name of the test to be run """ # initialize MpioUtils self.mpio = MpioUtils() if not self.mpio.mpich_installed(self.hostlist_clients): self.fail("Exiting Test: Mpich not installed") try: # initialise test specific variables client_processes = self.params.get("np", '/run/client_processes/') # obtaining pool uuid and svc list pool_uuid = self.pool.get_uuid_str() svc_list = "" for i in range(self.createsvc): svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":" svc_list = svc_list[:-1] # running tests self.mpio.run_llnl_mpi4py_hdf5(self.basepath, self.hostfile_clients, pool_uuid, test_repo, test_name, client_processes) # Parsing output to look for failures # stderr directed to stdout stdout = self.logdir + "/stdout" searchfile = open(stdout, "r") error_message = [ "non-zero exit code", "MPI_Abort", "MPI_ABORT", "ERROR" ] for line in searchfile: # pylint: disable=C0200 for i in range(len(error_message)): if error_message[i] in line: self.fail("Test Failed with error_message: {}".format( error_message[i])) except (MpioFailed, DaosApiError) as excep: self.fail("<{0} Test Failed> \n{1}".format(test_name, excep)) @skipForTicket("CORCI-635") def test_llnl(self): """ Jira ID: DAOS-2231 Test Description: Run LLNL test suite.
Testing various I/O functions provided in llnl test suite such as:- test_collective, test_datareps, test_errhandlers, test_filecontrol, test_localpointer, test_manycomms, test_manyopens, test_openclose, test_openmodes, test_nb_collective, test_nb_localpointer, test_nb_rdwr, test_nb_readwrite, test_rdwr, test_readwrite :avocado: tags=all,mpiio,smoke,pr,small,llnlmpi4py """ test_repo = self.params.get("llnl", '/run/test_repo/') self.run_test(test_repo, "llnl") @skipForTicket("CORCI-635") def test_mpi4py(self): """ Jira ID: DAOS-2231 Test Description: Run mpi4py io test provided in mpi4py package Testing various I/O functions provided in mpi4py test suite such as:- testReadWriteAt, testIReadIWriteAt, testReadWrite testIReadIWrite, testReadWriteAtAll, testIReadIWriteAtAll testReadWriteAtAllBeginEnd, testReadWriteAll testIReadIWriteAll, testReadWriteAllBeginEnd :avocado: tags=all,mpiio,pr,small,llnlmpi4py,mpi4py """ test_repo = self.params.get("mpi4py", '/run/test_repo/') self.run_test(test_repo, "mpi4py") @skipForTicket("CORCI-635") def test_hdf5(self): """ Jira ID: DAOS-2252 Test Description: Run HDF5 testphdf5 and t_shapesame provided in HDF5 package. Testing various I/O functions provided in HDF5 test suite such as:- test_fapl_mpio_dup, test_split_comm_access, test_page_buffer_access, test_file_properties, dataset_writeInd, dataset_readInd, dataset_writeAll, dataset_readAll, extend_writeInd, extend_readInd, extend_writeAll, extend_readAll,extend_writeInd2,none_selection_chunk, zero_dim_dset, multiple_dset_write, multiple_group_write, multiple_group_read, compact_dataset, collective_group_write, independent_group_read, big_dataset, coll_chunk1, coll_chunk2, coll_chunk3, coll_chunk4, coll_chunk5, coll_chunk6, coll_chunk7, coll_chunk8, coll_chunk9, coll_chunk10, coll_irregular_cont_write, coll_irregular_cont_read, coll_irregular_simple_chunk_write, coll_irregular_simple_chunk_read , coll_irregular_complex_chunk_write, coll_irregular_complex_chunk_read , null_dataset , io_mode_confusion, rr_obj_hdr_flush_confusion, chunk_align_bug_1,lower_dim_size_comp_test, link_chunk_collective_io_test, actual_io_mode_tests, no_collective_cause_tests, test_plist_ed, file_image_daisy_chain_test, test_dense_attr, test_partial_no_selection_coll_md_read :avocado: tags=mpio,llnlmpi4pyhdf5,hdf5 """ test_repo = self.params.get("hdf5", '/run/test_repo/') self.run_test(test_repo, "hdf5")
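The stdout scan in run_test() above could be isolated into a small helper that also closes the log file; a minimal sketch, where scan_for_errors() is hypothetical and the marker list simply mirrors the one above.

def scan_for_errors(stdout_path,
                    markers=("non-zero exit code", "MPI_Abort",
                             "MPI_ABORT", "ERROR")):
    """Return the first error marker found in the MPI stdout log, or None."""
    with open(stdout_path, "r") as logfile:
        for line in logfile:
            for marker in markers:
                if marker in line:
                    return marker
    return None

run_test() could then do: hit = scan_for_errors(self.logdir + "/stdout") and self.fail(...) only when hit is not None.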
class GlobalHandle(TestWithServers): """ This class contains tests to verify the ability to share pool handles among processes. :avocado: recursive """ def tearDown(self): try: super(GlobalHandle, self).tearDown() finally: # really make sure everything is gone check_for_pool.cleanup_pools(self.hostlist_servers) def check_handle(self, buf_len, iov_len, buf, uuidstr, rank): """ This gets run in a child process and verifies that the global handle can be turned into a local handle in another process. """ pool = DaosPool(self.context) pool.set_uuid_str(uuidstr) pool.set_svc(rank) pool.group = "daos_server" # note that the handle is stored inside the pool as well dummy_local_handle = pool.global2local(self.context, iov_len, buf_len, buf) # perform some operations that will use the new handle pool.pool_query() container = DaosContainer(self.context) container.create(pool.handle) def test_global_handle(self): """ Test ID: DAO Test Description: Use a pool handle in another process. :avocado: tags=all,pool,pr,tiny,poolglobalhandle """ try: # use the uid/gid of the user running the test, these should # be perfectly valid createuid = os.geteuid() creategid = os.getegid() # parameters used in pool create that are in yaml createmode = self.params.get("mode", '/run/testparams/createmode/') createsetid = self.params.get("setname", '/run/testparams/createset/') createsize = self.params.get("size", '/run/testparams/createsize/') # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) self.pool.connect(1 << 1) # create a container just to make sure handle is good self.container = DaosContainer(self.context) self.container.create(self.pool.handle) # create a global handle iov_len, buf_len, buf = self.pool.local2global() # this should work in the future but needs on-line server addition #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0) #p = Process(target=check_handle, args=arg_list) #p.start() #p.join() # for now verifying global handle in the same process which is not # the intended use case self.check_handle(buf_len, iov_len, buf, self.pool.get_uuid_str(), 0) except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Expecting to pass but test has failed.\n")
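Once on-line server addition makes the commented-out flow above possible, the child-process check might look like the following sketch; the multiprocessing usage and the exit-code check are assumptions, not the current test behavior.

from multiprocessing import Process

# Illustrative only: mirrors the commented-out arg_list above.
iov_len, buf_len, buf = self.pool.local2global()
child = Process(target=self.check_handle,
                args=(buf_len, iov_len, buf, self.pool.get_uuid_str(), 0))
child.start()
child.join()
if child.exitcode != 0:
    self.fail("global pool handle was not usable in a child process")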
def test_evict(self): """Test ID: DAOS-427. Test Description: Pass bad parameters to the pool evict clients call. :avocado: tags=all,pool,full_regression,tiny,badevict """ # parameters used in pool create createmode = self.params.get("mode", '/run/evicttests/createmode/') createsetid = self.params.get("setname", '/run/evicttests/createset/') createsize = self.params.get("size", '/run/evicttests/createsize/') createuid = os.geteuid() creategid = os.getegid() # Accumulate a list of pass/fail indicators representing what is # expected for each parameter then "and" them to determine the # expected result of the test expected_for_param = [] setlist = self.params.get("setname", '/run/evicttests/connectsetnames/*/') evictset = setlist[0] expected_for_param.append(setlist[1]) uuidlist = self.params.get("uuid", '/run/evicttests/UUID/*/') excludeuuid = uuidlist[0] expected_for_param.append(uuidlist[1]) # if any parameter is FAIL then the test should FAIL, in this test # virtually everyone should FAIL since we are testing bad parameters expected_result = 'PASS' for result in expected_for_param: if result == 'FAIL': expected_result = 'FAIL' break saveduuid = None savedgroup = None pool = None try: # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) # trash the pool group value savedgroup = pool.group if evictset is None: pool.group = None else: pool.set_group(evictset) # trash the UUID value in various ways if excludeuuid is None: saveduuid = (ctypes.c_ubyte * 16)(0) for item in range(0, len(saveduuid)): saveduuid[item] = pool.uuid[item] pool.uuid[0:] = [0 for item in range(0, len(pool.uuid))] elif excludeuuid == 'JUNK': saveduuid = (ctypes.c_ubyte * 16)(0) for item in range(0, len(saveduuid)): saveduuid[item] = pool.uuid[item] pool.uuid[4] = 244 pool.evict() if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as excep: self.log.error(str(excep)) self.log.error(traceback.format_exc()) if expected_result in ['PASS']: self.fail("Test was expected to pass but it failed.\n") finally: if pool is not None: # if the test trashed some pool parameter, put it back the # way it was pool.group = savedgroup if saveduuid is not None: for item in range(0, len(saveduuid)): pool.uuid[item] = saveduuid[item] pool.destroy(0)
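The save/corrupt/restore handling of pool.uuid above is a pattern this test shares with the exclude test; a minimal sketch of wrapping it in a context manager, where corrupted_uuid() is hypothetical and pool.uuid is assumed to be the 16-byte ctypes array used above.

import contextlib

@contextlib.contextmanager
def corrupted_uuid(pool, zero=False):
    """Temporarily trash pool.uuid (zero it out or poke a bad byte), restore after."""
    saved = [pool.uuid[item] for item in range(16)]
    try:
        if zero:
            pool.uuid[0:] = [0 for _ in range(16)]
        else:
            pool.uuid[4] = 244
        yield pool
    finally:
        for item in range(16):
            pool.uuid[item] = saved[item]

The expected-failure call could then read: with corrupted_uuid(pool): pool.evict().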
class ObjOpenBadParam(TestWithServers): """ Test Class Description: Pass an assortment of bad parameters to the daos_obj_open function. :avocado: recursive """ def setUp(self): super(ObjOpenBadParam, self).setUp() try: # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/') createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') createuid = os.geteuid() creategid = os.getegid() # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) # need a connection to create container self.pool.connect(1 << 1) # create a container self.container = DaosContainer(self.context) self.container.create(self.pool.handle) # now open it self.container.open() # create an object and write some data into it thedata = "a string that I want to stuff into an object" self.datasize = len(thedata) + 1 self.dkey = "this is the dkey" self.akey = "this is the akey" self.obj = self.container.write_an_obj(thedata, self.datasize, self.dkey, self.akey, obj_cls=1) thedata2 = self.container.read_an_obj(self.datasize, self.dkey, self.akey, self.obj) if thedata not in thedata2.value: print(thedata) print(thedata2.value) err_str = "Error reading back data, test failed during the " \ "initial setup." self.d_log.error(err_str) self.fail(err_str) # setup leaves object in open state, so closing to start clean self.obj.close() except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Test failed during the initial setup.") def test_bad_obj_handle(self): """ Test ID: DAOS-1320 Test Description: Attempt to open a garbage object handle. :avocado: tags=all,object,full_regression,tiny,objopenbadhandle """ saved_handle = self.obj.obj_handle self.obj.obj_handle = 8675309 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.obj.obj_handle = saved_handle def test_invalid_container_handle(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object with a garbage container handle. :avocado: tags=all,object,full_regression,tiny,objopenbadcont """ saved_coh = self.container.coh self.container.coh = 8675309 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.container.coh = saved_coh def test_closed_container_handle(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with a closed handle. :avocado: tags=all,object,full_regression,tiny,objopenclosedcont """ self.container.close() try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.container.open() def test_pool_handle_as_obj_handle(self): """ Test ID: DAOS-1320 Test Description: Adding this test by request, this test attempts to open an object that's had its handle set to be the same as a valid pool handle. 
:avocado: tags=all,object,full_regression,tiny,objopenbadpool """ saved_oh = self.obj.obj_handle self.obj.obj_handle = self.pool.handle try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.obj.obj_handle = saved_oh def test_null_ranklist(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with an empty ranklist. :avocado: tags=all,object,full_regression,tiny,objopennullrl """ # null rl saved_rl = self.obj.tgt_rank_list self.obj.tgt_rank_list = None try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.tgt_rank_list = saved_rl def test_null_oid(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with null object id. :avocado: tags=all,object,full_regression,tiny,objopennulloid """ # null oid saved_oid = self.obj.c_oid self.obj.c_oid = DaosObjId(0, 0) try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("Test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.c_oid = saved_oid def test_null_tgts(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with null tgt. :avocado: tags=all,object,full_regression,tiny,objopennulltgts """ # null tgts saved_ctgts = self.obj.c_tgts self.obj.c_tgts = 0 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("Test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.c_tgts = saved_ctgts def test_null_attrs(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with null object attributes. :avocado: tags=all,object,full_regression,tiny,objopennullattr """ # null attr saved_attr = self.obj.attr self.obj.attr = 0 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.attr = saved_attr
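Each test above repeats the same corrupt-attribute/open/expect-error/restore steps; a minimal sketch of a shared helper, assuming DaosApiError and traceback are imported as in this module (open_with_bad_attr() is hypothetical, not part of the class).

def open_with_bad_attr(self, target, attr_name, bad_value, expected_err):
    """Corrupt one attribute, attempt the open, expect expected_err, then restore."""
    saved = getattr(target, attr_name)
    setattr(target, attr_name, bad_value)
    try:
        self.obj.open()
        self.fail("open succeeded with bad {0}".format(attr_name))
    except DaosApiError as excep:
        if expected_err not in str(excep):
            self.d_log.error(traceback.format_exc())
            self.fail("test expected a {0} but did not get it".format(expected_err))
    finally:
        setattr(target, attr_name, saved)

For example, self.open_with_bad_attr(self.obj, "obj_handle", 8675309, "-1002") would cover the first case above.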
class TestPool(TestDaosApiBase): # pylint: disable=too-many-public-methods """A class for functional testing of DaosPools objects.""" def __init__(self, context, dmg_command, cb_handler=None): # pylint: disable=unused-argument """Initialize a TestPool object. Args: context (DaosContext): [description] dmg_command (DmgCommand): DmgCommand used to call dmg command. This value can be obtained by calling self.get_dmg_command() from a test. It'll return the object with -l <Access Point host:port> and --insecure. log (logging): logging object used to report the pool status cb_handler (CallbackHandler, optional): callback object to use with the API methods. Defaults to None. """ super(TestPool, self).__init__("/run/pool/*", cb_handler) self.context = context self.uid = os.geteuid() self.gid = os.getegid() self.mode = BasicParameter(None) self.name = BasicParameter(None) # server group name self.svcn = BasicParameter(None) self.target_list = BasicParameter(None) self.scm_size = BasicParameter(None) self.nvme_size = BasicParameter(None) self.prop_name = BasicParameter(None) # name of property to be set self.prop_value = BasicParameter(None) # value of property self.pool = None self.uuid = None self.info = None self.svc_ranks = None self.connected = False self.dmg = dmg_command self.query_data = [] @fail_on(CommandFailure) @fail_on(DaosApiError) def create(self): """Create a pool with dmg. To use dmg, the test needs to set dmg_command through the constructor. For example, self.pool = TestPool(self.context, DmgCommand(self.bin)) If it wants to use --nsvc option, it needs to set the value to svcn.value. Otherwise, 1 is used. If it wants to use --group, it needs to set groupname.value. If it wants to use --user, it needs to set username.value. If it wants to add other options, directly set it to self.dmg.action_command. Refer dmg_utils.py pool_create method for more details. To test the negative case on create, the test needs to catch CommandFailure. Thus, we need to make more than one line modification to the test only for this purpose. Currently, pool_svc is the only test that needs this change. """ self.destroy() if self.target_list.value is not None: self.log.info( "Creating a pool on targets %s", self.target_list.value) else: self.log.info("Creating a pool") self.pool = DaosPool(self.context) kwargs = { "uid": self.uid, "gid": self.gid, "scm_size": self.scm_size.value, "group": self.name.value} for key in ("target_list", "svcn", "nvme_size"): value = getattr(self, key).value if value is not None: kwargs[key] = value if self.control_method.value == self.USE_API: raise CommandFailure( "Error: control method {} not supported for create()".format( self.control_method.value)) elif self.control_method.value == self.USE_DMG and self.dmg: # Create a pool with the dmg command and store its CmdResult self._log_method("dmg.pool_create", kwargs) data = self.dmg.pool_create(**kwargs) if self.dmg.result.exit_status == 0: # Populate the empty DaosPool object with the properties of the # pool created with dmg pool create. if self.name.value: self.pool.group = ctypes.create_string_buffer( self.name.value) # Convert the string of service replicas from the dmg command # output into an ctype array for the DaosPool object using the # same technique used in DaosPool.create(). 
service_replicas = [ int(value) for value in data["svc"].split(",")] rank_t = ctypes.c_uint * len(service_replicas) rank = rank_t(*list([svc for svc in service_replicas])) rl_ranks = ctypes.POINTER(ctypes.c_uint)(rank) self.pool.svc = daos_cref.RankList( rl_ranks, len(service_replicas)) # Set UUID and attached to the DaosPool object self.pool.set_uuid_str(data["uuid"]) self.pool.attached = 1 elif self.control_method.value == self.USE_DMG: self.log.error("Error: Undefined dmg command") else: self.log.error( "Error: Undefined control_method: %s", self.control_method.value) # Set the TestPool attributes for the created pool if self.pool.attached: self.svc_ranks = [ int(self.pool.svc.rl_ranks[index]) for index in range(self.pool.svc.rl_nr)] self.uuid = self.pool.get_uuid_str() @fail_on(DaosApiError) def connect(self, permission=2): """Connect to the pool. Args: permission (int, optional): connect permission. Defaults to 2. Returns: bool: True if the pool has been connected; False if the pool was already connected or the pool is not defined. """ if self.pool and not self.connected: kwargs = {"flags": permission} self.log.info( "Connecting to pool %s with permission %s (flag: %s)", self.uuid, permission, kwargs["flags"]) self._call_method(self.pool.connect, kwargs) self.connected = True return True return False @fail_on(DaosApiError) def disconnect(self): """Disconnect from connected pool. Returns: bool: True if the pool has been disconnected; False if the pool was already disconnected or the pool is not defined. """ if self.pool and self.connected: self.log.info("Disconnecting from pool %s", self.uuid) self._call_method(self.pool.disconnect, {}) self.connected = False return True return False @fail_on(CommandFailure) @fail_on(DaosApiError) def destroy(self, force=1, disconnect=1): """Destroy the pool with either API or dmg. It uses control_method member previously set, so if you want to use the other way for some reason, update it before calling this method. Args: force (int, optional): force flag. Defaults to 1. disconnect (int, optional): disconnect flag. Defaults to 1. Returns: bool: True if the pool has been destroyed; False if the pool is not defined. """ status = False if self.pool: if disconnect: self.disconnect() if self.pool.attached: self.log.info("Destroying pool %s", self.uuid) if self.control_method.value == self.USE_API: # Destroy the pool with the API method self._call_method(self.pool.destroy, {"force": force}) status = True elif self.control_method.value == self.USE_DMG and self.dmg: # Destroy the pool with the dmg command self.dmg.pool_destroy(pool=self.uuid, force=force) status = True elif self.control_method.value == self.USE_DMG: self.log.error("Error: Undefined dmg command") else: self.log.error( "Error: Undefined control_method: %s", self.control_method.value) self.pool = None self.uuid = None self.info = None self.svc_ranks = None return status @fail_on(CommandFailure) def set_property(self, prop_name=None, prop_value=None): """Set Property. It sets property for a given pool uuid using dmg. Args: prop_name (str, optional): pool property name. Defaults to None, which uses the TestPool.prop_name.value prop_value (str, optional): value to be set for the property. 
Defaults to None, which uses the TestPool.prop_value.value Returns: None """ if self.pool: self.log.info("Set-prop for Pool: %s", self.uuid) if self.control_method.value == self.USE_DMG and self.dmg: # If specific values are not provided, use the class values if prop_name is None: prop_name = self.prop_name.value if prop_value is None: prop_value = self.prop_value.value self.dmg.pool_set_prop(self.uuid, prop_name, prop_value) elif self.control_method.value == self.USE_DMG: self.log.error("Error: Undefined dmg command") else: self.log.error( "Error: Undefined control_method: %s", self.control_method.value) @fail_on(DaosApiError) def get_info(self): """Query the pool for information. Sets the self.info attribute. """ if self.pool: self.connect() self._call_method(self.pool.pool_query, {}) self.info = self.pool.pool_info def check_pool_info(self, pi_uuid=None, pi_ntargets=None, pi_nnodes=None, pi_ndisabled=None, pi_map_ver=None, pi_leader=None, pi_bits=None): # pylint: disable=unused-argument """Check the pool info attributes. Note: Arguments may also be provided as a string with a number preceded by '<', '<=', '>', or '>=' for other comparisons besides the default '=='. Args: pi_uuid (str, optional): pool uuid. Defaults to None. pi_ntargets (int, optional): number of targets. Defaults to None. pi_nnodes (int, optional): number of nodes. Defaults to None. pi_ndisabled (int, optional): number of disabled. Defaults to None. pi_map_ver (int, optional): pool map version. Defaults to None. pi_leader (int, optional): pool leader. Defaults to None. pi_bits (int, optional): pool bits. Defaults to None. Note: Arguments may also be provided as a string with a number preceded by '<', '<=', '>', or '>=' for other comparisons besides the default '=='. Returns: bool: True if at least one expected value is specified and all the specified values match; False otherwise """ self.get_info() checks = [ (key, c_uuid_to_str(getattr(self.info, key)) if key == "pi_uuid" else getattr(self.info, key), val) for key, val in locals().items() if key != "self" and val is not None] return self._check_info(checks) def check_pool_space(self, ps_free_min=None, ps_free_max=None, ps_free_mean=None, ps_ntargets=None, ps_padding=None): # pylint: disable=unused-argument """Check the pool info space attributes. Note: Arguments may also be provided as a string with a number preceded by '<', '<=', '>', or '>=' for other comparisons besides the default '=='. Args: ps_free_min (list, optional): minimum free space per device. Defaults to None. ps_free_max (list, optional): maximum free space per device. Defaults to None. ps_free_mean (list, optional): mean free space per device. Defaults to None. ps_ntargets (int, optional): number of targets. Defaults to None. ps_padding (int, optional): space padding. Defaults to None. Note: Arguments may also be provided as a string with a number preceded by '<', '<=', '>', or '>=' for other comparisons besides the default '=='. 
Returns: bool: True if at least one expected value is specified and all the specified values match; False otherwise """ self.get_info() checks = [] for key in ("ps_free_min", "ps_free_max", "ps_free_mean"): val = locals()[key] if isinstance(val, list): for index, item in enumerate(val): checks.append(( "{}[{}]".format(key, index), getattr(self.info.pi_space, key)[index], item)) for key in ("ps_ntargets", "ps_padding"): val = locals()[key] if val is not None: checks.append((key, getattr(self.info.pi_space, key), val)) return self._check_info(checks) def check_pool_daos_space(self, s_total=None, s_free=None): # pylint: disable=unused-argument """Check the pool info daos space attributes. Note: Arguments may also be provided as a string with a number preceded by '<', '<=', '>', or '>=' for other comparisons besides the default '=='. Args: s_total (list, optional): total space per device. Defaults to None. s_free (list, optional): free space per device. Defaults to None. Note: Arguments may also be provided as a string with a number preceded by '<', '<=', '>', or '>=' for other comparisons besides the default '=='. Returns: bool: True if at least one expected value is specified and all the specified values match; False otherwise """ self.get_info() checks = [ ("{}_{}".format(key, index), getattr(self.info.pi_space.ps_space, key)[index], item) for key, val in locals().items() if key != "self" and val is not None for index, item in enumerate(val)] return self._check_info(checks) def check_rebuild_status(self, rs_version=None, rs_seconds=None, rs_errno=None, rs_done=None, rs_padding32=None, rs_fail_rank=None, rs_toberb_obj_nr=None, rs_obj_nr=None, rs_rec_nr=None, rs_size=None): # pylint: disable=unused-argument # pylint: disable=too-many-arguments """Check the pool info rebuild attributes. Note: Arguments may also be provided as a string with a number preceded by '<', '<=', '>', or '>=' for other comparisons besides the default '=='. Args: rs_version (int, optional): rebuild version. Defaults to None. rs_seconds (int, optional): rebuild seconds. Defaults to None. rs_errno (int, optional): rebuild error number. Defaults to None. rs_done (int, optional): rebuild done flag. Defaults to None. rs_padding32 (int, optional): padding. Defaults to None. rs_fail_rank (int, optional): rebuild fail target. Defaults to None. rs_toberb_obj_nr (int, optional): number of objects to be rebuilt. Defaults to None. rs_obj_nr (int, optional): number of rebuilt objects. Defaults to None. rs_rec_nr (int, optional): number of rebuilt records. Defaults to None. rs_size (int, optional): size of all rebuilt records. Note: Arguments may also be provided as a string with a number preceded by '<', '<=', '>', or '>=' for other comparisons besides the default '=='. Returns: bool: True if at least one expected value is specified and all the specified values match; False otherwise """ self.get_info() checks = [ (key, getattr(self.info.pi_rebuild_st, key), val) for key, val in locals().items() if key != "self" and val is not None] return self._check_info(checks) def rebuild_complete(self): """Determine if the pool rebuild is complete. Returns: bool: True if pool rebuild is complete; False otherwise """ self.display_pool_rebuild_status() return self.info.pi_rebuild_st.rs_done == 1 def wait_for_rebuild(self, to_start, interval=1): """Wait for the rebuild to start or end.
Args: to_start (bool): whether to wait for rebuild to start or end interval (int): number of seconds to wait in between rebuild completion checks """ self.log.info( "Waiting for rebuild to %s ...", "start" if to_start else "complete") while self.rebuild_complete() == to_start: self.log.info( " Rebuild %s ...", "has not yet started" if to_start else "in progress") sleep(interval) self.log.info( "Rebuild %s detected", "start" if to_start else "completion") @fail_on(DaosApiError) @fail_on(CommandFailure) def start_rebuild(self, ranks, daos_log): """Kill/Stop the specific server ranks using this pool. Args: ranks (list): a list of daos server ranks (int) to kill daos_log (DaosLog): object for logging messages Returns: bool: True if the server ranks have been killed/stopped and the ranks have been excluded from the pool; False otherwise. """ msg = "Killing DAOS ranks {} from server group {}".format( ranks, self.name.value) self.log.info(msg) daos_log.info(msg) if self.control_method.value == self.USE_DMG and self.dmg: # Stop desired ranks using dmg self.dmg.system_stop(ranks=convert_list(value=ranks)) return True elif self.control_method.value == self.USE_DMG: self.log.error("Error: Undefined dmg command") else: self.log.error( "Error: Unsupported control_method: %s", self.control_method.value) return False @fail_on(DaosApiError) def exclude(self, ranks, daos_log): """Manually exclude a rank from this pool. Args: ranks (list): a list daos server ranks (int) to exclude daos_log (DaosLog): object for logging messages Returns: bool: True if the ranks were excluded from the pool; False if the pool is undefined """ if self.pool: msg = "Excluding server ranks {} from pool {}".format( ranks, self.uuid) self.log.info(msg) daos_log.info(msg) self._call_method(self.pool.exclude, {"rank_list": ranks}) return True return False def check_files(self, hosts): """Check if pool files exist on the specified list of hosts. Args: hosts (list): list of hosts Returns: bool: True if the files for this pool exist on each host; False otherwise """ return check_pool_files(self.log, hosts, self.uuid.lower()) def write_file(self, orterun, processes, hostfile, size, timeout=60): """Write a file to the pool. Args: orterun (str): full path to the orterun command processes (int): number of processes to launch hosts (list): list of clients from which to write the file size (int): size of the file to create in bytes timeout (int, optional): number of seconds before timing out the command. Defaults to 60 seconds. Returns: process.CmdResult: command execution result """ self.log.info("Writing %s bytes to pool %s", size, self.uuid) #env = { # "DAOS_POOL": self.uuid, # "DAOS_SVCL": "1", # "PYTHONPATH": os.getenv("PYTHONPATH", "") #} env = { "DAOS_POOL": self.uuid, "PYTHONPATH": os.getenv("PYTHONPATH", "") } if not load_mpi("openmpi"): raise CommandFailure("Failed to load openmpi") current_path = os.path.dirname(os.path.abspath(__file__)) command = "{} --np {} --hostfile {} {} {} testfile".format( orterun, processes, hostfile, os.path.join(current_path, "write_some_data.py"), size) return run_command(command, timeout, True, env=env) def get_pool_daos_space(self): """Get the pool info daos space attributes as a dictionary. Returns: dict: a dictionary of lists of the daos space attributes """ self.get_info() keys = ("s_total", "s_free") return {key: getattr(self.info.pi_space.ps_space, key) for key in keys} def get_pool_free_space(self, device="scm"): """Get SCM or NVME free space. Args: device (str, optional): device type, e.g. 
"scm" or "nvme". Defaults to "scm". Returns: str: free SCM or NVME space """ free_space = "0" dev = device.lower() daos_space = self.get_pool_daos_space() if dev == "scm": free_space = daos_space["s_free"][0] elif dev == "nvme": free_space = daos_space["s_free"][1] return free_space def display_pool_daos_space(self, msg=None): """Display the pool info daos space attributes. Args: msg (str, optional): optional text to include in the output. Defaults to None. """ daos_space = self.get_pool_daos_space() sizes = [ "{}[{}]={}".format(key, index, item) for key in sorted(daos_space.keys()) for index, item in enumerate(daos_space[key])] self.log.info( "Pool %s space%s:\n %s", self.uuid, " " + msg if isinstance(msg, str) else "", "\n ".join(sizes)) def pool_percentage_used(self): """Get the pool storage used % for SCM and NVMe. Returns: dict: a dictionary of SCM/NVMe pool space usage in %(float) """ daos_space = self.get_pool_daos_space() pool_percent = {'scm': round(float(daos_space["s_free"][0]) / float(daos_space["s_total"][0]) * 100, 2), 'nvme': round(float(daos_space["s_free"][1]) / float(daos_space["s_total"][1]) * 100, 2)} return pool_percent def get_pool_rebuild_status(self): """Get the pool info rebuild status attributes as a dictionary. Returns: dict: a dictionary of lists of the rebuild status attributes """ self.get_info() keys = ( "rs_version", "rs_padding32", "rs_errno", "rs_done", "rs_toberb_obj_nr", "rs_obj_nr", "rs_rec_nr") return {key: getattr(self.info.pi_rebuild_st, key) for key in keys} def display_pool_rebuild_status(self): """Display the pool info rebuild status attributes.""" status = self.get_pool_rebuild_status() self.log.info( "Pool rebuild status: %s", ", ".join( ["{}={}".format(key, status[key]) for key in sorted(status)])) def read_data_during_rebuild(self, container): """Read data from the container while rebuild is active. Args: container (TestContainer): container from which to read data Returns: bool: True if all the data is read successfully before rebuild completes; False otherwise """ container.open() self.log.info( "Reading objects in container %s during rebuild", self.uuid) # Attempt to read all of the data from the container during rebuild index = 0 status = read_incomplete = index < len(container.written_data) while not self.rebuild_complete() and read_incomplete: try: status &= container.written_data[index].read_object(container) except DaosTestError as error: self.log.error(str(error)) status = False index += 1 read_incomplete = index < len(container.written_data) # Verify that all of the container data was read successfully if read_incomplete: self.log.error( "Rebuild completed before all the written data could be read") status = False elif not status: self.log.error("Errors detected reading data during rebuild") return status @fail_on(CommandFailure) def set_query_data(self): """Execute dmg pool query and store the results. Only supported with the dmg control method. """ self.query_data = [] if self.pool: if self.dmg: self.query_data = self.dmg.pool_query(self.pool.get_uuid_str()) else: self.log.error("Error: Undefined dmg command")
def test_queryasync(self): """ Test container query for asynchronous mode. :avocado: tags=all,small,full_regression,container,queryasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) dummy_str_cuuid = self.container1.get_uuid_str() # Open container self.container1.open(poh, None, 2, None) GLOB_SIGNAL = threading.Event() self.container1.query(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after successful container query: ", GLOB_RC) # Close opened container self.container1.close() # Try to query container2; this should fail because that container # was never created. Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.query(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after failed container query:", GLOB_RC) # cleanup the pool and container self.container1.destroy() self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Test was expected to pass but it failed.\n")
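The test relies on a module-level cb_func() and the GLOB_SIGNAL/GLOB_RC globals defined elsewhere in this file. A sketch of what that callback typically looks like follows; the event.event.ev_error attribute is an assumption here, since the actual definition is outside this excerpt.

GLOB_SIGNAL = None   # the test assigns a threading.Event() before each call
GLOB_RC = -99000000

def cb_func(event):
    """Record the return code of the async operation and wake the waiting test."""
    global GLOB_RC
    GLOB_RC = event.event.ev_error   # assumed field; see note above
    GLOB_SIGNAL.set()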
class FullPoolContainerCreate(TestWithServers): """ Class for test to create a container in a pool with no remaining free space. :avocado: recursive """ def __init__(self, *args, **kwargs): super(FullPoolContainerCreate, self).__init__(*args, **kwargs) self.cont = None self.cont2 = None @skipForTicket("DAOS-3142") def test_no_space_cont_create(self): """ :avocado: tags=all,container,tiny,full_regression,fullpoolcontcreate """ # full storage rc err = "-1007" # probably should be -1007, revisit later err2 = "-1009" # create pool self.pool = DaosPool(self.context) mode = self.params.get("mode", '/conttests/createmode/') self.d_log.debug("mode is {0}".format(mode)) uid = os.geteuid() gid = os.getegid() # 16 mb pool, minimum size currently possible size = 16777216 self.d_log.debug("creating pool") self.pool.create(mode, uid, gid, size, self.server_group, None) self.d_log.debug("created pool") # connect to the pool self.d_log.debug("connecting to pool") self.pool.connect(1 << 1) self.d_log.debug("connected to pool") # query the pool self.d_log.debug("querying pool info") dummy_pool_info = self.pool.pool_query() self.d_log.debug("queried pool info") # create a container try: self.d_log.debug("creating container") self.cont = DaosContainer(self.context) self.cont.create(self.pool.handle) self.d_log.debug("created container") except DaosApiError as excep: self.d_log.error("caught exception creating container: " "{0}".format(excep)) self.fail("caught exception creating container: {0}".format(excep)) self.d_log.debug("opening container") self.cont.open() self.d_log.debug("opened container") # generate random dkey, akey each time # write 1mb until no space, then 1kb, etc. to fill pool quickly for obj_sz in [1048576, 10240, 10, 1]: write_count = 0 while True: self.d_log.debug("writing obj {0}, sz {1} to " "container".format(write_count, obj_sz)) my_str = "a" * obj_sz my_str_sz = obj_sz dkey = (''.join( random.choice(string.lowercase) for i in range(5))) akey = (''.join( random.choice(string.lowercase) for i in range(5))) try: dummy_oid, dummy_tx = self.cont.write_an_obj( my_str, my_str_sz, dkey, akey, obj_cls="OC_SX") self.d_log.debug("wrote obj {0}, sz {1}".format( write_count, obj_sz)) write_count += 1 except DaosApiError as excep: if not (err in repr(excep) or err2 in repr(excep)): self.d_log.error("caught exception while writing " "object: {0}".format(repr(excep))) self.fail("caught exception while writing object: {0}". 
format(repr(excep))) else: self.d_log.debug("pool is too full for {0} byte " "objects".format(obj_sz)) break self.d_log.debug("closing container") self.cont.close() self.d_log.debug("closed container") # create a 2nd container now that pool is full try: self.d_log.debug("creating 2nd container") self.cont2 = DaosContainer(self.context) self.cont2.create(self.pool.handle) self.d_log.debug("created 2nd container") self.d_log.debug("opening container 2") self.cont2.open() self.d_log.debug("opened container 2") self.d_log.debug("writing one more object, write expected to fail") self.cont2.write_an_obj(my_str, my_str_sz, dkey, akey, obj_cls="OC_SX") self.fail("wrote one more object after pool was completely filled," " this should never print") except DaosApiError as excep: if not (err in repr(excep) or err2 in repr(excep)): self.d_log.error("caught unexpected exception while " "writing object: {0}".format(repr(excep))) self.fail("caught unexpected exception while writing " "object: {0}".format(repr(excep))) else: self.d_log.debug("correctly caught -1007 while attempting " "to write object in full pool")
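The dkey/akey generation above uses string.lowercase, which exists only on Python 2; a small helper based on string.ascii_lowercase keeps the same behavior while also running on Python 3 (random_key() is a hypothetical helper, not part of this test).

import random
import string

def random_key(length=5):
    """Return a random lowercase key of the requested length."""
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))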
def create(self): """Create a pool with dmg. To use dmg, the test needs to set dmg_command through the constructor. For example, self.pool = TestPool(self.context, DmgCommand(self.bin)) If it wants to use --nsvc option, it needs to set the value to svcn.value. Otherwise, 1 is used. If it wants to use --group, it needs to set groupname.value. If it wants to use --user, it needs to set username.value. If it wants to add other options, directly set it to self.dmg.action_command. Refer dmg_utils.py pool_create method for more details. To test the negative case on create, the test needs to catch CommandFailure. Thus, we need to make more than one line modification to the test only for this purpose. Currently, pool_svc is the only test that needs this change. """ self.destroy() if self.target_list.value is not None: self.log.info("Creating a pool on targets %s", self.target_list.value) else: self.log.info("Creating a pool") self.pool = DaosPool(self.context) kwargs = { "uid": self.uid, "gid": self.gid, "size": self.size.value, "tier_ratio": self.tier_ratio.value, "scm_size": self.scm_size.value, "nranks": self.nranks.value, "properties": self.properties.value, "acl_file": self.acl_file.value, "label": self.label.value } for key in ("target_list", "svcn", "nvme_size"): value = getattr(self, key).value if value is not None: kwargs[key] = value # Create a pool with the dmg command and store its CmdResult self._log_method("dmg.pool_create", kwargs) data = self.dmg.pool_create(**kwargs) if self.dmg.result.exit_status == 0: # Convert the string of service replicas from the dmg command # output into an ctype array for the DaosPool object using the # same technique used in DaosPool.create(). service_replicas = [int(value) for value in data["svc"].split(",")] rank_t = ctypes.c_uint * len(service_replicas) rank = rank_t(*service_replicas) rl_ranks = ctypes.POINTER(ctypes.c_uint)(rank) self.pool.svc = daos_cref.RankList(rl_ranks, len(service_replicas)) # Set UUID and attached to the DaosPool object self.uuid = data["uuid"] self.pool.attached = 1 # Set effective size of mediums per rank self.scm_per_rank = data["scm_per_rank"] self.nvme_per_rank = data["nvme_per_rank"] # Set the TestPool attributes for the created pool if self.pool.attached: self.svc_ranks = [ int(self.pool.svc.rl_ranks[index]) for index in range(self.pool.svc.rl_nr) ]
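The service-replica conversion above appears in both create() variants and could be factored out; a minimal sketch, where svc_to_ranklist() is hypothetical and daos_cref is assumed to be imported as in this module.

import ctypes

def svc_to_ranklist(svc_string):
    """Convert a dmg 'svc' value such as "0,1,2" into a daos_cref.RankList,
    using the same technique as the method above."""
    replicas = [int(rank) for rank in svc_string.split(",")]
    rank_array = (ctypes.c_uint * len(replicas))(*replicas)
    rl_ranks = ctypes.POINTER(ctypes.c_uint)(rank_array)
    return daos_cref.RankList(rl_ranks, len(replicas))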