def get_pool(context, mode, size, name, svcn=1, log=None, connect=True):
    """Return a DAOS pool that has been created and connected.

    Args:
        context (DaosContext): the context to use to create the pool
        mode (int): the pool mode
        size (int): the size of the pool
        name (str): the name of the pool
        svcn (int): the pool service leader quantity
        log (DaosLog, optional): object for logging messages. Defaults to
            None.
        connect (bool, optional): connect to the new pool. Defaults to True.

    Returns:
        DaosPool: an object representing a DAOS pool

    """
    if log:
        log.info("Creating a pool")
    pool = DaosPool(context)
    pool.create(mode, os.geteuid(), os.getegid(), size, name, svcn=svcn)
    if connect:
        if log:
            log.info("Connecting to the pool")
        pool.connect(1 << 1)
    return pool
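# Hedged usage sketch (not part of the original suite): how a test's setUp
# might call get_pool(); the yaml paths and self attributes mirror the tests
# below and are assumptions here, not a fixed API.  get_pool() connects with
# flag 1 << 1 (read/write) by default.
def example_setup(self):
    createmode = self.params.get("mode", '/run/pool/createmode/')
    createsetid = self.params.get("setname", '/run/pool/createset/')
    createsize = self.params.get("size", '/run/pool/createsize/')
    self.pool = get_pool(self.context, createmode, createsize, createsetid,
                         log=self.d_log)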
def test_destroy_connect(self):
    """
    Test destroying a pool that has a connected client with force == false.
    Should fail.

    :avocado: tags=pool,pooldestroy,x
    """
    hostfile_servers = None
    try:
        # write out a hostfile_servers and start the servers with it
        self.hostlist_servers = self.params.get("test_machines1",
                                                '/run/hosts/')
        host = self.hostlist_servers[0]
        hostfile_servers = write_host_file.write_host_file(
            self.hostlist_servers, self.tmp)
        self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                    self.hostlist_servers)
        server_utils.run_server(hostfile_servers, self.server_group,
                                self.basepath)

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/poolparams/createmode/')
        createuid = self.params.get("uid", '/run/poolparams/createuid/')
        creategid = self.params.get("gid", '/run/poolparams/creategid/')
        createsetid = self.params.get("setname", '/run/poolparams/createset/')
        createsize = self.params.get("size", '/run/poolparams/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid, createsize,
                    createsetid, None)

        # need a connection to create container
        pool.connect(1 << 1)

        # destroy pool with connection open; should throw an exception
        # and never reach the fail below
        pool.destroy(0)
        self.fail("Shouldn't hit this line.\n")

    except DaosApiError as excep:
        print("got exception which is expected so long as it is BUSY")
        print(excep)
        print(traceback.format_exc())

        # pool should still be there; note get_uuid_str() must be called,
        # not passed as a bound method
        exists = check_for_pool.check_for_pool(host, pool.get_uuid_str())
        if exists != 0:
            self.fail("Pool gone, but destroy should have failed.\n")

    # no matter what happens cleanup
    finally:
        if self.agent_sessions:
            agent_utils.stop_agent(self.agent_sessions)
        server_utils.stop_server(hosts=self.hostlist_servers)
        if hostfile_servers:
            os.remove(hostfile_servers)
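# Hedged helper sketch (not in the original suite): the except block above
# accepts any DaosApiError; if only the busy case should pass, the check can
# be tightened.  DER_BUSY rendering as "-1012" in the exception text is an
# assumption about how DaosApiError formats errnos.
def assert_destroy_was_busy(excep):
    if '-1012' not in str(excep):
        raise AssertionError(
            "expected DER_BUSY (-1012) but got: {0}".format(excep))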
def test_global_handle(self): """ Test ID: DAO Test Description: Use a pool handle in another process. :avocado: tags=pool,poolhandle,vm,small,regression """ try: # use the uid/gid of the user running the test, these should # be perfectly valid createuid = os.geteuid() creategid = os.getegid() # parameters used in pool create that are in yaml createmode = self.params.get("mode", '/run/testparams/createmode/') createsetid = self.params.get("setname", '/run/testparams/createset/') createsize = self.params.get("size", '/run/testparams/createsize/') # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) pool.connect(1 << 1) # create a container just to make sure handle is good container = DaosContainer(self.context) container.create(pool.handle) # create a global handle iov_len, buf_len, buf = pool.local2global() # this should work in the future but need on-line server addition #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0) #p = Process(target=check_handle, args=arg_list) #p.start() #p.join() # for now verifying global handle in the same process which is not # the intended use case check_handle(buf_len, iov_len, buf, pool.get_uuid_str(), 0) except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Expecting to pass but test has failed.\n")
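# Hedged sketch (not in the original suite): what the cross-process check
# commented out above might look like once on-line server addition lands.
# check_handle is the same helper the test calls in-process; the argument
# order mirrors the commented-out arg_list.
from multiprocessing import Process

def check_handle_in_new_process(pool, check_handle):
    iov_len, buf_len, buf = pool.local2global()
    proc = Process(target=check_handle,
                   args=(buf_len, iov_len, buf, pool.get_uuid_str(), 0))
    proc.start()
    proc.join()
    return proc.exitcode == 0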
class OpenClose(TestWithServers):
    """
    Tests DAOS container open/close function with handle parameter.

    :avocado: recursive
    """
    def __init__(self, *args, **kwargs):
        super(OpenClose, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            try:
                super(OpenClose, self).tearDown()
            except server_utils.ServerFailed:
                pass

    def test_closehandle(self):
        """
        Test container close function with container handle parameter.

        :avocado: tags=container,openclose,closehandle
        """
        saved_coh = None

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        coh_params = self.params.get("coh",
                                     '/run/container/container_handle/*/')

        expected_result = coh_params[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            self.container1.open(poh, cuuid, 2, None)

            # Defining 'good' and 'bad' container handles
            saved_coh = self.container1.coh
            if coh_params[0] == 'GOOD':
                coh = self.container1.coh
            else:
                # create a second container, open to get a handle,
                # then close & destroy so the handle is invalid; the
                # second container must be opened with its own uuid,
                # not the first container's
                self.container2 = DaosContainer(self.context)
                self.container2.create(poh)
                str_cuuid2 = self.container2.get_uuid_str()
                cuuid2 = uuid.UUID(str_cuuid2)
                self.container2.open(poh, cuuid2, 2, None)
                coh = self.container2.coh
                self.container2.close()
                self.container2.destroy()

            # close container with either good or bad handle
            self.container1.close(coh)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            if expected_result == 'PASS':
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to pass but it failed.\n")

            # close above failed so close for real with the right coh
            if saved_coh is not None:
                self.container1.close(saved_coh)

        finally:
            self.container1.destroy(1)
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None
def test_query(self):
    """
    Pass bad parameters to pool query

    :avocado: tags=pool,poolquery,badparam,badquery
    """
    # parameters used in pool create/connect
    connectmode = self.params.get("mode", '/run/querytests/connectmode/')
    createmode = self.params.get("mode", '/run/querytests/createmode/')
    createuid = self.params.get("uid", '/run/querytests/createuid/')
    creategid = self.params.get("gid", '/run/querytests/creategid/')
    createsetid = self.params.get("setname", '/run/querytests/createset/')
    createsize = self.params.get("size", '/run/querytests/createsize/')

    # Accumulate a list of pass/fail indicators representing what is
    # expected for each parameter then "and" them to determine the
    # expected result of the test
    expected_for_param = []

    handlelist = self.params.get("handle", '/run/querytests/handles/*/')
    handle = handlelist[0]
    expected_for_param.append(handlelist[1])

    infolist = self.params.get("info", '/run/querytests/infoptr/*/')
    dummy_infoptr = infolist[0]
    expected_for_param.append(infolist[1])

    # if any parameter is FAIL then the test should FAIL; in this test
    # virtually every case should FAIL since we are testing bad parameters
    expected_result = 'PASS'
    for result in expected_for_param:
        if result == 'FAIL':
            expected_result = 'FAIL'
            break

    try:
        # setup the DAOS python API
        with open('../../../.build_vars.json') as build_file:
            data = json.load(build_file)
        context = DaosContext(data['PREFIX'] + '/lib/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(context)
        pool.create(createmode, createuid, creategid,
                    createsize, createsetid, None)
        pool.connect(connectmode)

        # trash the pool handle value
        if handle != 'VALID':
            pool.handle = handle

        pool.pool_query()

        if expected_result in ['FAIL']:
            self.fail("Test was expected to fail but it passed.\n")

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        if expected_result in ['PASS']:
            self.fail("Test was expected to pass but it failed.\n")
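# Hedged helper sketch (not in the original suite): several bad-parameter
# tests fold the per-parameter PASS/FAIL indicators with the same loop;
# it reduces to a one-liner.
def expected_outcome(expected_for_param):
    """Return 'FAIL' if any parameter is expected to fail, else 'PASS'."""
    return 'FAIL' if 'FAIL' in expected_for_param else 'PASS'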
class ObjFetchBadParam(TestWithServers):
    """
    Test Class Description:
    Pass an assortment of bad parameters to the daos_obj_fetch function.

    :avocado: recursive
    """
    def setUp(self):
        super(ObjFetchBadParam, self).setUp()
        time.sleep(5)

        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            self.datasize = len(thedata) + 1
            self.dkey = "this is the dkey"
            self.akey = "this is the akey"
            self.obj, self.epoch = self.container.write_an_obj(
                thedata, self.datasize, self.dkey, self.akey, None, None, 2)

            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj,
                                                  self.epoch)
            if thedata not in thedata2.value:
                print(thedata)
                print(thedata2.value)
                self.fail("Error reading back data, test failed during"
                          " the initial setup.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during the initial setup.\n")

    def test_bad_handle(self):
        """
        Test ID: DAOS-1377

        Test Description: Pass a bogus object handle, should return bad
        handle.

        :avocado: tags=all,object,full_regression,small,objbadhandle
        """
        try:
            # trash the handle and read again
            saved_oh = self.obj.obj_handle
            self.obj.obj_handle = 99999

            # expecting this to fail with -1002
            dummy_thedata2 = self.container.read_an_obj(
                self.datasize, self.dkey, self.akey, self.obj, self.epoch)

            # restore the object handle we trashed above, not container.oh
            self.obj.obj_handle = saved_oh
            self.fail("Test was expected to return a -1002 but it has not.\n")

        except DaosApiError as excep:
            self.obj.obj_handle = saved_oh
            if '-1002' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1002 but it has not.\n")

    def test_null_ptrs(self):
        """
        Test ID: DAOS-1377

        Test Description: Pass null pointers for various fetch parameters.

        :avocado: tags=all,object,full_regression,small,objfetchnull
        """
        try:
            # now try it with a bad dkey, expecting this to fail with -1003
            dummy_thedata2 = self.container.read_an_obj(
                self.datasize, None, self.akey, self.obj, self.epoch)

            self.container.close()
            self.container.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # now try it with a null sgl (iod_size is not set),
            # expecting this to fail with -2013
            test_hints = ['sglnull']
            dummy_thedata2 = self.container.read_an_obj(
                self.datasize, self.dkey, self.akey, self.obj, self.epoch,
                test_hints)

            # DAOS-1448 may change this behavior; revisit this check then
            self.fail("Test was expected to return a -2013 but it has not.\n")

        except DaosApiError as excep:
            if '-2013' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -2013 but it has not.\n")

        try:
            # when DAOS-1449 is complete, uncomment and retest
            # now try it with a null iod, expecting this to fail with -1003
            #test_hints = ['iodnull']
            #thedata2 = self.container.read_an_obj(self.datasize, dkey, akey,
            #                                      self.obj, self.epoch,
            #                                      test_hints)
            pass

            #self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")
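# Hedged sketch (not in the original suite): the "expect a specific DAOS
# errno" pattern above repeats throughout these negative tests; a small
# context manager could express it once.  DaosApiError (imported by these
# test modules) rendering errnos as "-NNNN" strings is an assumption
# carried over from the tests themselves.
import contextlib

@contextlib.contextmanager
def expect_daos_errno(test, errno_str):
    try:
        yield
        test.fail("expected {0} but the call succeeded".format(errno_str))
    except DaosApiError as excep:
        if errno_str not in str(excep):
            test.fail("expected {0} but got: {1}".format(errno_str, excep))

# usage sketch:
#     with expect_daos_errno(self, '-1003'):
#         self.container.read_an_obj(self.datasize, None, self.akey,
#                                    self.obj, self.epoch)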
class PunchTest(Test):
    """
    Simple test to verify the 3 different punch calls.
    """
    def setUp(self):
        self.agent_sessions = None
        try:
            # get paths from the build_vars generated by build
            with open('../../../.build_vars.json') as build_file:
                build_paths = json.load(build_file)
            self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

            self.server_group = self.params.get("name", '/server_config/',
                                                'daos_server')

            # setup the DAOS python API
            self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

            self.hostlist = self.params.get("test_machines", '/run/hosts/*')
            self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                            self.workdir)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(self.hostfile, self.server_group,
                                    self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")

    def tearDown(self):
        try:
            if self.container:
                self.container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            if self.container:
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)

            if self.hostfile is not None:
                os.remove(self.hostfile)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during teardown.\n")
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_dkey_punch(self):
        """
        The most basic test of the dkey punch function.

        :avocado: tags=object,punch,dkeypunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, txn = self.container.write_an_obj(thedata, len(thedata)+1,
                                                   dkey, akey, obj_cls=1)

            # read the data back and make sure it's correct
            thedata2 = self.container.read_an_obj(len(thedata)+1, dkey, akey,
                                                  obj, txn)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # now punch this data, should fail, can't punch committed data
            obj.punch_dkeys(txn, [dkey])

            # expecting punch of committed data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as dummy_e:
            pass

        try:
            # now punch this data
            obj.punch_dkeys(0, [dkey])

        # this one should work so error if exception occurs
        except DaosApiError as dummy_e:
            self.fail("Punch should have worked.\n")

        # there are a bunch of other cases to test here,
        # -- test updating and punching the same data in the same tx,
        #    should fail
        # -- test non-updated data in an open tx, should work

    def test_akey_punch(self):
        """
        The most basic test of the akey punch function.

        :avocado: tags=object,punch,akeypunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            dkey = "this is the dkey"
            data1 = [("this is akey 1", "this is data value 1"),
                     ("this is akey 2", "this is data value 2"),
                     ("this is akey 3", "this is data value 3")]
            obj, txn = self.container.write_multi_akeys(dkey, data1,
                                                        obj_cls=1)

            # read back the 1st epoch's data and check 1 value just to
            # make sure everything is on the up and up
            readbuf = [(data1[0][0], len(data1[0][1]) + 1),
                       (data1[1][0], len(data1[1][1]) + 1),
                       (data1[2][0], len(data1[2][1]) + 1)]
            retrieved_data = self.container.read_multi_akeys(dkey, readbuf,
                                                             obj, txn)
            if retrieved_data[data1[1][0]] != data1[1][1]:
                print("middle akey: {}".format(retrieved_data[data1[1][0]]))
                self.fail("data retrieval failure")

            # now punch one akey from this data
            obj.punch_akeys(txn, dkey, [data1[1][0]])

            # expecting punch of committed data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as excep:
            print(excep)

        try:
            # now punch the akey without a tx
            obj.punch_akeys(0, dkey, [data1[1][0]])

        # expecting it to work this time so error
        except DaosApiError as excep:
            self.fail("Punch should have worked.\n")

    def test_obj_punch(self):
        """
        The most basic test of the object punch function.  Really similar
        to above except the whole object is deleted.

        :avocado: tags=object,punch,objpunch,regression,vm,small
        """
        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, txn = self.container.write_an_obj(thedata, len(thedata)+1,
                                                   dkey, akey, obj_cls=1)

            # read the data back and make sure it's correct
            thedata2 = self.container.read_an_obj(len(thedata)+1, dkey, akey,
                                                  obj, txn)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # now punch the object, committed so not expecting it to work
            obj.punch(txn)

            # expecting punch of committed data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as excep:
            print(excep)

        try:
            obj.punch(0)

        # expecting it to work without a tx
        except DaosApiError as excep:
            print(excep)
            self.fail("Punch should have worked.\n")
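# Hedged helper sketch (not in the original suite): all three punch tests
# above share the "punching committed data must fail" shape; a wrapper could
# express it once.  fail_func stands in for self.fail.
def expect_punch_to_fail(punch_call, fail_func):
    try:
        punch_call()
        fail_func("Punch should have failed but it didn't.")
    except DaosApiError:
        pass  # committed data cannot be punched, exception expected

# usage sketch:
#     expect_punch_to_fail(lambda: obj.punch_dkeys(txn, [dkey]), self.fail)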
def test_bad_handle(self):
    """
    Test ID: DAOS-1376

    Test Description: Pass a bogus object handle, should return bad handle.

    :avocado: tags=object,objupdate,objbadhand,regression,vm,small
    """
    try:
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/conttests/createmode/')
        createsetid = self.params.get("setname", '/run/conttests/createset/')
        createsize = self.params.get("size", '/run/conttests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid,
                    createsize, createsetid, None)
        self.plog.info("Pool %s created.", pool.get_uuid_str())

        # need a connection to create container
        pool.connect(1 << 1)

        # create a container
        container = DaosContainer(self.context)
        container.create(pool.handle)
        self.plog.info("Container %s created.", container.get_uuid_str())

        # now open it
        container.open()

        # create an object and write some data into it
        thedata = "a string that I want to stuff into an object"
        thedatasize = len(thedata) + 1
        dkey = "this is the dkey"
        akey = "this is the akey"
        obj, dummy_tx = container.write_an_obj(thedata, thedatasize, dkey,
                                               akey, None, None, 2)

        # trash the object handle, then write again; expecting -1002
        saved_oh = obj.obj_handle
        obj.obj_handle = 99999

        obj, dummy_tx = container.write_an_obj(thedata, thedatasize, dkey,
                                               akey, obj, None, 2)

        # restore the object handle we trashed above, not container.oh
        obj.obj_handle = saved_oh
        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)
        self.fail("Test was expected to return a -1002 but it has not.\n")

    except DaosApiError as excep:
        obj.obj_handle = saved_oh
        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)
        self.plog.info("Test Complete")
        if '-1002' not in str(excep):
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to get -1002 but it has not.\n")
def test_container_create(self):
    """
    Test ID: DAOS-689

    Test Description: valid and invalid container creation and close.

    :avocado: tags=regression,cont,contcreate
    """
    pool = None
    contuuid = None
    expected_results = []
    # initialized before the try so the except block cannot hit an
    # unbound name if the try fails before it is computed
    should_fail = False
    try:
        # initialize a python pool object then create the underlying
        # daos storage
        createmode = self.params.get("mode", '/run/poolparams/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/poolparams/')
        createsize = self.params.get("size", '/run/poolparams/')

        # setup the pool
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid, createsize,
                    createsetid)
        pool.connect(1 << 1)

        # maybe use the good handle, maybe not
        handleparam = self.params.get("handle", '/run/poolhandle/*')
        if handleparam == 'VALID':
            poh = pool.handle
        else:
            poh = handleparam
            expected_results.append('FAIL')

        # maybe use a good UUID, maybe not
        uuidparam = self.params.get("uuid", "/uuids/*")
        expected_results.append(uuidparam[1])
        if uuidparam[0] == 'NULLPTR':
            self.cancel("skipping this test until DAOS-2043 is fixed")
            contuuid = 'NULLPTR'
        else:
            contuuid = uuid.UUID(uuidparam[0])

        for result in expected_results:
            if result == 'FAIL':
                should_fail = True
                break

        self.container = DaosContainer(self.context)
        self.container.create(poh, contuuid)

        # check that the UUID is the specified one
        if (uuidparam[0]).upper() != self.container.get_uuid_str().upper():
            print("uuidparam[0] is {}, uuid_str is {}".format(
                uuidparam[0], self.container.get_uuid_str()))
            self.fail("Container UUID differs from specified at create\n")

        if should_fail:
            self.fail("Test was expected to fail but it passed.\n")

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        if not should_fail:
            self.fail("Test was expected to pass but it failed.\n")

    finally:
        if pool is not None:
            pool.disconnect()
            pool.destroy(1)
class InfoTests(Test):
    """
    Tests DAOS pool query.
    """
    def setUp(self):
        # there is a presumption that this test lives in a specific spot
        # in the repo; get paths from the build_vars generated by build
        with open(
            os.path.join(os.path.dirname(os.path.realpath(__file__)),
                         "../../../../.build_vars.json")) as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        context = DaosContext(build_paths['PREFIX'] + '/lib/')
        print("initialized!!!\n")

        self.pool = DaosPool(context)
        self.hostlist = self.params.get("test_machines1", '/run/hosts/')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        # shut 'er down
        self.pool.destroy(1)
        ServerUtils.stopServer()
        os.remove(self.hostfile)

    def test_simple_query(self):
        """
        Test querying a pool created on a single server.

        :avocado: tags=pool,poolquery,infotest
        """
        # create pool (0o731 is the octal default mode; the bare 0731
        # literal is Python 2 only)
        mode = self.params.get("mode", '/run/testparams/modes/*', 0o731)
        uid = os.geteuid()
        gid = os.getegid()
        size = self.params.get("size", '/run/testparams/sizes/*', 0)
        tgt_list = None
        group = self.server_group

        self.pool.create(mode, uid, gid, size, group, tgt_list)
        PROGRESS_LOG.info("created pool")

        # connect to the pool
        flags = self.params.get("perms", '/run/testparams/connectperms/*', '')
        connect_flags = 1 << flags
        self.pool.connect(connect_flags)
        PROGRESS_LOG.info("connected to pool")

        # query the pool
        pool_info = self.pool.pool_query()
        PROGRESS_LOG.info("queried pool info")

        # check uuid
        uuid_str = c_uuid_to_str(pool_info.pi_uuid)
        PROGRESS_LOG.info("pool uuid pool_info.pi_uuid: {0}".format(uuid_str))
        PROGRESS_LOG.info("pool uuid saved in api at create time: "
                          "{0}".format(self.pool.get_uuid_str()))
        if uuid_str != self.pool.get_uuid_str():
            self.fail("UUID str does not match expected string")

        # validate size of pool is what we expect
        PROGRESS_LOG.info("pool should be {0} bytes".format(size))
        PROGRESS_LOG.info("pool actual space is {0} bytes".format(
            pool_info.pi_space))
        '''
        This check is currently disabled, as space is not implemented in
        DAOS C API yet.
        if size != pool_info.pi_space:
            self.fail("expected size {0} did not match actual size {1}"
                      .format(size, pool_info.pi_space))
        '''

        # number of targets
        PROGRESS_LOG.info("number of targets in pool: %s",
                          pool_info.pi_ntargets)
        if pool_info.pi_ntargets != len(self.hostlist):
            self.fail("found number of targets in pool did not match "
                      "expected number, 1. num targets: {0}".format(
                          pool_info.pi_ntargets))

        # number of disabled targets
        PROGRESS_LOG.info("number of disabled targets in pool: %s",
                          pool_info.pi_ndisabled)
        if pool_info.pi_ndisabled > 0:
            self.fail("found disabled targets, none expected to be disabled")

        # mode
        PROGRESS_LOG.info("pool mode: %s", pool_info.pi_mode)
        if pool_info.pi_mode != mode:
            self.fail("found different mode than expected. expected {0}, "
                      "found {1}.".format(mode, pool_info.pi_mode))

        # uid
        PROGRESS_LOG.info("expected uid is {0}".format(uid))
        if pool_info.pi_uid != uid:
            self.fail("found actual pool uid {0} does not match expected uid "
                      "{1}".format(pool_info.pi_uid, uid))

        # gid
        PROGRESS_LOG.info("expected gid is {0}".format(gid))
        if pool_info.pi_gid != gid:
            self.fail("found actual pool gid {0} does not match expected gid "
                      "{1}".format(pool_info.pi_gid, gid))
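# Hedged sketch (not in the original suite): the field-by-field pool_info
# checks above could be table-driven; attribute names follow the pool_info
# structure used in test_simple_query, and test stands in for self.
def verify_pool_info(test, pool_info, expected):
    """expected: dict mapping a pool_info attribute name to its value."""
    for attr, want in expected.items():
        got = getattr(pool_info, attr)
        if got != want:
            test.fail("{0}: expected {1}, found {2}".format(attr, want, got))

# usage sketch:
#     verify_pool_info(self, pool_info, {"pi_ntargets": len(self.hostlist),
#                                        "pi_ndisabled": 0, "pi_mode": mode,
#                                        "pi_uid": uid, "pi_gid": gid})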
def test_multipool_rebuild(self):
    """
    Test ID: Rebuild-002
    Test Description: Expand on the basic test by rebuilding 2
    pools at once.

    Use Cases:
      -- multipool rebuild, single client, various object and record
         counts

    :avocado: tags=pool,rebuild,rebuildmulti
    """
    try:
        # initialize python pool object then create the underlying
        # daos storage, the way the code is now the pools should be
        # on the same storage and have the same service leader
        pool1 = DaosPool(self.context)
        pool2 = DaosPool(self.context)
        pool1.create(self.createmode, self.createuid, self.creategid,
                     self.createsize, self.createsetid)
        pool2.create(self.createmode, self.createuid, self.creategid,
                     self.createsize, self.createsetid)

        # want an open connection during rebuild
        pool1.connect(1 << 1)
        pool2.connect(1 << 1)

        # create containers
        container1 = DaosContainer(self.context)
        container1.create(pool1.handle)
        container2 = DaosContainer(self.context)
        container2.create(pool2.handle)

        # now open them
        container1.open()
        container2.open()

        # putting the same data in both pools, at least for now, to
        # simplify checking it's correct
        saved_data = []
        for _objc in range(self.objcount):
            obj = None
            for _recc in range(self.reccount):
                # make some stuff up and write
                dkey = (
                    ''.join(random.choice(string.ascii_uppercase +
                                          string.digits) for _ in range(5)))
                akey = (
                    ''.join(random.choice(string.ascii_uppercase +
                                          string.digits) for _ in range(5)))
                data = (
                    ''.join(random.choice(string.ascii_uppercase +
                                          string.digits)
                            for _ in range(self.size)))

                # Used DAOS_OC_R1S_SPEC_RANK
                # 1 replica with specified rank
                obj, txn = container1.write_an_obj(data, len(data), dkey,
                                                   akey, obj, self.rank,
                                                   obj_cls=15)
                obj, txn = container2.write_an_obj(data, len(data), dkey,
                                                   akey, obj, self.rank,
                                                   obj_cls=15)
                saved_data.append((obj, dkey, akey, data, txn))

                # read the data back and make sure it's correct in both
                # containers
                data2 = container1.read_an_obj(self.size, dkey, akey, obj,
                                               txn)
                if data != data2.value:
                    self.fail("Wrote data P1, read it back, didn't match\n")
                data2 = container2.read_an_obj(self.size, dkey, akey, obj,
                                               txn)
                if data != data2.value:
                    self.fail("Wrote data P2, read it back, didn't match\n")

        # kill a server
        server = DaosServer(self.context, self.server_group, self.rank)
        server.kill(1)

        # temporarily, the exclude of a failed target must be done
        # manually
        pool1.exclude([self.rank])
        pool2.exclude([self.rank])

        # check that rebuild finishes with no errors and the progress
        # data matches what we know to be true.  Check pool 1 first,
        # then pool 2 below.
        while True:
            pool1.pool_query()
            if pool1.pool_info.pi_rebuild_st.rs_done == 1:
                break
            else:
                time.sleep(2)

        # check there are no errors and the other data matches what we
        # know a priori to be true
        if pool1.pool_info.pi_ndisabled != 1:
            self.fail("P1 number disabled targets reporting incorrectly: {}"
                      .format(pool1.pool_info.pi_ndisabled))
        if pool1.pool_info.pi_rebuild_st.rs_errno != 0:
            self.fail("P1 rebuild error reported: {}"
                      .format(pool1.pool_info.pi_rebuild_st.rs_errno))
        if pool1.pool_info.pi_rebuild_st.rs_obj_nr != self.objcount:
            self.fail("P1 rebuilt objs not as expected: {0} {1}"
                      .format(pool1.pool_info.pi_rebuild_st.rs_obj_nr,
                              self.objcount))
        if (pool1.pool_info.pi_rebuild_st.rs_rec_nr !=
                (self.reccount*self.objcount)):
            self.fail("P1 rebuilt recs not as expected: {0} {1}"
                      .format(pool1.pool_info.pi_rebuild_st.rs_rec_nr,
                              self.reccount*self.objcount))

        # now that the rebuild finished verify the records are correct
        for tup in saved_data:
            data2 = container1.read_an_obj(len(tup[3]), tup[1], tup[2],
                                           tup[0], tup[4])
            if tup[3] != data2.value:
                self.fail("after rebuild data didn't check out")

        # now check the other pool
        while True:
            pool2.pool_query()
            if pool2.pool_info.pi_rebuild_st.rs_done == 1:
                break
            else:
                time.sleep(2)

        # check there are no errors and the other data matches what we
        # know a priori to be true
        if pool2.pool_info.pi_ndisabled != 1:
            self.fail("Number disabled targets reporting incorrectly: {}"
                      .format(pool2.pool_info.pi_ndisabled))
        if pool2.pool_info.pi_rebuild_st.rs_errno != 0:
            self.fail("Rebuild error reported: {}"
                      .format(pool2.pool_info.pi_rebuild_st.rs_errno))
        if pool2.pool_info.pi_rebuild_st.rs_obj_nr != self.objcount:
            self.fail("Rebuilt objs not as expected: {0} {1}"
                      .format(pool2.pool_info.pi_rebuild_st.rs_obj_nr,
                              self.objcount))
        if (pool2.pool_info.pi_rebuild_st.rs_rec_nr !=
                (self.reccount*self.objcount)):
            self.fail("Rebuilt recs not as expected: {0} {1}"
                      .format(pool2.pool_info.pi_rebuild_st.rs_rec_nr,
                              (self.reccount*self.objcount)))

        # now that the rebuild finished verify the records are correct
        for tup in saved_data:
            data2 = container2.read_an_obj(len(tup[3]), tup[1], tup[2],
                                           tup[0], tup[4])
            if tup[3] != data2.value:
                self.fail("after rebuild data didn't check out")

    except DaosApiError as excp:
        print(excp)
        print(traceback.format_exc())
        self.fail("Expecting to pass but test has failed.\n")
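# Hedged helper sketch (not in the original suite): both rebuild loops above
# poll pool_query() until rs_done is set; adding a timeout keeps a wedged
# rebuild from hanging the test forever.  Field names match those used above.
import time

def wait_for_rebuild(pool, poll_interval=2, timeout=300):
    deadline = time.time() + timeout
    while time.time() < deadline:
        pool.pool_query()
        if pool.pool_info.pi_rebuild_st.rs_done == 1:
            return
        time.sleep(poll_interval)
    raise RuntimeError("rebuild did not complete within {0}s".format(timeout))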
def test_simple_rebuild(self):
    """
    Test ID: Rebuild-001

    Test Description: The most basic rebuild test.

    Use Cases:
      -- single pool rebuild, single client, various record/object counts

    :avocado: tags=pool,rebuild,rebuildsimple
    """
    try:
        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(self.createmode, self.createuid, self.creategid,
                    self.createsize, self.createsetid)

        # want an open connection during rebuild
        pool.connect(1 << 1)

        # get pool status we want to test later
        pool.pool_query()
        if pool.pool_info.pi_ndisabled != 0:
            self.fail("Number of disabled targets reporting incorrectly.\n")
        if pool.pool_info.pi_rebuild_st.rs_errno != 0:
            self.fail("Rebuild error but rebuild hasn't run.\n")
        if pool.pool_info.pi_rebuild_st.rs_done != 1:
            self.fail("Rebuild is running but device hasn't failed yet.\n")
        if pool.pool_info.pi_rebuild_st.rs_obj_nr != 0:
            self.fail("Rebuilt objs not zero.\n")
        if pool.pool_info.pi_rebuild_st.rs_rec_nr != 0:
            self.fail("Rebuilt recs not zero.\n")

        # create a container
        container = DaosContainer(self.context)
        container.create(pool.handle)

        # now open it
        container.open()

        saved_data = []
        for _objc in range(self.objcount):
            obj = None
            for _recc in range(self.reccount):
                # make some stuff up and write
                dkey = (
                    ''.join(random.choice(string.ascii_uppercase +
                                          string.digits) for _ in range(5)))
                akey = (
                    ''.join(random.choice(string.ascii_uppercase +
                                          string.digits) for _ in range(5)))
                data = (''.join(random.choice(string.ascii_uppercase +
                                              string.digits)
                                for _ in range(self.size)))

                obj, txn = container.write_an_obj(data, len(data), dkey,
                                                  akey, obj, self.rank,
                                                  obj_cls=16)
                saved_data.append((obj, dkey, akey, data, txn))

                # read the data back and make sure it's correct
                data2 = container.read_an_obj(self.size, dkey, akey, obj,
                                              txn)
                if data != data2.value:
                    self.fail("Wrote data, read it back, didn't match\n")

        # kill a server
        server = DaosServer(self.context, self.server_group, self.rank)
        server.kill(1)

        # temporarily, the exclude of a failed target must be done manually
        pool.exclude([self.rank])

        while True:
            # get the pool/rebuild status again
            pool.pool_query()
            if pool.pool_info.pi_rebuild_st.rs_done == 1:
                break
            else:
                time.sleep(2)

        if pool.pool_info.pi_ndisabled != 1:
            self.fail("Number of disabled targets reporting incorrectly: {}"
                      .format(pool.pool_info.pi_ndisabled))
        if pool.pool_info.pi_rebuild_st.rs_errno != 0:
            self.fail("Rebuild error reported: {}"
                      .format(pool.pool_info.pi_rebuild_st.rs_errno))
        if pool.pool_info.pi_rebuild_st.rs_obj_nr != self.objcount:
            self.fail("Rebuilt objs not as expected: {0} {1}"
                      .format(pool.pool_info.pi_rebuild_st.rs_obj_nr,
                              self.objcount))
        if (pool.pool_info.pi_rebuild_st.rs_rec_nr !=
                (self.reccount*self.objcount)):
            self.fail("Rebuilt recs not as expected: {0} {1}"
                      .format(pool.pool_info.pi_rebuild_st.rs_rec_nr,
                              self.reccount*self.objcount))

        # now that the rebuild finished verify the records are correct
        for tup in saved_data:
            data2 = container.read_an_obj(len(tup[3]), tup[1], tup[2],
                                          tup[0], tup[4])
            if tup[3] != data2.value:
                self.fail("after rebuild data didn't check out")

    except DaosApiError as excp:
        print(excp)
        print(traceback.format_exc())
        self.fail("Expecting to pass but test has failed.\n")
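# Hedged sketch (not in the original suite): the post-rebuild assertions in
# the simple and multipool rebuild tests are identical; factored out they
# could look like this.  test stands in for the test case instance (self).
def check_rebuild_stats(test, pool, objcount, reccount, label="P1"):
    info = pool.pool_info
    if info.pi_ndisabled != 1:
        test.fail("{0} number disabled targets reporting incorrectly: {1}"
                  .format(label, info.pi_ndisabled))
    if info.pi_rebuild_st.rs_errno != 0:
        test.fail("{0} rebuild error reported: {1}"
                  .format(label, info.pi_rebuild_st.rs_errno))
    if info.pi_rebuild_st.rs_obj_nr != objcount:
        test.fail("{0} rebuilt objs not as expected: {1} {2}"
                  .format(label, info.pi_rebuild_st.rs_obj_nr, objcount))
    if info.pi_rebuild_st.rs_rec_nr != reccount * objcount:
        test.fail("{0} rebuilt recs not as expected: {1} {2}"
                  .format(label, info.pi_rebuild_st.rs_rec_nr,
                          reccount * objcount))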
class ObjectDataValidation(avocado.Test):
    """
    Test Class Description:
        Create records of varying lengths, disconnect and reconnect the
        pool/container, then validate the data after the reconnect.
    """
    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def reconnect(self):
        '''
        Reconnect the pool/container and reopen the object for read
        verification.
        '''
        # close the object/container and disconnect the pool
        self.obj.close()
        self.container.close()
        self.pool.disconnect()
        time.sleep(5)

        # connect the pool, open the container and the object again
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    @avocado.fail_on(DaosApiError)
    def test_single_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify single data after
        pool/container disconnect/reconnect.
:avocado: tags=single_object,data_verification,medium,vm """ self.d_log.info("Writing the Single Dataset") record_index = 0 transaction = [] for dkey in range(self.no_of_dkeys): for akey in range(self.no_of_akeys): indata = ("{0}".format(str(akey)[0]) * self.record_length[record_index]) c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey)) c_akey = ctypes.create_string_buffer("akey {0}".format(akey)) c_value = ctypes.create_string_buffer(indata) c_size = ctypes.c_size_t(ctypes.sizeof(c_value)) new_transaction = self.container.get_new_tx() self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size, new_transaction) self.container.commit_tx(new_transaction) transaction.append(new_transaction) record_index = record_index + 1 if record_index == len(self.record_length): record_index = 0 self.reconnect() self.d_log.info("Single Dataset Verification -- Started") record_index = 0 transaction_index = 0 for dkey in range(self.no_of_dkeys): for akey in range(self.no_of_akeys): indata = ("{0}".format(str(akey)[0]) * self.record_length[record_index]) c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey)) c_akey = ctypes.create_string_buffer("akey {0}".format(akey)) val = self.ioreq.single_fetch(c_dkey, c_akey, len(indata)+1) if indata != (repr(val.value)[1:-1]): self.d_log.error("ERROR:Data mismatch for " "dkey = {0}, " "akey = {1}".format( "dkey {0}".format(dkey), "akey {0}".format(akey))) self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}" .format("dkey {0}".format(dkey), "akey {0}".format(akey))) transaction_index = transaction_index + 1 record_index = record_index + 1 if record_index == len(self.record_length): record_index = 0 @avocado.fail_on(DaosApiError) def test_array_object_validation(self): """ Test ID: DAOS-707 Test Description: Write Avocado Test to verify Array data after pool/container disconnect/reconnect. 
:avocado: tags=array_object,data_verification,array,medium,vm """ self.d_log.info("Writing the Array Dataset") record_index = 0 transaction = [] for dkey in range(self.no_of_dkeys): for akey in range(self.no_of_akeys): c_values = [] value = ("{0}".format(str(akey)[0]) * self.record_length[record_index]) for item in range(self.array_size): c_values.append((ctypes.create_string_buffer(value), len(value)+1)) c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey)) c_akey = ctypes.create_string_buffer("akey {0}".format(akey)) new_transaction = self.container.get_new_tx() self.ioreq.insert_array(c_dkey, c_akey, c_values, new_transaction) self.container.commit_tx(new_transaction) transaction.append(new_transaction) record_index = record_index + 1 if record_index == len(self.record_length): record_index = 0 self.reconnect() self.d_log.info("Array Dataset Verification -- Started") record_index = 0 transaction_index = 0 for dkey in range(self.no_of_dkeys): for akey in range(self.no_of_akeys): indata = [] value = ("{0}".format(str(akey)[0]) * self.record_length[record_index]) for item in range(self.array_size): indata.append(value) c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey)) c_akey = ctypes.create_string_buffer("akey {0}".format(akey)) c_rec_count = ctypes.c_uint(len(indata)) c_rec_size = ctypes.c_size_t(len(indata[0]) + 1) outdata = self.ioreq.fetch_array(c_dkey, c_akey, c_rec_count, c_rec_size) for item in enumerate(indata): if indata[item[0]] != outdata[item[0]][:-1]: self.d_log.error("ERROR:Data mismatch for " "dkey = {0}, " "akey = {1}".format( "dkey {0}".format(dkey), "akey {0}".format(akey))) self.fail("ERROR:Data mismatch for dkey = {0}, akey={1}" .format("dkey {0}".format(dkey), "akey {0}".format(akey))) transaction_index = transaction_index + 1 record_index = record_index + 1 if record_index == len(self.record_length): record_index = 0
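# Hedged sketch (not in the original suite): the record_index wrap-around
# bookkeeping in the two validation tests above could be expressed with
# itertools.cycle, which restarts the length sequence automatically.
import itertools

def length_cycle(record_length):
    """Yield record lengths, wrapping around like record_index does."""
    return itertools.cycle(record_length)

# usage sketch:
#     lengths = length_cycle(self.record_length)
#     indata = str(akey)[0] * next(lengths)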
class NvmeIo(avocado.Test):
    """
    Test Class Description:
        Verify NVMe and SCM pool space accounting under IOR I/O with
        standard and non-standard sizes and boundary conditions.
    """

    def setUp(self):
        self.pool = None
        self.hostlist = None
        self.hostfile_clients = None
        self.hostfile = None
        self.out_queue = None
        self.pool_connect = False

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)

        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("servers", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        # start the servers
        server_utils.run_server(self.hostfile, self.server_group,
                                self.basepath)

    def tearDown(self):
        try:
            if self.pool_connect:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            server_utils.stop_server(hosts=self.hostlist)

    def verify_pool_size(self, original_pool_info, ior_args):
        """
        Validate the pool size

        original_pool_info: pool info prior to IOR
        ior_args: IOR args used to calculate the file size
        """
        # get the current pool size for comparison
        current_pool_info = self.pool.pool_query()

        # if the transfer size is >= 4K, the pool size is verified against
        # NVMe, otherwise it is checked against SCM
        if ior_args['stripe_size'] >= 4096:
            print("Size is >= 4K, size verification will be done with NVMe "
                  "size")
            storage_index = 1
        else:
            print("Size is < 4K, size verification will be done with SCM "
                  "size")
            storage_index = 0

        free_pool_size = (
            original_pool_info.pi_space.ps_space.s_free[storage_index] -
            current_pool_info.pi_space.ps_space.s_free[storage_index])

        obj_multiplier = 1
        # search the object class value itself; the original quoted the
        # expression and searched the literal string instead
        replica_number = re.findall(r'\d+', ior_args['object_class'])
        if replica_number:
            obj_multiplier = int(replica_number[0])
        expected_pool_size = (ior_args['slots'] * ior_args['block_size'] *
                              obj_multiplier)

        if free_pool_size < expected_pool_size:
            raise DaosTestError(
                'Pool Free Size did not match Actual = {} Expected = {}'
                .format(free_pool_size, expected_pool_size))

    @avocado.fail_on(DaosApiError)
    def test_nvme_io(self):
        """
        Test ID: DAOS-2082
        Test Description: Run IOR with standard and non-standard sizes for
            every supported object type, and verify the pool size (SCM and
            NVMe) against the IOR file.  This test runs multiple IORs on
            the same server start instance.

        :avocado: tags=nvme,nvme_io,large
        """
        ior_args = {}

        hostlist_clients = self.params.get("clients", '/run/hosts/*')
        tests = self.params.get("ior_sequence", '/run/ior/*')
        object_type = self.params.get("object_type", '/run/ior/*')

        # loop over every IOR object type
        for obj_type in object_type:
            for ior_param in tests:
                self.hostfile_clients = write_host_file.write_host_file(
                    hostlist_clients, self.workdir, ior_param[4])

                # there is an issue with NVMe if the transfer size is >64M,
                # so skip those sizes for now
                if ior_param[2] > 67108864:
                    print("Xfersize > 64M fails, skipping (DAOS-1264)")
                    continue

                self.pool = DaosPool(self.context)
                self.pool.create(self.params.get("mode",
                                                 '/run/pool/createmode/*'),
                                 os.geteuid(),
                                 os.getegid(),
                                 ior_param[0],
                                 self.params.get("setname",
                                                 '/run/pool/createset/*'),
                                 nvme_size=ior_param[1])
                self.pool.connect(1 << 1)
                self.pool_connect = True
                createsvc = self.params.get("svcn", '/run/pool/createsvc/')
                svc_list = ""
                for i in range(createsvc):
                    svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
                svc_list = svc_list[:-1]

                ior_args['client_hostfile'] = self.hostfile_clients
                ior_args['pool_uuid'] = self.pool.get_uuid_str()
                ior_args['svc_list'] = svc_list
                ior_args['basepath'] = self.basepath
                ior_args['server_group'] = self.server_group
                ior_args['tmp_dir'] = self.workdir
                ior_args['iorflags'] = self.params.get("iorflags",
                                                       '/run/ior/*')
                ior_args['iteration'] = self.params.get("iteration",
                                                        '/run/ior/*')
                ior_args['stripe_size'] = ior_param[2]
                ior_args['block_size'] = ior_param[3]
                ior_args['stripe_count'] = self.params.get("stripecount",
                                                           '/run/ior/*')
                ior_args['async_io'] = self.params.get("asyncio",
                                                       '/run/ior/*')
                ior_args['object_class'] = obj_type
                ior_args['slots'] = ior_param[4]

                # IOR is going to use the same --daos.stripeSize,
                # --daos.recordSize and transfer size
                try:
                    size_before_ior = self.pool.pool_query()
                    ior_utils.run_ior(ior_args['client_hostfile'],
                                      ior_args['iorflags'],
                                      ior_args['iteration'],
                                      ior_args['block_size'],
                                      ior_args['stripe_size'],
                                      ior_args['pool_uuid'],
                                      ior_args['svc_list'],
                                      ior_args['stripe_size'],
                                      ior_args['stripe_size'],
                                      ior_args['stripe_count'],
                                      ior_args['async_io'],
                                      ior_args['object_class'],
                                      ior_args['basepath'],
                                      ior_args['slots'],
                                      filename=str(uuid.uuid4()),
                                      display_output=True)
                    self.verify_pool_size(size_before_ior, ior_args)
                except ior_utils.IorFailed as exe:
                    print(exe)
                    print(traceback.format_exc())
                    self.fail("IOR run failed")

                try:
                    if self.pool_connect:
                        self.pool.disconnect()
                        self.pool_connect = False
                    if self.pool:
                        self.pool.destroy(1)
                except DaosApiError as exe:
                    print(exe)
                    self.fail("Failed to Destroy/Disconnect the Pool")
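# Hedged sketch (not in the original suite): the svc_list string built in
# the loop above reduces to a join(); pool.svc.rl_ranks is the service rank
# list already used by the test.
def svc_ranks_str(pool, createsvc):
    """Return the service ranks as a colon-separated string."""
    return ":".join(str(int(pool.svc.rl_ranks[i])) for i in range(createsvc))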
class BasicSnapshot(Test):
    """
    DAOS-1370 Basic snapshot test

    Test Class Description:
    Test that a snapshot taken of a container remains unchanged even after
    an object in the container has been updated 500 times.
    Create the container.
    Write an object to the container.
    Take a snapshot.
    Write 500 changes to the KV pair of the object.
    Check that the snapshot is still there.
    Confirm that the data in the snapshot is unchanged.
    Destroy the snapshot.
    """

    def __init__(self, *args, **kwargs):
        super(BasicSnapshot, self).__init__(*args, **kwargs)
        self.snapshot = None

    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as finput:
            build_paths = json.load(finput)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(self.hostlist,
                                                   self.workdir)

        server_utils.run_server(hostfile, server_group, basepath)

        # Set up the pool and container.
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/*')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as error:
            print(error)
            print(traceback.format_exc())
            self.fail("Test failed before snapshot taken")

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            server_utils.stop_server()

    def test_basic_snapshot(self):
        """
        Test ID: DAOS-1370

        Test Description: Create a pool, container in the pool, object in
        the container, add one key:value to the object.  Commit the
        transaction.  Perform a snapshot create on the container.  Create
        500 additional transactions with a small change to the object in
        each and commit each after the object update is done.  Verify the
        snapshot is still available and the contents remain in their
        original state.

        :avocado: tags=snapshot,basicsnap
        """
        try:
            # create an object and write some data into it
            obj_cls = self.params.get("obj_class", '/run/object_class/*')
            thedata = "Now is the winter of our discontent made glorious"
            datasize = len(thedata) + 1
            dkey = "dkey"
            akey = "akey"
            obj, epoch = self.container.write_an_obj(thedata,
                                                     datasize,
                                                     dkey,
                                                     akey,
                                                     obj_cls=obj_cls)
            obj.close()

            # Take a snapshot of the container
            self.snapshot = DaosSnapshot(self.context)
            self.snapshot.create(self.container.coh, epoch)
            print("Wrote an object and created a snapshot")

        except DaosApiError as error:
            self.fail("Test failed during the initial object write.\n{0}"
                      .format(error))

        # Make 500 changes to the data object.  The write_an_obj function
        # does a commit when the update is complete.
        try:
            def rand_str(length):
                """Return a random lowercase string of the given length."""
                return ''.join(random.choice(string.ascii_lowercase)
                               for _ in range(length))

            print("Committing 500 additional transactions to the same KV")
            more_transactions = 500
            while more_transactions:
                size = random.randint(1, 250) + 1
                new_data = rand_str(size)
                new_obj, _ = self.container.write_an_obj(new_data, size,
                                                         dkey, akey,
                                                         obj_cls=obj_cls)
                new_obj.close()
                more_transactions -= 1
        except Exception as error:
            self.fail("Test failed during the write of 500 objects.\n{0}"
                      .format(error))

        # List the snapshot and make sure it reflects the original epoch
        try:
            reported_epoch = self.snapshot.list(self.container.coh)
            if self.snapshot.epoch != reported_epoch:
                raise Exception("The snapshot epoch returned from snapshot "
                                "list is not the same as the original epoch "
                                "snapshotted.")
            print("After 500 additional commits the snapshot is still "
                  "available")
        except Exception as error:
            self.fail("Test was unable to list the snapshot\n{0}"
                      .format(error))

        # Make sure the data in the snapshot is the original data.
        # Get a handle for the snapshot and read the object at dkey, akey.
        # Compare it to the originally written data.
        try:
            obj.open()
            snap_handle = self.snapshot.open(self.container.coh)
            thedata2 = self.container.read_an_obj(datasize, dkey, akey, obj,
                                                  snap_handle.value)
            if thedata2.value != thedata:
                raise Exception("The data in the snapshot is not the same "
                                "as the original data")
            print("The snapshot data matches the data originally written.")
        except Exception as error:
            self.fail("Error when retrieving the snapshot data.\n{0}"
                      .format(error))

        # Now destroy the snapshot
        try:
            self.snapshot.destroy(self.container.coh)
            print("Snapshot successfully destroyed")
        except Exception as error:
            self.fail("{0}".format(error))
class ObjOpenBadParam(Test): """ Test Class Description: Pass an assortment of bad parameters to the daos_obj_open function. """ def __init__(self, *args, **kwargs): """ Initialize values for variables that are used in tearDown() such that if setUp() fails for any reason, tearDown() will avoid throwing an AttributeError exception. """ super(ObjOpenBadParam, self).__init__(*args, **kwargs) self.container = None self.pool = None def setUp(self): self.agent_sessions = None self.hostlist = None # get paths from the build_vars generated by build with open('../../../.build_vars.json') as build_file: build_paths = json.load(build_file) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.server_group = self.params.get("name", '/server_config/', 'daos_server') # setup the DAOS python API self.context = DaosContext(build_paths['PREFIX'] + '/lib/') self.d_log = DaosLog(self.context) self.hostlist = self.params.get("test_machines", '/run/hosts/*') self.hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist) server_utils.run_server(self.hostfile, self.server_group, self.basepath) try: # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/') createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') createuid = os.geteuid() creategid = os.getegid() # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) # need a connection to create container self.pool.connect(1 << 1) # create a container self.container = DaosContainer(self.context) self.container.create(self.pool.handle) # now open it self.container.open() # create an object and write some data into it thedata = "a string that I want to stuff into an object" self.datasize = len(thedata) + 1 self.dkey = "this is the dkey" self.akey = "this is the akey" self.obj, self.epoch = self.container.write_an_obj(thedata, self.datasize, self.dkey, self.akey, obj_cls=1) thedata2 = self.container.read_an_obj(self.datasize, self.dkey, self.akey, self.obj, self.epoch) if thedata not in thedata2.value: print(thedata) print(thedata2.value) err_str = "Error reading back data, test failed during the " \ "initial setup." self.d_log.error(err_str) self.fail(err_str) # setup leaves object in open state, so closing to start clean self.obj.close() except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Test failed during the initial setup.") def tearDown(self): try: self.container.close() self.container.destroy() self.pool.disconnect() self.pool.destroy(1) finally: if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) server_utils.stop_server(hosts=self.hostlist) def test_bad_obj_handle(self): """ Test ID: DAOS-1320 Test Description: Attempt to open a garbage object handle. 
:avocado: tags=object,objopen,objopenbadhand,regression,vm,small """ saved_handle = self.obj.obj_handle self.obj.obj_handle = 8675309 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.obj.obj_handle = saved_handle def test_invalid_container_handle(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object with a garbage container handle. :avocado: tags=object,objopen,objopenbadconthand,regression,vm,small """ saved_coh = self.container.coh self.container.coh = 8675309 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.container.coh = saved_coh def test_closed_container_handle(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with a closed handle. :avocado: tags=object,objopen,objopenclosedcont,regression,vm,small """ self.container.close() try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.container.open() def test_pool_handle_as_obj_handle(self): """ Test ID: DAOS-1320 Test Description: Adding this test by request, this test attempts to open an object that's had its handle set to be the same as a valid pool handle. :avocado: tags=object,objopen,objopenpoolhandle,regression,vm,small """ saved_oh = self.obj.obj_handle self.obj.obj_handle = self.pool.handle try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.obj.obj_handle = saved_oh def test_null_ranklist(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with an empty ranklist. :avocado: tags=object,objopen,objopennullrl,regression,vm,small """ # null rl saved_rl = self.obj.tgt_rank_list self.obj.tgt_rank_list = None try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.tgt_rank_list = saved_rl def test_null_oid(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with null object id. :avocado: tags=object,objopen,objopennulloid,regression,vm,small """ # null oid saved_oid = self.obj.c_oid self.obj.c_oid = DaosObjId(0, 0) try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("Test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.c_oid = saved_oid def test_null_tgts(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with null tgt. 
:avocado: tags=object,objopen,objopennulltgts,regression,vm,small """ # null tgts saved_ctgts = self.obj.c_tgts self.obj.c_tgts = 0 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("Test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.c_tgts = saved_ctgts def test_null_attrs(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with null object attributes. :avocado: tags=object,objopen,objopennullattr,regression,vm,small """ # null attr saved_attr = self.obj.attr self.obj.attr = 0 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.attr = saved_attr
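# Every negative test in ObjOpenBadParam repeats the same expect-an-error
# boilerplate. A sketch of how that pattern could be factored out; the
# helper name expect_daos_error is illustrative, not existing API, and it
# is slightly stricter than the tests above in that it also fails when no
# exception is raised at all:
def expect_daos_error(test, func, err_code, log):
    """Run func and fail the test unless it raises DaosApiError
    containing err_code."""
    try:
        func()
    except DaosApiError as excep:
        if err_code not in str(excep):
            log.error("test expected a %s but did not get it", err_code)
            log.error(traceback.format_exc())
            test.fail("test expected a {0} but did not get it"
                      .format(err_code))
        return
    test.fail("test expected a {0} but no exception was raised"
              .format(err_code))

# e.g. test_bad_obj_handle would reduce to:
#     expect_daos_error(self, self.obj.open, '-1002', self.d_log)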
def test_global_handle(self): """ Test ID: DAO Test Description: Use a pool handle in another process. :avocado: tags=container,conthandle,vm,small,regression """ try: # use the uid/gid of the user running the test, these should # be perfectly valid createuid = os.geteuid() creategid = os.getegid() # parameters used in pool create that are in yaml createmode = self.params.get("mode", '/run/testparams/createmode/') createsetid = self.params.get("setname", '/run/testparams/createset/') createsize = self.params.get("size", '/run/testparams/createsize/') # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) pool.connect(1 << 1) # create a pool global handle iov_len, buf_len, buf = pool.local2global() buftype = ctypes.c_byte * buf_len c_buf = buftype.from_buffer(buf) sct_pool_handle = ( sharedctypes.RawValue(IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len)) # create a container container = DaosContainer(self.context) container.create(pool.handle) container.open() # create a container global handle iov_len, buf_len, buf = container.local2global() buftype = ctypes.c_byte * buf_len c_buf = buftype.from_buffer(buf) sct_cont_handle = ( sharedctypes.RawValue(IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len)) sct_pool_uuid = sharedctypes.RawArray(ctypes.c_byte, pool.uuid) # this should work in the future but need on-line server addition #arg_list = ( #p = Process(target=check_handle, args=arg_list) #p.start() #p.join() # for now verifying global handle in the same process which is not # the intended use case check_handle(sct_pool_handle, sct_pool_uuid, sct_cont_handle, 0) except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Expecting to pass but test has failed.\n")
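# The commented-out block above is the eventual cross-process form of this
# check. Assuming check_handle keeps its current signature
# (pool handle, pool uuid, container handle, rank) and signals failure via
# a non-zero exit status, the child-process version would look roughly
# like this sketch:
from multiprocessing import Process

arg_list = (sct_pool_handle, sct_pool_uuid, sct_cont_handle, 0)
proc = Process(target=check_handle, args=arg_list)
proc.start()
proc.join()
if proc.exitcode != 0:
    raise DaosApiError("global handle verification failed in child process")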
class PoolSvc(Test):
    """
    Tests svc argument while pool create.
    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.POOL = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)
        print("Host file is: {}".format(self.hostfile))

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)
        time.sleep(5)

    def tearDown(self):
        try:
            if self.hostfile is not None:
                os.remove(self.hostfile)
            if self.POOL is not None and self.POOL.attached:
                self.POOL.destroy(1)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist)

    def test_poolsvc(self):
        """
        Test svc arg during pool create.

        :avocado: tags=pool,svc
        """
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname",
                                      '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')

        expected_result = createsvc[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc[0])
            self.POOL.connect(1 << 1)

            # checking returned rank list value for a single server
            if (len(self.hostlist) == 1 and
                    int(self.POOL.svc.rl_ranks[0]) != 0):
                self.fail("Incorrect returned rank list value for single "
                          "server")
            # checking returned rank list for more than one server
            i = 0
            while (int(self.POOL.svc.rl_ranks[i]) > 0 and
                   int(self.POOL.svc.rl_ranks[i]) <= createsvc[0] and
                   int(self.POOL.svc.rl_ranks[i]) != 999999):
                i += 1
            if i != createsvc[0]:
                self.fail("Length of returned rank list is not equal to"
                          " the number of pool service members.\n")
            rank_list = []
            for j in range(createsvc[0]):
                rank_list.append(int(self.POOL.svc.rl_ranks[j]))
            if len(rank_list) != len(set(rank_list)):
                self.fail("Duplicate values in returned rank list")

            if createsvc[0] == 3:
                self.POOL.disconnect()
                cmd = ('{0} kill-leader --uuid={1}'
                       .format(self.daosctl, self.POOL.get_uuid_str()))
                process.system(cmd)
                self.POOL.connect(1 << 1)
                self.POOL.disconnect()
                server = DaosServer(self.Context, self.server_group, 2)
                server.kill(1)
                self.POOL.exclude([2])
                self.POOL.connect(1 << 1)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
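# The rank-list checks above walk self.POOL.svc.rl_ranks with sentinel
# values. A sketch of the same extraction as a helper, assuming rl_ranks
# is a ctypes array holding createsvc[0] valid entries (helper name is
# illustrative):
def get_svc_ranks(pool, expected_count):
    """Return the pool service replica ranks as a Python list."""
    return [int(pool.svc.rl_ranks[idx]) for idx in range(expected_count)]

# e.g. the duplicate detection above then becomes:
#     ranks = get_svc_ranks(self.POOL, createsvc[0])
#     if len(ranks) != len(set(ranks)):
#         self.fail("Duplicate values in returned rank list")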
    def test_connect(self):
        """
        Pass bad parameters to pool connect

        :avocado: tags=pool,poolconnect,badparam,badconnect
        """
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/connecttests/createmode/')
        createuid = self.params.get("uid",
                                    '/run/connecttests/uids/createuid/')
        creategid = self.params.get("gid",
                                    '/run/connecttests/gids/creategid/')
        createsetid = self.params.get("setname",
                                      '/run/connecttests/setnames/createset/')
        createsize = self.params.get("size",
                                     '/run/connecttests/psize/createsize/')

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        modelist = self.params.get("mode", '/run/connecttests/connectmode/*/')
        connectmode = modelist[0]
        expected_for_param.append(modelist[1])

        svclist = self.params.get("ranklist", '/run/connecttests/svrlist/*/')
        svc = svclist[0]
        expected_for_param.append(svclist[1])

        setlist = self.params.get("setname",
                                  '/run/connecttests/connectsetnames/*/')
        connectset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/connecttests/UUID/*/')
        connectuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL; in this test
        # virtually everyone should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        puuid = (ctypes.c_ubyte * 16)()
        psvc = RankList()
        pgroup = ctypes.create_string_buffer(0)
        pool = None
        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            # save this uuid since we might trash it as part of the test
            ctypes.memmove(puuid, pool.uuid, 16)

            # trash the pool service rank list
            psvc.rl_ranks = pool.svc.rl_ranks
            psvc.rl_nr = pool.svc.rl_nr
            if svc != 'VALID':
                rl_ranks = ctypes.POINTER(ctypes.c_uint)()
                pool.svc = RankList(rl_ranks, 1)

            # trash the pool group value
            pgroup = pool.group
            if connectset == 'NULLPTR':
                pool.group = None

            # trash the UUID value in various ways
            if connectuuid == 'NULLPTR':
                pool.uuid = None
            if connectuuid == 'JUNK':
                pool.uuid[4] = 244

            pool.connect(connectmode)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")

        # cleanup the pool
        finally:
            if pool is not None and pool.attached == 1:
                # restore values in case we trashed them during test
                pool.svc.rl_ranks = psvc.rl_ranks
                pool.svc.rl_nr = psvc.rl_nr
                pool.group = pgroup
                ctypes.memmove(pool.uuid, puuid, 16)
                print("pool uuid after restore {}".format(
                    pool.get_uuid_str()))
                pool.destroy(1)
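# The PASS/FAIL accumulation above reappears verbatim in several tests in
# this suite. As a small sketch it collapses into one helper (the name
# expected_outcome is illustrative, not existing API):
def expected_outcome(expected_for_param):
    """Return 'FAIL' if any parameter expects failure, else 'PASS'."""
    return 'FAIL' if 'FAIL' in expected_for_param else 'PASS'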
class PunchTest(Test): """ Simple test to verify the 3 different punch calls. """ def setUp(self): try: # get paths from the build_vars generated by build with open('../../../.build_vars.json') as f: build_paths = json.load(f) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.tmp = build_paths['PREFIX'] + '/tmp' self.server_group = self.params.get("server_group", '/server/', 'daos_server') # setup the DAOS python API self.Context = DaosContext(build_paths['PREFIX'] + '/lib/') self.hostlist = self.params.get("test_machines", '/run/hosts/*') self.hostfile = WriteHostFile.WriteHostFile( self.hostlist, self.tmp) ServerUtils.runServer(self.hostfile, self.server_group, self.basepath) # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/') createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') createuid = os.geteuid() creategid = os.getegid() # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.Context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) self.pool.connect(1 << 1) # create a container self.container = DaosContainer(self.Context) self.container.create(self.pool.handle) # now open it self.container.open() except DaosApiError as e: print(e) print(traceback.format_exc()) self.fail("Test failed during setup.\n") def tearDown(self): try: if self.container: self.container.close() # wait a few seconds and then destroy time.sleep(5) if self.container: self.container.destroy() # cleanup the pool if self.pool: self.pool.disconnect() self.pool.destroy(1) if self.hostfile is not None: os.remove(self.hostfile) except DaosApiError as e: print(e) print(traceback.format_exc()) self.fail("Test failed during teardown.\n") finally: ServerUtils.stopServer(hosts=self.hostlist) def test_dkey_punch(self): """ The most basic test of the dkey punch function. :avocado: tags=object,punch,dkeypunch,regression,vm,small """ try: # create an object and write some data into it thedata = "a string that I want to stuff into an object" dkey = "this is the dkey" akey = "this is the akey" obj, epoch = self.container.write_an_obj(thedata, len(thedata) + 1, dkey, akey) # read the data back and make sure its correct thedata2 = self.container.read_an_obj( len(thedata) + 1, dkey, akey, obj, epoch) if thedata != thedata2.value: print("data I wrote:" + thedata) print("data I read back" + thedata2.value) self.fail("Wrote data, read it back, didn't match\n") # repeat above, but know that the write_an_obj call is advancing # the epoch so the original copy remains and the new copy is in # a new epoch. 
thedata3 = "a different string" # note using the same keys so writing to the same spot obj, epoch2 = self.container.write_an_obj(thedata3, len(thedata3) + 1, dkey, akey, obj) # read the data back and make sure its correct thedata4 = self.container.read_an_obj( len(thedata3) + 1, dkey, akey, obj, epoch2) if thedata3 != thedata4.value: print("data I wrote:" + thedata3) print("data I read back" + thedata4.value) self.fail("wrote in new epoch, read it back, didn't match\n") # the original data should still be there too thedata5 = self.container.read_an_obj( len(thedata) + 1, dkey, akey, obj, epoch) if thedata != thedata5.value: self.fail("original data isn't there any more\n") # repeat, so there will be 3 epochs thedata6 = "a really different string" # note using the same keys so writing to the same spot obj, epoch3 = self.container.write_an_obj(thedata6, len(thedata6) + 1, dkey, akey, obj) # read the data back and make sure its correct thedata7 = self.container.read_an_obj( len(thedata6) + 1, dkey, akey, obj, epoch3) if thedata6 != thedata7.value: print("data I wrote:" + thedata6) print("data I read back" + thedata7.value) self.fail("wrote in new epoch, read it back, didn't match\n") # now punch the data from the middle epoch obj.punch_dkeys(epoch2, [dkey]) except DaosApiError as e: print(e) print(traceback.format_exc()) self.fail("Test failed.\n") try: # read the data from the middle epoch thedata8 = self.container.read_an_obj( len(thedata3) + 1, dkey, akey, obj, epoch2) if len(thedata8.value) is not 0: print("data8: {} {}", thedata8.value, len(thedata8.value)) self.fail("punch from middle epoch didn't work") # read the data from the last epoch thedata9 = self.container.read_an_obj( len(thedata6) + 1, dkey, akey, obj, epoch3) if len(thedata9.value) is not 0: print("data9: {} {}", thedata9.value, len(thedata9.value)) self.fail("after punch data in the last epoch should be gone") # lastly check the first epoch thedata10 = self.container.read_an_obj( len(thedata) + 1, dkey, akey, obj, epoch) if thedata != thedata10.value: self.fail( "Epoch preceeding the punch should still have data\n") except DaosApiError as e: print(e) self.fail("Test failed.\n") def test_akey_punch(self): """ The most basic test of the akey punch function. 
:avocado: tags=object,punch,akeypunch,regression,vm,small """ try: # create an object and write some data into it dkey = "this is the dkey" data1 = [("this is akey 1", "this is data value 1"), ("this is akey 2", "this is data value 2"), ("this is akey 3", "this is data value 3")] obj, epoch1 = self.container.write_multi_akeys(dkey, data1) # do this again, note that the epoch has been advanced by # the write_multi_akeys function data2 = [("this is akey 1", "this is data value 4"), ("this is akey 2", "this is data value 5"), ("this is akey 3", "this is data value 6")] obj, epoch2 = self.container.write_multi_akeys(dkey, data2, obj) # do this again, note that the epoch has been advanced by # the write_multi_akeys function data3 = [("this is akey 1", "this is data value 7"), ("this is akey 2", "this is data value 8"), ("this is akey 3", "this is data value 9")] obj, epoch3 = self.container.write_multi_akeys(dkey, data3, obj) # read back the 1st epoch's data and check 1 value just to make sure # everything is on the up and up readbuf = [(data1[0][0], len(data1[0][1]) + 1), (data1[1][0], len(data1[1][1]) + 1), (data1[2][0], len(data1[2][1]) + 1)] retrieved_data = self.container.read_multi_akeys( dkey, readbuf, obj, epoch1) if retrieved_data[data1[1][0]] != data1[1][1]: print("middle akey, 1st epoch {}".format( retrieved_data[data1[1][0]])) self.fail("data retrieval failure") # now punch one akey from the middle epoch print("punching: {}".format([data2[1][0]])) obj.punch_akeys(epoch2, dkey, [data2[1][0]]) # verify its gone from the epoch where it was punched readbuf = [(data2[1][0], len(data2[1][1]) + 1)] retrieved_data = self.container.read_multi_akeys( dkey, readbuf, obj, epoch2) if len(retrieved_data[data2[1][0]]) != 0: print("retrieved: {}".format(retrieved_data)) print("retrieved punched data but it was still there") self.fail("punched data still present") except DaosApiError as e: print(e) print(traceback.format_exc()) self.fail("Test failed.\n") @avocado.skip('Currently this test fails') def test_obj_punch(self): """ The most basic test of the object punch function. Really similar to above except the whole object is deleted. :avocado: tags=object,punch,objpunch,regression,vm,small """ try: # create an object and write some data into it thedata = "a string that I want to stuff into an object" dkey = "this is the dkey" akey = "this is the akey" obj, epoch = self.container.write_an_obj(thedata, len(thedata) + 1, dkey, akey) # read the data back and make sure its correct thedata2 = self.container.read_an_obj( len(thedata) + 1, dkey, akey, obj, epoch) if thedata != thedata2.value: print("data I wrote:" + thedata) print("data I read back" + thedata2.value) self.fail("Wrote data, read it back, didn't match\n") # repeat above, but know that the write_an_obj call is advancing # the epoch so the original copy remains and the new copy is in # a new epoch. 
thedata3 = "a different string" # note using the same keys so writing to the same spot obj, epoch2 = self.container.write_an_obj(thedata3, len(thedata3) + 1, dkey, akey, obj) # read the data back and make sure its correct thedata4 = self.container.read_an_obj( len(thedata3) + 1, dkey, akey, obj, epoch2) if thedata3 != thedata4.value: print("data I wrote:" + thedata3) print("data I read back" + thedata4.value) self.fail("wrote in new epoch, read it back, didn't match\n") # the original data should still be there too thedata5 = self.container.read_an_obj( len(thedata) + 1, dkey, akey, obj, epoch) if thedata != thedata5.value: self.fail("original data isn't there any more\n") # repeat, so there will be 3 epochs thedata6 = "a really different string" # note using the same keys so writing to the same spot obj, epoch3 = self.container.write_an_obj(thedata6, len(thedata6) + 1, dkey, akey, obj) # read the data back and make sure its correct thedata7 = self.container.read_an_obj( len(thedata6) + 1, dkey, akey, obj, epoch3) if thedata6 != thedata7.value: print("data I wrote:" + thedata6) print("data I read back" + thedata7.value) self.fail("wrote in new epoch, read it back, didn't match\n") # now punch the object from the middle epoch obj.punch(epoch2) except DaosApiError as e: print(e) print(traceback.format_exc()) self.fail("Test failed.\n") try: # read the data from the middle epoch, should be gone thedata8 = self.container.read_an_obj( len(thedata3) + 1, dkey, akey, obj, epoch2) if len(thedata8.value) is not 0: print("data8: {} {}", thedata8.value, len(thedata8.value)) self.fail("punch from middle epoch didn't work") except DaosApiError as e: print(e) self.fail("READ FROM DELETED OBJECT FAILED.\n") try: # read the data from the last epoch thedata9 = self.container.read_an_obj( len(thedata6) + 1, dkey, akey, obj, epoch3) if len(thedata9.value) is not 0: print("data9: {} {}", thedata8.value, len(thedata8.value)) self.fail("after punch data in the last epoch should be gone") except DaosApiError as e: print(e) self.fail("READ FROM DELETED OBJECT FAILED.\n") try: # lastly check the first epoch, this one should still be there thedata10 = self.container.read_an_obj( len(thedata) + 1, dkey, akey, obj, epoch) if thedata != thedata10.value: self.fail( "Epoch preceeding the punch should still have data\n") except DaosApiError as e: print(e) self.fail("Test failed.\n")
class ContainerAttributeTest(Test): """ Tests DAOS container attribute get/set/list. """ def setUp(self): self.agent_sessions = None self.pool = None self.container = None self.hostlist = None self.large_data_set = {} with open('../../../.build_vars.json') as build_file: build_paths = json.load(build_file) basepath = os.path.normpath(build_paths['PREFIX'] + "/../") server_group = self.params.get("name", '/server_config/', 'daos_server') self.context = DaosContext(build_paths['PREFIX'] + '/lib/') self.hostlist = self.params.get("test_machines", '/run/hosts/*') self.hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist) server_utils.run_server(self.hostfile, server_group, basepath) self.pool = DaosPool(self.context) self.pool.create(self.params.get("mode", '/run/attrtests/createmode/*'), os.geteuid(), os.getegid(), self.params.get("size", '/run/attrtests/createsize/*'), self.params.get("setname", '/run/attrtests/createset/*'), None) self.pool.connect(1 << 1) poh = self.pool.handle self.container = DaosContainer(self.context) self.container.create(poh) self.container.open() def tearDown(self): try: if self.container: self.container.close() finally: if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) server_utils.stop_server(hosts=self.hostlist) def create_data_set(self): """ To create the large attribute dictionary """ allchar = string.ascii_letters + string.digits for i in range(1024): self.large_data_set[str(i)] = ( "".join(random.choice(allchar) for x in range(random.randint(1, 100)))) def test_container_attribute(self): """ Test basic container attribute tests. :avocado: tags=container,container_attr,attribute,sync_conattribute """ expected_for_param = [] name = self.params.get("name", '/run/attrtests/name_handles/*/') expected_for_param.append(name[1]) value = self.params.get("value", '/run/attrtests/value_handles/*/') expected_for_param.append(value[1]) attr_dict = {name[0]:value[0]} if name[0] is not None: if "largenumberofattr" in name[0]: self.create_data_set() attr_dict = self.large_data_set attr_dict[name[0]] = value[0] expected_result = 'PASS' for result in expected_for_param: if result == 'FAIL': expected_result = 'FAIL' break try: self.container.set_attr(data=attr_dict) size, buf = self.container.list_attr() verify_list_attr(attr_dict, size, buf) # Request something that doesn't exist if name[0] is not None and "Negative" in name[0]: name[0] = "rubbish" results = {} results = self.container.get_attr([name[0]]) # for this test the dictionary has been altered, need to just # set it to what we are expecting to get back if name[0] is not None: if "largenumberofattr" in name[0]: attr_dict.clear() attr_dict[name[0]] = value[0] verify_get_attr(attr_dict, results) if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except (DaosApiError, DaosTestError) as excep: print(excep) print(traceback.format_exc()) if expected_result == 'PASS': self.fail("Test was expected to pass but it failed.\n") def test_container_attribute_asyn(self): """ Test basic container attribute tests. 
        :avocado: tags=container,container_attr,attribute,async_conattribute
        """
        global GLOB_SIGNAL
        global GLOB_RC

        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        expected_for_param.append(value[1])

        attr_dict = {name[0]: value[0]}
        if name[0] is not None:
            if "largenumberofattr" in name[0]:
                self.create_data_set()
                attr_dict = self.large_data_set
                attr_dict[name[0]] = value[0]

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            GLOB_SIGNAL = threading.Event()
            self.container.set_attr(data=attr_dict, cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after set_attr First {0}"
                          .format(GLOB_RC))

            GLOB_SIGNAL = threading.Event()
            size, buf = self.container.list_attr(cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after list_attr First {0}"
                          .format(GLOB_RC))
            if expected_result in ['PASS']:
                verify_list_attr(attr_dict, size, buf, mode="async")

            # Request something that doesn't exist
            if name[0] is not None and "Negative" in name[0]:
                name[0] = "rubbish"

            GLOB_SIGNAL = threading.Event()
            self.container.get_attr([name[0]], cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after get_attr {0}"
                          .format(GLOB_RC))

            # not verifying the get_attr since it's not available
            # asynchronously

            if value[0] is not None:
                if GLOB_RC == 0 and expected_result in ['FAIL']:
                    self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
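# The async tests above depend on a module-level callback that records the
# return code and wakes the waiting test; the callback itself is not shown
# in this excerpt. A minimal sketch consistent with how GLOB_SIGNAL and
# GLOB_RC are used (the event.event.ev_error access is an assumption about
# the DAOS event structure, not confirmed here):
GLOB_SIGNAL = None
GLOB_RC = -99000000

def cb_func(event):
    """Completion callback: record the rc and release the waiter."""
    global GLOB_RC
    GLOB_RC = event.event.ev_error
    GLOB_SIGNAL.set()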
    def test_multipool_rebuild(self):
        """
        Test ID: Rebuild-002
        Test Description: Expand on the basic test by rebuilding 2
        pools at once.

        Use Cases:
          -- multipool rebuild, single client, various object and record
             counts

        :avocado: tags=pool,rebuild,rebuildmulti
        """

        # the rebuild tests need to redo this stuff each time so not in setup
        # as it usually would be
        setid = self.params.get("setname", '/run/testparams/setnames/')
        server_group = self.params.get("server_group", '/server/',
                                       'daos_server')

        basepath = os.path.normpath(self.build_paths['PREFIX'] + "/../")
        tmp = self.build_paths['PREFIX'] + '/tmp'

        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        hostfile = WriteHostFile.WriteHostFile(self.hostlist, tmp)

        try:
            ServerUtils.runServer(hostfile, server_group, basepath)

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode",
                                         '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size",
                                         '/run/testparams/createsize/')

            # initialize python pool object then create the underlying
            # daos storage, the way the code is now the pools should be
            # on the same storage and have the same service leader
            pool1 = DaosPool(self.Context)
            pool2 = DaosPool(self.Context)
            pool1.create(createmode, createuid, creategid, createsize,
                         createsetid, None)
            pool2.create(createmode, createuid, creategid, createsize,
                         createsetid, None)

            # want an open connection during rebuild
            pool1.connect(1 << 1)
            pool2.connect(1 << 1)

            # create containers
            container1 = DaosContainer(self.Context)
            container1.create(pool1.handle)
            container2 = DaosContainer(self.Context)
            container2.create(pool2.handle)

            # now open them
            container1.open()
            container2.open()

            # how many objects and records are we creating
            objcount = self.params.get("objcount",
                                       '/run/testparams/numobjects/*')
            reccount = self.params.get("reccount",
                                       '/run/testparams/numrecords/*')
            if objcount == 0:
                reccount = 0

            # which rank to write to and kill
            rank = self.params.get("rank", '/run/testparams/ranks/*')

            # how much data to write with each key
            size = self.params.get("size", '/run/testparams/datasize/')

            # Putting the same data in both pools, at least for now, to
            # simplify checking that it's correct
            saved_data = []
            for i in range(0, objcount):
                obj = None
                for j in range(0, reccount):
                    # make some stuff up and write
                    dkey = ''.join(
                        random.choice(string.ascii_uppercase + string.digits)
                        for _ in range(5))
                    akey = ''.join(
                        random.choice(string.ascii_uppercase + string.digits)
                        for _ in range(5))
                    data = ''.join(
                        random.choice(string.ascii_uppercase + string.digits)
                        for _ in range(size))

                    obj, tx = container1.write_an_obj(data, len(data), dkey,
                                                      akey, obj, rank)
                    obj, tx = container2.write_an_obj(data, len(data), dkey,
                                                      akey, obj, rank)
                    saved_data.append((obj, dkey, akey, data, tx))

                    # read the data back and make sure it's correct
                    # in container 1
                    data2 = container1.read_an_obj(size, dkey, akey, obj,
                                                   tx)
                    if data != data2.value:
                        self.fail(
                            "Wrote data P1, read it back, didn't match\n")
                    # and in container 2
                    data2 = container2.read_an_obj(size, dkey, akey, obj,
                                                   tx)
                    if data != data2.value:
                        self.fail(
                            "Wrote data P2, read it back, didn't match\n")

            # kill a server
            server = DaosServer(self.Context, server_group, rank)
            server.kill(1)

            # temporarily, the exclude of a failed target must be done
            # manually
            pool1.exclude([rank])
            pool2.exclude([rank])

            # wait for the rebuild to finish, then check that there are no
            # errors and that the progress data matches what we know it to
            # be. Check pool 1 first, then we'll check pool 2 below.
            while True:
                pool1.pool_query()
                if pool1.pool_info.pi_rebuild_st.rs_done == 1:
                    break
                else:
                    time.sleep(2)

            # check there are no errors and other data matches what we
            # know a priori to be true
            if pool1.pool_info.pi_ndisabled != 1:
                self.fail(
                    "P1 number disabled targets reporting incorrectly: {}".
                    format(pool1.pool_info.pi_ndisabled))
            if pool1.pool_info.pi_rebuild_st.rs_errno != 0:
                self.fail("P1 rebuild error reported: {}".format(
                    pool1.pool_info.pi_rebuild_st.rs_errno))
            if pool1.pool_info.pi_rebuild_st.rs_obj_nr != objcount:
                self.fail("P1 rebuilt objs not as expected: {0} {1}".format(
                    pool1.pool_info.pi_rebuild_st.rs_obj_nr, objcount))
            if (pool1.pool_info.pi_rebuild_st.rs_rec_nr !=
                    (reccount * objcount)):
                self.fail("P1 rebuilt recs not as expected: {0} {1}".format(
                    pool1.pool_info.pi_rebuild_st.rs_rec_nr,
                    reccount * objcount))

            # now that the rebuild is done, verify the records are correct
            for tup in saved_data:
                data2 = container1.read_an_obj(len(tup[3]), tup[1], tup[2],
                                               tup[0], tup[4])
                if tup[3] != data2.value:
                    self.fail("after rebuild data didn't check out")

            # now check the other pool
            while True:
                pool2.pool_query()
                if pool2.pool_info.pi_rebuild_st.rs_done == 1:
                    break
                else:
                    time.sleep(2)

            # check there are no errors and other data matches what we
            # know a priori to be true
            if pool2.pool_info.pi_ndisabled != 1:
                self.fail(
                    "Number disabled targets reporting incorrectly: {}".
                    format(pool2.pool_info.pi_ndisabled))
            if pool2.pool_info.pi_rebuild_st.rs_errno != 0:
                self.fail("Rebuild error reported: {}".format(
                    pool2.pool_info.pi_rebuild_st.rs_errno))
            if pool2.pool_info.pi_rebuild_st.rs_obj_nr != objcount:
                self.fail("Rebuilt objs not as expected: {0} {1}".format(
                    pool2.pool_info.pi_rebuild_st.rs_obj_nr, objcount))
            if (pool2.pool_info.pi_rebuild_st.rs_rec_nr !=
                    (reccount * objcount)):
                self.fail("Rebuilt recs not as expected: {0} {1}".format(
                    pool2.pool_info.pi_rebuild_st.rs_rec_nr,
                    (reccount * objcount)))

            # now that the rebuild is done, verify the records are correct
            for tup in saved_data:
                data2 = container2.read_an_obj(len(tup[3]), tup[1], tup[2],
                                               tup[0], tup[4])
                if tup[3] != data2.value:
                    self.fail("after rebuild data didn't check out")

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")

        finally:
            ServerUtils.stopServer(hosts=self.hostlist)
            os.remove(hostfile)
            CheckForPool.CleanupPools(self.hostlist)
            ServerUtils.killServer(self.hostlist)
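# Both rebuild-wait loops above poll pool_query() with no upper bound. A
# sketch of the same loop with a timeout guard (the helper name and the
# default timeout are illustrative; raising DaosApiError on timeout is a
# choice made here, not existing behavior):
def wait_for_rebuild(pool, poll_interval=2, timeout=300):
    """Poll pool_query until rebuild reports done or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        pool.pool_query()
        if pool.pool_info.pi_rebuild_st.rs_done == 1:
            return
        time.sleep(poll_interval)
    raise DaosApiError("rebuild did not complete within {0}s".format(timeout))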
class ContainerAsync(Test): """ Tests DAOS pool connect permissions (non existing pool handle, bad uuid) and close. """ def __init__(self, *args, **kwargs): super(ContainerAsync, self).__init__(*args, **kwargs) self.container1 = None self.container2 = None def setUp(self): self.agent_sessions = None self.hostlist = None self.pool = None # get paths from the build_vars generated by build with open('../../../.build_vars.json') as build_file: build_paths = json.load(build_file) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.server_group = self.params.get("name", '/server_config/', 'daos_server') # setup the DAOS python API self.context = DaosContext(build_paths['PREFIX'] + '/lib/') self.pool = None self.hostlist = self.params.get("test_machines", '/run/hosts/*') self.hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) print("Host file is: {}".format(self.hostfile)) self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist) server_utils.run_server(self.hostfile, self.server_group, self.basepath) time.sleep(10) def tearDown(self): try: if self.pool is not None and self.pool.attached: self.pool.destroy(1) finally: if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) time.sleep(5) server_utils.stop_server(hosts=self.hostlist) def test_createasync(self): """ Test container create for asynchronous mode. :avocado: tags=container,containerasync,createasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) GLOB_SIGNAL = threading.Event() self.container1.create(poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print("RC after successful container create: ", GLOB_RC) # Try to recreate container after destroying pool, # this should fail. Checking rc after failure. self.pool.destroy(1) GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.create(poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test") print("RC after unsuccessful container create: ", GLOB_RC) # cleanup the pool and container self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_destroyasync(self): """ Test container destroy for asynchronous mode. 
:avocado: tags=container,containerasync,contdestroyasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) GLOB_SIGNAL = threading.Event() self.container1.destroy(1, poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print("RC after successful container create: ", GLOB_RC) # Try to destroy container again, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.destroy(1, poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != -1003: self.fail("RC not as expected in async test") print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_openasync(self): """ Test container open for asynchronous mode. :avocado: tags=container,containerasync,openasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) str_cuuid = self.container1.get_uuid_str() cuuid = uuid.UUID(str_cuuid) GLOB_SIGNAL = threading.Event() self.container1.open(poh, cuuid, 2, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print("RC after successful container create: ", GLOB_RC) # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.open(None, None, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test") print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.container1.close() self.container1.destroy() self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_closeasync(self): """ Test container close for asynchronous mode. 
:avocado: tags=container,containerasync,closeasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) str_cuuid = self.container1.get_uuid_str() cuuid = uuid.UUID(str_cuuid) self.container1.open(poh, cuuid, 2) GLOB_SIGNAL = threading.Event() self.container1.close(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after successful container create: ", GLOB_RC) # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.close(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.container1.destroy() self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_queryasync(self): """ Test container query for asynchronous mode. :avocado: tags=container,containerasync,queryasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) dummy_str_cuuid = self.container1.get_uuid_str() # Open container self.container1.open(poh, None, 2, None) GLOB_SIGNAL = threading.Event() self.container1.query(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after successful container create: ", GLOB_RC) # Close opened container self.container1.close() # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.query(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.container1.destroy() self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc())
class OpenClose(Test): """ Tests DAOS container open/close function with handle parameter. """ def setUp(self): # these are first since they are referenced in teardown self.pool = None self.hostlist = None self.hostlist = self.params.get("test_servers", '/run/hosts/') # get paths from the build_vars generated by build with open( os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../../.build_vars.json')) as f: build_paths = json.load(f) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.server_group = self.params.get("server_group", '/server/', 'daos_server') # setup the DAOS python API self.Context = DaosContext(build_paths['PREFIX'] + '/lib/') self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.workdir) ServerUtils.runServer(self.hostfile, self.server_group, self.basepath) time.sleep(5) def tearDown(self): try: if self.pool is not None and self.pool.attached: self.pool.destroy(1) finally: try: ServerUtils.stopServer(hosts=self.hostlist) except ServerFailed as e: pass def test_closehandle(self): """ Test container close function with container handle paramter. :avocado: tags=container,openclose,closehandle """ saved_coh = None # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/') createuid = os.geteuid() creategid = os.getegid() createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') coh_params = self.params.get("coh", '/run/container/container_handle/*/') expected_result = coh_params[1] try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.Context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.Container1 = DaosContainer(self.Context) self.Container1.create(poh) str_cuuid = self.Container1.get_uuid_str() cuuid = uuid.UUID(str_cuuid) self.Container1.open(poh, cuuid, 2, None) # Defining 'good' and 'bad' container handles saved_coh = self.Container1.coh if coh_params[0] == 'GOOD': coh = self.Container1.coh else: # create a second container, open to get a handle # then close & destroy so handle is invalid self.Container2 = DaosContainer(self.Context) self.Container2.create(poh) self.Container2.open(poh, cuuid, 2, None) coh = self.Container2.coh self.Container2.close() self.Container2.destroy() # close container with either good or bad handle self.Container1.close(coh) if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as e: if expected_result == 'PASS': print(e) print(traceback.format_exc()) self.fail("Test was expected to pass but it failed.\n") # close above failed so close for real with the right coh if saved_coh is not None: self.Container1.close(saved_coh) finally: self.Container1.destroy(1) self.pool.disconnect() self.pool.destroy(1) self.pool = None
class RebuildNoCap(Test):
    """
    Test Class Description:
    This class contains tests for pool rebuild.

    :avocado: tags=pool,rebuild,nocap
    """

    def setUp(self):
        """ setup for the test """
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        pool_uuid = self.pool.get_uuid_str()

        time.sleep(2)

        # stuff some bogus data into the pool
        how_many_bytes = int(self.params.get(
            "datasize", '/run/testparams/datatowrite/'))
        exepath = os.path.join(
            build_paths['PREFIX'],
            "../src/tests/ftest/util/write_some_data.py")
        cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
              " --np 1 --host {1} {2} {3} testfile".format(
                  pool_uuid, self.hostlist[0], exepath, how_many_bytes)
        subprocess.call(cmd, shell=True)

    def tearDown(self):
        """ cleanup after the test """
        try:
            os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_rebuild_no_capacity(self):
        """
        :avocado: tags=pool,rebuild,nocap
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references one of our pool target
            # hosts and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            d_server = DaosServer(self.context, bytes(self.server_group),
                                  svr_to_kill)

            time.sleep(1)
            d_server.kill(1)

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])

            # exclude should trigger rebuild, check
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if not status.pi_ntargets == len(self.hostlist):
                self.fail("target count wrong.\n")
            if not status.pi_ndisabled == 1:
                self.fail("disabled target count wrong.\n")

            # the pool should be too full to start a rebuild so
            # expecting an error
            # not sure yet specifically what error
            if status.pi_rebuild_st.rs_errno == 0:
                self.fail("expecting rebuild to fail but it didn't.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
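# A sketch of the same data-stuffing launch without shell=True: build the
# environment explicitly and pass mpirun an argument list, which avoids
# shell-quoting pitfalls. The helper name and argument order are
# illustrative; note that depending on the MPI launcher, DAOS_POOL and
# DAOS_SVCL may need explicit forwarding (e.g. mpirun's -x option) to
# reach ranks on remote hosts.
def write_bogus_data(pool_uuid, host, exepath, nbytes):
    """Launch write_some_data.py via mpirun with an explicit environment."""
    env = os.environ.copy()
    env["DAOS_POOL"] = pool_uuid
    env["DAOS_SVCL"] = "1"
    return subprocess.call(["mpirun", "--np", "1", "--host", host,
                            exepath, str(nbytes), "testfile"], env=env)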
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=object,arrayobj,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode",
                                         '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size",
                                         '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()
            print("uid is {} gid is {}".format(createuid, creategid))

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.Context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            self.pl.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.Context)
            container.create(pool.handle)
            self.pl.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()
            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = []
            thedata.append("data string one")
            thedata.append("data string two")
            thedata.append("data string tre")

            dkey = "this is the dkey"
            akey = "this is the akey"

            self.pl.info("writing array to dkey >%s< akey >%s<.", dkey,
                         akey)
            oid, epoch = container.write_an_array_value(thedata, dkey, akey)

            # read the data back and make sure it's correct
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length + 1,
                                               dkey, akey, oid, epoch)
            if thedata[0][0:length - 1] != thedata2[0][0:length - 1]:
                self.pl.error("Data mismatch")
                self.pl.error("Wrote: >%s<", thedata[0])
                self.pl.error("Read: >%s<", thedata2[0])
                self.fail("Write data, read it back, didn't match\n")

            if thedata[2][0:length - 1] != thedata2[2][0:length - 1]:
                self.pl.error("Data mismatch")
                self.pl.error("Wrote: >%s<", thedata[2])
                self.pl.error("Read: >%s<", thedata2[2])
                self.fail("Write data, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

            # cleanup the pool
            pool.disconnect()
            pool.destroy(1)
            self.pl.info("Test Complete")

        except DaosApiError as e:
            self.pl.error("Test Failed, exception was thrown.")
            print(e)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
class OpenContainerTest(Test): """ Tests DAOS container bad create (non existing pool handle, bad uuid) and close. :avocado: tags=container,containeropen """ def setUp(self): # get paths from the build_vars generated by build with open('../../../.build_vars.json') as build_file: build_paths = json.load(build_file) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.server_group = self.params.get("name", '/server_config/', 'daos_server') # setup the DAOS python API self.context = DaosContext(build_paths['PREFIX'] + '/lib/') self.pool1 = None self.pool2 = None self.container1 = None self.container2 = None self.hostfile = None self.hostlist = self.params.get("test_machines", '/run/hosts/*') self.hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) # common parameters used in pool create self.createmode = self.params.get("mode", '/run/createtests/createmode/') self.createsetid = self.params.get("setname", '/run/createtests/createset/') self.createsize = self.params.get("size", '/run/createtests/createsize/') # pool 1 UID GID self.createuid1 = self.params.get("uid", '/run/createtests/createuid1/') self.creategid1 = self.params.get("gid", '/run/createtests/creategid1/') # pool 2 UID GID self.createuid2 = self.params.get("uid", '/run/createtests/createuid2/') self.creategid2 = self.params.get("gid", '/run/createtests/creategid2/') self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist) server_utils.run_server(self.hostfile, self.server_group, self.basepath) def tearDown(self): try: if self.container1 is not None: self.container1.destroy() if self.container2 is not None: self.container2.destroy() if self.pool1 is not None and self.pool1.attached: self.pool1.destroy(1) if self.pool2 is not None and self.pool2.attached: self.pool2.destroy(1) finally: if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) server_utils.stop_server(hosts=self.hostlist) def test_container_open(self): """ Test basic container bad create. 
:avocado: tags=container,containeropen """ container_uuid = None expected_for_param = [] uuidlist = self.params.get("uuid", '/run/createtests/uuids/*/') container_uuid = uuidlist[0] expected_for_param.append(uuidlist[1]) pohlist = self.params.get("poh", '/run/createtests/handles/*/') poh = pohlist[0] expected_for_param.append(pohlist[1]) expected_result = 'PASS' for result in expected_for_param: if result == 'FAIL': expected_result = 'FAIL' break try: # create two pools and try to create containers in these pools self.pool1 = DaosPool(self.context) self.pool1.create(self.createmode, self.createuid1, self.creategid1, self.createsize, self.createsetid, None) self.pool2 = DaosPool(self.context) self.pool2.create(self.createmode, self.createuid2, self.creategid2, self.createsize, None, None) # Connect to the pools self.pool1.connect(1 << 1) self.pool2.connect(1 << 1) # defines pool handle for container open if pohlist[0] == 'pool1': poh = self.pool1.handle else: poh = self.pool2.handle # Create a container in pool1 self.container1 = DaosContainer(self.context) self.container1.create(self.pool1.handle) # defines test UUID for container open if uuidlist[0] == 'pool1': struuid = self.container1.get_uuid_str() container_uuid = uuid.UUID(struuid) else: if uuidlist[0] == 'MFUUID': container_uuid = "misformed-uuid-0000" else: container_uuid = uuid.uuid4() # random uuid # tries to open the container1 # open should be ok only if poh = pool1.handle && # containerUUID = container1.uuid self.container1.open(poh, container_uuid) # wait a few seconds and then destroy containers time.sleep(5) self.container1.close() self.container1.destroy() self.container1 = None # cleanup the pools self.pool1.disconnect() self.pool1.destroy(1) self.pool1 = None self.pool2.disconnect() self.pool2.destroy(1) self.pool2 = None if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as excep: print(excep) print(traceback.format_exc()) if expected_result == 'PASS': self.fail("Test was expected to pass but it failed.\n") finally: if self.hostfile is not None: os.remove(self.hostfile)
class ContainerAsync(TestWithServers): """ Tests DAOS pool connect permissions (non existing pool handle, bad uuid) and close. :avocado: recursive """ def __init__(self, *args, **kwargs): super(ContainerAsync, self).__init__(*args, **kwargs) self.container1 = None self.container2 = None self.pool = None def test_createasync(self): """ Test container create for asynchronous mode. :avocado: tags=all,small,full_regression,container,createasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) GLOB_SIGNAL = threading.Event() self.container1.create(poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print("RC after successful container create: ", GLOB_RC) # Try to recreate container after destroying pool, # this should fail. Checking rc after failure. self.pool.destroy(1) GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.create(poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test") print("RC after unsuccessful container create: ", GLOB_RC) # cleanup the pool and container self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_destroyasync(self): """ Test container destroy for asynchronous mode. :avocado: tags=all,small,full_regression,container,contdestroyasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) GLOB_SIGNAL = threading.Event() self.container1.destroy(1, poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print("RC after successful container create: ", GLOB_RC) # Try to destroy container again, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.destroy(1, poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != -1003: self.fail("RC not as expected in async test") print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_openasync(self): """ Test container open for asynchronous mode. 
:avocado: tags=all,small,full_regression,container,openasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) str_cuuid = self.container1.get_uuid_str() cuuid = uuid.UUID(str_cuuid) GLOB_SIGNAL = threading.Event() self.container1.open(poh, cuuid, 2, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print("RC after successful container create: ", GLOB_RC) # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.open(None, None, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test") print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.container1.close() self.container1.destroy() self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_closeasync(self): """ Test container close for asynchronous mode. :avocado: tags=all,small,full_regression,container,closeasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) str_cuuid = self.container1.get_uuid_str() cuuid = uuid.UUID(str_cuuid) self.container1.open(poh, cuuid, 2) GLOB_SIGNAL = threading.Event() self.container1.close(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after successful container create: ", GLOB_RC) # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.close(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.container1.destroy() self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc()) def test_queryasync(self): """ Test container query for asynchronous mode. 
:avocado: tags=all,small,full_regression,container,queryasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container2 = DaosContainer(self.context) self.container1.create(poh) dummy_str_cuuid = self.container1.get_uuid_str() # Open container self.container1.open(poh, None, 2, None) GLOB_SIGNAL = threading.Event() self.container1.query(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after successful container create: ", GLOB_RC) # Close opened container self.container1.close() # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.container2.query(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC == 0: self.fail("RC not as expected in async test: " "{0}".format(GLOB_RC)) print("RC after container destroy failed:", GLOB_RC) # cleanup the pool and container self.container1.destroy() self.pool.disconnect() self.pool.destroy(1) self.pool = None except DaosApiError as excep: print(excep) print(traceback.format_exc())
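# The async tests above rely on a module-level callback (cb_func) and the
# GLOB_SIGNAL/GLOB_RC globals, which are defined earlier in the module but
# not shown in this excerpt. A minimal sketch of how such a callback is
# typically wired up; the event.event.ev_error field layout is an
# assumption about the DAOS event object, not confirmed by this file:
GLOB_SIGNAL = None
GLOB_RC = -99000000

def cb_func(event):
    """Record the return code of the completed async call and wake the
    test thread blocked in GLOB_SIGNAL.wait()."""
    global GLOB_RC
    GLOB_RC = event.event.ev_error  # assumed event field layout
    GLOB_SIGNAL.set()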
class PoolAttributeTest(Test): """ Test class Description: Tests DAOS pool attribute get/set/list. """ def setUp(self): try: self.pool = None self.hostlist = None with open('../../../.build_vars.json') as build_file: build_paths = json.load(build_file) basepath = os.path.normpath(build_paths['PREFIX'] + "/../") server_group = self.params.get("name", '/server_config/', 'daos_server') context = DaosContext(build_paths['PREFIX'] + '/lib/') self.hostlist = self.params.get("test_machines", '/run/hosts/*') self.hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) server_utils.run_server(self.hostfile, server_group, basepath) createmode = self.params.get("mode", '/run/attrtests/createmode/') createuid = os.geteuid() creategid = os.getgid() createsetid = self.params.get("setname", '/run/attrtests/createset/') createsize = self.params.get("size", '/run/attrtests/createsize/') self.pool = DaosPool(context) self.pool.create(createmode, createuid, creategid, createsize, createsetid) self.pool.connect(1 << 1) self.large_data_set = {} except DaosApiError as excep: print("In the setup exception handler\n") print(excep) print(traceback.format_exc()) def tearDown(self): try: if self.pool is not None: self.pool.disconnect() self.pool.destroy(1) finally: server_utils.stop_server(hosts=self.hostlist) def create_data_set(self): """ To create the large attribute dictionary """ allchar = string.ascii_letters + string.digits for i in range(1024): self.large_data_set[str(i)] = ( "".join(random.choice(allchar) for x in range(random.randint(1, 100)))) def test_pool_attributes(self): """ Test ID: DAOS-1359 Test description: Test basic pool attribute tests (sync). :avocado: tags=regression,pool,pool_attr,attribute,sync_poolattribute """ expected_for_param = [] name = self.params.get("name", '/run/attrtests/name_handles/*/') expected_for_param.append(name[1]) value = self.params.get("value", '/run/attrtests/value_handles/*/') if value[0] is None: self.cancel("skipping these tests until DAOS-2170 is fixed") expected_for_param.append(value[1]) attr_dict = {name[0]:value[0]} if name[0] is not None: if "largenumberofattr" in name[0]: self.create_data_set() attr_dict = self.large_data_set expected_result = 'PASS' for result in expected_for_param: if result == 'FAIL': expected_result = 'FAIL' break try: self.pool.set_attr(data=attr_dict) size, buf = self.pool.list_attr() verify_list_attr(attr_dict, size.value, buf) if name[0] is not None: # Request something that doesn't exist if "Negative" in name[0]: name[0] = "rubbish" # large attr test messes with the dictionary so skip # the get test if "largenumberofattr" not in name[0]: results = {} results = self.pool.get_attr([name[0]]) verify_get_attr(attr_dict, results) if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as excep: print(excep) print(traceback.format_exc()) if expected_result == 'PASS': self.fail("Test was expected to pass but it failed.\n") def test_pool_attribute_asyn(self): """ Test ID: DAOS-1359 Test description: Test basic pool attribute tests (async). 
:avocado: tags=regression,pool,pool_attr,attribute,async_poolattribute """ global GLOB_SIGNAL global GLOB_RC expected_for_param = [] name = self.params.get("name", '/run/attrtests/name_handles/*/') # workaround until async functions are fixed if name[0] is not None and "Negative" in name[0]: pass else: expected_for_param.append(name[1]) value = self.params.get("value", '/run/attrtests/value_handles/*/') if value[0] is None: self.cancel("skipping this test until DAOS-2170 is fixed") expected_for_param.append(value[1]) attr_dict = {name[0]:value[0]} if name[0] is not None: if "largenumberofattr" in name[0]: self.create_data_set() attr_dict = self.large_data_set expected_result = 'PASS' for result in expected_for_param: if result == 'FAIL': expected_result = 'FAIL' break try: GLOB_SIGNAL = threading.Event() self.pool.set_attr(attr_dict, None, cb_func) GLOB_SIGNAL.wait() if expected_result == 'PASS' and GLOB_RC != 0: self.fail("RC not as expected after set_attr {0}" .format(GLOB_RC)) if expected_result == 'FAIL' and GLOB_RC == 0: self.fail("RC not as expected after set_attr {0}" .format(GLOB_RC)) except DaosApiError as excep: print (excep) print (traceback.format_exc()) if expected_result == 'PASS': self.fail("Test was expected to pass but it failed.\n")
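# verify_list_attr and verify_get_attr come from a shared test utility and
# are not shown in this file. A simplified sketch of the get-side check
# (the body is illustrative; the real utility may differ):
def verify_get_attr(indata, outdata):
    """Fail if any attribute read back differs from what was set."""
    for attr, value in outdata.items():
        if value != indata.get(attr):
            raise ValueError(
                "attribute {0} mismatch: wrote {1}, read {2}".format(
                    attr, indata.get(attr), value))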
def test_rebuild_with_io(self): """ Test ID: Rebuild-003 Test Description: Trigger a rebuild while I/O is ongoing. Use Cases: -- single pool, single client performing continous read/write/verify sequence while failure/rebuild is triggered in another process :avocado: tags=pool,rebuild,rebuildwithio """ # the rebuild tests need to redo this stuff each time so not in setup # as it usually would be server_group = self.params.get("name", '/server_config/', 'daos_server') self.hostlist_servers = self.params.get("test_machines", '/run/hosts/') hostfile_servers = write_host_file.write_host_file( self.hostlist_servers, self.workdir) try: self.agent_sessions = agent_utils.run_agent(self.basepath, self.hostlist_servers) server_utils.run_server(hostfile_servers, server_group, self.basepath) # use the uid/gid of the user running the test, these should # be perfectly valid createuid = os.geteuid() creategid = os.getegid() # parameters used in pool create that are in yaml createmode = self.params.get("mode", '/run/testparams/createmode/') createsetid = self.params.get("setname", '/run/testparams/createset/') createsize = self.params.get("size", '/run/testparams/createsize/') # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) pool.connect(1 << 1) container = DaosContainer(self.context) container.create(pool.handle) container.open() # get pool status and make sure it all looks good before we start pool.pool_query() if pool.pool_info.pi_ndisabled != 0: self.fail("Number of disabled targets reporting incorrectly.\n") if pool.pool_info.pi_rebuild_st.rs_errno != 0: self.fail("Rebuild error but rebuild hasn't run.\n") if pool.pool_info.pi_rebuild_st.rs_done != 1: self.fail("Rebuild is running but device hasn't failed yet.\n") if pool.pool_info.pi_rebuild_st.rs_obj_nr != 0: self.fail("Rebuilt objs not zero.\n") if pool.pool_info.pi_rebuild_st.rs_rec_nr != 0: self.fail("Rebuilt recs not zero.\n") dummy_pool_version = pool.pool_info.pi_rebuild_st.rs_version # do I/O for 30 seconds dummy_bw = io_utilities.continuous_io(container, 30) # trigger the rebuild rank = self.params.get("rank", '/run/testparams/ranks/*') server = DaosServer(self.context, server_group, rank) server.kill(1) pool.exclude([rank]) # do another 30 seconds of I/O, # waiting for some improvements in server bootstrap # at which point we can move the I/O to a separate client and # really pound it with I/O dummy_bw = io_utilities.continuous_io(container, 30) # wait for the rebuild to finish while True: pool.pool_query() if pool.pool_info.pi_rebuild_st.rs_done == 1: break else: time.sleep(2) # check rebuild statistics if pool.pool_info.pi_ndisabled != 1: self.fail("Number of disabled targets reporting incorrectly: {}" .format(pool.pool_info.pi_ndisabled)) if pool.pool_info.pi_rebuild_st.rs_errno != 0: self.fail("Rebuild error reported: {}".format( pool.pool_info.pi_rebuild_st.rs_errno)) if pool.pool_info.pi_rebuild_st.rs_obj_nr <= 0: self.fail("No objects have been rebuilt.") if pool.pool_info.pi_rebuild_st.rs_rec_nr <= 0: self.fail("No records have been rebuilt.") except (ValueError, DaosApiError) as excep: print(excep) print(traceback.format_exc()) self.fail("Expecting to pass but test has failed.\n") finally: # wait for the I/O process to finish try: server_utils.stop_server(hosts=self.hostlist_servers) os.remove(hostfile_servers) # really make sure everything is gone check_for_pool.cleanup_pools(self.hostlist_servers) 
finally: if self.agent_sessions: agent_utils.stop_agent(self.agent_sessions) server_utils.kill_server(self.hostlist_servers)
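# The rebuild wait above polls pool_query() in an unbounded loop; a bounded
# variant keeps a stuck rebuild from hanging the harness. A sketch against
# the same DaosPool query interface the test already uses:
import time

def wait_for_rebuild(pool, timeout=300, interval=2):
    """Poll rebuild status; return True when done, False on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        pool.pool_query()
        if pool.pool_info.pi_rebuild_st.rs_done == 1:
            return True
        time.sleep(interval)
    return False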
class DeleteContainerTest(Test): """ Tests DAOS container delete and close. """ def setUp(self): self.agent_sessions = None # get paths from the build_vars generated by build self.hostlist = None with open('../../../.build_vars.json') as build_file: build_paths = json.load(build_file) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.server_group = self.params.get("name", '/server_config/', 'daos_server') # parameters used in pool create self.createmode = self.params.get("mode", '/run/createtests/createmode/') self.createuid = os.geteuid() self.creategid = os.getegid() self.createsetid = self.params.get("setname", '/run/createtests/createset/') self.createsize = self.params.get("size", '/run/createtests/createsize/') # setup the DAOS python API self.context = DaosContext(build_paths['PREFIX'] + '/lib/') self.pool = None self.container = None # hostlist and logging self.d_log = DaosLog(self.context) self.hostlist = self.params.get("test_machines", '/run/hosts/*') self.hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist) server_utils.run_server(self.hostfile, self.server_group, self.basepath) def tearDown(self): if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) server_utils.stop_server(hosts=self.hostlist) def test_container_delete(self): """ Test basic container delete :avocado: tags=regression,cont,vm,contdelete """ expected_for_param = [] uuidlist = self.params.get("uuid", '/run/createtests/ContainerUUIDS/*/') cont_uuid = uuidlist[0] expected_for_param.append(uuidlist[1]) pohlist = self.params.get("poh", '/run/createtests/PoolHandles/*/') poh = pohlist[0] expected_for_param.append(pohlist[1]) openlist = self.params.get("opened", "/run/createtests/ConnectionOpened/*/") opened = openlist[0] expected_for_param.append(openlist[1]) forcelist = self.params.get("force", "/run/createtests/ForceDestroy/*/") force = forcelist[0] expected_for_param.append(forcelist[1]) if force >= 1: self.cancel("Force >= 1 blocked by issue described in " "https://jira.hpdd.intel.com/browse/DAOS-689") if force == 0: self.cancel("Force = 0 blocked by " "https://jira.hpdd.intel.com/browse/DAOS-1935") expected_result = 'PASS' for result in expected_for_param: if result == 'FAIL': expected_result = 'FAIL' break try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(self.createmode, self.createuid, self.creategid, self.createsize, self.createsetid, None) # need a connection to create container self.pool.connect(1 << 1) self.container = DaosContainer(self.context) # create should always work (testing destroy) if not cont_uuid == 'INVALID': cont_uuid = uuid.UUID(uuidlist[0]) self.container.create(self.pool.handle, cont_uuid) else: self.container.create(self.pool.handle) # Opens the container if required if opened: self.container.open(self.pool.handle) # wait a few seconds and then attempts to destroy container time.sleep(5) if poh == 'VALID': poh = self.pool.handle # if container is INVALID, overwrite with non existing UUID if cont_uuid == 'INVALID': cont_uuid = uuid.uuid4() self.container.destroy(force=force, poh=poh, con_uuid=cont_uuid) self.container = None if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as excep: self.d_log.error(excep) self.d_log.error(traceback.format_exc()) if expected_result == 'PASS': self.fail("Test was expected to pass 
but it failed.\n") finally: # clean up the pool if self.pool is not None: self.pool.destroy(1) self.pool = None
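# The destroy matrix above swaps YAML sentinels ('VALID', 'INVALID') for
# live or bogus values just before the destroy call. A sketch of the UUID
# half of that substitution (helper name is illustrative):
import uuid

def resolve_cont_uuid(sentinel, container):
    """Map a YAML sentinel to a container UUID for the destroy call."""
    if sentinel == 'INVALID':
        return uuid.uuid4()  # well-formed, but matches no container
    return uuid.UUID(container.get_uuid_str())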
class PoolSvc(TestWithServers): """ Tests svc argument while pool create. :avocado: recursive """ def tearDown(self): try: if self.pool is not None and self.pool.attached: self.pool.destroy(1) finally: super(PoolSvc, self).tearDown() def test_poolsvc(self): """ Test svc arg during pool create. :avocado: tags=pool,svc """ # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createuid = os.geteuid() creategid = os.getegid() createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createsvc = self.params.get("svc", '/run/createtests/createsvc/*/') expected_result = createsvc[1] try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None, None, createsvc[0]) self.pool.connect(1 << 1) # checking returned rank list for server more than 1 iterator = 0 while (int(self.pool.svc.rl_ranks[iterator]) > 0 and int(self.pool.svc.rl_ranks[iterator]) <= createsvc[0] and int(self.pool.svc.rl_ranks[iterator]) != 999999): iterator += 1 if iterator != createsvc[0]: self.fail("Length of Returned Rank list is not equal to " "the number of Pool Service members.\n") rank_list = [] for iterator in range(createsvc[0]): rank_list.append(int(self.pool.svc.rl_ranks[iterator])) if len(rank_list) != len(set(rank_list)): self.fail("Duplicate values in returned rank list") self.pool.pool_query() leader = self.pool.pool_info.pi_leader if createsvc[0] == 3: # kill pool leader and exclude it self.pool.pool_svc_stop() self.pool.exclude([leader]) # perform pool disconnect, try connect again and disconnect self.pool.disconnect() self.pool.connect(1 << 1) self.pool.disconnect() # kill another server which is not a leader and exclude it server = DaosServer(self.context, self.server_group, leader - 1) server.kill(1) self.pool.exclude([leader - 1]) # perform pool connect self.pool.connect(1 << 1) if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as excep: print(excep) print(traceback.format_exc()) if expected_result == 'PASS': self.fail("Test was expected to pass but it failed.\n")
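# The rank-list walk in test_poolsvc above is easier to audit as a helper.
# A sketch under the same assumptions the test makes (999999 marks an
# unused slot, and the list must hold svcn distinct ranks):
def valid_svc_ranks(svc, svcn):
    """Return the first svcn service ranks, raising on bad entries."""
    ranks = [int(svc.rl_ranks[i]) for i in range(svcn)]
    if any(rank == 999999 for rank in ranks):
        raise AssertionError("unset slot in pool service rank list")
    if len(set(ranks)) != len(ranks):
        raise AssertionError("duplicate ranks in pool service rank list")
    return ranks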
class PoolSvc(Test): """ Tests svc argument while pool create. """ def setUp(self): # get paths from the build_vars generated by build with open('../../../.build_vars.json') as build_file: build_paths = json.load(build_file) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.server_group = self.params.get("name", '/server_config/', 'daos_server') self.daosctl = self.basepath + '/install/bin/daosctl' # setup the DAOS python API self.context = DaosContext(build_paths['PREFIX'] + '/lib/') self.pool = None self.hostfile = None self.hostlist = self.params.get("test_machines", '/run/hosts/*') self.hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) print("Host file is: {}".format(self.hostfile)) self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist) server_utils.run_server(self.hostfile, self.server_group, self.basepath) def tearDown(self): try: if self.pool is not None and self.pool.attached: self.pool.destroy(1) finally: if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) server_utils.stop_server(hosts=self.hostlist) def test_poolsvc(self): """ Test svc arg during pool create. :avocado: tags=pool,svc """ # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createuid = os.geteuid() creategid = os.getegid() createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createsvc = self.params.get("svc", '/run/createtests/createsvc/*/') expected_result = createsvc[1] try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None, None, createsvc[0]) self.pool.connect(1 << 1) # checking returned rank list for server more than 1 i = 0 while ( int(self.pool.svc.rl_ranks[i]) > 0 and int(self.pool.svc.rl_ranks[i]) <= createsvc[0] and int(self.pool.svc.rl_ranks[i]) != 999999 ): i += 1 if i != createsvc[0]: self.fail("Length of Returned Rank list is not equal to " "the number of Pool Service members.\n") rank_list = [] for j in range(createsvc[0]): rank_list.append(int(self.pool.svc.rl_ranks[j])) if len(rank_list) != len(set(rank_list)): self.fail("Duplicate values in returned rank list") if createsvc[0] == 3: self.pool.disconnect() cmd = ('{0} kill-leader --uuid={1}' .format(self.daosctl, self.pool.get_uuid_str())) process.system(cmd) self.pool.connect(1 << 1) self.pool.disconnect() server = DaosServer(self.context, self.server_group, 2) server.kill(1) self.pool.exclude([2]) self.pool.connect(1 << 1) if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as excep: print(excep) print(traceback.format_exc()) if expected_result == 'PASS': self.fail("Test was expected to pass but it failed.\n")
class ObjOpenBadParam(TestWithServers): """ Test Class Description: Pass an assortment of bad parameters to the daos_obj_open function. :avocado: recursive """ def setUp(self): super(ObjOpenBadParam, self).setUp() try: # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/') createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') createuid = os.geteuid() creategid = os.getegid() # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) # need a connection to create container self.pool.connect(1 << 1) # create a container self.container = DaosContainer(self.context) self.container.create(self.pool.handle) # now open it self.container.open() # create an object and write some data into it thedata = "a string that I want to stuff into an object" self.datasize = len(thedata) + 1 self.dkey = "this is the dkey" self.akey = "this is the akey" self.obj, self.epoch = self.container.write_an_obj(thedata, self.datasize, self.dkey, self.akey, obj_cls=1) thedata2 = self.container.read_an_obj(self.datasize, self.dkey, self.akey, self.obj, self.epoch) if thedata not in thedata2.value: print(thedata) print(thedata2.value) err_str = "Error reading back data, test failed during the " \ "initial setup." self.d_log.error(err_str) self.fail(err_str) # setup leaves object in open state, so closing to start clean self.obj.close() except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Test failed during the initial setup.") def test_bad_obj_handle(self): """ Test ID: DAOS-1320 Test Description: Attempt to open a garbage object handle. :avocado: tags=all,object,full_regression,tiny,objopenbadhandle """ saved_handle = self.obj.obj_handle self.obj.obj_handle = 8675309 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.obj.obj_handle = saved_handle def test_invalid_container_handle(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object with a garbage container handle. :avocado: tags=all,object,full_regression,tiny,objopenbadcont """ saved_coh = self.container.coh self.container.coh = 8675309 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.container.coh = saved_coh def test_closed_container_handle(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with a closed handle. :avocado: tags=all,object,full_regression,tiny,objopenclosedcont """ self.container.close() try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.container.open() def test_pool_handle_as_obj_handle(self): """ Test ID: DAOS-1320 Test Description: Adding this test by request, this test attempts to open an object that's had its handle set to be the same as a valid pool handle. 
:avocado: tags=all,object,full_regression,tiny,objopenbadpool """ saved_oh = self.obj.obj_handle self.obj.obj_handle = self.pool.handle try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1002' not in str(excep): self.d_log.error("test expected a -1002 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1002 but did not get it") finally: self.obj.obj_handle = saved_oh def test_null_ranklist(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with an empty ranklist. :avocado: tags=all,object,full_regression,tiny,objopennullrl """ # null rl saved_rl = self.obj.tgt_rank_list self.obj.tgt_rank_list = None try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.tgt_rank_list = saved_rl def test_null_oid(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with null object id. :avocado: tags=all,object,full_regression,tiny,objopennulloid """ # null oid saved_oid = self.obj.c_oid self.obj.c_oid = DaosObjId(0, 0) try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("Test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.c_oid = saved_oid def test_null_tgts(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with null tgt. :avocado: tags=all,object,full_regression,tiny,objopennulltgts """ # null tgts saved_ctgts = self.obj.c_tgts self.obj.c_tgts = 0 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("Test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.c_tgts = saved_ctgts def test_null_attrs(self): """ Test ID: DAOS-1320 Test Description: Attempt to open an object in a container with null object attributes. :avocado: tags=all,object,full_regression,tiny,objopennullattr """ # null attr saved_attr = self.obj.attr self.obj.attr = 0 try: dummy_obj = self.obj.open() except DaosApiError as excep: if '-1003' not in str(excep): self.d_log.error("test expected a -1003 but did not get it") self.d_log.error(traceback.format_exc()) self.fail("test expected a -1003 but did not get it") finally: self.obj.attr = saved_attr
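# Each negative test above follows the same save -> corrupt -> open ->
# check error -> restore shape. A sketch of that pattern factored into a
# context manager (an illustrative refactor, not existing test code):
from contextlib import contextmanager

@contextmanager
def corrupted(target, attr, bad_value):
    """Temporarily replace target.<attr> with bad_value, restoring on exit."""
    saved = getattr(target, attr)
    setattr(target, attr, bad_value)
    try:
        yield
    finally:
        setattr(target, attr, saved)

# usage inside a test, with the same -1002 expectation as above:
#     with corrupted(self.obj, "obj_handle", 8675309):
#         try:
#             self.obj.open()
#             self.fail("expected a -1002 but the open succeeded")
#         except DaosApiError as excep:
#             if '-1002' not in str(excep):
#                 self.fail("expected a -1002 but got another error")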
def test_destroy_withdata(self): """ Test destroy and recreate one right after the other multiple times Should fail. :avocado: tags=pool,pooldestroy,destroydata """ try: # write out a hostfile and start the servers with it self.hostlist = self.params.get("test_machines1", '/run/hosts/') hostfile = write_host_file.write_host_file(self.hostlist, self.tmp) self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist) server_utils.run_server(hostfile, self.server_group, self.basepath) # parameters used in pool create createmode = self.params.get("mode", '/run/poolparams/createmode/') createuid = self.params.get("uid", '/run/poolparams/createuid/') creategid = self.params.get("gid", '/run/poolparams/creategid/') createsetid = self.params.get("setname", '/run/poolparams/createset/') createsize = self.params.get("size", '/run/poolparams/createsize/') # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) # need a connection to create container pool.connect(1 << 1) # create a container container = DaosContainer(self.context) container.create(pool.handle) pool.disconnect() daosctl = self.basepath + '/install/bin/daosctl' write_cmd = ('{0} write-pattern -i {1} -l 0 -c {2} -p sequential'. format(daosctl, c_uuid_to_str(pool.uuid), c_uuid_to_str(container.uuid))) process.system_output(write_cmd) # blow it away pool.destroy(1) except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("create/destroy/create/destroy test failed.\n") except Exception as excep: self.fail("Daos code segfaulted most likely. Error: %s" % excep) # no matter what happens cleanup finally: if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) server_utils.stop_server(hosts=self.hostlist) os.remove(hostfile)
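# The write step above shells out to daosctl with a formatted string. A
# sketch of the same invocation with list-style arguments, which sidesteps
# shell quoting (plain subprocess shown for illustration; the test itself
# uses avocado's process.system_output):
import subprocess

def write_pattern(daosctl, pool_uuid, cont_uuid):
    """Invoke 'daosctl write-pattern' against the given pool/container."""
    return subprocess.check_output(
        [daosctl, "write-pattern", "-i", pool_uuid, "-l", "0",
         "-c", cont_uuid, "-p", "sequential"])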
def test_array_obj(self): """ Test ID: DAOS-961 Test Description: Writes an array to an object and then reads it back and verifies it. :avocado: tags=object,arrayobj,regression,vm,small """ try: # parameters used in pool create createmode = self.params.get("mode", '/run/pool_params/createmode/') createsetid = self.params.get("setname", '/run/pool_params/createset/') createsize = self.params.get("size", '/run/pool_params/createsize/') createuid = os.geteuid() creategid = os.getegid() # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) self.plog.info("Pool %s created.", pool.get_uuid_str()) # need a connection to create container pool.connect(1 << 1) # create a container container = DaosContainer(self.context) container.create(pool.handle) self.plog.info("Container %s created.", container.get_uuid_str()) # now open it container.open() # do a query and compare the UUID returned from create with # that returned by query container.query() if container.get_uuid_str() != c_uuid_to_str( container.info.ci_uuid): self.fail("Container UUID did not match the one in info\n") # create an object and write some data into it thedata = [] thedata.append("data string one") thedata.append("data string two") thedata.append("data string tre") dkey = "this is the dkey" akey = "this is the akey" self.plog.info("writing array to dkey >%s< akey >%s<.", dkey, akey) oid, epoch = container.write_an_array_value(thedata, dkey, akey, obj_cls=3) # read the data back and make sure its correct length = len(thedata[0]) thedata2 = container.read_an_array(len(thedata), length+1, dkey, akey, oid, epoch) if thedata[0][0:length-1] != thedata2[0][0:length-1]: self.plog.error("Data mismatch") self.plog.error("Wrote: >%s<", thedata[0]) self.plog.error("Read: >%s<", thedata2[0]) self.fail("Write data, read it back, didn't match\n") if thedata[2][0:length-1] != thedata2[2][0:length-1]: self.plog.error("Data mismatch") self.plog.error("Wrote: >%s<", thedata[2]) self.plog.error("Read: >%s<", thedata2[2]) self.fail("Write data, read it back, didn't match\n") container.close() # wait a few seconds and then destroy time.sleep(5) container.destroy() # cleanup the pool pool.disconnect() pool.destroy(1) self.plog.info("Test Complete") except DaosApiError as excep: self.plog.error("Test Failed, exception was thrown.") print(excep) print(traceback.format_exc()) self.fail("Test was expected to pass but it failed.\n")
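# The read-back check above compares entries 0 and 2 only; checking every
# entry costs one expression. A sketch following the test's convention of
# comparing the first length-1 characters:
def arrays_match(written, read_back, length):
    """True if every read-back entry matches what was written."""
    return all(wrote[0:length - 1] == read[0:length - 1]
               for wrote, read in zip(written, read_back))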
class ObjectMetadata(avocado.Test):
    """
    Test Class Description:
        Test the general metadata operations and boundary conditions.
    """

    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.hostlist = None
        self.hostfile_clients = None
        self.hostfile = None
        self.out_queue = None
        self.pool_connect = True

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("servers", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.hostlist_clients = self.params.get("clients", '/run/hosts/*')
        self.hostfile_clients = write_host_file.write_host_file(
            self.hostlist_clients, self.workdir)
        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile, self.server_group,
                                self.basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         nvme_size=self.params.get("size",
                                                   '/run/pool/nvmesize/*'))

    def tearDown(self):
        try:
            if self.pool_connect:
                self.pool.disconnect()
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    @avocado.skip("Skipping until DAOS-1936/DAOS-1946 is fixed.")
    def test_metadata_fillup(self):
        """
        Test ID: DAOS-1512
        Test Description: Test to verify no IO happens after metadata is
        full.

        :avocado: tags=metadata,metadata_fill,nvme,small
        """
        self.pool.connect(2)
        container = DaosContainer(self.context)

        self.d_log.debug("Fillup Metadata....")
        for _cont in range(NO_OF_MAX_CONTAINER):
            container.create(self.pool.handle)

        self.d_log.debug("Metadata Overload...")
        # This should fail with a no-metadata-space error.
        try:
            for _cont in range(250):
                container.create(self.pool.handle)
        except DaosApiError as exe:
            print(exe, traceback.format_exc())
            return

        self.fail("Test was expected to fail but it passed.\n")

    @avocado.skip("Skipping until DAOS-1965 is fixed.")
    @avocado.fail_on(DaosApiError)
    def test_metadata_addremove(self):
        """
        Test ID: DAOS-1512
        Test Description: Verify metadata releases space after container
        delete.

        :avocado: tags=metadata,metadata_free_space,nvme,small
        """
        self.pool.connect(2)
        for k in range(10):
            container_array = []
            self.d_log.debug("Container Create Iteration {}".format(k))
            for _cont in range(NO_OF_MAX_CONTAINER):
                container = DaosContainer(self.context)
                container.create(self.pool.handle)
                container_array.append(container)

            self.d_log.debug("Container Remove Iteration {}".format(k))
            for cont in container_array:
                cont.destroy()

    def thread_control(self, threads, operation):
        """
        Start threads and wait until all thread execution is finished.
        Check the queue for a "FAIL" message and fail the avocado test if
        one is found.
""" self.d_log.debug("IOR {0} Threads Started -----".format(operation)) for thrd in threads: thrd.start() for thrd in threads: thrd.join() while not self.out_queue.empty(): if self.out_queue.get() == "FAIL": return "FAIL" self.d_log.debug("IOR {0} Threads Finished -----".format(operation)) return "PASS" @avocado.fail_on(DaosApiError) def test_metadata_server_restart(self): """ Test ID: DAOS-1512 Test Description: This test will verify 2000 IOR small size container after server restart. Test will write IOR in 5 different threads for faster execution time. Each thread will create 400 (8bytes) containers to the same pool. Restart the servers, read IOR container file written previously and validate data integrity by using IOR option "-R -G 1". :avocado: tags=metadata,metadata_ior,nvme,small """ self.pool_connect = False files_per_thread = 400 total_ior_threads = 5 threads = [] ior_args = {} createsvc = self.params.get("svcn", '/run/pool/createsvc/') svc_list = "" for i in range(createsvc): svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":" svc_list = svc_list[:-1] ior_args['client_hostfile'] = self.hostfile_clients ior_args['pool_uuid'] = self.pool.get_uuid_str() ior_args['svc_list'] = svc_list ior_args['basepath'] = self.basepath ior_args['server_group'] = self.server_group ior_args['tmp_dir'] = self.workdir ior_args['iorwriteflags'] = self.params.get("F", '/run/ior/iorwriteflags/') ior_args['iorreadflags'] = self.params.get("F", '/run/ior/iorreadflags/') ior_args['iteration'] = self.params.get("iter", '/run/ior/iteration/') ior_args['stripe_size'] = self.params.get("s", '/run/ior/stripesize/*') ior_args['stripe_count'] = self.params.get("c", '/run/ior/stripecount/') ior_args['async_io'] = self.params.get("a", '/run/ior/asyncio/') ior_args['object_class'] = self.params.get("o", '/run/ior/objectclass/') ior_args['slots'] = self.params.get("slots", '/run/ior/clientslots/*') ior_args['files_per_thread'] = files_per_thread self.out_queue = Queue.Queue() #IOR write threads for i in range(total_ior_threads): threads.append(threading.Thread(target=ior_runner_thread, args=(self.out_queue, "Thread-{}".format(i), "write"), kwargs=ior_args)) if self.thread_control(threads, "write") == "FAIL": self.d_log.error(" IOR write Thread FAIL") self.fail(" IOR write Thread FAIL") #Server Restart if self.agent_sessions: AgentUtils.stop_agent(self.hostlist_clients, self.agent_sessions) server_utils.stop_server(hosts=self.hostlist) self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist_clients, self.hostlist) server_utils.run_server(self.hostfile, self.server_group, self.basepath) #Read IOR with verification with same number of threads threads = [] for i in range(total_ior_threads): threads.append(threading.Thread(target=ior_runner_thread, args=(self.out_queue, "Thread-{}".format(i), "read"), kwargs=ior_args)) if self.thread_control(threads, "read") == "FAIL": self.d_log.error(" IOR write Thread FAIL") self.fail(" IOR read Thread FAIL")
class RebuildNoCap(Test): """ Test Class Description: This class contains tests for pool rebuild. :avocado: tags=pool,rebuild,nocap """ build_paths = [] server_group = "" CONTEXT = None POOL = None hostfile = "" def setUp(self): """ setup for the test """ # get paths from the build_vars generated by build with open('../../../.build_vars.json') as f: build_paths = json.load(f) self.CONTEXT = DaosContext(build_paths['PREFIX'] + '/lib/') # generate a hostfile self.host_list = self.params.get("test_machines", '/run/hosts/') tmp = build_paths['PREFIX'] + '/tmp' self.hostfile = WriteHostFile.WriteHostFile(self.host_list, tmp) # fire up the DAOS servers self.server_group = self.params.get("server_group", '/run/server/', 'daos_server') ServerUtils.runServer(self.hostfile, self.server_group, build_paths['PREFIX'] + '/../') time.sleep(3) # create a pool to test with createmode = self.params.get("mode", '/run/pool/createmode/') createuid = self.params.get("uid", '/run/pool/createuid/') creategid = self.params.get("gid", '/run/pool/creategid/') createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') self.POOL = DaosPool(self.CONTEXT) self.POOL.create(createmode, createuid, creategid, createsize, createsetid) uuid = self.POOL.get_uuid_str() time.sleep(2) # stuff some bogus data into the pool how_many_bytes = long( self.params.get("datasize", '/run/testparams/datatowrite/')) exepath = build_paths['PREFIX'] +\ "/../src/tests/ftest/util/WriteSomeData.py" cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\ " --np 1 --host {1} {2} {3} testfile".format( uuid, self.host_list[0], exepath, how_many_bytes) subprocess.call(cmd, shell=True) def tearDown(self): """ cleanup after the test """ os.remove(self.hostfile) self.POOL.destroy(1) ServerUtils.stopServer() def test_rebuild_no_capacity(self): """ :avocado: tags=pool,rebuild,nocap """ try: print "\nsetup complete, starting test\n" # create a server object that references on of our pool target hosts # and then kill it svr_to_kill = int( self.params.get("rank_to_kill", '/run/testparams/ranks/')) sh = DaosServer(self.CONTEXT, bytes(self.server_group), svr_to_kill) time.sleep(1) sh.kill(1) # exclude the target from the dead server self.POOL.exclude([svr_to_kill]) # exclude should trigger rebuild, check self.POOL.connect(1 << 1) status = self.POOL.pool_query() if not status.pi_ntargets == len(self.host_list): self.fail("target count wrong.\n") if not status.pi_ndisabled == 1: self.fail("disabled target count wrong.\n") # the pool should be too full to start a rebuild so # expecting an error # not sure yet specifically what error if status.pi_rebuild_st[2] == 0: self.fail("expecting rebuild to fail but it didn't.\n") except ValueError as e: print(e) print(traceback.format_exc()) self.fail("Expecting to pass but test has failed.\n")
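# The bogus-data step above builds one shell string with inline env vars.
# A sketch of the same mpirun launch with an explicit environment, which
# is easier to audit (illustrative; the test itself uses subprocess.call
# with shell=True):
import os
import subprocess

def write_some_data(pool_uuid, host, exepath, nbytes):
    """Launch WriteSomeData.py on one host against the given pool."""
    env = dict(os.environ, DAOS_POOL=pool_uuid, DAOS_SVCL="1")
    subprocess.call(["mpirun", "--np", "1", "--host", host,
                     exepath, str(nbytes), "testfile"], env=env)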
class SameKeyDifferentValue(TestWithServers):
    """
    Test Description: Test to verify different types of values passed to
    the same akey and dkey.
    :avocado: recursive
    """

    def setUp(self):
        try:
            super(SameKeyDifferentValue, self).setUp()

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as excpn:
            print(excpn)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
                # wait a few seconds and then destroy
                time.sleep(5)
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)

        except DaosApiError as excpn:
            print(excpn)
            print(traceback.format_exc())
            self.fail("Test failed during teardown.\n")

        finally:
            super(SameKeyDifferentValue, self).tearDown()

    def test_single_to_array_value(self):
        """
        Jira ID: DAOS-2218
        Test Description: Test to verify different types of values passed
        (i.e. single to array value) to the same akey and dkey.
        Case1: Insert akey,dkey with single value
               Insert same akey,dkey with array value
               Result: should return -1001 ERR.
        Case2: Insert akey,dkey with single value
               Punch the keys
               Insert same akey,dkey under same object with array value
               Result: should either pass or return -1001 ERR
        Case3: Insert akey,dkey with single value
               Punch the keys
               Trigger aggregation
               Insert same akey,dkey under same object with array value
               Result: should either pass or return -1001 ERR

        :avocado: tags=object,samekeydifferentvalue,singletoarray,vm,small
        """
        # define akey, dkey, single value data and array value data
        single_value_data = "a string that I want to stuff into an object"
        array_value_data = []
        array_value_data.append("data string one")
        array_value_data.append("data string two")
        array_value_data.append("data string tre")

        dkey = "this is the dkey"
        akey = "this is the akey"

        aggregation = False

        for i in range(3):
            try:
                # create an object and write single value data into it
                obj, txn = self.container.write_an_obj(
                    single_value_data, len(single_value_data) + 1,
                    dkey, akey, obj_cls=1)

                # read the data back and make sure it's correct
                read_back_data = self.container.read_an_obj(
                    len(single_value_data) + 1, dkey, akey, obj, txn)
                if single_value_data != read_back_data.value:
                    print("data I wrote:" + single_value_data)
                    print("data I read back" + read_back_data.value)
                    self.fail("Write data, read it back, didn't match\n")

                # test case 1
                if i == 0:
                    try:
                        # write array value data to same keys, expected
                        # to fail
                        self.container.write_an_array_value(
                            array_value_data, dkey, akey, obj, obj_cls=1)

                        # above line is expected to return an error,
                        # if not fail the test
                        self.fail(
                            "Array value write to existing single value"
                            " key should have failed\n")

                    # should fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error"
                                      " message, but it did not\n")

                # test cases 2 and 3
                elif i in (1, 2):
                    try:
                        # punch the keys
                        obj.punch_akeys(0, dkey, [akey])
                        obj.punch_dkeys(0, [dkey])

                        if aggregation is True:
                            # trigger aggregation
                            self.container.aggregate(self.container.coh, 0)

                        # write to the same set of keys under same object
                        # with array value type
                        self.container.write_an_array_value(
                            array_value_data, dkey, akey, obj, obj_cls=1)

                    # above write of array value should either succeed
                    # or fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error"
                                      " message or the write should have"
                                      " been successful, but it did not\n")

                    # change the value of aggregation to test case 3
                    aggregation = True

                # close the object after each iteration
                obj.close()

            # catch the exception if the test fails to write to an object
            # or fails to punch the written object
            except DaosApiError as excp:
                self.fail("Failed to write to akey/dkey or punch the object")

    def test_array_to_single_value(self):
        """
        Jira ID: DAOS-2218
        Test Description: Test to verify different types of values passed
        (i.e. array to single value) to the same akey and dkey.
        Case1: Insert akey,dkey with array value
               Insert same akey,dkey with single value
               Result: should return -1001 ERR.
        Case2: Insert akey,dkey with array value
               Punch the keys
               Insert same akey,dkey under same object with single value
               Result: should either pass or return -1001 ERR
        Case3: Insert akey,dkey with array value
               Punch the keys
               Trigger aggregation
               Insert same akey,dkey under same object with single value
               Result: should either pass or return -1001 ERR

        :avocado: tags=object,samekeydifferentvalue,arraytosingle,vm,small
        """
        # define akey, dkey, single value data and array value data
        single_value_data = "a string that I want to stuff into an object"
        array_value_data = []
        array_value_data.append("data string one")
        array_value_data.append("data string two")
        array_value_data.append("data string tre")

        dkey = "this is the dkey"
        akey = "this is the akey"

        aggregation = False

        for i in range(3):
            try:
                # create an object and write array value data into it
                obj, txn = self.container.write_an_array_value(
                    array_value_data, dkey, akey, obj_cls=1)

                # read the data back and make sure it's correct
                length = len(array_value_data[0])
                read_back_data = self.container.read_an_array(
                    len(array_value_data), length + 1, dkey, akey, obj, txn)
                for j in range(3):
                    if (array_value_data[j][0:length - 1] !=
                            read_back_data[j][0:length - 1]):
                        print("Written Data: {}".format(array_value_data[j]))
                        print("Read Data: {}".format(read_back_data[j]))
                        self.fail("Data mismatch\n")

                # test case 1
                if i == 0:
                    try:
                        # write single value data to same keys, expected
                        # to fail
                        self.container.write_an_obj(
                            single_value_data, len(single_value_data) + 1,
                            dkey, akey, obj, obj_cls=1)

                        # above line is expected to return an error,
                        # if not fail the test
                        self.fail(
                            "Single value write to existing array value"
                            " key should have failed\n")

                    # should fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error"
                                      " message, but it did not\n")

                # test cases 2 and 3
                elif i in (1, 2):
                    try:
                        # punch the keys
                        obj.punch_akeys(0, dkey, [akey])
                        obj.punch_dkeys(0, [dkey])

                        if aggregation is True:
                            # trigger aggregation
                            self.container.aggregate(self.container.coh, 0)

                        # write to the same set of keys under same object
                        # with single value type
                        self.container.write_an_obj(
                            single_value_data, len(single_value_data) + 1,
                            dkey, akey, obj, obj_cls=1)

                    # above write of single value should either succeed
                    # or fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error"
                                      " message or the write should have"
                                      " been successful, but it did not\n")

                    # change the value of aggregation to test case 3
                    aggregation = True

                # close the object after each iteration
                obj.close()

            # catch the exception if the test fails to write to an object
            # or fails to punch the written object
            except DaosApiError as excp:
                self.fail("Failed to write to akey/dkey or punch the object")
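# Cases 2 and 3 above accept either success or a -1001 error. A sketch of
# that pass-or-specific-error check as a reusable helper (illustrative;
# assumes the same DaosApiError the tests already import):
def expect_pass_or_err(func, err_code, *args, **kwargs):
    """Call func; swallow a matching error code, re-raise anything else."""
    try:
        func(*args, **kwargs)
    except DaosApiError as excp:
        if err_code not in str(excp):
            raise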
class ContainerAsync(Test): """ Tests DAOS pool connect permissions (non existing pool handle, bad uuid) and close. :avocado: tags=container,containercreate2,connectpermission """ def setUp(self): # get paths from the build_vars generated by build with open('../../../.build_vars.json') as f: build_paths = json.load(f) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.tmp = build_paths['PREFIX'] + '/tmp' self.server_group = self.params.get("server_group",'/server/','daos_server') # setup the DAOS python API self.Context = DaosContext(build_paths['PREFIX'] + '/lib/') self.POOL = None hostlist = self.params.get("test_machines",'/run/hosts/*') self.hostfile = WriteHostFile.WriteHostFile(hostlist, self.tmp) print("Host file is: {}".format(self.hostfile)) ServerUtils.runServer(self.hostfile, self.server_group, self.basepath) time.sleep(10) def tearDown(self): if self.hostfile is not None: os.remove(self.hostfile) if self.POOL is not None and self.POOL.attached: self.POOL.destroy(1) ServerUtils.stopServer() def test_createasync(self): """ Test container create for asynchronous mode. :avocado: tags=container,containerasync,createasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode",'/run/createtests/createmode/*/') createsetid = self.params.get("setname",'/run/createtests/createset/') createsize = self.params.get("size",'/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.POOL = DaosPool(self.Context) self.POOL.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.POOL.handle self.POOL.connect(1 << 1) # Container initialization and creation self.Container1 = DaosContainer(self.Context) self.Container2 = DaosContainer(self.Context) GLOB_SIGNAL = threading.Event() self.Container1.create(poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print ("RC after successful Container create: " , GLOB_RC) # Try to recreate container after destroying pool, # this should fail. Checking rc after failure. self.POOL.destroy(1) GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.Container2.create(poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != -1005: self.fail("RC not as expected in async test") print ("RC after Container create failed:", GLOB_RC) # cleanup the Pool and Container self.POOL = None except ValueError as e: print e print traceback.format_exc() def test_destroyasync(self): """ Test container destroy for asynchronous mode. 
:avocado: tags=container,containerasync,destroyasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode",'/run/createtests/createmode/*/') createsetid = self.params.get("setname",'/run/createtests/createset/') createsize = self.params.get("size",'/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.POOL = DaosPool(self.Context) self.POOL.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.POOL.handle self.POOL.connect(1 << 1) # Container initialization and creation self.Container1 = DaosContainer(self.Context) self.Container2 = DaosContainer(self.Context) self.Container1.create(poh) GLOB_SIGNAL = threading.Event() self.Container1.destroy(1, poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print ("RC after successful Container create: " , GLOB_RC) # Try to destroy container again, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.Container2.destroy(1, poh, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != -1003: self.fail("RC not as expected in async test") print ("RC after Container destroy failed:", GLOB_RC) # cleanup the Pool and Container self.POOL.disconnect() self.POOL.destroy(1) self.POOL = None except ValueError as e: print e print traceback.format_exc() def test_openasync(self): """ Test container open for asynchronous mode. :avocado: tags=container,containerasync,openasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode",'/run/createtests/createmode/*/') createsetid = self.params.get("setname",'/run/createtests/createset/') createsize = self.params.get("size",'/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.POOL = DaosPool(self.Context) self.POOL.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.POOL.handle self.POOL.connect(1 << 1) # Container initialization and creation self.Container1 = DaosContainer(self.Context) self.Container2 = DaosContainer(self.Context) self.Container1.create(poh) str_cuuid = self.Container1.get_uuid_str() cuuid = uuid.UUID(str_cuuid) coh = self.Container1.coh GLOB_SIGNAL = threading.Event() self.Container1.open(poh, cuuid, 2, coh, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test") print ("RC after successful Container create: " , GLOB_RC) # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.Container2.open(None, None, None, None, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != -1003: self.fail("RC not as expected in async test") print ("RC after Container destroy failed:", GLOB_RC) # cleanup the Pool and Container self.Container1.close() self.Container1.destroy() self.POOL.disconnect() self.POOL.destroy(1) self.POOL = None except ValueError as e: print e print traceback.format_exc() def test_closeasync(self): """ Test container close for asynchronous mode. 
:avocado: tags=container,containerasync.closeasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode",'/run/createtests/createmode/*/') createsetid = self.params.get("setname",'/run/createtests/createset/') createsize = self.params.get("size",'/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.POOL = DaosPool(self.Context) self.POOL.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.POOL.handle self.POOL.connect(1 << 1) # Container initialization and creation self.Container1 = DaosContainer(self.Context) self.Container2 = DaosContainer(self.Context) self.Container1.create(poh) str_cuuid = self.Container1.get_uuid_str() cuuid = uuid.UUID(str_cuuid) coh = self.Container1.coh self.Container1.open(poh, cuuid, 2, coh) GLOB_SIGNAL = threading.Event() self.Container1.close(coh, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test:{0}".format(GLOB_RC)) print ("RC after successful Container create: " , GLOB_RC) # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.Container2.close(coh, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != -1002: self.fail("RC not as expected in async test:{0}".format(GLOB_RC)) print ("RC after Container destroy failed:", GLOB_RC) # cleanup the Pool and Container self.Container1.destroy() self.POOL.disconnect() self.POOL.destroy(1) self.POOL = None except ValueError as e: print e print traceback.format_exc() def test_queryasync(self): """ Test container query for asynchronous mode. :avocado: tags=container,containerasync,queryasync """ global GLOB_SIGNAL global GLOB_RC # parameters used in pool create createmode = self.params.get("mode",'/run/createtests/createmode/*/') createsetid = self.params.get("setname",'/run/createtests/createset/') createsize = self.params.get("size",'/run/createtests/createsize/') createuid = os.geteuid() creategid = os.getegid() try: # initialize a python pool object then create the underlying # daos storage self.POOL = DaosPool(self.Context) self.POOL.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.POOL.handle self.POOL.connect(1 << 1) # Container initialization and creation self.Container1 = DaosContainer(self.Context) self.Container2 = DaosContainer(self.Context) self.Container1.create(poh) str_cuuid = self.Container1.get_uuid_str() cuuid = uuid.UUID(str_cuuid) coh = self.Container1.coh # Open Container self.Container1.open(poh, None, 2, None, coh) GLOB_SIGNAL = threading.Event() self.Container1.query(coh, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0: self.fail("RC not as expected in async test:{0}".format(GLOB_RC)) print ("RC after successful Container create: " , GLOB_RC) # Close opened Container self.Container1.close(coh) # Try to open container2, this should fail, as non-existent. # Checking rc after failure. GLOB_SIGNAL = threading.Event() GLOB_RC = -9900000 self.Container2.query(coh, cb_func) GLOB_SIGNAL.wait() if GLOB_RC != -1002: self.fail("RC not as expected in async test:{0}".format(GLOB_RC)) print ("RC after Container destroy failed:", GLOB_RC) # cleanup the Pool and Container self.Container1.destroy() self.POOL.disconnect() self.POOL.destroy(1) self.POOL = None except ValueError as e: print e print traceback.format_exc()
class FullPoolContainerCreate(TestWithServers): """ Class for test to create a container in a pool with no remaining free space. :avocado: recursive """ def __init__(self, *args, **kwargs): super(FullPoolContainerCreate, self).__init__(*args, **kwargs) self.cont = None self.cont2 = None @skipForTicket("DAOS-3142") def test_no_space_cont_create(self): """ :avocado: tags=all,container,tiny,full_regression,fullpoolcontcreate """ # full storage rc err = "-1007" err2 = "-1009" # probably should be -1007, revisit later # create pool self.pool = DaosPool(self.context) mode = self.params.get("mode", '/conttests/createmode/') self.d_log.debug("mode is {0}".format(mode)) uid = os.geteuid() gid = os.getegid() # 16 mb pool, minimum size currently possible size = 16777216 self.d_log.debug("creating pool") self.pool.create(mode, uid, gid, size, self.server_group, None) self.d_log.debug("created pool") # connect to the pool self.d_log.debug("connecting to pool") self.pool.connect(1 << 1) self.d_log.debug("connected to pool") # query the pool self.d_log.debug("querying pool info") dummy_pool_info = self.pool.pool_query() self.d_log.debug("queried pool info") # create a container try: self.d_log.debug("creating container") self.cont = DaosContainer(self.context) self.cont.create(self.pool.handle) self.d_log.debug("created container") except DaosApiError as excep: self.d_log.error("caught exception creating container: " "{0}".format(excep)) self.fail("caught exception creating container: {0}".format(excep)) self.d_log.debug("opening container") self.cont.open() self.d_log.debug("opened container") # generate random dkey, akey each time # write 1mb until no space, then 10kb, etc. to fill the pool quickly for obj_sz in [1048576, 10240, 10, 1]: write_count = 0 while True: self.d_log.debug("writing obj {0}, sz {1} to " "container".format(write_count, obj_sz)) my_str = "a" * obj_sz my_str_sz = obj_sz dkey = (''.join( random.choice(string.ascii_lowercase) for i in range(5))) akey = (''.join( random.choice(string.ascii_lowercase) for i in range(5))) try: dummy_oid, dummy_tx = self.cont.write_an_obj( my_str, my_str_sz, dkey, akey, obj_cls="OC_SX") self.d_log.debug("wrote obj {0}, sz {1}".format( write_count, obj_sz)) write_count += 1 except DaosApiError as excep: if not (err in repr(excep) or err2 in repr(excep)): self.d_log.error("caught exception while writing " "object: {0}".format(repr(excep))) self.fail("caught exception while writing object: {0}".format(repr(excep))) else: self.d_log.debug("pool is too full for {0} byte " "objects".format(obj_sz)) break self.d_log.debug("closing container") self.cont.close() self.d_log.debug("closed container") # create a 2nd container now that pool is full try: self.d_log.debug("creating 2nd container") self.cont2 = DaosContainer(self.context) self.cont2.create(self.pool.handle) self.d_log.debug("created 2nd container") self.d_log.debug("opening container 2") self.cont2.open() self.d_log.debug("opened container 2") self.d_log.debug("writing one more object, write expected to fail") self.cont2.write_an_obj(my_str, my_str_sz, dkey, akey, obj_cls="OC_SX") self.fail("wrote one more object after pool was completely filled," " this should never print") except DaosApiError as excep: if not (err in repr(excep) or err2 in repr(excep)): self.d_log.error("caught unexpected exception while " "writing object: {0}".format(repr(excep))) self.fail("caught unexpected exception while writing " "object: {0}".format(repr(excep))) else: self.d_log.debug("correctly caught -1007 while attempting " "to write object in full pool")
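Matching error codes by substring, as in err in repr(excep), works but is easy to get wrong when one code happens to be a substring of another. A small hypothetical helper (daos_rc is not part of the test suite) that extracts the numeric rc makes the intent explicit; it assumes only that the DaosApiError text embeds the code as a signed integer such as -1007:

import re

def daos_rc(excep):
    """Return the first signed DAOS rc (e.g. -1007) found in an
    exception's text, or None if no code is present."""
    match = re.search(r"-\d+", repr(excep))
    return int(match.group(0)) if match else None

# usage sketch:
#     if daos_rc(excep) not in (-1007, -1009):
#         self.fail("unexpected rc: {0}".format(repr(excep)))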
def test_simple_rebuild(self): """ Test ID: Rebuild-001 Test Description: The most basic rebuild test. Use Cases: -- single pool rebuild, single client, various record/object counts :avocado: tags=pool,rebuild,rebuildsimple """ # the rebuild tests need to redo this stuff each time so not in setup # as it usually would be setid = self.params.get("setname", '/run/testparams/setnames/') server_group = self.params.get("server_group", '/server/', 'daos_server') basepath = os.path.normpath(self.build_paths['PREFIX'] + "/../") tmp = self.build_paths['PREFIX'] + '/tmp' self.hostlist = self.params.get("test_machines", '/run/hosts/') hostfile = WriteHostFile.WriteHostFile(self.hostlist, tmp) try: ServerUtils.runServer(hostfile, server_group, basepath) # use the uid/gid of the user running the test, these should # be perfectly valid createuid = os.geteuid() creategid = os.getegid() # parameters used in pool create that are in yaml createmode = self.params.get("mode", '/run/testparams/createmode/') createsetid = self.params.get("setname", '/run/testparams/createset/') createsize = self.params.get("size", '/run/testparams/createsize/') # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.Context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) # want an open connection during rebuild pool.connect(1 << 1) # get pool status we want to test later pool.pool_query() if pool.pool_info.pi_ndisabled != 0: self.fail( "Number of disabled targets reporting incorrectly.\n") if pool.pool_info.pi_rebuild_st.rs_errno != 0: self.fail("Rebuild error but rebuild hasn't run.\n") if pool.pool_info.pi_rebuild_st.rs_done != 1: self.fail("Rebuild is running but device hasn't failed yet.\n") if pool.pool_info.pi_rebuild_st.rs_obj_nr != 0: self.fail("Rebuilt objs not zero.\n") if pool.pool_info.pi_rebuild_st.rs_rec_nr != 0: self.fail("Rebuilt recs not zero.\n") pool_version = pool.pool_info.pi_rebuild_st.rs_version # create a container container = DaosContainer(self.Context) container.create(pool.handle) # now open it container.open() # how many objects and records are we creating objcount = self.params.get("objcount", '/run/testparams/numobjects/*') reccount = self.params.get("reccount", '/run/testparams/numrecords/*') if objcount == 0: reccount = 0 # which rank to write to and kill rank = self.params.get("rank", '/run/testparams/ranks/*') # how much data to write with each key size = self.params.get("size", '/run/testparams/datasize/') saved_data = [] for i in range(0, objcount): obj = None for j in range(0, reccount): # make some stuff up and write dkey = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) akey = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(5)) data = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(size)) obj, tx = container.write_an_obj(data, len(data), dkey, akey, obj, rank) saved_data.append((obj, dkey, akey, data, tx)) # read the data back and make sure it's correct data2 = container.read_an_obj(size, dkey, akey, obj, tx) if data != data2.value: self.fail("Write data 1, read it back, didn't match\n") # kill a server that has some of the data server = DaosServer(self.Context, server_group, rank) server.kill(1) # temporarily, the exclude of a failed target must be done # manually pool.exclude([rank]) while True: # get the pool/rebuild status again pool.pool_query() if pool.pool_info.pi_rebuild_st.rs_done == 1: break else: time.sleep(2) if
pool.pool_info.pi_ndisabled != 1: self.fail( "Number of disabled targets reporting incorrectly: {}". format(pool.pool_info.pi_ndisabled)) if pool.pool_info.pi_rebuild_st.rs_errno != 0: self.fail("Rebuild error reported: {}".format( pool.pool_info.pi_rebuild_st.rs_errno)) if pool.pool_info.pi_rebuild_st.rs_obj_nr != objcount: self.fail("Rebuilt objs not as expected: {0} {1}".format( pool.pool_info.pi_rebuild_st.rs_obj_nr, objcount)) if pool.pool_info.pi_rebuild_st.rs_rec_nr != (reccount * objcount): self.fail("Rebuilt recs not as expected: {0} {1}".format( pool.pool_info.pi_rebuild_st.rs_rec_nr, reccount * objcount)) # now that the rebuild finished verify the records are correct for tup in saved_data: data2 = container.read_an_obj(len(tup[3]), tup[1], tup[2], tup[0], tup[4]) if tup[3] != data2.value: self.fail("after rebuild data didn't check out") except DaosApiError as e: print(e) print(traceback.format_exc()) self.fail("Expecting to pass but test has failed.\n") finally: try: ServerUtils.stopServer(hosts=self.hostlist) os.remove(hostfile) # really make sure everything is gone CheckForPool.CleanupPools(self.hostlist) finally: ServerUtils.killServer(self.hostlist)
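The unbounded while True poll above will hang the whole job if rebuild never reaches rs_done. A bounded variant of the same loop, built only from the pool_query()/rs_done fields the test already uses (the timeout and interval values are arbitrary choices):

import time

def wait_for_rebuild(pool, timeout=300, interval=2):
    """Poll rebuild status until done; raise instead of hanging forever."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        pool.pool_query()
        if pool.pool_info.pi_rebuild_st.rs_done == 1:
            return
        time.sleep(interval)
    raise RuntimeError("rebuild did not finish within {0}s".format(timeout))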
class PunchTest(TestWithServers): """ Simple test to verify the 3 different punch calls. :avocado: recursive """ def setUp(self): try: super(PunchTest, self).setUp() # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/') createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') createuid = os.geteuid() creategid = os.getegid() # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) self.pool.connect(1 << 1) # create a container self.container = DaosContainer(self.context) self.container.create(self.pool.handle) # now open it self.container.open() except DaosApiError as excpn: print(excpn) print(traceback.format_exc()) self.fail("Test failed during setup.\n") def tearDown(self): try: if self.container: self.container.close() # wait a few seconds and then destroy time.sleep(5) if self.container: self.container.destroy() # cleanup the pool if self.pool: self.pool.disconnect() self.pool.destroy(1) except DaosApiError as excpn: print(excpn) print(traceback.format_exc()) self.fail("Test failed during teardown.\n") finally: super(PunchTest, self).tearDown() def test_dkey_punch(self): """ The most basic test of the dkey punch function. :avocado: tags=object,punch,dkeypunch,regression,vm,small """ try: # create an object and write some data into it thedata = "a string that I want to stuff into an object" dkey = "this is the dkey" akey = "this is the akey" obj, txn = self.container.write_an_obj(thedata, len(thedata) + 1, dkey, akey, obj_cls=1) # read the data back and make sure it's correct thedata2 = self.container.read_an_obj( len(thedata) + 1, dkey, akey, obj, txn) if thedata != thedata2.value: print("data I wrote:" + thedata) print("data I read back" + thedata2.value) self.fail("Wrote data, read it back, didn't match\n") # now punch this data, should fail, can't punch committed data obj.punch_dkeys(txn, [dkey]) # expecting punch of committed data above to fail self.fail("Punch should have failed but it didn't.\n") # expecting an exception so do nothing except DaosApiError as dummy_e: pass try: # now punch this data obj.punch_dkeys(0, [dkey]) # this one should work so error if exception occurs except DaosApiError as dummy_e: self.fail("Punch should have worked.\n") # there are a bunch of other cases to test here, # --test updating and punching the same data in the same tx, # should fail # --test non updated data in an open tx, should work def test_akey_punch(self): """ The most basic test of the akey punch function.
:avocado: tags=object,punch,akeypunch,regression,vm,small """ try: # create an object and write some data into it dkey = "this is the dkey" data1 = [("this is akey 1", "this is data value 1"), ("this is akey 2", "this is data value 2"), ("this is akey 3", "this is data value 3")] obj, txn = self.container.write_multi_akeys(dkey, data1, obj_cls=1) # read back the 1st epoch's data and check 1 value just to make sure # everything is on the up and up readbuf = [(data1[0][0], len(data1[0][1]) + 1), (data1[1][0], len(data1[1][1]) + 1), (data1[2][0], len(data1[2][1]) + 1)] retrieved_data = self.container.read_multi_akeys( dkey, readbuf, obj, txn) if retrieved_data[data1[1][0]] != data1[1][1]: print("middle akey: {}".format(retrieved_data[data1[1][0]])) self.fail("data retrieval failure") # now punch one akey from this data obj.punch_akeys(txn, dkey, [data1[1][0]]) # expecting punch of committed data above to fail self.fail("Punch should have failed but it didn't.\n") # expecting an exception so do nothing except DaosApiError as excep: print(excep) try: # now punch the object without a tx obj.punch_akeys(0, dkey, [data1[1][0]]) # expecting it to work this time so error except DaosApiError as excep: self.fail("Punch should have worked.\n") def test_obj_punch(self): """ The most basic test of the object punch function. Really similar to above except the whole object is deleted. :avocado: tags=object,punch,objpunch,regression,vm,small """ try: # create an object and write some data into it thedata = "a string that I want to stuff into an object" dkey = "this is the dkey" akey = "this is the akey" obj, txn = self.container.write_an_obj(thedata, len(thedata) + 1, dkey, akey, obj_cls=1) # read the data back and make sure it's correct thedata2 = self.container.read_an_obj( len(thedata) + 1, dkey, akey, obj, txn) if thedata != thedata2.value: print("data I wrote:" + thedata) print("data I read back" + thedata2.value) self.fail("Wrote data, read it back, didn't match\n") # now punch the object, committed so not expecting it to work obj.punch(txn) # expecting punch of committed data above to fail self.fail("Punch should have failed but it didn't.\n") # expecting an exception so do nothing except DaosApiError as excep: print(excep) try: obj.punch(0) # expecting it to work without a tx except DaosApiError as excep: print(excep) self.fail("Punch should have worked.\n")
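The three punch entry points exercised above share one calling convention: the first argument is the transaction handle, and 0 means no transaction, which is the only mode in which punching already-committed data succeeds. Collected in one place, reusing the obj, dkey, and akey names from the tests above:

# punch a whole dkey (and everything under it) outside any tx
obj.punch_dkeys(0, [dkey])

# punch a single akey under a dkey
obj.punch_akeys(0, dkey, [akey])

# punch the entire object
obj.punch(0)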
class DestroyRebuild(Test): """ Test Class Description: This test verifies destruction of a pool that is rebuilding. :avocado: tags=pool,pooldestroy,rebuild,desreb """ build_paths = [] server_group = "" CONTEXT = None POOL = None hostfile = "" def setUp(self): """ setup for the test """ # get paths from the build_vars generated by build with open('../../../.build_vars.json') as f: build_paths = json.load(f) self.CONTEXT = DaosContext(build_paths['PREFIX'] + '/lib/') # generate a hostfile self.hostlist = self.params.get("test_machines",'/run/hosts/') tmp = build_paths['PREFIX'] + '/tmp' self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, tmp) # fire up the DAOS servers self.server_group = self.params.get("server_group",'/run/server/', 'daos_server') ServerUtils.runServer(self.hostfile, self.server_group, build_paths['PREFIX'] + '/../') time.sleep(3) # create a pool to test with createmode = self.params.get("mode",'/run/pool/createmode/') createuid = self.params.get("uid",'/run/pool/createuid/') creategid = self.params.get("gid",'/run/pool/creategid/') createsetid = self.params.get("setname",'/run/pool/createset/') createsize = self.params.get("size",'/run/pool/createsize/') self.POOL = DaosPool(self.CONTEXT) self.POOL.create(createmode, createuid, creategid, createsize, createsetid) self.POOL.get_uuid_str() time.sleep(2) def tearDown(self): """ cleanup after the test """ try: os.remove(self.hostfile) if self.POOL: self.POOL.destroy(1) finally: ServerUtils.stopServer(hosts=self.hostlist) def test_destroy_while_rebuilding(self): """ :avocado: tags=pool,pooldestroy,rebuild,desreb """ try: print("\nsetup complete, starting test\n") # create a server object that references one of our pool target hosts # and then kill it svr_to_kill = int(self.params.get("rank_to_kill", '/run/testparams/ranks/')) sh = DaosServer(self.CONTEXT, bytes(self.server_group), svr_to_kill) print("created server ") # BUG if you don't connect the rebuild doesn't start correctly self.POOL.connect(1 << 1) status = self.POOL.pool_query() if not status.pi_ntargets == len(self.hostlist): self.fail("target count wrong.\n") if not status.pi_ndisabled == 0: self.fail("disabled target count wrong.\n") print("connect ") time.sleep(1) sh.kill(1) print("killed server ") # exclude the target from the dead server self.POOL.exclude([svr_to_kill]) print("exclude target ") #self.POOL.disconnect() #print("disconnect ") # the rebuild won't take long since there is no data so do # the destroy quickly self.POOL.destroy(1) print("destroy ") except DaosApiError as e: print(e) print(traceback.format_exc()) self.fail("Expecting to pass but test has failed.\n")
def test_global_handle(self): """ Test ID: DAO Test Description: Use a pool handle in another process. :avocado: tags=container,conthandle,vm,small,regression """ try: # use the uid/gid of the user running the test, these should # be perfectly valid createuid = os.geteuid() creategid = os.getegid() # parameters used in pool create that are in yaml createmode = self.params.get("mode", '/run/testparams/createmode/') createsetid = self.params.get("setname", '/run/testparams/createset/') createsize = self.params.get("size", '/run/testparams/createsize/') # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.Context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) pool.connect(1 << 1) # create a pool global handle iov_len, buf_len, buf = pool.local2global() buftype = ctypes.c_byte * buf_len c_buf = buftype.from_buffer(buf) sct_pool_handle = sharedctypes.RawValue( IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len) # create a container container = DaosContainer(self.Context) container.create(pool.handle) container.open() # create a container global handle iov_len, buf_len, buf = container.local2global() buftype = ctypes.c_byte * buf_len c_buf = buftype.from_buffer(buf) sct_cont_handle = sharedctypes.RawValue( IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len) sct_pool_uuid = sharedctypes.RawArray(ctypes.c_byte, pool.uuid) # this should work in the future but need on-line server addition #arg_list = ( #p = Process(target=CheckHandle, args=arg_list) #p.start() #p.join() # for now verifying global handle in the same process which is not # the intended use case CheckHandle(sct_pool_handle, sct_pool_uuid, sct_cont_handle, 0) except DaosApiError as e: print(e) print(traceback.format_exc()) self.fail("Expecting to pass but test has failed.\n")
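The commented-out Process block marks where the cross-process verification was meant to go. On the receiving side, the inverse of local2global would rebuild usable handles from the shared iov values; a sketch of what a hypothetical check_handle worker might do, assuming the python API exposes global2local() as the counterpart of the local2global() used above (both the name and the argument order are assumptions):

def check_handle(context, pool_glob, pool_uuid, cont_glob):
    """Hypothetical worker: rebuild handles shared via sharedctypes and
    exercise them from another process."""
    pool = DaosPool(context)
    pool.uuid = pool_uuid
    # assumed to mirror local2global's (iov_len, buf_len, buf) triple
    pool.global2local(context, pool_glob.iov_len,
                      pool_glob.iov_buf_len, pool_glob.iov_buf)
    container = DaosContainer(context)
    container.poh = pool.handle
    container.global2local(context, cont_glob.iov_len,
                           cont_glob.iov_buf_len, cont_glob.iov_buf)
    container.query()  # any call that exercises the rebuilt handle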
class InfoTests(Test): """ Tests DAOS pool query. """ def setUp(self): # get paths from the build_vars generated by build with open( os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../../.build_vars.json")) as f: build_paths = json.load(f) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.tmp = build_paths['PREFIX'] + '/tmp' self.server_group = self.params.get("server_group", '/server/', 'daos_server') context = DaosContext(build_paths['PREFIX'] + '/lib/') self.pool = DaosPool(context) self.d_log = DaosLog(context) self.hostlist = self.params.get("test_machines1", '/run/hosts/') self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp) ServerUtils.runServer(self.hostfile, self.server_group, self.basepath) def tearDown(self): # shut 'er down try: if self.pool: self.pool.destroy(1) os.remove(self.hostfile) finally: ServerUtils.stopServer(hosts=self.hostlist) def test_simple_query(self): """ Test querying a pool created on a single server. :avocado: tags=pool,poolquery,infotest """ # create pool mode = self.params.get("mode", '/run/testparams/modes/*', 0o731) if mode == 73: self.cancel('Cancel the mode test 73 because of DAOS-1877') uid = os.geteuid() gid = os.getegid() size = self.params.get("size", '/run/testparams/sizes/*', 0) group = self.server_group self.pool.create(mode, uid, gid, size, group, None) # connect to the pool flags = self.params.get("perms", '/run/testparams/connectperms/*', '') connect_flags = 1 << flags self.pool.connect(connect_flags) # query the pool pool_info = self.pool.pool_query() # check uuid uuid_str = c_uuid_to_str(pool_info.pi_uuid) if uuid_str != self.pool.get_uuid_str(): self.d_log.error("UUID str does not match expected string") self.fail("UUID str does not match expected string") ''' # validate size of pool is what we expect This check is currently disabled, as space is not implemented in DAOS C API yet. if size != pool_info.pi_space: self.d_log.error("expected size {0} did not match actual size {1}" .format(size, pool_info.pi_space)) self.fail("expected size {0} did not match actual size {1}" .format(size, pool_info.pi_space)) ''' # number of targets if pool_info.pi_ntargets != len(self.hostlist): self.d_log.error("found number of targets in pool did not match " "expected number, 1. num targets: {0}".format( pool_info.pi_ntargets)) self.fail("found number of targets in pool did not match " "expected number, 1. num targets: {0}".format( pool_info.pi_ntargets)) # number of disabled targets if pool_info.pi_ndisabled > 0: self.d_log.error("found disabled targets, none expected to be") self.fail("found disabled targets, none expected to be disabled") # mode if pool_info.pi_mode != mode: self.d_log.error( "found different mode than expected. expected {0}, " "found {1}.".format(mode, pool_info.pi_mode)) self.fail("found different mode than expected. expected {0}, " "found {1}.".format(mode, pool_info.pi_mode)) # uid if pool_info.pi_uid != uid: self.d_log.error( "found actual pool uid {0} does not match expected " "uid {1}".format(pool_info.pi_uid, uid)) self.fail("found actual pool uid {0} does not match expected uid " "{1}".format(pool_info.pi_uid, uid)) # gid if pool_info.pi_gid != gid: self.d_log.error( "found actual pool gid {0} does not match expected " "gid {1}".format(pool_info.pi_gid, gid)) self.fail("found actual pool gid {0} does not match expected gid " "{1}".format(pool_info.pi_gid, gid))
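Every pool_query check in this test repeats the same d_log.error/self.fail pair. The assertions can be table-driven instead; a sketch of a method that could live on this class, using only fields the test already reads:

def check_pool_info(self, pool_info, expected):
    """Compare selected pool_info fields against expected values.

    expected: dict mapping attribute name to expected value, e.g.
    {"pi_ntargets": len(self.hostlist), "pi_ndisabled": 0}
    """
    for attr, want in sorted(expected.items()):
        got = getattr(pool_info, attr)
        if got != want:
            msg = "{0}: expected {1}, found {2}".format(attr, want, got)
            self.d_log.error(msg)
            self.fail(msg)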
class DestroyRebuild(Test): """ Test Class Description: This test verifies destruction of a pool that is rebuilding. :avocado: tags=pool,pooldestroy,rebuild,desreb """ build_paths = [] server_group = "" context = None pool = None hostfile = "" def setUp(self): """ setup for the test """ self.agent_sessions = None # get paths from the build_vars generated by build with open('../../../.build_vars.json') as build_file: build_paths = json.load(build_file) self.context = DaosContext(build_paths['PREFIX'] + '/lib/') self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") # generate a hostfile self.hostlist = self.params.get("test_machines", '/run/hosts/') self.hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) # fire up the DAOS servers self.server_group = self.params.get("name", '/run/server_config/', 'daos_server') self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist) server_utils.run_server(self.hostfile, self.server_group, build_paths['PREFIX'] + '/../') # create a pool to test with createmode = self.params.get("mode", '/run/pool/createmode/') createuid = self.params.get("uid", '/run/pool/createuid/') creategid = self.params.get("gid", '/run/pool/creategid/') createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid) self.pool.get_uuid_str() time.sleep(2) def tearDown(self): """ cleanup after the test """ try: os.remove(self.hostfile) if self.pool: self.pool.destroy(1) finally: if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) server_utils.stop_server(hosts=self.hostlist) def test_destroy_while_rebuilding(self): """ :avocado: tags=pool,pooldestroy,rebuild,desreb """ try: print("\nsetup complete, starting test\n") # create a server object that references one of our pool target hosts # and then kill it svr_to_kill = int(self.params.get("rank_to_kill", '/run/testparams/ranks/')) server = DaosServer(self.context, bytes(self.server_group), svr_to_kill) print("created server ") # BUG if you don't connect the rebuild doesn't start correctly self.pool.connect(1 << 1) status = self.pool.pool_query() if not status.pi_ntargets == len(self.hostlist): self.fail("target count wrong.\n") if not status.pi_ndisabled == 0: self.fail("disabled target count wrong.\n") print("connect ") time.sleep(1) server.kill(1) print("killed server ") # exclude the target from the dead server self.pool.exclude([svr_to_kill]) print("exclude target ") #self.pool.disconnect() #print "disconnect " # the rebuild won't take long since there is no data so do # the destroy quickly self.pool.destroy(1) print("destroy ") except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Expecting to pass but test has failed.\n")
class ObjectMetadata(TestWithServers): """Test class for metadata testing. Test Class Description: Test the general Metadata operations and boundary conditions. :avocado: recursive """ def __init__(self, *args, **kwargs): """Initialize an ObjectMetadata object.""" super(ObjectMetadata, self).__init__(*args, **kwargs) self.out_queue = None def setUp(self): """Set up each test case.""" # Start the servers and agents super(ObjectMetadata, self).setUp() # Recreate the client hostfile without slots defined self.hostfile_clients = write_host_file(self.hostlist_clients, self.workdir, None) # Create a pool self.pool = DaosPool(self.context) self.pool.create( self.params.get("mode", '/run/pool/createmode/*'), os.geteuid(), os.getegid(), self.params.get("scm_size", '/run/pool/createsize/*'), self.params.get("setname", '/run/pool/createset/*'), None, None, self.params.get("svcn", '/run/pool/createsvc/*'), self.params.get("nvme_size", '/run/pool/createsize/*')) def tearDown(self): """Tear down each test case.""" try: if self.pool is not None: self.pool.destroy(1) finally: super(ObjectMetadata, self).tearDown() def thread_control(self, threads, operation): """Start threads and wait until all threads are finished. Args: threads (list): list of threads to execute operation (str): IOR operation, e.g. "read" or "write" Returns: str: "PASS" if all threads completed successfully; "FAIL" otherwise """ self.d_log.debug("IOR {0} Threads Started -----".format(operation)) for thrd in threads: thrd.start() for thrd in threads: thrd.join() while not self.out_queue.empty(): if self.out_queue.get() == "FAIL": return "FAIL" self.d_log.debug("IOR {0} Threads Finished -----".format(operation)) return "PASS" @skipForTicket("DAOS-1936/DAOS-1946") def test_metadata_fillup(self): """JIRA ID: DAOS-1512. Test Description: Test to verify no IO happens after metadata is full. Use Cases: ? :avocado: tags=metadata,metadata_fill,nvme,small """ self.pool.connect(2) container = DaosContainer(self.context) self.d_log.debug("Fillup Metadata....") for _cont in range(NO_OF_MAX_CONTAINER): container.create(self.pool.handle) # This should fail with no Metadata space Error. self.d_log.debug("Metadata Overload...") try: for _cont in range(250): container.create(self.pool.handle) self.fail("Test expected to fail with a no metadata space error") except DaosApiError as exe: print(exe, traceback.format_exc()) return self.fail("Test was expected to fail but it passed.\n") @skipForTicket("DAOS-1965") @avocado.fail_on(DaosApiError) def test_metadata_addremove(self): """JIRA ID: DAOS-1512. Test Description: Verify metadata releases the space after container delete. Use Cases: ? :avocado: tags=metadata,metadata_free_space,nvme,small """ self.pool.connect(2) for k in range(10): container_array = [] self.d_log.debug("Container Create Iteration {}".format(k)) for cont in range(NO_OF_MAX_CONTAINER): container = DaosContainer(self.context) container.create(self.pool.handle) container_array.append(container) self.d_log.debug("Container Remove Iteration {} ".format(k)) for cont in container_array: cont.destroy() @avocado.fail_on(DaosApiError) def test_metadata_server_restart(self): """JIRA ID: DAOS-1512. Test Description: This test will verify 2000 small-size IOR containers after server restart. Test will write IOR in 5 different threads for faster execution time. Each thread will create 400 (8bytes) containers to the same pool. Restart the servers, read IOR container file written previously and validate data integrity by using IOR option "-R -G 1".
Use Cases: ? :avocado: tags=metadata,metadata_ior,nvme,small """ files_per_thread = 400 total_ior_threads = 5 self.out_queue = Queue.Queue() processes = self.params.get("slots", "/run/ior/clientslots/*") list_of_uuid_lists = [[ str(uuid.uuid4()) for _ in range(files_per_thread) ] for _ in range(total_ior_threads)] # Launch threads to run IOR to write data, restart the agents and # servers, and then run IOR to read the data for operation in ("write", "read"): # Create the IOR threads threads = [] for index in range(total_ior_threads): # Define the arguments for the ior_runner_thread method ior_cmd = IorCommand() ior_cmd.get_params(self) ior_cmd.set_daos_params(self.server_group, self.pool) ior_cmd.flags.value = self.params.get( "F", "/run/ior/ior{}flags/".format(operation)) # Add a thread for these IOR arguments threads.append( threading.Thread(target=ior_runner_thread, kwargs={ "ior_cmd": ior_cmd, "uuids": list_of_uuid_lists[index], "mgr": self.orterun, "attach": self.tmp, "hostfile": self.hostfile_clients, "procs": processes, "results": self.out_queue })) self.log.info("Created %s thread %s with container uuids %s", operation, index, list_of_uuid_lists[index]) # Launch the IOR threads if self.thread_control(threads, operation) == "FAIL": self.d_log.error("IOR {} Thread FAIL".format(operation)) self.fail("IOR {} Thread FAIL".format(operation)) # Restart the agents and servers after the write / before the read if operation == "write": # Stop the agents and servers if self.agent_sessions: stop_agent(self.agent_sessions, self.hostlist_clients) stop_server(hosts=self.hostlist_servers) # Start the agents self.agent_sessions = run_agent(self.basepath, self.hostlist_clients, self.hostlist_servers) # Start the servers run_server(self.hostfile_servers, self.server_group, self.basepath, clean=False)
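ior_runner_thread is defined elsewhere in this module; the kwargs above pin down its contract: run the prepared IorCommand once per container uuid and report PASS or FAIL through the shared queue. A sketch consistent with those kwargs (the body, and especially how the IorCommand is pointed at a container and launched, are assumptions):

def ior_runner_thread(ior_cmd, uuids, mgr, attach, hostfile, procs, results):
    """Run ior_cmd once per container uuid; report to the results queue."""
    for cont_uuid in uuids:
        # pointing the command at the container this way is an assumption
        ior_cmd.daos_cont.value = cont_uuid
        try:
            ior_cmd.run(mgr, attach, procs, hostfile)
        except Exception:
            results.put("FAIL")
            return
    results.put("PASS")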
class FullPoolContainerCreate(Test): """ Class for test to create a container in a pool with no remaining free space. """ def setUp(self): self.agent_sessions = None # get paths from the build_vars generated by build with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../../.build_vars.json")) as build_file: build_paths = json.load(build_file) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.server_group = self.params.get("name", '/server_config/', 'daos_default_oops') self.context = DaosContext(build_paths['PREFIX'] + '/lib/') self.cont = None self.cont2 = None self.pool = DaosPool(self.context) self.d_log = DaosLog(self.context) self.hostlist = self.params.get("test_machines1", '/hosts/') self.hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist) server_utils.run_server(self.hostfile, self.server_group, self.basepath) def tearDown(self): # shut 'er down """ wrap pool destroy in a try; in case pool create didn't succeed, we still need the server to be shut down in any case """ try: self.pool.destroy(1) finally: if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) server_utils.stop_server(hosts=self.hostlist) def test_no_space_cont_create(self): """ :avocado: tags=pool,cont,fullpoolcontcreate,small,vm """ # full storage rc err = "-1007" err2 = "-1009" # probably should be -1007, revisit later # create pool mode = self.params.get("mode", '/conttests/createmode/') self.d_log.debug("mode is {0}".format(mode)) uid = os.geteuid() gid = os.getegid() # 16 mb pool, minimum size currently possible size = 16777216 self.d_log.debug("creating pool") self.pool.create(mode, uid, gid, size, self.server_group, None) self.d_log.debug("created pool") # connect to the pool self.d_log.debug("connecting to pool") self.pool.connect(1 << 1) self.d_log.debug("connected to pool") # query the pool self.d_log.debug("querying pool info") dummy_pool_info = self.pool.pool_query() self.d_log.debug("queried pool info") # create a container try: self.d_log.debug("creating container") self.cont = DaosContainer(self.context) self.cont.create(self.pool.handle) self.d_log.debug("created container") except DaosApiError as excep: self.d_log.error("caught exception creating container: " "{0}".format(excep)) self.fail("caught exception creating container: {0}".format(excep)) self.d_log.debug("opening container") self.cont.open() self.d_log.debug("opened container") # generate random dkey, akey each time # write 1mb until no space, then 1kb, etc.
to fill pool quickly for obj_sz in [1048576, 1024, 1]: write_count = 0 while True: self.d_log.debug("writing obj {0}, sz {1} to " "container".format(write_count, obj_sz)) my_str = "a" * obj_sz my_str_sz = obj_sz dkey = ( ''.join(random.choice(string.ascii_lowercase) for i in range(5))) akey = ( ''.join(random.choice(string.ascii_lowercase) for i in range(5))) try: dummy_oid, dummy_tx = self.cont.write_an_obj(my_str, my_str_sz, dkey, akey, obj_cls=1) self.d_log.debug("wrote obj {0}, sz {1}".format(write_count, obj_sz)) write_count += 1 except DaosApiError as excep: if not (err in repr(excep) or err2 in repr(excep)): self.d_log.error("caught exception while writing " "object: {0}".format(repr(excep))) self.fail("caught exception while writing object: {0}" .format(repr(excep))) else: self.d_log.debug("pool is too full for {0} byte " "objects".format(obj_sz)) break self.d_log.debug("closing container") self.cont.close() self.d_log.debug("closed container") # create a 2nd container now that pool is full try: self.d_log.debug("creating 2nd container") self.cont2 = DaosContainer(self.context) self.cont2.create(self.pool.handle) self.d_log.debug("created 2nd container") self.d_log.debug("opening container 2") self.cont2.open() self.d_log.debug("opened container 2") self.d_log.debug("writing one more object, write expected to fail") self.cont2.write_an_obj(my_str, my_str_sz, dkey, akey, obj_cls=1) self.fail("wrote one more object after pool was completely filled," " this should never print") except DaosApiError as excep: if not (err in repr(excep) or err2 in repr(excep)): self.d_log.error("caught unexpected exception while " "writing object: {0}".format(repr(excep))) self.fail("caught unexpected exception while writing " "object: {0}".format(repr(excep))) else: self.d_log.debug("correctly caught -1007 while attempting " "to write object in full pool")
def test_container_basics(self): """ Test basic container create/destroy/open/close/query. Nothing fancy just making sure they work at a rudimentary level :avocado: tags=container,containercreate,containerdestroy,basecont """ pool = None hostlist = None try: hostlist = self.params.get("test_machines", '/run/hosts/*') hostfile = write_host_file.write_host_file(hostlist, self.workdir) self.agent_sessions = agent_utils.run_agent(self.basepath, hostlist) server_utils.run_server(hostfile, self.server_group, self.basepath) # give it time to start time.sleep(2) # parameters used in pool create createmode = self.params.get("mode", '/run/conttests/createmode/') createuid = self.params.get("uid", '/run/conttests/createuid/') creategid = self.params.get("gid", '/run/conttests/creategid/') createsetid = self.params.get("setname", '/run/conttests/createset/') createsize = self.params.get("size", '/run/conttests/createsize/') # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) # need a connection to create container pool.connect(1 << 1) # create a container container = DaosContainer(self.context) container.create(pool.handle) # now open it container.open() # do a query and compare the UUID returned from create with # that returned by query container.query() if container.get_uuid_str() != c_uuid_to_str( container.info.ci_uuid): self.fail("Container UUID did not match the one in info\n") container.close() # wait a few seconds and then destroy time.sleep(5) container.destroy() except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Test was expected to pass but it failed.\n") except Exception as excep: self.fail("Daos code segfaulted most likely, error: %s" % excep) finally: # cleanup the pool if pool is not None: pool.disconnect() pool.destroy(1) if self.agent_sessions: agent_utils.stop_agent(self.agent_sessions) server_utils.stop_server(hosts=hostlist)
class CreateManyDkeys(Test): """ Test Class Description: Tests that create large numbers of keys in objects/containers and then destroy the containers and verify the space has been reclaimed. """ def setUp(self): self.agent_sessions = None with open('../../../.build_vars.json') as json_f: build_paths = json.load(json_f) basepath = os.path.normpath(build_paths['PREFIX'] + "/../") server_group = self.params.get("name", '/server_config/', 'daos_server') self.context = DaosContext(build_paths['PREFIX'] + '/lib/') self.container = None self.hostlist = self.params.get("test_machines", '/run/hosts/*') self.hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist) server_utils.run_server(self.hostfile, server_group, basepath) self.pool = DaosPool(self.context) self.pool.create(self.params.get("mode", '/run/pool/createmode/*'), os.geteuid(), os.getegid(), self.params.get("size", '/run/pool/createsize/*'), self.params.get("setname", '/run/pool/createset/*'), None) self.pool.connect(1 << 1) def tearDown(self): try: if self.hostfile is not None: os.remove(self.hostfile) if self.pool: self.pool.destroy(1) finally: if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) server_utils.stop_server(hosts=self.hostlist) def write_a_bunch_of_values(self, how_many): """ Write data to an object, each with a dkey and akey. The how_many parameter determines how many key:value pairs are written. """ self.container = DaosContainer(self.context) self.container.create(self.pool.handle) self.container.open() ioreq = IORequest(self.context, self.container, None) epoch = self.container.get_new_epoch() c_epoch = ctypes.c_uint64(epoch) print("Started Writing the Dataset-----------\n") inc = 50000 last_key = inc for key in range(how_many): c_dkey = ctypes.create_string_buffer("dkey {0}".format(key)) c_akey = ctypes.create_string_buffer("akey {0}".format(key)) c_value = ctypes.create_string_buffer( "some data that gets stored with the key {0}".format(key)) c_size = ctypes.c_size_t(ctypes.sizeof(c_value)) ioreq.single_insert(c_dkey, c_akey, c_value, c_size, c_epoch) if key > last_key: print("written: {}".format(key)) sys.stdout.flush() last_key = key + inc self.container.commit_epoch(c_epoch) print("Started Verification of the Dataset-----------\n") last_key = inc for key in range(how_many): c_dkey = ctypes.create_string_buffer("dkey {0}".format(key)) c_akey = ctypes.create_string_buffer("akey {0}".format(key)) the_data = "some data that gets stored with the key {0}".format(key) val = ioreq.single_fetch(c_dkey, c_akey, len(the_data)+1, c_epoch) if the_data != (repr(val.value)[1:-1]): self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, " "Expected Value={2} and Received Value={3}\n" .format("dkey {0}".format(key), "akey {0}".format(key), the_data, repr(val.value)[1:-1])) if key > last_key: print("verified: {}".format(key)) sys.stdout.flush() last_key = key + inc print("starting destroy") self.container.close() self.container.destroy() print("destroy complete") @avocado.fail_on(DaosApiError) @avocado.skip("Skipping until DAOS-1721 is fixed.") def test_many_dkeys(self): """ Test ID: DAOS-1701 Test Description: Test many dkeys in the same object. Use Cases: 1. large key counts 2.
space reclamation after destroy :avocado: tags=object,vm,many_dkeys """ no_of_dkeys = self.params.get("number_of_dkeys", '/run/dkeys/') # write a lot of individual data items, verify them, then destroy self.write_a_bunch_of_values(no_of_dkeys) # do it again, which should verify the first container # was truly destroyed because a second round won't fit otherwise self.write_a_bunch_of_values(no_of_dkeys)
class ContainerAttributeTest(TestWithServers): """ Tests DAOS container attribute get/set/list. :avocado: recursive """ def setUp(self): super(ContainerAttributeTest, self).setUp() self.large_data_set = {} self.pool = DaosPool(self.context) self.pool.create( self.params.get("mode", '/run/attrtests/createmode/*'), os.geteuid(), os.getegid(), self.params.get("size", '/run/attrtests/createsize/*'), self.params.get("setname", '/run/attrtests/createset/*'), None) self.pool.connect(1 << 1) poh = self.pool.handle self.container = DaosContainer(self.context) self.container.create(poh) self.container.open() def tearDown(self): try: if self.container: self.container.close() finally: super(ContainerAttributeTest, self).tearDown() def create_data_set(self): """ To create the large attribute dictionary """ allchar = string.ascii_letters + string.digits for i in range(1024): self.large_data_set[str(i)] = ("".join( random.choice(allchar) for x in range(random.randint(1, 100)))) def test_container_attribute(self): """ Test basic container attribute tests. :avocado: tags=container,container_attr,attribute,sync_conattribute """ expected_for_param = [] name = self.params.get("name", '/run/attrtests/name_handles/*/') expected_for_param.append(name[1]) value = self.params.get("value", '/run/attrtests/value_handles/*/') expected_for_param.append(value[1]) attr_dict = {name[0]: value[0]} if name[0] is not None: if "largenumberofattr" in name[0]: self.create_data_set() attr_dict = self.large_data_set attr_dict[name[0]] = value[0] expected_result = 'PASS' for result in expected_for_param: if result == 'FAIL': expected_result = 'FAIL' break try: self.container.set_attr(data=attr_dict) size, buf = self.container.list_attr() verify_list_attr(attr_dict, size, buf) # Request something that doesn't exist if name[0] is not None and "Negative" in name[0]: name[0] = "rubbish" results = {} results = self.container.get_attr([name[0]]) # for this test the dictionary has been altered, need to just # set it to what we are expecting to get back if name[0] is not None: if "largenumberofattr" in name[0]: attr_dict.clear() attr_dict[name[0]] = value[0] verify_get_attr(attr_dict, results) if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except (DaosApiError, DaosTestError) as excep: print(excep) print(traceback.format_exc()) if expected_result == 'PASS': self.fail("Test was expected to pass but it failed.\n") def test_container_attribute_asyn(self): """ Test basic container attribute tests. 
:avocado: tags=container,container_attr,attribute,async_conattribute """ global GLOB_SIGNAL global GLOB_RC expected_for_param = [] name = self.params.get("name", '/run/attrtests/name_handles/*/') expected_for_param.append(name[1]) value = self.params.get("value", '/run/attrtests/value_handles/*/') expected_for_param.append(value[1]) attr_dict = {name[0]: value[0]} if name[0] is not None: if "largenumberofattr" in name[0]: self.create_data_set() attr_dict = self.large_data_set attr_dict[name[0]] = value[0] expected_result = 'PASS' for result in expected_for_param: if result == 'FAIL': expected_result = 'FAIL' break try: GLOB_SIGNAL = threading.Event() self.container.set_attr(data=attr_dict, cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0 and expected_result in ['PASS']: self.fail("RC not as expected after set_attr First {0}".format( GLOB_RC)) GLOB_SIGNAL = threading.Event() size, buf = self.container.list_attr(cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0 and expected_result in ['PASS']: self.fail( "RC not as expected after list_attr First {0}".format( GLOB_RC)) if expected_result in ['PASS']: verify_list_attr(attr_dict, size, buf, mode="async") # Request something that doesn't exist if name[0] is not None and "Negative" in name[0]: name[0] = "rubbish" GLOB_SIGNAL = threading.Event() self.container.get_attr([name[0]], cb_func=cb_func) GLOB_SIGNAL.wait() if GLOB_RC != 0 and expected_result in ['PASS']: self.fail( "RC not as expected after get_attr {0}".format(GLOB_RC)) # not verifying the get_attr since it's not available asynchronously if value[0] is not None: if GLOB_RC == 0 and expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as excep: print(excep) print(traceback.format_exc()) if expected_result == 'PASS': self.fail("Test was expected to pass but it failed.\n")
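verify_list_attr and verify_get_attr come from a shared helper module; as used here their job is simply to compare what the API returned against attr_dict. A minimal sketch of the get_attr side under that assumption (the real helper may differ):

def verify_get_attr(attr_dict, results):
    """Check every attribute fetched via get_attr matches what was set."""
    for name, value in attr_dict.items():
        if results.get(name) != value:
            raise DaosTestError(
                "attribute {0}: expected {1}, got {2}".format(
                    name, value, results.get(name)))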
def test_rebuild_with_io(self): """ Test ID: Rebuild-003 Test Description: Trigger a rebuild while I/O is ongoing. Use Cases: -- single pool, single client performing continuous read/write/verify sequence while failure/rebuild is triggered in another process :avocado: tags=pool,rebuild,rebuildwithio """ # the rebuild tests need to redo this stuff each time so not in setup # as it usually would be server_group = self.params.get("name", '/server_config/', 'daos_server') basepath = os.path.normpath(self.build_paths['PREFIX'] + "/../") self.hostlist = self.params.get("test_machines", '/run/hosts/') hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) try: self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist) server_utils.run_server(hostfile, server_group, basepath) # use the uid/gid of the user running the test, these should # be perfectly valid createuid = os.geteuid() creategid = os.getegid() # parameters used in pool create that are in yaml createmode = self.params.get("mode", '/run/testparams/createmode/') createsetid = self.params.get("setname", '/run/testparams/createset/') createsize = self.params.get("size", '/run/testparams/createsize/') # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) pool.connect(1 << 1) container = DaosContainer(self.context) container.create(pool.handle) container.open() # get pool status and make sure it all looks good before we start pool.pool_query() if pool.pool_info.pi_ndisabled != 0: self.fail("Number of disabled targets reporting incorrectly.\n") if pool.pool_info.pi_rebuild_st.rs_errno != 0: self.fail("Rebuild error but rebuild hasn't run.\n") if pool.pool_info.pi_rebuild_st.rs_done != 1: self.fail("Rebuild is running but device hasn't failed yet.\n") if pool.pool_info.pi_rebuild_st.rs_obj_nr != 0: self.fail("Rebuilt objs not zero.\n") if pool.pool_info.pi_rebuild_st.rs_rec_nr != 0: self.fail("Rebuilt recs not zero.\n") dummy_pool_version = pool.pool_info.pi_rebuild_st.rs_version # do I/O for 30 seconds dummy_bw = io_utilities.continuous_io(container, 30) # trigger the rebuild rank = self.params.get("rank", '/run/testparams/ranks/*') server = DaosServer(self.context, server_group, rank) server.kill(1) pool.exclude([rank]) # do another 30 seconds of I/O, # waiting for some improvements in server bootstrap # at which point we can move the I/O to a separate client and # really pound it with I/O dummy_bw = io_utilities.continuous_io(container, 30) # wait for the rebuild to finish while True: pool.pool_query() if pool.pool_info.pi_rebuild_st.rs_done == 1: break else: time.sleep(2) # check rebuild statistics if pool.pool_info.pi_ndisabled != 1: self.fail("Number of disabled targets reporting incorrectly: {}" .format(pool.pool_info.pi_ndisabled)) if pool.pool_info.pi_rebuild_st.rs_errno != 0: self.fail("Rebuild error reported: {}".format( pool.pool_info.pi_rebuild_st.rs_errno)) if pool.pool_info.pi_rebuild_st.rs_obj_nr <= 0: self.fail("No objects have been rebuilt.") if pool.pool_info.pi_rebuild_st.rs_rec_nr <= 0: self.fail("No records have been rebuilt.") except (ValueError, DaosApiError) as excep: print(excep) print(traceback.format_exc()) self.fail("Expecting to pass but test has failed.\n") finally: # wait for the I/O process to finish try: server_utils.stop_server(hosts=self.hostlist) os.remove(hostfile) # really make sure everything is gone check_for_pool.cleanup_pools(self.hostlist) finally:
if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) server_utils.kill_server(self.hostlist)
class OpenContainerTest(Test): """ Tests DAOS container bad create (non existing pool handle, bad uuid) and close. :avocado: tags=container,containeropen """ def setUp(self): # get paths from the build_vars generated by build with open('../../../.build_vars.json') as f: build_paths = json.load(f) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.tmp = build_paths['PREFIX'] + '/tmp' self.server_group = self.params.get("server_group",'/server/', 'daos_server') # setup the DAOS python API self.Context = DaosContext(build_paths['PREFIX'] + '/lib/') self.POOL1 = None self.POOL2 = None self.CONTAINER1 = None self.CONTAINER2 = None self.hostfile = None self.hostlist = self.params.get("test_machines",'/run/hosts/*') self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp) # common parameters used in pool create self.createmode = self.params.get("mode",'/run/createtests/createmode/') self.createsetid = self.params.get("setname",'/run/createtests/createset/') self.createsize = self.params.get("size",'/run/createtests/createsize/') # POOL 1 UID GID self.createuid1 = self.params.get("uid",'/run/createtests/createuid1/') self.creategid1 = self.params.get("gid",'/run/createtests/creategid1/') # POOL 2 UID GID self.createuid2 = self.params.get("uid",'/run/createtests/createuid2/') self.creategid2 = self.params.get("gid",'/run/createtests/creategid2/') ServerUtils.runServer(self.hostfile, self.server_group, self.basepath) def tearDown(self): try: if self.CONTAINER1 is not None: self.CONTAINER1.destroy() if self.CONTAINER2 is not None: self.CONTAINER2.destroy() if self.POOL1 is not None and self.POOL1.attached: self.POOL1.destroy(1) if self.POOL2 is not None and self.POOL2.attached: self.POOL2.destroy(1) finally: ServerUtils.stopServer(hosts=self.hostlist) def test_container_open(self): """ Test basic container bad create. 
:avocado: tags=container,containeropen """ expected_for_param = [] uuidlist = self.params.get("uuid",'/run/createtests/uuids/*/') containerUUID = uuidlist[0] expected_for_param.append(uuidlist[1]) pohlist = self.params.get("poh",'/run/createtests/handles/*/') poh = pohlist[0] expected_for_param.append(pohlist[1]) expected_result = 'PASS' for result in expected_for_param: if result == 'FAIL': expected_result = 'FAIL' break try: # create two pools and try to create containers in these pools self.POOL1 = DaosPool(self.Context) self.POOL1.create(self.createmode, self.createuid1, self.creategid1, self.createsize, self.createsetid, None) self.POOL2 = DaosPool(self.Context) self.POOL2.create(self.createmode, self.createuid2, self.creategid2, self.createsize, None, None) # Connect to the pools self.POOL1.connect(1 << 1) self.POOL2.connect(1 << 1) # defines pool handle for container open if pohlist[0] == 'POOL1': poh = self.POOL1.handle else: poh = self.POOL2.handle # Create a container in POOL1 self.CONTAINER1 = DaosContainer(self.Context) self.CONTAINER1.create(self.POOL1.handle) # defines test UUID for container open if uuidlist[0] == 'POOL1': struuid = self.CONTAINER1.get_uuid_str() containerUUID = uuid.UUID(struuid) else: if uuidlist[0] == 'MFUUID': containerUUID = "misformed-uuid-0000" else: containerUUID = uuid.uuid4() # random uuid # tries to open the container1 # open should be ok only if poh = POOL1.handle && containerUUID = CONTAINER1.uuid self.CONTAINER1.open(poh, containerUUID) # wait a few seconds and then destroy containers time.sleep(5) self.CONTAINER1.close() self.CONTAINER1.destroy() self.CONTAINER1 = None # cleanup the pools self.POOL1.disconnect() self.POOL1.destroy(1) self.POOL1 = None self.POOL2.disconnect() self.POOL2.destroy(1) self.POOL2 = None if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as e: print(e) print(traceback.format_exc()) if expected_result == 'PASS': self.fail("Test was expected to pass but it failed.\n") finally: if self.hostfile is not None: os.remove(self.hostfile)
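The [value, expectation] pairs pulled from the yaml drive a pattern that recurs throughout these tests: collect each parameter's PASS/FAIL tag and expect the whole test to fail if any tag is FAIL. The accumulation loop reduces to one expression; a sketch:

def expected_outcome(expected_for_param):
    """Return 'FAIL' if any parameter expects failure, else 'PASS'."""
    return 'FAIL' if 'FAIL' in expected_for_param else 'PASS'

# equivalent to the loop above:
#     expected_result = expected_outcome([uuidlist[1], pohlist[1]])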
class OpenClose(Test): """ Tests DAOS container open/close function with handle parameter. """ def __init__(self, *args, **kwargs): super(OpenClose, self).__init__(*args, **kwargs) self.container1 = None self.container2 = None def setUp(self): # these are first since they are referenced in teardown self.pool = None self.hostlist = None self.hostlist = self.params.get("test_servers", '/run/hosts/') # get paths from the build_vars generated by build with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../../.build_vars.json')) as build_file: build_paths = json.load(build_file) self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../") self.server_group = self.params.get("name", '/server_config/', 'daos_server') # setup the DAOS python API self.context = DaosContext(build_paths['PREFIX'] + '/lib/') self.hostfile = write_host_file.write_host_file(self.hostlist, self.workdir) self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist) server_utils.run_server(self.hostfile, self.server_group, self.basepath) def tearDown(self): try: if self.pool is not None and self.pool.attached: self.pool.destroy(1) finally: try: if self.agent_sessions: AgentUtils.stop_agent(self.hostlist, self.agent_sessions) server_utils.stop_server(hosts=self.hostlist) except server_utils.ServerFailed: pass def test_closehandle(self): """ Test container close function with container handle parameter. :avocado: tags=container,openclose,closehandle """ saved_coh = None # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/') createuid = os.geteuid() creategid = os.getegid() createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') coh_params = self.params.get("coh", '/run/container/container_handle/*/') expected_result = coh_params[1] try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None) poh = self.pool.handle self.pool.connect(1 << 1) # Container initialization and creation self.container1 = DaosContainer(self.context) self.container1.create(poh) str_cuuid = self.container1.get_uuid_str() cuuid = uuid.UUID(str_cuuid) self.container1.open(poh, cuuid, 2, None) # Defining 'good' and 'bad' container handles saved_coh = self.container1.coh if coh_params[0] == 'GOOD': coh = self.container1.coh else: # create a second container, open to get a handle # then close & destroy so handle is invalid self.container2 = DaosContainer(self.context) self.container2.create(poh) self.container2.open(poh, cuuid, 2, None) coh = self.container2.coh self.container2.close() self.container2.destroy() # close container with either good or bad handle self.container1.close(coh) if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as excep: if expected_result == 'PASS': print(excep) print(traceback.format_exc()) self.fail("Test was expected to pass but it failed.\n") # close above failed so close for real with the right coh if saved_coh is not None: self.container1.close(saved_coh) finally: self.container1.destroy(1) self.pool.disconnect() self.pool.destroy(1) self.pool = None
def test_container_basics(self): """ Test basic container create/destroy/open/close/query. Nothing fancy just making sure they work at a rudimentary level :avocado: tags=container,containercreate,containerdestroy,basecont """ pool = None hostlist = None try: hostlist = self.params.get("test_machines", '/run/hosts/*') hostfile = write_host_file.write_host_file(hostlist, self.workdir) self.agent_sessions = AgentUtils.run_agent(self.basepath, hostlist) server_utils.run_server(hostfile, self.server_group, self.basepath) # give it time to start time.sleep(2) # parameters used in pool create createmode = self.params.get("mode", '/run/conttests/createmode/') createuid = self.params.get("uid", '/run/conttests/createuid/') creategid = self.params.get("gid", '/run/conttests/creategid/') createsetid = self.params.get("setname", '/run/conttests/createset/') createsize = self.params.get("size", '/run/conttests/createsize/') # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) # need a connection to create container pool.connect(1 << 1) # create a container container = DaosContainer(self.context) container.create(pool.handle) # now open it container.open() # do a query and compare the UUID returned from create with # that returned by query container.query() if container.get_uuid_str() != c_uuid_to_str( container.info.ci_uuid): self.fail("Container UUID did not match the one in info\n") container.close() # wait a few seconds and then destroy time.sleep(5) container.destroy() except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Test was expected to pass but it failed.\n") except Exception as excep: self.fail("Daos code segfaulted most likely, error: %s" % excep) finally: # cleanup the pool if pool is not None: pool.disconnect() pool.destroy(1) if self.agent_sessions: AgentUtils.stop_agent(hostlist, self.agent_sessions) server_utils.stop_server(hosts=hostlist)
def test_null_values(self): """ Test ID: DAOS-1376 Test Description: Pass a dkey and an akey that is null. :avocado: tags=object,objupdate,objupdatenull,regression,vm,small """ try: # parameters used in pool create createmode = self.params.get("mode", '/run/conttests/createmode/') createsetid = self.params.get("setname", '/run/conttests/createset/') createsize = self.params.get("size", '/run/conttests/createsize/') createuid = os.geteuid() creategid = os.getegid() # initialize a python pool object then create the underlying # daos storage pool = DaosPool(self.context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) self.plog.info("Pool %s created.", pool.get_uuid_str()) # need a connection to create container pool.connect(1 << 1) # create a container container = DaosContainer(self.context) container.create(pool.handle) self.plog.info("Container %s created.", container.get_uuid_str()) # now open it container.open() # data used in the test thedata = "a string that I want to stuff into an object" thedatasize = len(thedata) + 1 except DaosApiError as excep: print(excep) print(traceback.format_exc()) self.fail("Test failed during setup.\n") try: # try using a null dkey dkey = None akey = "this is the akey" container.write_an_obj(thedata, thedatasize, dkey, akey, None, None, 2) container.close() container.destroy() pool.disconnect() pool.destroy(1) self.plog.error("Didn't get expected return code.") self.fail("Test was expected to return a -1003 but it did not.\n") except DaosApiError as excep: if '-1003' not in str(excep): container.close() container.destroy() pool.disconnect() pool.destroy(1) self.plog.error("Didn't get expected return code.") print(excep) print(traceback.format_exc()) self.fail("Test was expected to get -1003 but it did not.\n") try: # try using a null akey/io descriptor dkey = "this is the dkey" akey = None container.write_an_obj(thedata, thedatasize, dkey, akey, None, None, 2) self.fail("Test was expected to return a -1003 but it did not.\n") except DaosApiError as excep: if '-1003' not in str(excep): self.plog.error("Didn't get expected return code.") print(excep) print(traceback.format_exc()) self.fail("Test was expected to get -1003 but it did not.\n") try: # lastly try passing no data thedata = None thedatasize = 0 dkey = "this is the dkey" akey = "this is the akey" container.write_an_obj(thedata, thedatasize, dkey, akey, None, None, 2) self.plog.info("Update with no data worked") except DaosApiError as excep: container.close() container.destroy() pool.disconnect() pool.destroy(1) print(excep) print(traceback.format_exc()) self.plog.error("Update with no data failed") self.fail("Update with no data failed.\n") container.close() container.destroy() pool.disconnect() pool.destroy(1) self.plog.info("Test Complete")
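The -1003 checks above repeat the same try/except/self.fail choreography four times. The intent can be packaged once as a context manager; a sketch (expect_daos_rc is hypothetical), assuming only that DaosApiError's text contains the numeric code:

from contextlib import contextmanager

@contextmanager
def expect_daos_rc(test, code):
    """Fail the test unless the block raises DaosApiError carrying code."""
    try:
        yield
    except DaosApiError as excep:
        if str(code) not in str(excep):
            test.fail("expected {0}, got: {1}".format(code, excep))
    else:
        test.fail("expected {0} but no error was raised".format(code))

# usage sketch:
#     with expect_daos_rc(self, -1003):
#         container.write_an_obj(thedata, thedatasize, None, akey,
#                                None, None, 2)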