def create(self):
    """Create a pool.

    Destroys an existing pool if defined and assigns self.pool and
    self.uuid.
    """
    # make sure any previous pool is gone before creating a new one
    self.destroy()

    suffix = ""
    if self.target_list.value:
        suffix = " on targets {}".format(self.target_list.value)
    self.log.info("Creating a pool{}".format(suffix))

    self.pool = DaosPool(self.context)

    # mandatory create arguments
    kwargs = {
        "mode": self.mode.value,
        "uid": self.uid,
        "gid": self.gid,
        "scm_size": self.scm_size.value,
        "group": self.name.value,
    }
    # optional arguments are only passed when they have a value
    for attr_name in ("target_list", "svcn", "nvme_size"):
        attr_value = getattr(self, attr_name).value
        if attr_value:
            kwargs[attr_name] = attr_value

    self._call_method(self.pool.create, kwargs)
    self.uuid = self.pool.get_uuid_str()

    # capture the pool service replica ranks as plain ints
    self.svc_ranks = []
    for index in range(self.pool.svc.rl_nr):
        self.svc_ranks.append(int(self.pool.svc.rl_ranks[index]))

    self.log.info(" Pool created with uuid {} and svc ranks {}".format(
        self.uuid, self.svc_ranks))
def setUp(self):
    """Create the pool and an open container used by each test."""
    try:
        super(PunchTest, self).setUp()

        # parameters used in pool create
        mode = self.params.get("mode", '/run/pool/createmode/')
        setid = self.params.get("setname", '/run/pool/createset/')
        size = self.params.get("size", '/run/pool/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage, owned by the current effective uid/gid
        self.pool = DaosPool(self.context)
        self.pool.create(mode, os.geteuid(), os.getegid(), size, setid,
                         None)
        self.pool.connect(1 << 1)

        # create a container and open it for use by the test
        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

    except DaosApiError as error:
        print(error)
        print(traceback.format_exc())
        self.fail("Test failed during setup.\n")
def get_pool(context, mode, size, name, svcn=1, log=None, connect=True):
    """Return a DAOS pool that has been created and connected.

    Args:
        context (DaosContext): the context to use to create the pool
        mode (int): the pool mode
        size (int): the size of the pool
        name (str): the name of the pool
        svcn (int): the pool service leader quantity
        log (DaosLog, optional): object for logging messages. Defaults to
            None.
        connect (bool, optional): connect to the new pool. Defaults to True.

    Returns:
        DaosPool: an object representing a DAOS pool

    """
    if log:
        log.info("Creating a pool")
    # the pool is owned by the current effective uid/gid
    pool = DaosPool(context)
    pool.create(mode, os.geteuid(), os.getegid(), size, name, svcn=svcn)
    if connect:
        if log:
            log.info("Connecting to the pool")
        pool.connect(1 << 1)
    return pool
def check_handle(buf_len, iov_len, buf, uuidstr, rank):
    """Verify a global pool handle from a parent process.

    This gets run in a child process and verifies the global handle
    can be turned into a local handle in another process.

    Raises:
        DaosApiError: re-raised after logging if any DAOS call fails
    """
    try:
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)

        # setup the DAOS python API in this process
        context = DaosContext(build_paths['PREFIX'] + '/lib/')

        # rebuild the pool object from the uuid/rank passed by the parent
        pool = DaosPool(context)
        pool.set_uuid_str(uuidstr)
        pool.set_svc(rank)
        pool.group = "daos_server"

        # note that the handle is stored inside the pool as well
        dummy_local_handle = pool.global2local(context, iov_len, buf_len,
                                               buf)

        # perform some operations that will use the new handle
        pool.pool_query()
        container = DaosContainer(context)
        container.create(pool.handle)

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        raise

    return
def test_poolsvc(self): """ Test svc arg during pool create. :avocado: tags=pool,svc """ # parameters used in pool create createmode = self.params.get("mode", '/run/createtests/createmode/*/') createuid = os.geteuid() creategid = os.getegid() createsetid = self.params.get("setname", '/run/createtests/createset/') createsize = self.params.get("size", '/run/createtests/createsize/') createsvc = self.params.get("svc", '/run/createtests/createsvc/*/') expected_result = createsvc[1] try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None, None, createsvc[0]) self.pool.connect(1 << 1) # checking returned rank list for server more than 1 iterator = 0 while (int(self.pool.svc.rl_ranks[iterator]) > 0 and int(self.pool.svc.rl_ranks[iterator]) <= createsvc[0] and int(self.pool.svc.rl_ranks[iterator]) != 999999): iterator += 1 if iterator != createsvc[0]: self.fail("Length of Returned Rank list is not equal to " "the number of Pool Service members.\n") rank_list = [] for iterator in range(createsvc[0]): rank_list.append(int(self.pool.svc.rl_ranks[iterator])) if len(rank_list) != len(set(rank_list)): self.fail("Duplicate values in returned rank list") if createsvc[0] == 3: self.pool.disconnect() cmd = ('{0} kill-leader --uuid={1}'.format( self.daosctl, self.pool.get_uuid_str())) process.system(cmd) self.pool.connect(1 << 1) self.pool.disconnect() server = DaosServer(self.context, self.server_group, 2) server.kill(1) self.pool.exclude([2]) self.pool.connect(1 << 1) if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as excep: print(excep) print(traceback.format_exc()) if expected_result == 'PASS': self.fail("Test was expected to pass but it failed.\n")
def setUp(self):
    """Start the servers, then create a pool and an open container."""
    with open('../../../.build_vars.json') as json_f:
        build_paths = json.load(json_f)

    prefix = build_paths['PREFIX']
    basepath = os.path.normpath(prefix + "/../")
    tmp = prefix + '/tmp'
    server_group = self.params.get("server_group", '/server/',
                                   'daos_server')

    # set up the API context and start the servers
    self.context = DaosContext(prefix + '/lib/')
    self.hostlist = self.params.get("test_machines", '/run/hosts/*')
    self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, tmp)
    ServerUtils.runServer(self.hostfile, server_group, basepath)

    # create and connect to a pool owned by the current user
    self.pool = DaosPool(self.context)
    self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                     os.geteuid(),
                     os.getegid(),
                     self.params.get("size", '/run/pool/createsize/*'),
                     self.params.get("setname", '/run/pool/createset/*'),
                     None)
    self.pool.connect(1 << 1)

    # create a container in the pool and open it
    self.container = DaosContainer(self.context)
    self.container.create(self.pool.handle)
    self.container.open()
def setUp(self):
    """Start agents and servers, then create a pool for the test."""
    self.agent_sessions = None

    # get paths from the build_vars generated by build
    with open('../../../.build_vars.json') as build_file:
        build_paths = json.load(build_file)
    prefix = build_paths['PREFIX']
    self.context = DaosContext(prefix + '/lib/')
    self.basepath = os.path.normpath(prefix + "/../")

    # generate a hostfile
    self.hostlist_servers = self.params.get("test_machines", '/run/hosts/')
    self.hostfile_servers = write_host_file.write_host_file(
        self.hostlist_servers, self.workdir)

    # fire up the DAOS servers
    self.server_group = self.params.get("name", '/run/server_config/',
                                        'daos_server')
    self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                self.hostlist_servers)
    server_utils.run_server(self.hostfile_servers, self.server_group,
                            prefix + '/../')

    # create a pool to test with
    pool_mode = self.params.get("mode", '/run/pool/createmode/')
    pool_uid = self.params.get("uid", '/run/pool/createuid/')
    pool_gid = self.params.get("gid", '/run/pool/creategid/')
    pool_set = self.params.get("setname", '/run/pool/createset/')
    pool_size = self.params.get("size", '/run/pool/createsize/')
    self.pool = DaosPool(self.context)
    self.pool.create(pool_mode, pool_uid, pool_gid, pool_size, pool_set)
    self.pool.get_uuid_str()

    # brief pause to let the servers settle after pool create
    time.sleep(2)
def setUp(self):
    """Start the servers and create an NVMe-backed pool for the test."""
    # initialize attributes referenced by the tests and tearDown
    self.pool = None
    self.hostlist = None
    self.hostfile_clients = None
    self.hostfile = None
    self.out_queue = None
    self.pool_connect = True

    # get paths from the build_vars generated by build
    with open('../../../.build_vars.json') as json_f:
        build_paths = json.load(json_f)

    self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
    self.server_group = self.params.get("server_group", '/server/',
                                        'daos_server')
    self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
    self.d_log = DaosLog(self.context)

    # write out hostfiles for both servers and clients
    self.hostlist = self.params.get("servers", '/run/hosts/*')
    self.hostfile = WriteHostFile.WriteHostFile(self.hostlist,
                                                self.workdir)
    hostlist_clients = self.params.get("clients", '/run/hosts/*')
    self.hostfile_clients = WriteHostFile.WriteHostFile(
        hostlist_clients, self.workdir)

    # fire up the DAOS servers
    ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

    # create a pool with both SCM and NVMe storage
    self.pool = DaosPool(self.context)
    self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                     os.geteuid(),
                     os.getegid(),
                     self.params.get("size", '/run/pool/createsize/*'),
                     self.params.get("setname", '/run/pool/createset/*'),
                     nvme_size=self.params.get("size",
                                               '/run/pool/nvmesize/*'))
def setUp(self):
    """Set up the client hostfile and create the pool for the test."""
    super(LlnlMpi4pyHdf5, self).setUp()
    # initialising variables
    self.mpio = None
    self.hostfile_clients = None

    # setting client variables
    self.hostfile_clients = write_host_file.write_host_file(
        self.hostlist_clients, self.workdir, None)
    try:
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.createsvc = self.params.get("svcn", '/run/pool/createsvc/')

        # initialize a python pool object then create the underlying
        # daos storage
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid, None, None, self.createsvc)
    except (DaosApiError) as excep:
        # bug fix: the original "{1}" referenced a second format argument
        # that was never supplied, raising IndexError and masking the
        # real failure; "{}" formats the caught exception itself
        self.fail("<Test Failed at pool create> \n{}".format(excep))
def test_createasync(self):
    """
    Test container create for asynchronous mode.

    :avocado: tags=container,containerasync,createasync
    """
    global GLOB_SIGNAL
    global GLOB_RC

    # parameters used in pool create
    createmode = self.params.get("mode", '/run/createtests/createmode/*/')
    createsetid = self.params.get("setname", '/run/createtests/createset/')
    createsize = self.params.get("size", '/run/createtests/createsize/')
    createuid = os.geteuid()
    creategid = os.getegid()

    try:
        # initialize a python pool object then create the underlying
        # daos storage
        self.POOL = DaosPool(self.Context)
        self.POOL.create(createmode, createuid, creategid, createsize,
                         createsetid, None)
        poh = self.POOL.handle
        self.POOL.connect(1 << 1)

        # Container initialization and creation
        self.Container1 = DaosContainer(self.Context)
        self.Container2 = DaosContainer(self.Context)

        # async create signals completion through cb_func / GLOB_RC
        GLOB_SIGNAL = threading.Event()
        self.Container1.create(poh, None, cb_func)
        GLOB_SIGNAL.wait()
        if GLOB_RC != 0:
            self.fail("RC not as expected in async test")
        print("RC after successful Container create: ", GLOB_RC)

        # Try to recreate container after destroying pool,
        # this should fail. Checking rc after failure.
        self.POOL.destroy(1)
        GLOB_SIGNAL = threading.Event()
        GLOB_RC = -9900000
        self.Container2.create(poh, None, cb_func)
        GLOB_SIGNAL.wait()
        if GLOB_RC != -1005:
            self.fail("RC not as expected in async test")
        print("RC after Container create failed:", GLOB_RC)

        # cleanup the Pool and Container
        self.POOL = None

    except ValueError as excep:
        # bug fix: the original used Python 2 "print e" statements,
        # which are a SyntaxError under Python 3
        print(excep)
        print(traceback.format_exc())
def setUp(self): """ setup for the test """ # get paths from the build_vars generated by build with open('../../../.build_vars.json') as f: build_paths = json.load(f) self.CONTEXT = DaosContext(build_paths['PREFIX'] + '/lib/') # generate a hostfile self.hostlist = self.params.get("test_machines",'/run/hosts/') tmp = build_paths['PREFIX'] + '/tmp' self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, tmp) # fire up the DAOS servers self.server_group = self.params.get("server_group",'/run/server/', 'daos_server') ServerUtils.runServer(self.hostfile, self.server_group, build_paths['PREFIX'] + '/../') time.sleep(3) # create a pool to test with createmode = self.params.get("mode",'/run/pool/createmode/') createuid = self.params.get("uid",'/run/pool/createuid/') creategid = self.params.get("gid",'/run/pool/creategid/') createsetid = self.params.get("setname",'/run/pool/createset/') createsize = self.params.get("size",'/run/pool/createsize/') self.POOL = DaosPool(self.CONTEXT) self.POOL.create(createmode, createuid, creategid, createsize, createsetid) uuid = self.POOL.get_uuid_str() time.sleep(2)
def test_filemodification(self):
    """
    Test whether file modification happens as expected under different
    permission levels.

    :avocado: tags=pool,permission,filemodification
    """
    # parameters used in pool create
    createmode = self.params.get("mode", '/run/createtests/createmode/*/')
    createuid = self.params.get("uid", '/run/createtests/createuid/')
    creategid = self.params.get("gid", '/run/createtests/creategid/')
    createsetid = self.params.get("setname", '/run/createtests/createset/')
    createsize = self.params.get("size", '/run/createtests/createsize/')

    # bug fix: 'permissions' (and 'expected_result') were undefined when
    # createmode == 73 or an unexpected mode arrived, so the connect call
    # below raised NameError instead of the anticipated DaosApiError;
    # default to the most restrictive connect flag and expect failure
    permissions = 0
    expected_result = 'FAIL'
    if createmode == 73:
        expected_result = 'FAIL'
    elif createmode in [146, 511]:
        permissions = 1
        expected_result = 'PASS'
    elif createmode == 292:
        permissions = 2
        expected_result = 'PASS'

    try:
        # initialize a python pool object then create the underlying
        # daos storage
        self.pool = DaosPool(self.Context)
        self.d_log.debug("Pool initialisation successful")
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid, None)
        self.d_log.debug("Pool Creation successful")

        self.pool.connect(1 << permissions)
        self.d_log.debug("Pool Connect successful")

        self.container = DaosContainer(self.Context)
        self.d_log.debug("Contianer initialisation successful")
        self.container.create(self.pool.handle)
        self.d_log.debug("Container create successful")

        # now open it
        self.container.open()
        self.d_log.debug("Container open successful")

        # write some data to verify modification is permitted
        thedata = "a string that I want to stuff into an object"
        size = 45
        dkey = "this is the dkey"
        akey = "this is the akey"
        self.container.write_an_obj(thedata, size, dkey, akey)
        self.d_log.debug("Container write successful")

        if expected_result in ['FAIL']:
            self.fail("Test was expected to fail but it passed.\n")

    except DaosApiError as e:
        print(e)
        if expected_result == 'PASS':
            self.fail("Test was expected to pass but it failed.\n")
def test_destroy_connect(self):
    """
    Test destroying a pool that has a connected client with force == false.
    Should fail.

    :avocado: tags=pool,pooldestroy,x
    """
    host = self.hostlist_servers[0]
    try:
        # write out a hostfile_servers and start the servers with it
        self.hostlist_servers = self.params.get("test_machines1",
                                                '/run/hosts/')
        hostfile_servers = write_host_file.write_host_file(
            self.hostlist_servers, self.tmp)
        self.agent_sessions = agent_utils.run_agent(
            self.basepath, self.hostlist_servers)
        server_utils.run_server(hostfile_servers, self.server_group,
                                self.basepath)

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/poolparams/createmode/')
        createuid = self.params.get("uid", '/run/poolparams/createuid/')
        creategid = self.params.get("gid", '/run/poolparams/creategid/')
        createsetid = self.params.get("setname",
                                      '/run/poolparams/createset/')
        createsize = self.params.get("size", '/run/poolparams/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid, createsize,
                    createsetid, None)

        # need a connection to create container
        pool.connect(1 << 1)

        # destroy pool with connection open
        pool.destroy(0)

        # should throw an exception and not hit this
        self.fail("Shouldn't hit this line.\n")

    except DaosApiError as excep:
        print("got exception which is expected so long as it is BUSY")
        print(excep)
        print(traceback.format_exc())

        # bug fix: the original passed the bound method pool.get_uuid_str
        # itself instead of calling it, so check_for_pool received a
        # method object rather than the uuid string
        exists = check_for_pool.check_for_pool(host, pool.get_uuid_str())
        if exists != 0:
            self.fail("Pool gone, but destroy should have failed.\n")

    # no matter what happens cleanup
    finally:
        if self.agent_sessions:
            agent_utils.stop_agent(self.agent_sessions)
        server_utils.stop_server(hosts=self.hostlist_servers)
        os.remove(hostfile_servers)
def test_destroy_recreate(self):
    """
    Test destroy and recreate one right after the other multiple times
    Should fail.

    :avocado: tags=pool,pooldestroy,destroyredo
    """
    try:
        # write out a hostfile and start the servers with it
        self.hostlist = self.params.get("test_machines1", '/run/hosts/')
        hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)
        ServerUtils.runServer(hostfile, self.server_group, self.basepath)

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/poolparams/createmode/')
        createuid = self.params.get("uid", '/run/poolparams/createuid/')
        creategid = self.params.get("gid", '/run/poolparams/creategid/')
        createsetid = self.params.get("setname",
                                      '/run/poolparams/createset/')
        createsize = self.params.get("size", '/run/poolparams/createsize/')

        # initialize a python pool object, then run three back-to-back
        # create / immediate-destroy cycles against the same object
        pool = DaosPool(self.Context)
        for _ in range(3):
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            # blow it away immediately
            pool.destroy(1)

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        self.fail("create/destroy/create/destroy test failed.\n")

    except Exception as excep:
        self.fail("Daos code segfaulted most likely. Error: %s" % excep)

    # no matter what happens cleanup
    finally:
        ServerUtils.stopServer(hosts=self.hostlist)
        os.remove(hostfile)
def setUp(self):
    """Create and connect to the pool used by the test."""
    super(CreateManyDkeys, self).setUp()

    # pool creation parameters from the test yaml
    mode = self.params.get("mode", '/run/pool/createmode/*')
    size = self.params.get("size", '/run/pool/createsize/*')
    setname = self.params.get("setname", '/run/pool/createset/*')

    # create a pool owned by the current user and connect to it
    self.pool = DaosPool(self.context)
    self.pool.create(mode, os.geteuid(), os.getegid(), size, setname,
                     None)
    self.pool.connect(1 << 1)
def setUp(self):
    """Start agents and servers, then create the pool for the test."""
    # initialising variables
    self.basepath = None
    self.server_group = None
    self.context = None
    self.pool = None
    self.mpio = None
    self.agent_sessions = None
    self.hostlist_servers = None
    self.hostfile_servers = None
    self.hostlist_clients = None
    self.hostfile_clients = None
    self.createsvc = None

    # get paths from the build_vars generated by build
    with open('../../../.build_vars.json') as var_file:
        build_paths = json.load(var_file)

    self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
    self.server_group = self.params.get("name", '/server_config/',
                                        'daos_server')

    # setup the DAOS python API
    self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

    self.hostlist_servers = self.params.get("test_servers", '/run/hosts/')
    self.hostfile_servers = write_host_file.write_host_file(
        self.hostlist_servers, self.workdir)
    print("Host file servers is: {}".format(self.hostfile_servers))

    self.hostlist_clients = self.params.get("test_clients", '/run/hosts/')
    self.hostfile_clients = write_host_file.write_host_file(
        self.hostlist_clients, self.workdir, None)
    print("Host file clients is: {}".format(self.hostfile_clients))

    self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                               self.hostlist_servers,
                                               self.hostlist_clients)
    # start servers
    server_utils.run_server(self.hostfile_servers, self.server_group,
                            self.basepath)

    try:
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.createsvc = self.params.get("svcn", '/run/pool/createsvc/')

        # initialize a python pool object then create the underlying
        # daos storage
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid, None, None, self.createsvc)
    except (DaosApiError) as excep:
        # bug fix: the original "{1}" referenced a second format argument
        # that was never supplied, raising IndexError and masking the
        # real failure; "{}" formats the caught exception itself
        self.fail("<Test Failed at pool create> \n{}".format(excep))
def test_destroy_connect(self):
    """
    Test destroying a pool that has a connected client with force == false.
    Should fail.

    :avocado: tags=pool,pooldestroy,x
    """
    try:
        # write out a hostfile and start the servers with it
        hostlist = self.params.get("test_machines1", '/run/hosts/')
        # bug fix: 'host' was never defined before being used in the
        # except block below; capture it from the server host list
        host = hostlist[0]
        hostfile = WriteHostFile.WriteHostFile(hostlist, self.tmp)
        ServerUtils.runServer(hostfile, self.server_group, self.basepath)

        # give it time to reach steady state
        time.sleep(1)

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/poolparams/createmode/')
        createuid = self.params.get("uid", '/run/poolparams/createuid/')
        creategid = self.params.get("gid", '/run/poolparams/creategid/')
        createsetid = self.params.get("setname",
                                      '/run/poolparams/createset/')
        createsize = self.params.get("size", '/run/poolparams/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage
        POOL = DaosPool(self.Context)
        POOL.create(createmode, createuid, creategid, createsize,
                    createsetid, None)

        # need a connection to create container
        POOL.connect(1 << 1)

        # destroy pool with connection open
        POOL.destroy(0)

        # should throw an exception and not hit this
        self.fail("Shouldn't hit this line.\n")

    except ValueError as e:
        print("got exception which is expected so long as it is BUSY")
        print(e)
        print(traceback.format_exc())

        # bug fix: the original referenced the undefined name 'uuid_str';
        # the pool's uuid string comes from POOL.get_uuid_str()
        exists = CheckForPool.checkForPool(host, POOL.get_uuid_str())
        if exists != 0:
            self.fail("Pool gone, but destroy should have failed.\n")

    # no matter what happens cleanup
    finally:
        ServerUtils.stopServer()
        os.remove(hostfile)
def setUp(self):
    """Create a pool, container, and a verified test object.

    Leaves self.pool connected and self.container open; the test object
    is written, read back to confirm the data, and then closed so each
    test starts from a known clean state.
    """
    super(ObjOpenBadParam, self).setUp()
    try:
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        # initialize a python pool object then create the underlying
        # daos storage
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid, None)

        # need a connection to create container
        self.pool.connect(1 << 1)

        # create a container
        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)

        # now open it
        self.container.open()

        # create an object and write some data into it
        thedata = "a string that I want to stuff into an object"
        self.datasize = len(thedata) + 1
        self.dkey = "this is the dkey"
        self.akey = "this is the akey"
        self.obj, self.epoch = self.container.write_an_obj(thedata,
                                                           self.datasize,
                                                           self.dkey,
                                                           self.akey,
                                                           obj_cls=1)

        # read the data back to verify the write landed correctly
        thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                              self.akey, self.obj,
                                              self.epoch)
        if thedata not in thedata2.value:
            print(thedata)
            print(thedata2.value)
            err_str = "Error reading back data, test failed during the " \
                      "initial setup."
            self.d_log.error(err_str)
            self.fail(err_str)

        # setup leaves object in open state, so closing to start clean
        self.obj.close()

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        self.fail("Test failed during the initial setup.")
def check_handle(pool_glob_handle, uuidstr, cont_glob_handle, rank):
    """Verify global pool and container handles from a parent process.

    This gets run in a child process and verifies the global handles
    can be turned into local handles in another process.

    Raises:
        DaosApiError: re-raised after logging if any DAOS call fails
    """
    try:
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)

        # setup the DAOS python API in this process
        context = DaosContext(build_paths['PREFIX'] + '/lib/')

        # setup the pool and connect using global handle
        pool = DaosPool(context)
        pool.uuid = uuidstr
        pool.set_svc(rank)
        pool.group = "daos_server"

        # copy the raw iov buffer into a bytearray for global2local
        buf = ctypes.cast(pool_glob_handle.iov_buf,
                          ctypes.POINTER(ctypes.c_byte *
                                         pool_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        pool_handle = pool.global2local(context,
                                        pool_glob_handle.iov_len,
                                        pool_glob_handle.iov_buf_len,
                                        buf2)

        # perform an operation that will use the new handle, if it
        # doesn't throw an exception, then all is well.
        pool.pool_query()

        # setup the container and then connect using the global handle
        container = DaosContainer(context)
        container.poh = pool_handle
        buf = ctypes.cast(cont_glob_handle.iov_buf,
                          ctypes.POINTER(ctypes.c_byte *
                                         cont_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        dummy_cont_handle = container.global2local(
            context,
            cont_glob_handle.iov_len,
            cont_glob_handle.iov_buf_len,
            buf2)

        # just try one thing to make sure handle is good
        container.query()

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        raise

    return
def executable(self, iorflags=None): """ Executable function to run ior for sequential and random order """ # parameters used in pool create createmode = self.params.get("mode", '/run/pool/createmode/*/') createuid = os.geteuid() creategid = os.getegid() createsetid = self.params.get("setname", '/run/pool/createset/') createsize = self.params.get("size", '/run/pool/createsize/') createsvc = self.params.get("svcn", '/run/pool/createsvc/') iteration = self.params.get("iter", '/run/ior/iteration/') block_size = self.params.get("blocksize", '/run/ior/clientslots/*') record_size = self.params.get("r", '/run/ior/recordsize/*') stripe_count = self.params.get("c", '/run/ior/stripecount/') async_io = self.params.get("a", '/run/ior/asyncio/') object_class = self.params.get("o", '/run/ior/objectclass/*/') transfer_size = self.params.get("t", '/run/ior/transfersize_stripesize/*/') stripe_size = self.params.get("s", '/run/ior/transfersize_stripesize/*/') expected_result = 'PASS' if record_size == '4k' and transfer_size == '1k': expected_result = 'FAIL' try: # initialize a python pool object then create the underlying # daos storage self.pool = DaosPool(self.context) self.pool.create(createmode, createuid, creategid, createsize, createsetid, None, None, createsvc) pool_uuid = self.pool.get_uuid_str() svc_list = "" for item in range(createsvc): svc_list += str(int(self.pool.svc.rl_ranks[item])) + ":" svc_list = svc_list[:-1] print("svc_list: {}".format(svc_list)) ior_utils.run_ior(self.hostfile_clients, iorflags, iteration, block_size, transfer_size, pool_uuid, svc_list, record_size, stripe_size, stripe_count, async_io, object_class, self.basepath, self.slots) if expected_result == 'FAIL': self.fail("Test was expected to fail but it passed.\n") except (DaosApiError, ior_utils.IorFailed) as excep: print(excep) if expected_result != 'FAIL': self.fail("Test was expected to pass but it failed.\n")
def test_singleserver(self):
    """
    Test IOR with Single Server config.

    :avocado: tags=ior,singleserver
    """
    # parameters used in pool create
    createmode = self.params.get("mode", '/run/createtests/createmode/*/')
    createuid = os.geteuid()
    creategid = os.getegid()
    createsetid = self.params.get("setname", '/run/createtests/createset/')
    createsize = self.params.get("size", '/run/createtests/createsize/')
    createsvc = self.params.get("svcn", '/run/createtests/createsvc/')

    # ior parameters from the test yaml
    iteration = self.params.get("iter", '/run/ior/iteration/')
    ior_flags = self.params.get("F", '/run/ior/iorflags/')
    transfer_size = self.params.get("t", '/run/ior/transfersize/')
    record_size = self.params.get("r", '/run/ior/recordsize/')
    segment_count = self.params.get("s", '/run/ior/segmentcount/')
    stripe_count = self.params.get("c", '/run/ior/stripecount/')
    async_io = self.params.get("a", '/run/ior/asyncio/')
    object_class = self.params.get("o", '/run/ior/objectclass/')

    try:
        # initialize a python pool object then create the underlying
        # daos storage
        self.POOL = DaosPool(self.Context)
        self.POOL.create(createmode, createuid, creategid, createsize,
                         createsetid, None, None, createsvc)
        pool_uuid = self.POOL.get_uuid_str()
        print("pool_uuid: {}".format(pool_uuid))

        # build a colon-separated service rank list; renamed from 'list'
        # to stop shadowing the builtin
        rank_list = []
        svc_list = ""
        for i in range(createsvc):
            rank_list.append(int(self.POOL.svc.rl_ranks[i]))
            svc_list += str(rank_list[i]) + ":"
        svc_list = svc_list[:-1]

        # scale the per-client block size to the client count
        if len(self.hostlist_clients) == 1:
            block_size = '12g'
        elif len(self.hostlist_clients) == 2:
            block_size = '6g'
        elif len(self.hostlist_clients) == 4:
            block_size = '3g'

        IorUtils.run_ior(self.hostfile_clients, ior_flags, iteration,
                         block_size, transfer_size, pool_uuid, svc_list,
                         record_size, segment_count, stripe_count,
                         async_io, object_class, self.basepath)

    except ValueError as excep:
        # bug fix: the original used Python 2 "print e" statements,
        # which are a SyntaxError under Python 3
        print(excep)
        print(traceback.format_exc())
def test_connectpermission(self):
    """
    Test pool connections with specific permissions.

    :avocado: tags=pool,permission,connectpermission
    """
    # parameters used in pool create
    createmode = self.params.get("mode", '/run/createtests/createmode/*/')
    createuid = self.params.get("uid", '/run/createtests/createuid/')
    creategid = self.params.get("gid", '/run/createtests/creategid/')
    createsetid = self.params.get("setname", '/run/createtests/createset/')
    createsize = self.params.get("size", '/run/createtests/createsize/')

    # parameters used for pool connect
    permissions = self.params.get("perm", '/run/createtests/permissions/*')

    # the connect is expected to succeed only when the pool mode grants
    # the requested permission bit
    if (createmode in [73, 511] and permissions == 0):
        expected_result = 'PASS'
    elif (createmode in [146, 511] and permissions == 1):
        expected_result = 'PASS'
    elif (createmode in [292, 511] and permissions == 2):
        expected_result = 'PASS'
    else:
        expected_result = 'FAIL'

    try:
        # initialize a python pool object then create the underlying
        # daos storage
        self.POOL = DaosPool(self.Context)
        print("Pool initialisation successful")

        self.POOL.create(createmode, createuid, creategid, createsize,
                         createsetid, None)
        print("Pool Creation successful")

        self.POOL.connect(1 << permissions)
        print("Pool Connect successful")

        if expected_result in ['FAIL']:
            self.fail("Test was expected to fail but it passed.\n")

    except ValueError as e:
        print(e)
        print(traceback.format_exc())
        if expected_result == 'PASS':
            self.fail("Test was expected to pass but it failed.\n")

    finally:
        # bug fix: the original finally block checked and removed a
        # 'hostfile' local that was never defined in this test, raising
        # NameError on every run; that dead cleanup has been removed

        # cleanup the pool
        self.POOL.disconnect()
        self.POOL.destroy(1)
        self.POOL = None
def test_poolsvc(self):
    """
    Test svc arg during pool create.

    :avocado: tags=pool,svc
    """
    # parameters used in pool create
    createmode = self.params.get("mode", '/run/createtests/createmode/*/')
    createuid = os.geteuid()
    creategid = os.getegid()
    createsetid = self.params.get("setname", '/run/createtests/createset/')
    createsize = self.params.get("size", '/run/createtests/createsize/')
    # createsvc is a pair: [0] requested replica count, [1] expected result
    createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')
    expected_result = createsvc[1]

    try:
        # initialize a python pool object then create the underlying
        # daos storage
        self.POOL = DaosPool(self.Context)
        self.POOL.create(createmode, createuid, creategid, createsize,
                         createsetid, None, None, createsvc[0])
        self.POOL.connect(1 << 1)

        # with 3 replicas, exercise leader kill and server kill paths
        if (createsvc[0] == 3):
            self.POOL.disconnect()
            cmd = ('{0} kill-leader --uuid={1}'.format(
                self.daosctl, self.POOL.get_uuid_str()))
            process.system(cmd)
            time.sleep(5)
            self.POOL.connect(1 << 1)
            self.POOL.disconnect()
            server = DaosServer(self.Context, self.server_group, 1)
            server.kill(1)
            time.sleep(5)
            self.POOL.connect(1 << 1)

        if expected_result in ['FAIL']:
            self.fail("Test was expected to fail but it passed.\n")

        # cleanup the pool
        self.POOL.disconnect()
        self.POOL.destroy(1)
        self.POOL = None

    except ValueError as excep:
        # bug fix: the original used Python 2 "print e" statements,
        # which are a SyntaxError under Python 3
        print(excep)
        print(traceback.format_exc())
        if expected_result == 'PASS':
            self.fail("Test was expected to pass but it failed.\n")
def setUp(self):
    """Start servers, then create a pool and an open container."""
    try:
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        prefix = build_paths['PREFIX']
        self.basepath = os.path.normpath(prefix + "/../")
        self.tmp = prefix + '/tmp'
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        # setup the DAOS python API
        self.Context = DaosContext(prefix + '/lib/')

        # write out a hostfile and fire up the servers
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(
            self.hostlist, self.tmp)
        ServerUtils.runServer(self.hostfile, self.server_group,
                              self.basepath)

        # parameters used in pool create
        mode = self.params.get("mode", '/run/pool/createmode/')
        setid = self.params.get("setname", '/run/pool/createset/')
        size = self.params.get("size", '/run/pool/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage, owned by the current effective uid/gid
        self.pool = DaosPool(self.Context)
        self.pool.create(mode, os.geteuid(), os.getegid(), size, setid,
                         None)
        self.pool.connect(1 << 1)

        # create a container and open it for the test
        self.container = DaosContainer(self.Context)
        self.container.create(self.pool.handle)
        self.container.open()

    except DaosApiError as error:
        print(error)
        print(traceback.format_exc())
        self.fail("Test failed during setup.\n")
def test_many_servers(self):
    """Test destroy on a large (relative) number of servers.

    Starts servers on six machines, creates a pool, destroys it, and
    always stops the agents/servers and removes the hostfile afterwards.

    :avocado: tags=pool,pooldestroy,destroybig
    """
    # BUGFIX: hostfile_servers was previously referenced in the finally
    # block even when an exception fired before it was assigned, raising
    # NameError and masking the original failure.
    hostfile_servers = None
    try:
        # write out a hostfile_servers and start the servers with it
        self.hostlist_servers = self.params.get("test_machines6",
                                                '/run/hosts/')
        hostfile_servers = write_host_file.write_host_file(
            self.hostlist_servers, self.tmp)
        self.agent_sessions = agent_utils.run_agent(
            self.basepath, self.hostlist_servers)
        server_utils.run_server(hostfile_servers, self.server_group,
                                self.basepath)

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/poolparams/createmode/')
        createuid = self.params.get("uid", '/run/poolparams/createuid/')
        creategid = self.params.get("gid", '/run/poolparams/creategid/')
        createsetid = self.params.get("setname",
                                      '/run/poolparams/createset/')
        createsize = self.params.get("size", '/run/poolparams/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid, createsize,
                    createsetid, None)
        time.sleep(1)

        # okay, get rid of it
        pool.destroy(1)

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        self.fail("6 server test failed.\n")
    except Exception as excep:
        # deliberately broad: a daos segfault surfaces as a generic error
        self.fail("Daos code segfaulted most likely. Error: %s" % excep)

    # no matter what happens cleanup
    finally:
        if self.agent_sessions:
            agent_utils.stop_agent(self.agent_sessions)
        server_utils.stop_server(hosts=self.hostlist_servers)
        # only remove the hostfile if it was actually created
        if hostfile_servers is not None:
            os.remove(hostfile_servers)
def setUp(self):
    """Start agents and servers, then create a connected pool, an open
    container, an open object, and an IORequest for the test to use.
    """
    # initialize all test state up front so tearDown can check it safely
    self.agent_sessions = None
    self.pool = None
    self.container = None
    self.obj = None
    self.ioreq = None
    self.hostlist = None
    self.hostfile = None
    self.no_of_dkeys = None
    self.no_of_akeys = None
    self.array_size = None
    self.record_length = None

    # get paths from the build_vars generated by build
    with open('../../../.build_vars.json') as json_f:
        build_paths = json.load(json_f)
    basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
    server_group = self.params.get("name", '/server_config/',
                                   'daos_server')
    self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
    self.d_log = DaosLog(self.context)

    # write a hostfile for the test machines
    self.hostlist = self.params.get("test_machines", '/run/hosts/*')
    self.hostfile = write_host_file.write_host_file(
        self.hostlist, self.workdir)

    # key/record geometry used by the I/O performed in the tests
    self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
    self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
    self.array_size = self.params.get("size", '/array_size/')
    self.record_length = self.params.get("length", '/run/record/*')

    # start the agents and the servers
    self.agent_sessions = agent_utils.run_agent(basepath, self.hostlist)
    server_utils.run_server(self.hostfile, server_group, basepath)

    # create a pool and connect to it
    # (flag 2 == 1 << 1, the same connect flag used by the other tests)
    self.pool = DaosPool(self.context)
    self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                     os.geteuid(),
                     os.getegid(),
                     self.params.get("size", '/run/pool/createsize/*'),
                     self.params.get("setname",
                                     '/run/pool/createset/*'),
                     None)
    self.pool.connect(2)

    # create and open a container within the pool
    self.container = DaosContainer(self.context)
    self.container.create(self.pool.handle)
    self.container.open()

    # create and open an object, then build the I/O request helper
    self.obj = DaosObj(self.context, self.container)
    self.obj.create(objcls=1)
    self.obj.open()
    self.ioreq = IORequest(self.context, self.container, self.obj,
                           objtype=4)
def test_create(self):
    """
    Test basic pool creation.

    :avocado: tags=all,pool,smoke,full_regression,tiny,simplecreate
    """
    # Gather the pass/fail indicator attached to each yaml parameter;
    # the case is expected to pass only when every parameter is valid.
    expected_for_param = []

    modelist = self.params.get("mode", '/run/tests/modes/*')
    mode = modelist[0]
    expected_for_param.append(modelist[1])

    uidlist = self.params.get("uid", '/run/tests/uids/*', os.geteuid())
    uid = os.geteuid() if uidlist[0] == 'valid' else uidlist[0]
    expected_for_param.append(uidlist[1])

    gidlist = self.params.get("gid", '/run/tests/gids/*', os.getegid())
    gid = os.getegid() if gidlist[0] == 'valid' else gidlist[0]
    expected_for_param.append(gidlist[1])

    setidlist = self.params.get("setname", '/run/tests/setnames/*')
    setid = setidlist[0]
    expected_for_param.append(setidlist[1])

    # any single FAIL indicator makes the whole case an expected failure
    expected_result = 'FAIL' if 'FAIL' in expected_for_param else 'PASS'

    try:
        self.pool = DaosPool(self.context)
        self.pool.create(mode, uid, gid, 1073741824, setid)
        if expected_result == 'FAIL':
            self.fail("Test was expected to fail but it passed.\n")
    except DaosApiError as exc:
        print(exc)
        print(traceback.format_exc())
        if expected_result != 'FAIL':
            self.fail("Test was expected to pass but it failed.\n")
def test_global_handle(self):
    """
    Test ID: DAO

    Test Description: Use a pool handle in another process.

    :avocado: tags=all,pool,pr,tiny,poolglobalhandle
    """
    try:
        # use the uid/gid of the user running the test, these should
        # be perfectly valid
        createuid = os.geteuid()
        creategid = os.getegid()

        # parameters used in pool create that are in yaml
        createmode = self.params.get("mode",
                                     '/run/testparams/createmode/')
        createsetid = self.params.get("setname",
                                      '/run/testparams/createset/')
        createsize = self.params.get("size",
                                     '/run/testparams/createsize/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(self.context)
        pool.create(createmode, createuid, creategid, createsize,
                    createsetid, None)
        pool.connect(1 << 1)

        # create a container just to make sure handle is good
        container = DaosContainer(self.context)
        container.create(pool.handle)

        # create a global handle
        # (local2global presumably serializes the local pool handle into
        # the returned buffer — confirm against the DAOS API)
        iov_len, buf_len, buf = pool.local2global()

        # this should work in the future but need on-line server addition
        #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
        #p = Process(target=check_handle, args=arg_list)
        #p.start()
        #p.join()

        # for now verifying global handle in the same process which is not
        # the intended use case
        check_handle(buf_len, iov_len, buf, pool.get_uuid_str(), 0)

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        self.fail("Expecting to pass but test has failed.\n")
def test_fourservers(self):
    """
    Jira ID: DAOS-1263
    Test Description: Test IOR with four servers.
    Use Cases: Different combinations of 1/64/128 Clients,
               1K/4K/32K/128K/512K/1M transfer size.
    :avocado: tags=ior,fourservers
    """
    # parameters used in pool create
    createmode = self.params.get("mode_RW", '/run/pool/createmode/')
    createuid = os.geteuid()
    creategid = os.getegid()
    createsetid = self.params.get("setname", '/run/pool/createset/')
    createsize = self.params.get("size", '/run/pool/createsize/')
    createsvc = self.params.get("svcn", '/run/pool/createsvc/')

    # ior parameters
    iteration = self.params.get("iter", '/run/ior/iteration/')
    client_processes = self.params.get("np", '/run/ior/clientslots/*')
    ior_flags = self.params.get("F", '/run/ior/iorflags/')
    transfer_size = self.params.get(
        "t", '/run/ior/transfersize_blocksize/*/')
    block_size = self.params.get(
        "b", '/run/ior/transfersize_blocksize/*/')
    object_class = self.params.get("o", '/run/ior/objectclass/')

    try:
        # initialize a python pool object then create the underlying
        # daos storage
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid, None, None, createsvc)
        pool_uuid = self.pool.get_uuid_str()

        # IMPROVED: build the colon-separated service rank list with
        # str.join rather than manual concatenation plus a trailing-slice,
        # which also removes the otherwise-unused tmp_rank_list
        svc_list = ":".join(str(int(self.pool.svc.rl_ranks[i]))
                            for i in range(createsvc))

        ior_utils.run_ior_daos(self.hostfile_clients, ior_flags,
                               iteration, block_size, transfer_size,
                               pool_uuid, svc_list, object_class,
                               self.basepath, client_processes)

    except (DaosApiError, ior_utils.IorFailed) as excep:
        self.fail("<FourServers Test run Failed>\n {}".format(excep))
def create_pool(self):
    """
    Creates a pool that the various tests use for storage.
    """
    # pull the pool parameters from the test yaml
    mode = self.params.get("mode", '/run/pool1/createmode/*/')
    set_id = self.params.get("setname", '/run/pool1/createset/')
    size = self.params.get("size", '/run/pool1/createsize/')
    self.createsvc = self.params.get("svcn", '/run/pool1/createsvc/')

    # create the pool as the user running the test
    self.pool = DaosPool(self.context)
    self.pool.create(mode, os.geteuid(), os.getegid(), size,
                     set_id, None, None, self.createsvc)