def setUp(self):
    """Per-test setup: load build vars, write host files, start agents and servers."""
    self.agent_sessions = None

    # get paths from the build_vars generated by build
    with open('../../.build_vars.json') as vars_fh:
        paths = json.load(vars_fh)
    self.basepath = os.path.normpath(paths['PREFIX'] + "/../")
    self.server_group = self.params.get("name", '/server_config/',
                                        'daos_server')

    # setup the DAOS python API
    self.context = DaosContext(paths['PREFIX'] + '/lib64/')

    # one host file for the servers, one for the clients
    self.hostlist_servers = self.params.get("test_servers", '/run/hosts/')
    self.hostfile_servers = write_host_file.write_host_file(
        self.hostlist_servers, self.workdir)
    print("Host file servers is: {}".format(self.hostfile_servers))

    self.hostlist_clients = self.params.get("test_clients", '/run/hosts/')
    self.hostfile_clients = write_host_file.write_host_file(
        self.hostlist_clients, self.workdir)
    print("Host file clients is: {}".format(self.hostfile_clients))

    # start servers
    self.agent_sessions = agent_utils.run_agent(
        self.basepath, self.hostlist_servers, self.hostlist_clients)
    server_utils.run_server(self, self.hostfile_servers, self.server_group)
    self.mpio = None
def setUp(self):
    """Set up run before each test.

    Loads openmpi and locates orterun, reads build paths from
    .build_vars.json, prepares the shared test directory, configures
    optional fault injection (before any API setup), and initializes
    the DAOS API context and logger.
    """
    super(TestWithoutServers, self).setUp()

    if not load_mpi("openmpi"):
        self.fail("Failed to load openmpi")
    self.orterun = find_executable('orterun')
    if self.orterun is None:
        self.fail("Could not find orterun")

    # hardware tests segfault in MPI_Init without this option
    self.client_mca = " ".join([
        "--mca btl_openib_warn_default_gid_prefix 0",
        "--mca pml ob1",
        "--mca btl tcp,self",
        "--mca oob tcp",
    ])
    self.ompi_prefix = os.path.dirname(os.path.dirname(self.orterun))

    # get paths from the build_vars generated by build
    with open('../../.build_vars.json') as build_vars:
        build_paths = json.load(build_vars)
    self.basepath = os.path.normpath(os.path.join(build_paths['PREFIX'],
                                                  '..') + os.path.sep)
    self.prefix = build_paths['PREFIX']
    # OFI_PREFIX is optional in the build vars; default to the system prefix
    self.ofi_prefix = build_paths.get('OFI_PREFIX', "/usr")
    self.bin = os.path.join(self.prefix, 'bin')
    self.daos_test = os.path.join(self.prefix, 'bin', 'daos_test')

    # set default shared dir for daos tests in case DAOS_TEST_SHARED_DIR
    # is not set, for RPM env and non-RPM env.
    if self.prefix != "/usr":
        self.tmp = os.path.join(self.prefix, 'tmp')
    else:
        self.tmp = os.getenv('DAOS_TEST_SHARED_DIR',
                             os.path.expanduser('~/daos_test'))
    if not os.path.exists(self.tmp):
        os.makedirs(self.tmp)

    # setup fault injection, this MUST be before API setup
    fault_list = self.params.get("fault_list", '/run/faults/*')
    if fault_list:
        # not using workdir because the huge path was messing up
        # orterun or something, could re-evaluate this later
        self.fault_file = fault_config_utils.write_fault_file(self.tmp,
                                                              fault_list,
                                                              None)
        os.environ["D_FI_CONFIG"] = self.fault_file

    self.context = DaosContext(self.prefix + '/lib64/')
    self.d_log = DaosLog(self.context)
    self.test_log.daos_log = self.d_log
def setUp(self):
    """Per-test setup: start servers, then create a pool, container, and object."""
    # placeholders so tearDown can safely check what got created
    for attr in ("agent_sessions", "pool", "container", "obj", "ioreq",
                 "hostlist", "hostfile", "no_of_dkeys", "no_of_akeys",
                 "array_size", "record_length"):
        setattr(self, attr, None)

    with open('../../.build_vars.json') as bvars:
        build_paths = json.load(bvars)
    self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
    server_group = self.params.get("name", '/server_config/',
                                   'daos_server')

    self.context = DaosContext(build_paths['PREFIX'] + '/lib64/')
    self.d_log = DaosLog(self.context)

    self.hostlist = self.params.get("test_machines", '/run/hosts/*')
    self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                    self.workdir)

    # test parameters from the yaml
    self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
    self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
    self.array_size = self.params.get("size", '/array_size/')
    self.record_length = self.params.get("length", '/run/record/*')

    self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                self.hostlist)
    server_utils.run_server(self, self.hostfile, server_group)

    # create and connect a pool, then a container and an open object in it
    self.pool = DaosPool(self.context)
    self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                     os.geteuid(),
                     os.getegid(),
                     self.params.get("size", '/run/pool/createsize/*'),
                     self.params.get("setname", '/run/pool/createset/*'),
                     None)
    self.pool.connect(2)

    self.container = DaosContainer(self.context)
    self.container.create(self.pool.handle)
    self.container.open()

    self.obj = DaosObj(self.context, self.container)
    self.obj.create(objcls=1)
    self.obj.open()
    self.ioreq = IORequest(self.context, self.container, self.obj,
                           objtype=4)
def setUp(self):
    """Set up run before each test."""
    super(TestWithoutServers, self).setUp()

    # get paths from the build_vars generated by build
    with open('../../.build_vars.json') as bv_file:
        build_paths = json.load(bv_file)

    self.prefix = build_paths['PREFIX']
    self.basepath = os.path.normpath(
        os.path.join(self.prefix, '..') + os.path.sep)
    self.ompi_prefix = build_paths["OMPI_PREFIX"]
    self.bin = os.path.join(self.prefix, 'bin')
    self.daos_test = os.path.join(self.bin, 'daos_test')
    self.orterun = os.path.join(self.ompi_prefix, "bin", "orterun")
    self.daosctl = os.path.join(self.bin, 'daosctl')

    # set default shared dir for daos tests in case DAOS_TEST_SHARED_DIR
    # is not set, for RPM env and non-RPM env.
    if self.prefix == "/usr":
        self.tmp = os.getenv('DAOS_TEST_SHARED_DIR',
                             os.path.expanduser('~/daos_test'))
    else:
        self.tmp = os.path.join(self.prefix, 'tmp')
    if not os.path.exists(self.tmp):
        os.makedirs(self.tmp)

    # setup fault injection, this MUST be before API setup
    fault_list = self.params.get("fault_list", '/run/faults/*/')
    if fault_list:
        # not using workdir because the huge path was messing up
        # orterun or something, could re-evaluate this later
        self.fault_file = fault_config_utils.write_fault_file(
            self.tmp, fault_list, None)
        os.environ["D_FI_CONFIG"] = self.fault_file

    self.context = DaosContext(self.prefix + '/lib64/')
    self.d_log = DaosLog(self.context)
def test_exclude(self):
    """
    Pass bad parameters to pool exclude.

    :avocado: tags=all,pool,full_regression,tiny,badexclude
    """
    # parameters used in pool create
    createmode = self.params.get("mode", '/run/pool/createmode/')
    createsetid = self.params.get("setname", '/run/pool/createset/')
    createsize = self.params.get("size", '/run/pool/createsize/')
    createuid = os.geteuid()
    creategid = os.getegid()

    # Accumulate a list of pass/fail indicators representing what is
    # expected for each parameter then "and" them to determine the
    # expected result of the test
    expected_for_param = []

    tgtlist = self.params.get("ranklist", '/run/testparams/tgtlist/*/')
    targets = []
    if tgtlist[0] == "NULLPTR":
        targets = None
        self.cancel("skipping null pointer test until DAOS-1929 is fixed")
    else:
        targets.append(tgtlist[0])
    expected_for_param.append(tgtlist[1])

    svclist = self.params.get("ranklist", '/run/testparams/svrlist/*/')
    svc = svclist[0]
    expected_for_param.append(svclist[1])

    setlist = self.params.get("setname",
                              '/run/testparams/connectsetnames/*/')
    connectset = setlist[0]
    expected_for_param.append(setlist[1])

    uuidlist = self.params.get("uuid", '/run/testparams/UUID/*/')
    excludeuuid = uuidlist[0]
    expected_for_param.append(uuidlist[1])

    # if any parameter is FAIL then the test should FAIL, in this test
    # virtually everyone should FAIL since we are testing bad parameters
    expected_result = 'PASS'
    for result in expected_for_param:
        if result == 'FAIL':
            expected_result = 'FAIL'
            break

    # saved copies of the pool fields we trash, so the finally block can
    # restore them before destroying the pool
    saved_svc = None
    saved_grp = None
    saved_uuid = None
    pool = None
    try:
        # setup the DAOS python API
        with open('../../../.build_vars.json') as build_file:
            data = json.load(build_file)
        context = DaosContext(data['PREFIX'] + '/lib64/')

        # initialize a python pool object then create the underlying
        # daos storage
        pool = DaosPool(context)
        pool.create(createmode, createuid, creategid,
                    createsize, createsetid, None)

        # trash the pool service rank list
        if not svc == 'VALID':
            self.cancel("skipping this test until DAOS-1931 is fixed")
            # unreachable until the cancel above is removed
            saved_svc = RankList(pool.svc.rl_ranks, pool.svc.rl_nr)
            pool.svc = None

        # trash the pool group value
        if connectset == 'NULLPTR':
            saved_grp = pool.group
            pool.group = None

        # trash the UUID value in various ways
        if excludeuuid == 'NULLPTR':
            self.cancel("skipping this test until DAOS-1932 is fixed")
            # unreachable until the cancel above is removed; allocate a
            # real 16-byte buffer before copying -- memmove into the old
            # saved_uuid = None would write through a NULL pointer
            saved_uuid = (ctypes.c_ubyte * 16)()
            ctypes.memmove(saved_uuid, pool.uuid, 16)
            pool.uuid = 0
        if excludeuuid == 'CRAP':
            self.cancel("skipping this test until DAOS-1932 is fixed")
            saved_uuid = (ctypes.c_ubyte * 16)()
            ctypes.memmove(saved_uuid, pool.uuid, 16)
            pool.uuid[4] = 244

        pool.exclude(targets)

        if expected_result in ['FAIL']:
            self.fail("Test was expected to fail but it passed.\n")

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        if expected_result in ['PASS']:
            self.fail("Test was expected to pass but it failed.\n")
    finally:
        if pool is not None:
            # restore anything we trashed so destroy can succeed
            if saved_svc is not None:
                pool.svc = saved_svc
            if saved_grp is not None:
                pool.group = saved_grp
            if saved_uuid is not None:
                ctypes.memmove(pool.uuid, saved_uuid, 16)
            pool.destroy(1)
def test_connect(self): """ Pass bad parameters to pool connect :avocado: tags=all,pool,full_regression,tiny,badconnect """ # parameters used in pool create createmode = self.params.get("mode", '/run/connecttests/createmode/') createuid = self.params.get("uid", '/run/connecttests/uids/createuid/') creategid = self.params.get("gid", '/run/connecttests/gids/creategid/') createsetid = self.params.get("setname", '/run/connecttests/setnames/createset/') createsize = self.params.get("size", '/run/connecttests/psize/createsize/') # Accumulate a list of pass/fail indicators representing what is # expected for each parameter then "and" them to determine the # expected result of the test expected_for_param = [] modelist = self.params.get("mode", '/run/connecttests/connectmode/*/') connectmode = modelist[0] expected_for_param.append(modelist[1]) svclist = self.params.get("ranklist", '/run/connecttests/svrlist/*/') svc = svclist[0] expected_for_param.append(svclist[1]) setlist = self.params.get("setname", '/run/connecttests/connectsetnames/*/') connectset = setlist[0] expected_for_param.append(setlist[1]) uuidlist = self.params.get("uuid", '/run/connecttests/UUID/*/') connectuuid = uuidlist[0] expected_for_param.append(uuidlist[1]) # if any parameter is FAIL then the test should FAIL, in this test # virtually everyone should FAIL since we are testing bad parameters expected_result = 'PASS' for result in expected_for_param: if result == 'FAIL': expected_result = 'FAIL' break puuid = (ctypes.c_ubyte * 16)() psvc = RankList() pgroup = ctypes.create_string_buffer(0) pool = None try: # setup the DAOS python API with open('../../.build_vars.json') as build_file: data = json.load(build_file) context = DaosContext(data['PREFIX'] + '/lib64/') # initialize a python pool object then create the underlying # daos storage pool = DaosPool(context) pool.create(createmode, createuid, creategid, createsize, createsetid, None) # save this uuid since we might trash it as part of the test 
ctypes.memmove(puuid, pool.uuid, 16) # trash the the pool service rank list psvc.rl_ranks = pool.svc.rl_ranks psvc.rl_nr = pool.svc.rl_nr if not svc == 'VALID': rl_ranks = ctypes.POINTER(ctypes.c_uint)() pool.svc = RankList(rl_ranks, 1) # trash the pool group value pgroup = pool.group if connectset == 'NULLPTR': pool.group = None # trash the UUID value in various ways if connectuuid == 'NULLPTR': pool.uuid = None if connectuuid == 'JUNK': pool.uuid[4] = 244 pool.connect(connectmode) if expected_result in ['FAIL']: self.fail("Test was expected to fail but it passed.\n") except DaosApiError as excep: print(excep) print(traceback.format_exc()) if expected_result in ['PASS']: self.fail("Test was expected to pass but it failed.\n") # cleanup the pool finally: if pool is not None and pool.attached == 1: # restore values in case we trashed them during test pool.svc.rl_ranks = psvc.rl_ranks pool.svc.rl_nr = psvc.rl_nr pool.group = pgroup ctypes.memmove(pool.uuid, puuid, 16) print("pool uuid after restore {}".format(pool.get_uuid_str())) pool.destroy(1)