def verify_pool_readwrite(self, svc, uuid, action, expect='Pass'):
    """Verify client is able to perform read or write on a pool.

    Args:
        svc (int): pool svc number.
        uuid (str): pool uuid number.
        action (str): read or write on pool.
        expect (str): expecting behavior pass or deny with RC -1001.
    """
    deny_access = '-1001'
    daos_cmd = DaosCommand(os.path.join(self.prefix, "bin"))
    if action.lower() == "write":
        daos_cmd.request.value = "container"
        daos_cmd.action.value = "create --svc={} --pool={}".format(
            svc, uuid)
    elif action.lower() == "read":
        daos_cmd.request.value = "pool"
        daos_cmd.action.value = "query --svc={} --pool={}".format(
            svc, uuid)
    else:
        # BUG FIX: self.fail() takes a single message string; the original
        # passed "%s" and action as separate arguments, which raises
        # TypeError instead of reporting the invalid action.
        self.fail("##In verify_pool_readwrite, invalid action: {}".format(
            action))
    # Do not raise on non-zero exit; the result is inspected below so the
    # expected-deny (-1001) case can be verified.
    daos_cmd.exit_status_exception = False
    result = daos_cmd.run()
    self.log.info(
        " In verify_pool_readwrite %s.\n =daos_cmd.run()"
        " result:\n%s", action, result)
    self.verify_daos_pool_result(result, action, expect, deny_access)
def prepare_pool_container(self):
    """Create a pool and a container and prepare for the test cases. """
    self.daos_cmd = DaosCommand(self.bin)
    self.add_pool(connect=False)
    # Build the comma-separated service-rank string from the pool ranks
    rank_strings = (str(rank) for rank in self.pool.svc_ranks)
    self.svc = ",".join(rank_strings)
    self.add_container(self.pool)
def setUp(self):
    """Set up each test case.

    Starts the servers and agents, then reads the Mdtest parameters
    from the test yaml.
    """
    # obtain separate logs
    self.update_log_file_names()
    # Start the servers and agents.  Zero-argument super() matches the
    # style used by the other setUp methods in this code base.
    super().setUp()

    # initialise daos_cmd
    self.daos_cmd = DaosCommand(self.bin)

    # Get the parameters for Mdtest
    self.mdtest_cmd = MdtestCommand()
    self.mdtest_cmd.get_params(self)
    self.processes = self.params.get("np", '/run/mdtest/client_processes/*')
    self.manager = self.params.get("manager", '/run/mdtest/*', "MPICH")

    # Until DAOS-3320 is resolved run IOR for POSIX
    # with single client node
    if self.mdtest_cmd.api.value == "POSIX":
        self.log.info("Restricting mdtest to one node")
        self.hostlist_clients = [self.hostlist_clients[0]]
        self.hostfile_clients = write_host_file.write_host_file(
            self.hostlist_clients, self.workdir,
            self.hostfile_clients_slots)

    self.log.info('Clients %s', self.hostlist_clients)
    self.log.info('Servers %s', self.hostlist_servers)
def test_container_attribute(self):
    """
    Test basic container attribute tests.
    :avocado: tags=all,tiny,full_regression
    :avocado: tags=container,attribute
    :avocado: tags=sync_conattribute
    :avocado: tags=container_attribute
    """
    self.add_pool()
    self.add_container(self.pool)
    self.container.open()
    self.daos_cmd = DaosCommand(self.bin)

    name = self.params.get("name", '/run/attrtests/name_handles/*/')
    value = self.params.get("value", '/run/attrtests/value_handles/*/')
    expected_for_param = [name[1], value[1]]

    # The yaml can supply str values; the attribute API expects bytes
    if isinstance(name[0], str):
        name[0] = name[0].encode("utf-8")
    if isinstance(value[0], str):
        value[0] = value[0].encode("utf-8")
    attr_dict = {name[0]: value[0]}

    # A single FAIL entry makes the whole case a negative test
    expected_result = 'FAIL' if 'FAIL' in expected_for_param else 'PASS'

    try:
        self.container.container.set_attr(data=attr_dict)
        data = self.daos_cmd.container_list_attrs(
            pool=self.pool.uuid, cont=self.container.uuid)
        self.verify_list_attr(attr_dict, data['response'])

        # Request something that doesn't exist
        if name[0] is not None and b"Negative" in name[0]:
            name[0] = b"rubbish"
        attr_value_dict = self.container.container.get_attr([name[0]])
        # An empty attr value is expected on the Negative test cases
        if not attr_value_dict[name[0]]:
            raise DaosApiError("Attr value is empty. "
                               "Did you set the value?")
        self.verify_get_attr(attr_dict, attr_value_dict)
        if expected_result == 'FAIL':
            self.fail("Test was expected to fail but it passed.\n")
    except (DaosApiError, DaosTestError) as excep:
        print(excep)
        print(traceback.format_exc())
        if expected_result == 'PASS':
            self.fail("Test was expected to pass but it failed.\n")
def test_container_large_attributes(self):
    """
    Test ID: DAOS-1359
    Test description: Test large randomly created container attribute.
    :avocado: tags=container,attribute,large_conattribute
    :avocado: tags=container_attribute
    """
    self.add_pool()
    self.add_container(self.pool)
    self.container.open()
    self.daos_cmd = DaosCommand(self.bin)
    attr_dict = self.create_data_set()

    try:
        self.container.container.set_attr(data=attr_dict)
        # Workaround
        # Due to DAOS-7093 skip the usage of pydaos cont list attr
        # size, buf = self.container.container.list_attr()
        out_attr_dict = self.daos_cmd.container_list_attrs(
            pool=self.pool.uuid, cont=self.container.uuid)
        self.verify_list_attr(attr_dict, out_attr_dict)

        # Fetch every attribute back and compare against what was set
        attr_names = list(attr_dict.keys())
        fetched = self.container.container.get_attr(attr_names)
        self.verify_get_attr(attr_dict, fetched)
    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        self.fail("Test was expected to pass but it failed.\n")
def execute_during_rebuild(self):
    """Delete half of the objects from the container during rebuild."""
    self.daos_cmd = DaosCommand(self.bin)
    self.daos_cmd.container_set_prop(
        pool=self.pool.uuid, cont=self.container.uuid, prop="status",
        value="healthy")

    if self.punch_type == "object":
        # Punch half of the objects (every odd index)
        total = self.container.object_qty.value
        self.punched_indices = list(range(1, total, 2))
        self.punched_qty = self.container.punch_objects(
            self.punched_indices)
    elif self.punch_type == "record":
        # Punch half of the records in each object (every odd index)
        total = self.container.record_qty.value
        self.punched_indices = list(range(1, total, 2))
        self.punched_qty = self.container.punch_records(
            self.punched_indices)
def verify_pool_readwrite(self, svc, uuid, action, expect='Pass'):
    """Verify client is able to perform read or write on a pool.

    Args:
        svc (int): pool svc number.
        uuid (str): pool uuid number.
        action (str): read or write on pool.
        expect (str): expecting behavior pass or deny with RC -1001.

    Return:
        bool: pass or fail.
    """
    deny_access = '-1001'
    daos_cmd = DaosCommand(self.bin)
    daos_cmd.exit_status_exception = False
    verb = action.lower()
    if verb == "write":
        # Creating a container exercises write access on the pool
        result = daos_cmd.container_create(pool=uuid, svc=svc)
    elif verb == "read":
        # Querying the pool exercises read access
        result = daos_cmd.pool_query(pool=uuid, svc=svc)
    else:
        self.fail("##In verify_pool_readwrite, invalid action: {}".format(
            action))
    self.log.info(
        " In verify_pool_readwrite %s.\n =daos_cmd.run() result:\n%s",
        action, result)
    self.verify_daos_pool_cont_result(result, action, expect, deny_access)
def setUp(self):
    """Set up each test case.

    Creates the maximum-size pool and resets the error counters used by
    the NVMe enospace checks.
    """
    # Zero-argument super() matches the other setUp methods in this file
    super().setUp()

    # initialize daos command
    self.daos_cmd = DaosCommand(self.bin)
    self.create_pool_max_size()
    # per-test error counters
    self.der_nospace_count = 0
    self.other_errors_count = 0
def create_pool_container(self):
    """Create a pool and a container in the pool.

    Save some variables so that we can use them in the tests.
    """
    self.add_pool()
    self.daos_cmd = DaosCommand(self.bin)
    # container_create prints the new UUID; the first output token is it
    uuids = self.daos_cmd.get_output("container_create", pool=self.pool.uuid)
    self.expected_cont_uuid = uuids[0]
def setUp(self):
    """Set up each test case.

    Initializes the daos command wrapper and creates the test pool.
    """
    # Zero-argument super() matches the other setUp methods in this file
    super().setUp()

    # initialize daos_cmd
    self.daos_cmd = DaosCommand(self.bin)

    # initialize a python pool object then create the underlying
    self.pool = TestPool(self.context, dmg_command=self.get_dmg_command())
    self.pool.get_params(self)
    self.pool.create()
def setUp(self):
    """Initial setup"""
    super().setUp()

    # daos command wrapper used throughout the test
    self.daos_cmd = DaosCommand(self.bin)
    self.create_pool_max_size()
    # reset the per-test error counters and result list
    self.der_nospace_count = 0
    self.other_errors_count = 0
    self.test_result = []
def test_container_attribute(self):
    """
    Test basic container attribute tests.
    :avocado: tags=all,tiny,full_regression,container,sync_conattribute
    :avocado: tags=container_attribute
    """
    self.add_pool()
    self.add_container(self.pool)
    self.container.open()
    self.daos_cmd = DaosCommand(self.bin)

    name = self.params.get("name", '/run/attrtests/name_handles/*/')
    value = self.params.get("value", '/run/attrtests/value_handles/*/')
    expected_for_param = [name[1], value[1]]

    # The yaml can supply str values; the attribute API expects bytes
    if isinstance(name[0], str):
        name[0] = name[0].encode("utf-8")
    if isinstance(value[0], str):
        value[0] = value[0].encode("utf-8")
    attr_dict = {name[0]: value[0]}

    # A single FAIL entry makes the whole case a negative test
    expected_result = 'FAIL' if 'FAIL' in expected_for_param else 'PASS'

    try:
        self.container.container.set_attr(data=attr_dict)
        # Workaround
        # Due to DAOS-7093 skip the usage of pydaos cont list attr
        # size, buf = self.container.container.list_attr()
        out_attr_dict = self.daos_cmd.container_list_attrs(
            pool=self.pool.uuid, cont=self.container.uuid)
        self.verify_list_attr(attr_dict, out_attr_dict)

        # Request something that doesn't exist
        if name[0] is not None and b"Negative" in name[0]:
            name[0] = b"rubbish"
        attr_value_dict = self.container.container.get_attr([name[0]])
        self.verify_get_attr(attr_dict, attr_value_dict)

        if expected_result == 'FAIL':
            self.fail("Test was expected to fail but it passed.\n")
    except (DaosApiError, DaosTestError) as excep:
        print(excep)
        print(traceback.format_exc())
        if expected_result == 'PASS':
            self.fail("Test was expected to pass but it failed.\n")
def setUp(self):
    """Set up each test case."""
    super().setUp()
    uid = os.geteuid()
    gid = os.getegid()
    self.user_uid = uid
    self.user_gid = gid
    self.current_user = pwd.getpwuid(uid)[0]
    # NOTE(review): getgrgid is given the uid, not the gid — confirm
    # this is intentional (it works when the primary gid equals the uid)
    self.current_group = grp.getgrgid(uid)[0]
    self.co_prop = self.params.get(
        "container_properties", "/run/container/*")
    self.dmg = self.get_dmg_command()
    self.daos_tool = DaosCommand(self.bin)
def execute_rebuild_test(self, create_container=True): """Execute the rebuild test steps. Args: create_container (bool, optional): should the test create a container. Defaults to True. """ # Get the test params self.setup_test_pool() self.daos_cmd = DaosCommand(self.bin) if create_container: self.setup_test_container() # Create a pool and verify the pool information before rebuild self.create_test_pool() # Create a container and write objects self.create_test_container() # Verify the rank to be excluded has at least one object self.verify_rank_has_objects() # Start the rebuild process self.start_rebuild() # Execute the test steps during rebuild self.execute_during_rebuild() # Confirm rebuild completes self.pool.wait_for_rebuild(False, 1) # clear container status for the RF issue self.daos_cmd.container_set_prop( pool=self.pool.uuid, cont=self.container.uuid, prop="status", value="healthy") # Refresh local pool and container self.pool.check_pool_info() self.container.check_container_info() # Verify the excluded rank is no longer used with the objects self.verify_rank_has_no_objects() # Verify the pool information after rebuild self.update_pool_verify() self.execute_pool_verify(" after rebuild") # Verify the container data can still be accessed self.verify_container_data() self.log.info("Test passed")
def setUp(self):
    """Set up for test case."""
    super().setUp()
    self.dmg_command = self.get_dmg_command()
    self.daos_command = DaosCommand(self.bin)
    # Pull the test parameters from the yaml
    self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
    self.ranks = self.params.get("rank_list", '/run/test_ranks/*')
    self.ior_test_sequence = self.params.get(
        "ior_test_sequence", '/run/ior/iorflags/*')
    # Recreate the client hostfile without slots defined
    self.hostfile_clients = write_host_file(
        self.hostlist_clients, self.workdir, None)
def setUp(self):
    """Set up for test case."""
    super().setUp()
    self.dmg_command = self.get_dmg_command()
    self.daos_command = DaosCommand(self.bin)
    # Parameters for the ior runs and the extra servers started mid-test
    self.ior_test_sequence = self.params.get(
        "ior_test_sequence", '/run/ior/iorflags/*')
    self.extra_servers = self.params.get(
        "test_servers", "/run/extra_servers/*")
    self.rank = self.params.get("rank_list", '/run/test_ranks/*')
    self.test_oclass = None
    self.dmg_command.exit_status_exception = True
def setUp(self):
    """Set up for test case."""
    super().setUp()
    self.dmg_command = self.get_dmg_command()
    self.daos_command = DaosCommand(self.bin)
    self.ior_test_sequence = self.params.get(
        "ior_test_sequence", '/run/ior/iorflags/*')
    # Extra servers started during the test
    self.extra_servers = self.params.get(
        "test_servers", "/run/extra_servers/*")
    self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
    # Queue for collecting results from worker threads
    self.out_queue = queue.Queue()
    self.dmg_command.exit_status_exception = True
    self.server_boot = None
def create_pool_and_container(self):
    """Create several pools, each holding several containers.

    Pool/container counts and the pool scm size are read from the test
    yaml (with defaults of 3 pools x 2 containers, 138000000 bytes).
    """
    scm_size = self.params.get("scm_size", "/run/server/*/", 138000000)
    num_of_pool = self.params.get("num_of_pool", "/run/server/*/", 3)
    container_per_pool = self.params.get(
        "container_per_pool", "/run/server/*/", 2)
    # The command wrappers are loop-invariant; build them once instead of
    # re-constructing them on every pool iteration.
    dmg = self.get_dmg_command()
    daos_cmd = DaosCommand(self.bin)
    for _ in range(num_of_pool):
        result = dmg.pool_create(scm_size)
        uuid = result['uuid']
        for _ in range(container_per_pool):
            result = daos_cmd.container_create(pool=uuid)
            self.log.info("container create status: %s", result)
def execute_during_rebuild(self):
    """Read the objects during rebuild."""
    self.daos_cmd = DaosCommand(self.bin)
    self.daos_cmd.container_set_prop(
        pool=self.pool.uuid, cont=self.container.uuid, prop="status",
        value="healthy")

    note = "Reading the array objects during rebuild"
    self.log.info(note)
    self.d_log.info(note)
    self.assertTrue(
        self.pool.read_data_during_rebuild(self.container),
        "Error reading data during rebuild")
def execute_during_rebuild(self):
    """Execute test steps during rebuild."""
    self.daos_cmd = DaosCommand(self.bin)
    if self.mode == "cascading":
        # Exclude the second rank from the pool during rebuild
        second_rank = [self.inputs.rank.value[1]]
        self.server_managers[0].stop_ranks(second_rank, self.d_log)

    self.daos_cmd.container_set_prop(
        pool=self.pool.uuid, cont=self.container.uuid, prop="status",
        value="healthy")

    # Populate the container with additional data during rebuild
    self.container.write_objects(obj_class=self.inputs.object_class.value)
def setUp(self):
    """Set up for test case."""
    super().setUp()
    self.dmg_command = self.get_dmg_command()
    self.daos_command = DaosCommand(self.bin)
    # Pull the ior/pool test parameters from the yaml
    self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
    self.ior_test_sequence = self.params.get(
        "ior_test_sequence", '/run/ior/iorflags/*')
    self.ior_test_repetitions = self.params.get(
        "pool_test_repetitions", '/run/pool_capacity/*')
    self.loop_test_cnt = 1
    # Recreate the client hostfile without slots defined
    self.hostfile_clients = write_host_file(
        self.hostlist_clients, self.workdir, None)
    self.dmg_command.exit_status_exception = True
def setUp(self):
    """Set up for test case."""
    super().setUp()
    self.dmg_command = self.get_dmg_command()
    self.daos_command = DaosCommand(self.bin)
    self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
    self.ior_test_sequence = self.params.get(
        "ior_test_sequence", '/run/ior/iorflags/*')
    # Recreate the client hostfile without slots defined
    self.hostfile_clients = write_host_file(
        self.hostlist_clients, self.workdir, None)
    self.pool = None
    self.daos_racer = None
    # Queue used to collect daos_racer results
    self.ds_racer_queue = queue.Queue()
    self.dmg_command.exit_status_exception = True
def setUp(self):
    """Set up each test case."""
    # Start the servers and agents
    super().setUp()
    self.dfuse_hosts = self.agent_managers[0].hosts
    # initialize daos_cmd
    self.daos_cmd = DaosCommand(self.bin)

    # Process counts are read explicitly per tool because both
    # IorTestBase and MdtestBase define self.processes.
    self.ior_processes = self.params.get(
        "np", '/run/ior/client_processes/*')
    self.mdtest_processes = self.params.get(
        "np", '/run/mdtest/client_processes/*')
    self.dcp_processes = self.params.get(
        "np", "/run/dcp/client_processes/*", 1)
    self.dsync_processes = self.params.get(
        "np", "/run/dsync/client_processes/*", 1)
    self.dserialize_processes = self.params.get(
        "np", "/run/dserialize/client_processes/*", 1)
    self.ddeserialize_processes = self.params.get(
        "np", "/run/ddeserialize/client_processes/*", 1)

    tool = self.params.get("tool", "/run/datamover/*")
    if tool:
        self.set_tool(tool)
def test_list_container(self):
    """Jira ID: DAOS-3629

    Test Description:
        Test daos pool list-cont

    Use Cases:
        See test cases in the class description.

    :avocado: tags=all,container,full_regression,list_containers
    """
    self.daos_cmd = DaosCommand(self.bin)
    expected_uuids1 = []
    data1 = self.get_dmg_command().pool_create(scm_size="150MB")

    # Steps 1-3: grow pool 1 to 100 containers (1, then 1 more, then 98
    # more), listing and verifying after each batch.
    for batch in (1, 1, 98):
        self.create_list(batch, data1["uuid"], expected_uuids1)

    # 4. Create 2 additional pools and create 10 containers in each pool.
    data2 = self.get_dmg_command().pool_create(scm_size="150MB")
    data3 = self.get_dmg_command().pool_create(scm_size="150MB")

    # Create 10 containers in pool 2 and verify.
    expected_uuids2 = []
    self.create_list(10, data2["uuid"], expected_uuids2)

    # Create 10 containers in pool 3 and verify.
    expected_uuids3 = []
    self.create_list(10, data3["uuid"], expected_uuids3)
def setUp(self):
    """Set up each test case."""
    # Start the servers and agents
    super().setUp()
    self.dfuse_hosts = self.agent_managers[0].hosts
    # initialize daos_cmd
    self.daos_cmd = DaosCommand(self.bin)

    # Process counts are read explicitly per tool because both
    # IorTestBase and MdtestBase define self.processes.
    self.ior_processes = self.params.get(
        "np", '/run/ior/client_processes/*')
    self.mdtest_processes = self.params.get(
        "np", '/run/mdtest/client_processes/*')
    self.dcp_processes = self.params.get(
        "np", "/run/dcp/client_processes/*", 1)
    self.dsync_processes = self.params.get(
        "np", "/run/dsync/client_processes/*", 1)

    tool = self.params.get("tool", "/run/datamover/*")
    if tool:
        self.set_tool(tool)

    # Get and save dcp compatibility options
    self.dcp_cmd = Dcp(self.hostlist_clients, self.tmp)
    self.dcp_cmd.get_params(self)
    self.dcp_cmd.query_compatibility()
    self.dcp_has_src_pool = self.dcp_cmd.has_src_pool
    self.dcp_has_bufsize = self.dcp_cmd.has_bufsize
def setUp(self):
    """Set up each test case.

    Starts the servers and agents, then reads the Fio parameters from
    the test yaml.
    """
    # obtain separate logs
    self.update_log_file_names()
    # Start the servers and agents.  Zero-argument super() matches the
    # style used by the other setUp methods in this code base.
    super().setUp()

    # initialise daos_cmd
    self.daos_cmd = DaosCommand(self.bin)

    # Get the parameters for Fio
    self.fio_cmd = FioCommand()
    self.fio_cmd.get_params(self)
    self.processes = self.params.get("np", '/run/fio/client_processes/*')
    self.manager = self.params.get("manager", '/run/fio/*', "MPICH")
class RbldReadArrayTest(RebuildTestBase):
    # pylint: disable=too-many-ancestors
    """Run rebuild tests with DAOS servers and clients.

    :avocado: recursive
    """

    def __init__(self, *args, **kwargs):
        """Initialize the test; the daos command is created lazily."""
        super().__init__(*args, **kwargs)
        self.daos_cmd = None

    def execute_during_rebuild(self):
        """Read the objects during rebuild."""
        self.daos_cmd = DaosCommand(self.bin)
        self.daos_cmd.container_set_prop(
            pool=self.pool.uuid, cont=self.container.uuid, prop="status",
            value="healthy")

        note = "Reading the array objects during rebuild"
        self.log.info(note)
        self.d_log.info(note)
        self.assertTrue(
            self.pool.read_data_during_rebuild(self.container),
            "Error reading data during rebuild")

    def test_read_array_during_rebuild(self):
        """Jira ID: DAOS-691.

        Test Description:
            Configure 5 targets with 1 pool with a service leader quantity
            of 2.  Add 1 container to the pool configured with 3 replicas.
            Add 10 objects of 10 records each populated with an array of 5
            values (currently a sufficient amount of data to be read fully
            before rebuild completes) to a specific rank.  Exclude this
            rank and verify that rebuild is initiated.  While rebuild is
            active, confirm that all the objects and records can be read.
            Finally verify that rebuild completes and the pool info
            indicates the correct number of rebuilt objects and records.

        Use Cases:
            Basic rebuild of container objects of array values with
            sufficient numbers of rebuild targets and no available rebuild
            targets.

        :avocado: tags=all,full_regression
        :avocado: tags=vm,large,rebuild,rebuildreadarray
        """
        self.execute_rebuild_test()
def get_daos_command(self):
    """Get a DaosCommand object.

    Returns:
        DaosCommand: a new DaosCommand object
    """
    command = DaosCommand(self.bin)
    return command
def execute_cont_rf_test(self, create_container=True): """Execute the rebuild test steps for container rf test. Args: create_container (bool, optional): should the test create a container. Defaults to True. """ # Get the test params and var self.setup_test_pool() self.daos_cmd = DaosCommand(self.bin) if create_container: self.setup_test_container() oclass = self.inputs.object_class.value negative_test = True rf = ''.join(self.container.properties.value.split(":")) rf_num = int(re.search(r"rf([0-9]+)", rf).group(1)) if "OC_SX" in oclass and rf_num < 1: negative_test = False elif ("OC_RP_2" in oclass and rf_num < 2) or ( "OC_RP_3" in oclass and rf_num < 3): negative_test = False # Create a pool and verify the pool information before rebuild self.create_test_pool() # Create a container and write objects self.create_test_container_and_write_obj(negative_test) if self.mode == "cont_rf_with_rebuild": num_of_ranks = len(self.inputs.rank.value) if num_of_ranks > rf_num: expect_cont_status = "UNCLEAN" else: expect_cont_status = "HEALTHY" # Verify the rank to be excluded has at least one object self.verify_rank_has_objects() # Start the rebuild process self.start_rebuild_cont_rf(rf) # Execute the test steps during rebuild self.execute_during_rebuild_cont_rf(rf, expect_cont_status) # Refresh local pool and container self.log.info( "==>(6)Check for pool and container info after rebuild.") self.pool.check_pool_info() self.container.check_container_info() # Verify the excluded rank is no longer used with the objects self.verify_rank_has_no_objects() # Verify the pool information after rebuild if expect_cont_status == "HEALTHY": self.update_pool_verify() self.execute_pool_verify(" after rebuild") self.log.info( "==>(7)Check for container data if the container" " is healthy.") self.verify_container_data() self.log.info("Test passed") elif self.mode == "cont_rf_enforcement": self.log.info("Container rf test passed") else: self.fail("#Unsupported container_rf test mode")
def create_cont(self):
    """Create a TestContainer object to be used to create container."""
    # Get container params
    daos_command = DaosCommand(self.bin)
    self.container = TestContainer(self.pool, daos_command=daos_command)
    self.container.get_params(self)
    # create container
    self.container.create()