def verify_access_point(self, host_port_input, failure_expected=None):
    """Run with the given AP and verify the AP in the output.

    Args:
        host_port_input (str): Host:Port or just Host. Supports multiple APs
            separated by commas.
        failure_expected (str): Expected error message. Set it to None if not
            expecting any error. Defaults to None.

    Returns:
        list: List of errors.

    """
    errors = []

    check = {}
    check["expected"] = host_port_input.split(",")
    if ":" not in host_port_input:
        # dmg automatically sets 10001 if it's not given in the input.
        check["expected"] = [
            "{}:10001".format(host) for host in check["expected"]
        ]

    # Create a new DmgCommand and set its exit_status_exception to False to
    # make it not raise a TestFailure when the command fails. Then we'll be
    # able to check result.exit_status for our testing purpose.
    dmg = DmgCommand(self.bin)
    dmg.exit_status_exception = False

    try:
        result = dmg.config_generate(access_points=host_port_input)
    except CommandFailure as err:
        errors.append("Unexpected failure! {}".format(err))
        # result is undefined if the command raised, so stop here.
        return errors

    if result.exit_status == 0 and failure_expected is None:
        try:
            yaml_data = yaml.safe_load(result.stdout)
            check["actual"] = yaml_data["access_points"]
            if sorted(check["expected"]) != sorted(check["actual"]):
                errors.append("Unexpected access point: {} != {}".format(
                    check["expected"], check["actual"]))
        except yaml.YAMLError as error:
            errors.append(
                "Error loading dmg generated config!: {}".format(error))
    elif result.exit_status == 0 and failure_expected is not None:
        errors.append(
            "dmg command passed when expected to fail!: {}".format(result))
    elif result.exit_status != 0 and failure_expected is not None:
        if failure_expected not in result.stderr_text:
            errors.append(
                "Missing expected error message in failed dmg command!: " +
                "{}".format(result))
    else:
        errors.append(
            "dmg command failed when expected to pass!: {}".format(result))

    return errors
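# A minimal usage sketch (illustrative only) showing how a test method might
# drive verify_access_point() above. The host name "wolf-a", the port values,
# and the expected error string are assumptions for illustration, not values
# taken from any test yaml.
def test_access_points_sketch(self):
    """Hypothetical example of exercising verify_access_point()."""
    errors = []
    # Host only: dmg is expected to append the default port 10001.
    errors.extend(self.verify_access_point("wolf-a"))
    # Explicit host:port should be passed through unchanged.
    errors.extend(self.verify_access_point("wolf-a:12345"))
    # An out-of-range port should fail; the message below is a placeholder
    # assumption for the expected error text.
    errors.extend(
        self.verify_access_point("wolf-a:99999", "invalid access points"))
    self.check_errors(errors)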
def test_num_engines(self):
    """Test --num-engines.

    1. Using the NVMe PCI dictionary, find the number of keys, i.e., the
    number of Socket IDs. This determines the maximum number of engines.
    2. Call dmg config generate --num-engines=<1 to max_engine>. Should pass.
    3. Call dmg config generate --num-engines=<max_engine + 1>. Should fail.

    :avocado: tags=all,full_regression
    :avocado: tags=hw,small
    :avocado: tags=control,config_generate_entries,num_engines
    """
    # Get necessary storage and network info.
    self.prepare_expected_data()

    # Find the maximum number of engines we can use. It's the number of
    # sockets in NVMe. However, it's not clear whether we also need the same
    # number of interfaces. Revisit this step if we have issues with the
    # max_engine assumption.
    max_engine = len(list(self.nvme_socket_to_addrs.keys()))
    self.log.info("max_engine threshold = %s", max_engine)

    dmg = DmgCommand(self.bin)
    dmg.exit_status_exception = False
    errors = []

    # Call dmg config generate --num-engines=<1 to max_engine>
    for num_engines in range(1, max_engine + 1):
        result = dmg.config_generate(
            access_points="wolf-a", num_engines=num_engines)
        generated_yaml = yaml.safe_load(result.stdout)
        actual_num_engines = len(generated_yaml["engines"])

        # Verify the number of engine fields.
        if actual_num_engines != num_engines:
            msg = "Unexpected number of engine fields! Expected = {}; "\
                "Actual = {}".format(num_engines, actual_num_engines)
            errors.append(msg)

    # Verify that max_engine + 1 fails.
    result = dmg.config_generate(
        access_points="wolf-a", num_engines=max_engine + 1)
    if result.exit_status == 0:
        errors.append(
            "Host + invalid num engines succeeded with {}!".format(
                max_engine + 1))

    self.check_errors(errors)
def test_net_class(self):
    """Test --net-class.

    1. Iterate the interface set and count the number of elements that start
    with "ib". This is the ib_count threshold we can use for --num-engines
    with --net-class=infiniband.
    2. Call dmg config generate --net-class=infiniband
    --num-engines=<1 to ib_count> and verify that it works.
    3. In addition, verify the provider using the dictionary, i.e., iterate
    the "engines" fields and verify "provider" is in the list keyed by
    "fabric_iface".
    4. Similarly, find eth_count and call dmg config generate
    --net-class=ethernet --num-engines=<1 to eth_count> and verify that it
    works.
    5. As with ib, also verify the provider using the dictionary, i.e.,
    iterate the "engines" fields and verify "provider" is in the list keyed
    by "fabric_iface".

    :avocado: tags=all,full_regression
    :avocado: tags=hw,small
    :avocado: tags=control,config_generate_entries,net_class
    """
    # Get necessary storage and network info.
    self.prepare_expected_data()

    # Get ib_count threshold.
    ib_count = 0
    for interface in self.interface_set:
        if interface[:2] == "ib":
            ib_count += 1
    self.log.info("ib_count = %d", ib_count)

    dmg = DmgCommand(self.bin)
    dmg.exit_status_exception = False
    errors = []

    # Call dmg config generate --num-engines=<1 to ib_count>
    # --net-class=infiniband. Should pass.
    for num_engines in range(1, ib_count + 1):
        # dmg config generate should pass.
        result = dmg.config_generate(
            access_points="wolf-a", num_engines=num_engines,
            net_class="infiniband")

        if result.exit_status != 0:
            msg = "config generate failed with --net-class=infiniband "\
                "--num-engines = {}!".format(num_engines)
            errors.append(msg)
        else:
            generated_config = yaml.safe_load(result.stdout)
            for engine in generated_config["engines"]:
                fabric_iface = engine["fabric_iface"]
                provider = engine["provider"]
                # Verify the fabric_iface field, e.g., ib0, by checking the
                # dictionary keys.
                if not self.interface_to_providers[fabric_iface]:
                    errors.append(
                        "Unexpected fabric_iface! {}".format(fabric_iface))
                elif provider not in \
                        self.interface_to_providers[fabric_iface]:
                    # Now check the provider field, e.g., ofi+sockets, by
                    # checking the corresponding list in the dictionary.
                    msg = "Unexpected provider in fabric_iface! provider ="\
                        " {}; fabric_iface = {}".format(
                            provider, fabric_iface)
                    errors.append(msg)

    # Call dmg config generate --num-engines=<ib_count + 1>
    # --net-class=infiniband. Too many engines. Should fail.
    result = dmg.config_generate(
        access_points="wolf-a", num_engines=ib_count + 1,
        net_class="infiniband")
    if result.exit_status == 0:
        msg = "config generate succeeded with --net-class=infiniband "\
            "num_engines = {}!".format(ib_count + 1)
        errors.append(msg)

    # Get eth_count threshold.
    eth_count = 0
    for interface in self.interface_set:
        if interface[:3] == "eth":
            eth_count += 1
    self.log.info("eth_count = %d", eth_count)

    # Call dmg config generate --num-engines=<1 to eth_count>
    # --net-class=ethernet. Should pass.
    for num_engines in range(1, eth_count + 1):
        # dmg config generate should pass.
        result = dmg.config_generate(
            access_points="wolf-a", num_engines=num_engines,
            net_class="ethernet")

        if result.exit_status != 0:
            msg = "config generate failed with --net-class=ethernet "\
                "--num-engines = {}!".format(num_engines)
            errors.append(msg)
        else:
            generated_config = yaml.safe_load(result.stdout)
            for engine in generated_config["engines"]:
                fabric_iface = engine["fabric_iface"]
                provider = engine["provider"]
                # Verify the fabric_iface field, e.g., eth0, by checking the
                # dictionary keys.
                if not self.interface_to_providers[fabric_iface]:
                    errors.append(
                        "Unexpected fabric_iface! {}".format(fabric_iface))
                elif provider not in \
                        self.interface_to_providers[fabric_iface]:
                    # Now check the provider field, e.g., ofi+sockets, by
                    # checking the corresponding list in the dictionary.
                    msg = "Unexpected provider in fabric_iface! provider ="\
                        " {}; fabric_iface = {}".format(
                            provider, fabric_iface)
                    errors.append(msg)

    # Call dmg config generate --num-engines=<eth_count + 1>
    # --net-class=ethernet. Too many engines. Should fail.
    result = dmg.config_generate(
        access_points="wolf-a", num_engines=eth_count + 1,
        net_class="ethernet")
    if result.exit_status == 0:
        msg = "config generate succeeded with --net-class=ethernet, "\
            "num_engines = {}!".format(eth_count + 1)
        errors.append(msg)

    self.check_errors(errors)
def test_min_ssds(self):
    """Test --min-ssds.

    1. Iterate the NVMe PCI dictionary and find the key that has the shortest
    list. This is our min_ssd engine count threshold.
    2. Call dmg config generate --min-ssds=<1 to min_ssd>. Should pass.
    3. Call dmg config generate --min-ssds=<min_ssd + 1>. Should fail.
    4. Call dmg config generate --min-ssds=0. Iterate the engines field and
    verify that there's no bdev_list field.

    :avocado: tags=all,full_regression
    :avocado: tags=hw,small
    :avocado: tags=control,config_generate_entries,min_ssds
    """
    # Get necessary storage and network info.
    self.prepare_expected_data()

    # Iterate the NVMe PCI dictionary and find the key that has the shortest
    # list. This is our min_ssd engine count threshold.
    socket_ids = list(self.nvme_socket_to_addrs.keys())
    shortest_id = socket_ids[0]
    shortest = len(self.nvme_socket_to_addrs[shortest_id])
    for socket_id in socket_ids:
        if len(self.nvme_socket_to_addrs[socket_id]) < shortest:
            shortest = len(self.nvme_socket_to_addrs[socket_id])
            shortest_id = socket_id

    min_ssd = len(self.nvme_socket_to_addrs[shortest_id])
    self.log.info("Maximum --min-ssds threshold = %d", min_ssd)

    dmg = DmgCommand(self.bin)
    dmg.exit_status_exception = False
    errors = []

    # Call dmg config generate --min-ssds=<1 to min_ssd>. Should pass.
    for num_ssd in range(1, min_ssd + 1):
        result = dmg.config_generate(
            access_points="wolf-a", min_ssds=num_ssd)
        if result.exit_status != 0:
            errors.append(
                "config generate failed with min_ssd = {}!".format(num_ssd))

    # Call dmg config generate --min-ssds=<min_ssd + 1>. Should fail.
    result = dmg.config_generate(
        access_points="wolf-a", min_ssds=min_ssd + 1)
    if result.exit_status == 0:
        errors.append(
            "config generate succeeded with min_ssd + 1 = {}!".format(
                min_ssd + 1))

    # Call dmg config generate --min-ssds=0
    result = dmg.config_generate(access_points="wolf-a", min_ssds=0)
    generated_yaml = yaml.safe_load(result.stdout)

    # Iterate the engines and verify that there's no bdev_list field.
    engines = generated_yaml["engines"]
    for engine in engines:
        if "bdev_list" in engine:
            errors.append("bdev_list field exists with --min-ssds=0!")

    self.check_errors(errors)
def pool_acl_verification(self, current_user_acl, read, write):
    """Verify DAOS pool security with an ACL file.

    Steps:
        (1)Set up the dmg tool for creating a pool
        (2)Generate an acl file with permissions
        (3)Create a pool with the acl
        (4)Verify the pool create status
        (5)Get the pool's acl list
        (6)Verify pool read operation
        (7)Verify pool write operation
        (8)Cleanup user and destroy pool

    Args:
        current_user_acl: acl with read/write access credential.
        read: expected read permission.
        write: expected write permission.

    Return:
        Passes to continue; fails to report to the test log and stop.
    """
    # (1)Create dmg command
    dmg = DmgCommand(os.path.join(self.prefix, "bin"))
    dmg.get_params(self)
    port = self.params.get("port", "/run/server_config/*", 10001)
    get_acl_file = self.params.get(
        "acl_file", "/run/pool_acl/*", "acl_test.txt")
    acl_file = os.path.join(self.tmp, get_acl_file)
    num_user = self.params.get("num_user", "/run/pool_acl/*")
    num_group = self.params.get("num_group", "/run/pool_acl/*")
    servers_with_ports = [
        "{}:{}".format(host, port) for host in self.hostlist_servers]
    dmg.hostlist.update(",".join(servers_with_ports), "dmg.hostlist")
    self.log.info(" (1)dmg= %s", dmg)

    # (2)Generate acl file with permissions
    self.log.info(" (2)Generate acl file with user/group permissions")
    permission_list = self.create_pool_acl(
        num_user, num_group, current_user_acl, acl_file)

    # (3)Create a pool with acl
    self.log.info(" (3)Create a pool with acl")
    dmg.action_command.acl_file.value = acl_file
    dmg.exit_status_exception = False
    result = dmg.run()

    # (4)Verify the pool create status
    self.log.info(" (4)dmg.run() result=\n%s", result)
    if result.stderr == "":
        uuid, svc = dmg_utils.get_pool_uuid_service_replicas_from_stdout(
            result.stdout)
    else:
        self.fail("##(4)Unable to parse pool uuid and svc.")

    # (5)Get the pool's acl list
    #    dmg pool get-acl --pool <UUID>
    self.log.info(
        " (5)Get a pool's acl list by: dmg pool get-acl --pool --hostlist")
    pool_acl_list = self.get_pool_acl_list(uuid)
    self.log.info(" pool original permission_list: %s", permission_list)
    self.log.info(" pool get_acl permission_list: %s", pool_acl_list)

    # (6)Verify pool read operation
    #    daos pool query --pool <uuid>
    self.log.info(" (6)Verify pool read by: daos pool query --pool")
    self.verify_pool_readwrite(svc, uuid, "read", expect=read)

    # (7)Verify pool write operation
    #    daos container create --pool <uuid>
    self.log.info(" (7)Verify pool write by: daos container create --pool")
    self.verify_pool_readwrite(svc, uuid, "write", expect=write)

    # (8)Cleanup user and destroy pool
    self.log.info(" (8)Cleanup user and destroy pool")
    self.cleanup_user_group(num_user, num_group)
    dmg = DmgCommand(os.path.join(self.prefix, "bin"))
    dmg.request.value = "pool"
    dmg.action.value = "destroy --pool={}".format(uuid)
    dmg.hostlist.update(",".join(servers_with_ports), "dmg.hostlist")
    result = dmg.run()
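# A minimal usage sketch (illustrative only) of driving pool_acl_verification()
# above. The ACE strings and the "pass"/"deny" values forwarded as the read and
# write expectations are assumptions for illustration, not values taken from
# the test yaml.
def test_pool_acl_sketch(self):
    """Hypothetical example of exercising pool_acl_verification()."""
    # Owner ACE granting read and write: both operations are expected to pass.
    self.pool_acl_verification("A::OWNER@:rw", read="pass", write="pass")
    # Owner ACE granting read only: the write check is expected to be denied.
    self.pool_acl_verification("A::OWNER@:r", read="pass", write="deny")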
def test_create(self):
    """Test dmg pool create and destroy with various parameters.

    Create a pool and verify that the pool was created by comparing the UUID
    returned from the dmg command against the directory name in /mnt/daos.
    Destroy the pool and verify that the directory is deleted.

    :avocado: tags=all,pool,full_regression,small,multitarget
    """
    # Create a dmg command object
    dmg = DmgCommand(self.bin)
    dmg.get_params(self)
    dmg.hostlist.update(
        self.server_managers[0].runner.job.yaml_params.access_points.value,
        "dmg.hostlist")

    # Disable raising an exception if the dmg command fails
    dmg.exit_status_exception = False

    # Accumulate a list of pass/fail indicators representing what is expected
    # for each parameter, then "and" them to determine the expected result of
    # the test
    expected_for_param = []

    userlist = self.params.get("user", '/run/tests/users/*')
    user = os.getlogin() if userlist[0] == 'valid' else userlist[0]
    expected_for_param.append(userlist[1])

    grouplist = self.params.get("group", '/run/tests/groups/*')
    group = os.getlogin() if grouplist[0] == 'valid' else grouplist[0]
    expected_for_param.append(grouplist[1])

    systemnamelist = self.params.get(
        "systemname", '/run/tests/systemnames/*')
    system_name = systemnamelist[0]
    expected_for_param.append(systemnamelist[1])

    tgtlistlist = self.params.get("tgt", '/run/tests/tgtlist/*')
    tgtlist = tgtlistlist[0]
    expected_for_param.append(tgtlistlist[1])

    # If any parameter is FAIL, then the test should FAIL
    expected_result = RESULT_PASS
    if RESULT_FAIL in expected_for_param:
        expected_result = RESULT_FAIL

    host1 = self.hostlist_servers[0]
    host2 = self.hostlist_servers[1]
    test_destroy = True

    create_result = dmg.pool_create(
        "1GB", user, group, None, tgtlist, None, system_name)

    if create_result.exit_status == 0:
        if expected_result == RESULT_FAIL:
            self.fail(
                "Test was expected to fail but it passed at pool create.")
        uuid, _ = get_pool_uuid_service_replicas_from_stdout(
            create_result.stdout)
        if '0' in tgtlist:
            # check_for_pool checks if the uuid directory exists on host1
            exists = check_for_pool.check_for_pool(host1, uuid)
            if exists != 0:
                self.fail("Pool {0} not found on host {1}.\n".format(
                    uuid, host1))
        if '1' in tgtlist:
            exists = check_for_pool.check_for_pool(host2, uuid)
            if exists != 0:
                self.fail("Pool {0} not found on host {1}.\n".format(
                    uuid, host2))
    else:
        test_destroy = False
        if expected_result == RESULT_PASS:
            self.fail("Test was expected to pass but it failed at pool " +
                      "create.")

    if test_destroy:
        destroy_result = dmg.pool_destroy(uuid)
        if destroy_result.exit_status == 0:
            if expected_result == RESULT_FAIL:
                self.fail("Test was expected to fail but it passed at " +
                          "pool destroy.")
            if '0' in tgtlist:
                exists = check_for_pool.check_for_pool(host1, uuid)
                if exists == 0:
                    self.fail(
                        "Pool {0} found on host {1} after destroy.\n".format(
                            uuid, host1))
            if '1' in tgtlist:
                exists = check_for_pool.check_for_pool(host2, uuid)
                if exists == 0:
                    self.fail(
                        "Pool {0} found on host {1} after destroy.\n".format(
                            uuid, host2))
        else:
            if expected_result == RESULT_PASS:
                self.fail("Test was expected to pass but it failed at " +
                          "pool destroy.")