def create_vfs(vf_num):
    """
    Create max vfs.

    Resets the current VF configuration, creates ``vf_num`` VFs (via sysfs
    for regular drivers, via module reload for mlx4_core), then waits for
    the new net node devices to appear and cross-checks their names
    against sysfs.

    :param vf_num: number of virtual functions to create
    """
    net_device = []
    net_name = []
    # cleanup env and create vfs
    cmd = "echo 0 > %s/sriov_numvfs" % pci_address
    if driver == "mlx4_core":
        # mlx4 cannot reset VFs through sysfs; unload the module stack instead
        cmd = "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core"
    process.run(cmd, shell=True)
    # Snapshot device lists so the newly created VFs can be diffed out later.
    pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
    net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
    pci_list_before = set(pci_list)
    net_list_before = set(net_list)
    cmd = "echo %d > %s/sriov_numvfs" % (vf_num, pci_address)
    if driver == "mlx4_core":
        cmd = "modprobe -v mlx4_core num_vfs=%d port_type_array=2,2 probe_vf=%d" \
              % (vf_num, vf_num)
    test_res = process.run(cmd, shell=True)
    if test_res.exit_status != 0:
        test.fail("Fail to create vfs")
    pci_list_sriov = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()

    def _vf_init_completed():
        # Poll helper for wait_for(): truthy (the net-device diff) once the
        # expected count is visible.  The mlx4 formula 2 * (vf_num + 1)
        # mirrors the port_type_array=2,2 setup above.
        try:
            net_list_sriov = virsh.nodedev_list(
                cap='net').stdout.strip().splitlines()
            net_list_sriov = set(net_list_sriov)
            net_diff = list(net_list_sriov.difference(net_list_before))
            net_count = len(net_diff)
            if ((driver != "mlx4_core" and net_count != vf_num) or
                    (driver == "mlx4_core" and net_count != 2 * (vf_num + 1))):
                return False
            return net_diff
        except process.CmdError:
            # test.fail() raises by itself; re-raising its None return value
            # would have been a latent TypeError.
            test.fail("Get net list with 'virsh nodedev-list' failed\n")

    pci_list_sriov = set(pci_list_sriov)
    pci_diff = list(pci_list_sriov.difference(pci_list_before))
    net_diff = utils_misc.wait_for(_vf_init_completed, timeout=180)
    if not net_diff:
        test.fail("Get net list with 'virsh nodedev-list' failed\n")
    for net in net_diff:
        # nodedev names look like net_<iface>_<six MAC bytes joined by '_'>;
        # strip the leading "net" token and the trailing MAC to get <iface>.
        net = net.split('_')
        length = len(net)
        net = '_'.join(net[1:length - 6])
        net_name.append(net)
    for pci_addr in pci_diff:
        # pci_0000_03_00_1 -> 0000:03:00.1
        temp_addr = pci_addr.split("_")
        pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
        vf_net_name = os.listdir("%s/%s/net" % (pci_device_dir, pci_addr))[0]
        net_device.append(vf_net_name)
    logging.debug(sorted(net_name))
    logging.debug(sorted(net_device))
    # mlx4 interface naming does not follow the nodedev-derived names, so
    # the comparison is only meaningful for other drivers.
    if driver != "mlx4_core" and sorted(net_name) != sorted(net_device):
        test.fail("The net name get from nodedev-list is wrong\n")
def create_vfs(vf_num):
    """
    Create max vfs.

    Resets sriov_numvfs to 0, writes ``vf_num``, waits until libvirt
    exposes the expected number of new net node devices, then verifies
    the interface names against sysfs.

    :param vf_num: number of virtual functions to create
    """
    net_device = []
    net_name = []
    # Reset any existing VFs first; the result is intentionally not checked
    # (the PF may already have 0 VFs configured).
    process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
    # Snapshot device lists so the newly created VFs can be diffed out later.
    pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
    net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
    pci_list_before = set(pci_list)
    net_list_before = set(net_list)
    test_res = process.run("echo %d > %s/sriov_numvfs" % (vf_num, pci_address),
                           shell=True)
    if test_res.exit_status != 0:
        test.fail("Fail to create vfs")
    pci_list_sriov = virsh.nodedev_list(
        cap='pci').stdout.strip().splitlines()

    def _vf_init_completed():
        # Poll helper for wait_for(): truthy (the net-device diff) once all
        # vf_num new net devices are visible, False otherwise.
        try:
            net_list_sriov = virsh.nodedev_list(
                cap='net').stdout.strip().splitlines()
            net_list_sriov = set(net_list_sriov)
            net_diff = list(net_list_sriov.difference(net_list_before))
            if len(net_diff) != int(vf_num):
                return False
            return net_diff
        except process.CmdError:
            # test.fail() raises by itself; re-raising its None return value
            # would have been a latent TypeError.  Message corrected: the
            # command run here is nodedev-list, not list.
            test.fail("Get net list with 'virsh nodedev-list' failed\n")

    pci_list_sriov = set(pci_list_sriov)
    pci_diff = list(pci_list_sriov.difference(pci_list_before))
    net_diff = utils_misc.wait_for(_vf_init_completed, timeout=60)
    if not net_diff:
        test.fail("Get net list with 'virsh nodedev-list' failed\n")
    for net in net_diff:
        # nodedev names look like net_<iface>_<six MAC bytes joined by '_'>;
        # strip the leading "net" token and the trailing MAC to get <iface>.
        net = net.split('_')
        length = len(net)
        net = '_'.join(net[1:length - 6])
        net_name.append(net)
    for pci_addr in pci_diff:
        # pci_0000_03_00_1 -> 0000:03:00.1
        temp_addr = pci_addr.split("_")
        pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
        vf_net_name = os.listdir("%s/%s/net" % (pci_device_dir, pci_addr))[0]
        net_device.append(vf_net_name)
    logging.debug(sorted(net_name))
    logging.debug(sorted(net_device))
    if sorted(net_name) != sorted(net_device):
        test.fail("The net name get from nodedev-list is wrong\n")
def pci_device_address():
    """
    Get the address of a PCI network device that is NOT the default-route
    interface and uses a different driver than the default one.

    :return: parent (PCI) nodedev address of a suitable device, or None
             when no such device exists
    """
    net_list = virsh.nodedev_list(tree='', cap='net')
    net_lists = net_list.stdout.strip().splitlines()
    route_cmd = " route | grep default"
    route_default = process.run(
        route_cmd, shell=True).stdout_text.strip().split(' ')
    # Last column of the default route line is the interface name.
    ip_default = route_default[-1]
    default_net_driver = None
    for default_net_name in net_lists:
        # Fixed: the original `if default_net_name.find(ip_default):` was a
        # truthiness bug -- find() returns -1 when absent (truthy) and 0 at
        # position 0 (falsy), so it matched almost every device.  The intent
        # is a containment test.
        if ip_default in default_net_name:
            default_net_address = nodedev_xml.NodedevXML.new_from_dumpxml(
                default_net_name).parent
            default_net_driver = get_device_driver(default_net_address)
            break
    for net_device_name in net_lists:
        if net_device_name.find(ip_default) == -1:
            net_device_address = nodedev_xml.NodedevXML.new_from_dumpxml(
                net_device_name).parent
            if 'pci' in net_device_address:
                net_device_driver = get_device_driver(net_device_address)
                # default_net_driver may be None when the default interface
                # has no nodedev entry; any PCI driver then qualifies.
                if net_device_driver != default_net_driver:
                    return net_device_address
def pci_device_address():
    """
    Get the address of network device which is safe to detach
    """
    # Pick a physical interface with no carrier so detaching it with
    # nodedev-detach does not disturb the host's active networking.
    links = process.run("ip l | grep NO-CARRIER",
                        shell=True).stdout_text.strip().splitlines()
    iface = None
    for entry in links:
        if "lo" in entry or "virbr0" in entry:
            continue
        logging.debug(entry)
        iface = entry.split(":")[1].strip()
        logging.debug("The interface to be detached is %s", iface)
        break
    if not iface:
        test.cancel("There is no available network device to detach!")
    # Map the interface name to its libvirt node-device entry.
    candidates = virsh.nodedev_list(tree='', cap='net')
    names = candidates.stdout.strip().splitlines()
    marker = '_' + iface + '_'
    for name in names:
        if marker in name:
            detach_dev = name
            break
    # The parent of the net node device is its PCI device.
    return nodedev_xml.NodedevXML.new_from_dumpxml(detach_dev).parent
def run(test, params, env):
    """
    Test properties of a chain of devices, starting at a given device
    going up per parent.
    """
    chain_start_device_pattern = params.get("chain_start_device_pattern")
    # NOTE(review): eval() on a config value -- params come from the test
    # configuration, not end users, but confirm that assumption holds.
    checks = eval(params.get("checks"))
    result = virsh.nodedev_list(ignore_status=False)
    # Pick the first listed node device matching the configured pattern.
    selected_device = get_device(result.stdout_text.strip().splitlines(),
                                 chain_start_device_pattern)
    if not selected_device:
        test.error("No suitable device found for test."
                   "Pattern: %s. Available devices: %s." %
                   (chain_start_device_pattern, result.stdout))
    xml = get_nodedev_dumpxml(selected_device)
    validate_nodedev_xml(test, xml)
    # Each `check` maps xpath -> expected regex for one device in the chain;
    # after every check we climb to the parent device's XML.
    for check in checks:
        for xpath, pattern in check.items():
            value = xml.xmltreefile.findtext(xpath)
            # Missing elements compare as an empty string.
            value = value if value else ""
            if not re.search(pattern, value):
                test.fail("Unexpected value on xpath '%s':"
                          " '%s' does not match '%s'" %
                          (xpath, value, pattern))
        xml = get_nodedev_dumpxml(xml.xmltreefile.findtext("parent"))
        validate_nodedev_xml(test, xml)
def find_hbas(hba_type="hba", status="online"):
    """
    Find online hba/vhba cards.

    :params hba_type: "vhba" or "hba"
    :params status: "online" or "offline"
    :return: A list contains the online/offline vhba/hba list
    """
    # TODO: add offline/online judgement, fc storage not stable for now, so
    # leave this part after we buy npiv server
    listing = virsh.nodedev_list(cap="scsi_host")
    if listing.exit_status:
        raise exceptions.TestFail(listing.stderr)
    hbas, vhbas = [], []
    # Classify every scsi host as HBA (vport-capable) or vHBA (fc_host only).
    for host in listing.stdout.strip().splitlines():
        dump = virsh.nodedev_dumpxml(host)
        xml_text = dump.stdout.strip()
        if dump.exit_status:
            raise exceptions.TestFail(dump.stderr)
        vport_capable = re.search('vport_ops', xml_text)
        if vport_capable and not re.search(
                '<fabric_wwn>ffffffffffffffff</fabric_wwn>', xml_text):
            hbas.append(host)
        if re.search('fc_host', xml_text) and not vport_capable:
            vhbas.append(host)
    if hba_type == "hba":
        return hbas
    if hba_type == "vhba":
        return vhbas
def find_hbas(hba_type="hba", status="online"):
    """
    Find online hba/vhba cards.

    :params hba_type: "vhba" or "hba"
    :params status: "online" or "offline"
    :return: A list contains the online/offline vhba/hba list
    """
    # TODO: add offline/online judgement, fc storage not stable for now, so
    # leave this part after we buy npiv server
    listing = virsh.nodedev_list(cap="scsi_host")
    if listing.exit_status:
        raise exceptions.TestFail(listing.stderr)
    online_hbas = []
    online_vhbas = []
    # Walk every scsi host and bucket it by the capabilities in its XML.
    for host in listing.stdout.strip().splitlines():
        dump = virsh.nodedev_dumpxml(host)
        xml_text = dump.stdout.strip()
        if dump.exit_status:
            raise exceptions.TestFail(dump.stderr)
        has_vport = re.search('vport_ops', xml_text)
        no_blank_fabric = not re.search(
            '<fabric_wwn>ffffffffffffffff</fabric_wwn>', xml_text)
        if has_vport and no_blank_fabric:
            online_hbas.append(host)
        if re.search('fc_host', xml_text) and not has_vport:
            online_vhbas.append(host)
    if hba_type == "hba":
        return online_hbas
    if hba_type == "vhba":
        return online_vhbas
def find_hbas(hba_type="hba", status="online"):
    """
    Find online hba/vhba cards.

    :params hba_type: "vhba" or "hba"
    :params status: "online" or "offline"
    :return: A list contains the online/offline vhba/hba list
    """
    # TODO: add status=offline/online judgement, we don't test offline vhba now
    # so leave it here as a placeholder.
    listing = virsh.nodedev_list(cap="scsi_host")
    if listing.exit_status:
        raise exceptions.TestFail(listing.stderr_text)
    hbas, vhbas = [], []
    # Classify every scsi host: HBAs expose vport_ops with a real fabric
    # WWN; vHBAs expose fc_host without vport_ops.
    for host in listing.stdout_text.strip().splitlines():
        dump = virsh.nodedev_dumpxml(host)
        xml_text = dump.stdout_text.strip()
        if dump.exit_status:
            raise exceptions.TestFail(dump.stderr_text)
        vport_capable = re.search('vport_ops', xml_text)
        bogus_fabric = (
            re.search('<fabric_wwn>ffffffffffffffff</fabric_wwn>', xml_text)
            or re.search('<fabric_wwn>0</fabric_wwn>', xml_text))
        if vport_capable and not bogus_fabric:
            hbas.append(host)
        if re.search('fc_host', xml_text) and not vport_capable:
            vhbas.append(host)
    if hba_type == "hba":
        return hbas
    if hba_type == "vhba":
        return vhbas
def check_nodedev_info(dev_dict):
    """Check nodedev info for the node device

    1) virsh nodedev-list and check the device
    2) virsh nodedev-dumpxml and check the device info
    3) Validate using virt-xml-validate

    :param dev_dict: device params
    """
    dev_name = dev_dict.get('name')
    test.log.info("TEST_STEP1: List %s device using virsh nodedev-list.",
                  dev_name)
    listing = virsh.nodedev_list(**VIRSH_ARGS)
    if dev_name not in listing.stdout_text:
        test.fail("Failed to list %s device!" % dev_name)

    test.log.info(
        "TEST_STEP2: Check device info using virsh nodedev-dumpxml.")
    dev_xml = nodedev_xml.NodedevXML.new_from_dumpxml(dev_name)
    test.log.debug("Nodedev xml: {}".format(dev_xml))
    # Every expected attribute value must be a suffix of the corresponding
    # xml attribute (evaluate all comparisons before failing, as before).
    comparisons = [getattr(dev_xml, attr).endswith(value)
                   for attr, value in dev_dict.items()]
    if not all(comparisons):
        test.fail('nodedev xml comparison failed.')

    test.log.info("TEST_STEP3: Validate xml using virt-xml-validate.")
    if not dev_xml.get_validates():
        test.fail("Failed to validate node device xml!")
def find_hbas(hba_type="hba", status="online"):
    """
    Find online hba/vhba cards.

    :params hba_type: "vhba" or "hba"
    :params status: "online" or "offline"
    :return: A list contains the online/offline vhba/hba list
    """
    # TODO: add status=offline/online judgement, we don't test offline vhba now
    # so leave it here as a placeholder.
    listing = virsh.nodedev_list(cap="scsi_host")
    if listing.exit_status:
        raise exceptions.TestFail(results_stderr_52lts(listing))
    hbas, vhbas = [], []
    # Classify every scsi host: HBAs expose vport_ops with a real fabric
    # WWN; vHBAs expose fc_host without vport_ops.
    for host in results_stdout_52lts(listing).strip().splitlines():
        dump = virsh.nodedev_dumpxml(host)
        xml_text = results_stdout_52lts(dump).strip()
        if dump.exit_status:
            raise exceptions.TestFail(results_stderr_52lts(dump))
        vport_capable = re.search('vport_ops', xml_text)
        bogus_fabric = (
            re.search('<fabric_wwn>ffffffffffffffff</fabric_wwn>', xml_text)
            or re.search('<fabric_wwn>0</fabric_wwn>', xml_text))
        if vport_capable and not bogus_fabric:
            hbas.append(host)
        if re.search('fc_host', xml_text) and not vport_capable:
            vhbas.append(host)
    if hba_type == "hba":
        return hbas
    if hba_type == "vhba":
        return vhbas
def create_vfs(vf_num):
    """
    Create max vfs.

    Resets sriov_numvfs to 0, writes ``vf_num``, waits until libvirt
    exposes the expected number of new net node devices, then verifies
    the interface names against sysfs.

    :param vf_num: number of virtual functions to create
    """
    net_device = []
    net_name = []
    # Reset any existing VFs first; the result is intentionally not checked
    # (the PF may already have 0 VFs configured).
    process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
    # Snapshot device lists so the newly created VFs can be diffed out later.
    pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
    net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
    pci_list_before = set(pci_list)
    net_list_before = set(net_list)
    test_res = process.run("echo %d > %s/sriov_numvfs" % (vf_num, pci_address),
                           shell=True)
    if test_res.exit_status != 0:
        test.fail("Fail to create vfs")
    pci_list_sriov = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()

    def _vf_init_completed():
        # Poll helper for wait_for(): truthy (the net-device diff) once all
        # vf_num new net devices are visible, False otherwise.
        try:
            net_list_sriov = virsh.nodedev_list(
                cap='net').stdout.strip().splitlines()
            net_list_sriov = set(net_list_sriov)
            net_diff = list(net_list_sriov.difference(net_list_before))
            if len(net_diff) != int(vf_num):
                return False
            return net_diff
        except process.CmdError:
            # test.fail() raises by itself; re-raising its None return value
            # would have been a latent TypeError.  Message corrected: the
            # command run here is nodedev-list, not list.
            test.fail("Get net list with 'virsh nodedev-list' failed\n")

    pci_list_sriov = set(pci_list_sriov)
    pci_diff = list(pci_list_sriov.difference(pci_list_before))
    net_diff = utils_misc.wait_for(_vf_init_completed, timeout=60)
    if not net_diff:
        test.fail("Get net list with 'virsh nodedev-list' failed\n")
    for net in net_diff:
        # nodedev names look like net_<iface>_<six MAC bytes joined by '_'>;
        # strip the leading "net" token and the trailing MAC to get <iface>.
        net = net.split('_')
        length = len(net)
        net = '_'.join(net[1:length - 6])
        net_name.append(net)
    for pci_addr in pci_diff:
        # pci_0000_03_00_1 -> 0000:03:00.1
        temp_addr = pci_addr.split("_")
        pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
        vf_net_name = os.listdir("%s/%s/net" % (pci_device_dir, pci_addr))[0]
        net_device.append(vf_net_name)
    logging.debug(sorted(net_name))
    logging.debug(sorted(net_device))
    if sorted(net_name) != sorted(net_device):
        test.fail("The net name get from nodedev-list is wrong\n")
def pci_devices_address():
    """
    Get the address of pci device
    """
    # Return the first entry reported by `virsh nodedev-list --cap pci`.
    listing = virsh.nodedev_list(tree='', cap='pci')
    return listing.stdout.strip().splitlines()[0]
def pci_devices_name(device_type):
    """
    Get the name of the first node device with the given capability.

    :param device_type: type of device, such as pci, net, storage
    """
    listing = virsh.nodedev_list(tree='', cap=device_type)
    return listing.stdout.strip().splitlines()[0]
def find_devices_by_cap(test, cap_type):
    """
    Find device by capability

    :params cap_type: capability type
    """
    result = virsh.nodedev_list(cap=cap_type)
    if result.exit_status:
        test.fail(result.stderr)
    # One device name per output line.
    return result.stdout.strip().splitlines()
def find_devices_by_cap(cap_type="scsi_host"):
    """
    Find device by capability

    :params cap_type: capability type
    """
    result = virsh.nodedev_list('--cap %s' % cap_type)
    if result.exit_status:
        raise error.TestFail(result.stderr)
    # One device name per output line.
    return result.stdout.strip().splitlines()
def _vf_init_completed():
    # Poll helper for wait_for(): returns the list of newly appeared net
    # node devices once all vf_num of them are visible, False otherwise.
    try:
        net_list_sriov = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        net_list_sriov = set(net_list_sriov)
        net_diff = list(net_list_sriov.difference(net_list_before))
        if len(net_diff) != int(vf_num):
            return False
        return net_diff
    except process.CmdError:
        # test.fail() raises by itself; the original `raise test.fail(...)`
        # would have re-raised its None return value (a TypeError).  Message
        # corrected: the command run here is nodedev-list, not list.
        test.fail("Get net list with 'virsh nodedev-list' failed\n")
def get_first_mdev_nodedev_name():
    """
    Returns the first nodedev of type mdev known to libvirt

    :return: the first listed mdev node device
    """
    result = virsh.nodedev_list(cap="mdev", debug=True)
    names = result.stdout.strip().splitlines()
    # Treat both a failing command and an empty listing as errors.
    if result.exit_status or not names:
        raise TestError("Couldn't create nodedev. %s. %s." %
                        (result.stderr, result.stdout))
    return names[0]
def nodedev_validate(file=None, **virsh_dargs):
    """
    Test for schema nodedev

    :param file: path the device XML is dumped to (passed to nodedev-dumpxml)
    :param virsh_dargs: extra virsh keyword arguments (currently unused here)
    """
    # Get dev name
    cmd_result = virsh.nodedev_list()
    libvirt.check_exit_status(cmd_result)
    dev_names = cmd_result.stdout.strip().splitlines()
    # Guard the index: the original indexed [1] unconditionally and raised
    # IndexError when fewer than two devices were listed, defeating the
    # `if dev_name` check below.
    dev_name = dev_names[1] if len(dev_names) > 1 else None
    if dev_name:
        cmd_result = virsh.nodedev_dumpxml(dev_name, to_file=file)
        libvirt.check_exit_status(cmd_result)
def _vf_init_completed():
    # Poll helper for wait_for(): truthy (the net-device diff) once the
    # expected count is visible.  mlx4_core exposes 2 * (vf_num + 1)
    # devices per the formula below.
    try:
        net_list_sriov = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        net_list_sriov = set(net_list_sriov)
        net_diff = list(net_list_sriov.difference(net_list_before))
        net_count = len(net_diff)
        if ((driver != "mlx4_core" and net_count != vf_num) or
                (driver == "mlx4_core" and net_count != 2*(vf_num + 1))):
            return False
        return net_diff
    except process.CmdError:
        # test.fail() raises by itself; the original `raise test.fail(...)`
        # would have re-raised its None return value (a TypeError).
        test.fail("Get net list with 'virsh nodedev-list' failed\n")
def get_avail_caps(all_caps):
    """
    Get all available capabilities on the host.

    :param all_caps: A list contains all currently known capabilities.
    :return: A list contains all available capabilities.
    """
    # A capability is available when `virsh nodedev-list --cap <cap>`
    # succeeds for it.
    return [cap for cap in all_caps
            if virsh.nodedev_list(cap=cap).exit_status == 0]
def get_device_name():
    """
    Returns first defined but not started mdev device name.

    :raises TestFail: when no mdev node device can be listed
    """
    try:
        result = virsh.nodedev_list(cap="mdev", options="--all",
                                    ignore_status=False, debug=True)
        return result.stdout.strip().splitlines()[0]
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrow it and chain the original cause.
    except Exception as details:
        raise TestFail("Mdev device not found.") from details
def _vf_init_completed():
    # Poll helper for wait_for(): truthy (the net-device diff) once the
    # expected count is visible.  mlx4_core exposes 2 * (vf_num + 1)
    # devices per the formula below.
    try:
        net_list_sriov = virsh.nodedev_list(
            cap='net').stdout.strip().splitlines()
        net_list_sriov = set(net_list_sriov)
        net_diff = list(net_list_sriov.difference(net_list_before))
        net_count = len(net_diff)
        if ((driver != "mlx4_core" and net_count != vf_num) or
                (driver == "mlx4_core" and net_count != 2 * (vf_num + 1))):
            return False
        return net_diff
    except process.CmdError:
        # test.fail() raises by itself; the original `raise test.fail(...)`
        # would have re-raised its None return value (a TypeError).
        test.fail("Get net list with 'virsh nodedev-list' failed\n")
def network_device_name():
    """
    Get the address of network pci device
    """
    # List all net node devices and find one that is PCI-backed but is not
    # the default-route interface.
    names = virsh.nodedev_list(tree='', cap='net').stdout.strip().splitlines()
    route_default = process.run(
        " route | grep default", shell=True).stdout_text.strip().split(' ')
    # Last column of the default route line is the interface name.
    ip_default = route_default[-1]
    for candidate in names:
        if candidate.find(ip_default) != -1:
            continue
        parent = nodedev_xml.NodedevXML.new_from_dumpxml(candidate).parent
        if 'pci' in parent:
            return candidate
    # Nothing suitable found on this host.
    test.cancel('Param device_address is not configured.')
def network_device_name():
    """
    Get the address of network pci device
    """
    # List all net node devices and find one that is PCI-backed but is not
    # the default-route interface.
    names = virsh.nodedev_list(tree='', cap='net').stdout.strip().splitlines()
    route_default = results_stdout_52lts(
        process.run(" route | grep default", shell=True)).strip().split(' ')
    # Last column of the default route line is the interface name.
    ip_default = route_default[-1]
    for candidate in names:
        if candidate.find(ip_default) != -1:
            continue
        parent = nodedev_xml.NodedevXML.new_from_dumpxml(candidate).parent
        if 'pci' in parent:
            return candidate
    # Nothing suitable found on this host.
    test.cancel('Param device_address is not configured.')
def pci_device_address():
    """
    Get the address of a PCI network device that is NOT the default-route
    interface and uses a different driver than the default one.

    :return: parent (PCI) nodedev address of a suitable device, or None
             when no such device exists
    """
    net_list = virsh.nodedev_list(tree='', cap='net')
    net_lists = net_list.stdout.strip().splitlines()
    route_cmd = " route | grep default"
    route_default = process.run(
        route_cmd, shell=True).stdout_text.strip().split(' ')
    # Last column of the default route line is the interface name.
    ip_default = route_default[-1]
    default_net_driver = None
    for default_net_name in net_lists:
        # Fixed: the original `if default_net_name.find(ip_default):` was a
        # truthiness bug -- find() returns -1 when absent (truthy) and 0 at
        # position 0 (falsy), so it matched almost every device.  The intent
        # is a containment test.
        if ip_default in default_net_name:
            default_net_address = nodedev_xml.NodedevXML.new_from_dumpxml(
                default_net_name).parent
            default_net_driver = get_device_driver(default_net_address)
            break
    for net_device_name in net_lists:
        if net_device_name.find(ip_default) == -1:
            net_device_address = nodedev_xml.NodedevXML.new_from_dumpxml(
                net_device_name).parent
            if 'pci' in net_device_address:
                net_device_driver = get_device_driver(net_device_address)
                # default_net_driver may be None when the default interface
                # has no nodedev entry; any PCI driver then qualifies.
                if net_device_driver != default_net_driver:
                    return net_device_address
def run(test, params, env):
    """
    Test command: nodedev-list [--tree] [--cap <string>]

    1) Run nodedev-list command and check return code.
    2) If `cap_option == one`, results are also compared with devices
       get from sysfs.
    """
    def _check_result(cap, ref_list, result):
        """
        Check test result agains a device list retrived from sysfs.

        :param cap: Capability being checked, current available caps are
                    defined in variable `caps`.
        :param ref_list: Reference device list retrived from sysfs.
        :param check_list: Stdout returned from virsh nodedev-list command.
        """
        check_list = result.strip().splitlines()
        # These capabilities have no stable sysfs reference; skip the
        # comparison for them.
        uavail_caps = ['system', 'vports', 'fc_host']
        if set(ref_list) != set(check_list) and cap not in uavail_caps:
            logging.error('Difference in capability %s:', cap)
            logging.error('Expected devices: %s', ref_list)
            logging.error('Result devices : %s', check_list)
            return False
        return True

    all_caps = ['system', 'pci', 'usb_device', 'usb', 'net', 'scsi_host',
                'scsi_target', 'scsi', 'storage', 'fc_host', 'vports',
                'scsi_generic']
    expect_succeed = params.get('expect_succeed', 'yes')
    tree_option = params.get('tree_option', 'off')
    cap_option = params.get('cap_option', 'off')
    # Only exercise capabilities that exist on this host.
    caps = get_avail_caps(all_caps)
    check_failed = False

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_dargs = {}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    tree = (tree_option == 'on')
    if cap_option == 'one':
        # Query one capability at a time and compare against sysfs.
        devices = {}
        for cap in caps:
            devices[cap] = get_devices_by_cap(cap)
        for cap in devices:
            logging.debug(cap + ':')
            for device in devices[cap]:
                logging.debug(' ' + device)
        for cap in caps:
            result = virsh.nodedev_list(tree=tree, cap=cap, **virsh_dargs)
            # Bail out on the first result whose exit status contradicts
            # the expectation; the shared failure reporting is below.
            if result.exit_status != 0 and expect_succeed == 'yes':
                break
            elif result.exit_status == 0 and expect_succeed == 'no':
                break
            if not _check_result(cap, devices[cap], result.stdout.strip()):
                check_failed = True
                break
    else:
        # Build a single --cap argument: none, all caps, an oversized list,
        # or the literal option value.
        cap = ''
        if cap_option != 'off':
            if cap_option == 'multi':
                cap = ','.join(caps)
            elif cap_option == 'long':
                cap = ','.join(['pci', 'usb', 'net', 'storage', 'scsi'] * 5000)
            else:
                cap = cap_option
        result = virsh.nodedev_list(tree=tree, cap=cap, **virsh_dargs)
        logging.debug(result)
        if expect_succeed == 'yes':
            if result.exit_status != 0:
                test.fail(
                    'Expected succeed, but failed with result:\n%s' % result)
        elif expect_succeed == 'no':
            if result.exit_status == 0:
                test.fail(
                    'Expected fail, but succeed with result:\n%s' % result)
    if check_failed:
        test.fail('Check failed. result:\n%s' % result)
def run(test, params, env):
    """
    Test command: nodedev-list [--tree] [--cap <string>]

    1) Run nodedev-list command and check return code.
    2) If `cap_option == one`, results are also compared with devices
       get from sysfs.
    """
    def _check_result(cap, ref_list, result):
        """
        Check test result agains a device list retrived from sysfs.

        :param cap: Capability being checked, current available caps are
                    defined in variable `caps`.
        :param ref_list: Reference device list retrived from sysfs.
        :param check_list: Stdout returned from virsh nodedev-list command.
        """
        check_list = result.strip().splitlines()
        # These capabilities have no stable sysfs reference; skip the
        # comparison for them.
        uavail_caps = ['system', 'vports', 'fc_host']
        if set(ref_list) != set(check_list) and cap not in uavail_caps:
            logging.error('Difference in capability %s:', cap)
            logging.error('Expected devices: %s', ref_list)
            logging.error('Result devices : %s', check_list)
            return False
        return True

    all_caps = [
        'system', 'pci', 'usb_device', 'usb', 'net', 'scsi_host',
        'scsi_target', 'scsi', 'storage', 'fc_host', 'vports',
        'scsi_generic'
    ]
    expect_succeed = params.get('expect_succeed', 'yes')
    tree_option = params.get('tree_option', 'off')
    cap_option = params.get('cap_option', 'off')
    # Only exercise capabilities that exist on this host.
    caps = get_avail_caps(all_caps)
    check_failed = False

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    virsh_dargs = {}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    tree = (tree_option == 'on')
    if cap_option == 'one':
        # Query one capability at a time and compare against sysfs.
        devices = {}
        for cap in caps:
            devices[cap] = get_devices_by_cap(cap)
        for cap in devices:
            logging.debug(cap + ':')
            for device in devices[cap]:
                logging.debug(' ' + device)
        for cap in caps:
            result = virsh.nodedev_list(tree=tree, cap=cap, **virsh_dargs)
            # Bail out on the first result whose exit status contradicts
            # the expectation; the shared failure reporting is below.
            if result.exit_status != 0 and expect_succeed == 'yes':
                break
            elif result.exit_status == 0 and expect_succeed == 'no':
                break
            if not _check_result(cap, devices[cap], result.stdout):
                check_failed = True
                break
    else:
        # Build a single --cap argument: none, all caps, an oversized list,
        # or the literal option value.
        cap = ''
        if cap_option != 'off':
            if cap_option == 'multi':
                cap = ','.join(caps)
            elif cap_option == 'long':
                cap = ','.join(['pci', 'usb', 'net', 'storage', 'scsi'] * 5000)
            else:
                cap = cap_option
        result = virsh.nodedev_list(tree=tree, cap=cap, **virsh_dargs)
        logging.debug(result)
        if expect_succeed == 'yes':
            if result.exit_status != 0:
                raise error.TestFail(
                    'Expected succeed, but failed with result:\n%s' % result)
        elif expect_succeed == 'no':
            if result.exit_status == 0:
                raise error.TestFail(
                    'Expected fail, but succeed with result:\n%s' % result)
    if check_failed:
        raise error.TestFail('Check failed. result:\n%s' % result)
def run(test, params, env):
    """
    Test command: virsh nodedev-reset <device>

    When `device_option` is:
    1) resettable   : Reset specified device if it is resettable.
    2) non-exist    : Try to reset specified device which doesn't exist.
    3) non-pci      : Try to reset all local non-PCI devices.
    4) active       : Try to reset specified device which is attached to VM.
    5) unresettable : Try to reset all unresettable PCI devices.
    """
    # Retrieve parameters
    expect_succeed = params.get('expect_succeed', 'yes')
    device_option = params.get('device_option', 'valid')
    unspecified = 'REPLACE_WITH_TEST_DEVICE'
    specified_device = params.get('specified_device', unspecified)

    # Backup original libvirtd status and prepare libvirtd status
    logging.debug('Preparing libvirtd')
    libvirtd = utils_libvirtd.Libvirtd()
    if params.get("libvirtd", "on") == "off":
        libvirtd.stop()

    # Get whether PCI devices are resettable from sysfs.
    devices = get_pci_info()

    # Divide PCI devices into two categories: a device is treated as
    # resettable when sysfs exposes a reset node AND a driver is bound,
    # and unresettable when it has neither.
    resettable_nodes = []
    unresettable_nodes = []
    for device in devices:
        info = devices[device]
        if info['reset'] and info['driver']:
            resettable_nodes.append(device)
        if not info['reset'] and not info['driver']:
            unresettable_nodes.append(device)

    # Find out all non-PCI devices.
    all_devices = virsh.nodedev_list().stdout.strip().splitlines()
    non_pci_nodes = []
    for device in all_devices:
        if device not in devices:
            non_pci_nodes.append(device)

    try:
        if device_option == 'resettable':
            # Test specified resettable device.
            if specified_device != unspecified:
                if specified_device in resettable_nodes:
                    test_nodedev_reset([specified_device], expect_succeed)
                else:
                    raise error.TestNAError(
                        'Param specified_device is not set!')
            else:
                raise error.TestNAError('Param specified_device is not set!')
        elif device_option == 'non-exist':
            # Test specified non-exist device.
            if specified_device != unspecified:
                if specified_device not in all_devices:
                    test_nodedev_reset([specified_device], expect_succeed)
                else:
                    raise error.TestError('Specified device exists!')
            else:
                raise error.TestNAError('Param specified_device is not set!')
        elif device_option == 'non-pci':
            # Test all non-PCI device.
            if non_pci_nodes:
                test_nodedev_reset(non_pci_nodes, expect_succeed)
            else:
                raise error.TestNAError('No non-PCI device found!')
        elif device_option == 'active':
            # Test specified device if attached to VM.
            if specified_device != unspecified:
                vm_name = params.get('main_vm', 'virt-tests-vm1')
                vm = env.get_vm(vm_name)
                test_active_nodedev_reset(
                    specified_device, vm, expect_succeed)
            else:
                raise error.TestNAError('Param specified_device is not set!')
        elif device_option == 'unresettable':
            # Test all unresettable device.
            if unresettable_nodes:
                test_nodedev_reset(unresettable_nodes, expect_succeed)
            else:
                raise error.TestNAError('No unresettable device found!')
        else:
            raise error.TestError('Unrecognisable device option %s!' %
                                  device_option)
    finally:
        # Restore libvirtd status
        logging.debug('Restoring libvirtd')
        if not libvirtd.is_running():
            libvirtd.start()
def create_vfs(vf_num):
    """
    Create max vfs.

    Resets the current VF configuration, creates ``vf_num`` VFs (via sysfs
    for regular drivers, via module reload for mlx4_core), then waits for
    the VF net devices to appear and cross-checks their names against sysfs.

    :param vf_num: number of virtual functions to create
    """
    net_device = []
    net_name = []
    # cleanup env and create vfs
    cmd = "echo 0 > %s/sriov_numvfs" % pci_address
    if driver == "mlx4_core":
        # mlx4 cannot reset VFs through sysfs; unload the module stack instead
        cmd = "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core"
    process.run(cmd, shell=True)
    # Snapshot device lists so the newly created VFs can be diffed out later.
    pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
    net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
    pci_list_before = set(pci_list)
    net_list_before = set(net_list)
    cmd = "echo %d > %s/sriov_numvfs" % (vf_num, pci_address)
    if driver == "mlx4_core":
        cmd = "modprobe -v mlx4_core num_vfs=%d port_type_array=2,2 probe_vf=%d" \
              % (vf_num, vf_num)
    test_res = process.run(cmd, shell=True)
    if test_res.exit_status != 0:
        test.fail("Fail to create vfs")

    def _vf_init_completed():
        # Poll helper for wait_for(): truthy (the net-device diff) once the
        # expected count is visible.  The mlx4 formula 2 * (vf_num + 1)
        # mirrors the port_type_array=2,2 setup above.
        try:
            net_list_sriov = virsh.nodedev_list(
                cap='net').stdout.strip().splitlines()
            net_list_sriov = set(net_list_sriov)
            net_diff = list(net_list_sriov.difference(net_list_before))
            net_count = len(net_diff)
            if ((driver != "mlx4_core" and net_count != vf_num) or
                    (driver == "mlx4_core" and net_count != 2 * (vf_num + 1))):
                return False
            return net_diff
        except process.CmdError:
            # test.fail() raises by itself; re-raising its None return value
            # would have been a latent TypeError.
            test.fail("Get net list with 'virsh nodedev-list' failed\n")

    net_diff = utils_misc.wait_for(_vf_init_completed, timeout=300)
    pci_list_sriov = virsh.nodedev_list(
        cap='pci').stdout.strip().splitlines()
    pci_list_sriov = set(pci_list_sriov)
    pci_diff = list(pci_list_sriov.difference(pci_list_before))
    if not net_diff:
        test.fail("Get net list with 'virsh nodedev-list' failed\n")
    for net in net_diff:
        # nodedev names look like net_<iface>_<six MAC bytes joined by '_'>;
        # strip the leading "net" token and discard the trailing MAC bytes
        # (the unused `mac` local was removed).
        net = net.split('_')
        length = len(net)
        net = '_'.join(net[1:length - 6])
        net_name.append(net)
    for pci_addr in pci_diff:
        # pci_0000_03_00_1 -> 0000:03:00.1
        temp_addr = pci_addr.split("_")
        pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
        vf_net_name = os.listdir("%s/%s/net" % (pci_device_dir, pci_addr))[0]
        net_device.append(vf_net_name)
    logging.debug(sorted(net_name))
    logging.debug(sorted(net_device))
    # mlx4 interface naming does not follow the nodedev-derived names, so
    # the comparison is only meaningful for other drivers.
    if driver != "mlx4_core" and sorted(net_name) != sorted(net_device):
        test.fail("The net name get from nodedev-list is wrong\n")
def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach pci device to guest.
        4. Start guest and set the ip of guest.
        5. Ping the server_ip of from guest to verify the new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after attaching
           storage pci device to guest.
    """
    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    device_type = params.get("libvirt_pci_device_type", "NIC")
    net_name = params.get("libvirt_pci_net_name", "eth0")
    server_ip = params.get("libvirt_pci_server_ip")
    storage_dev_name = params.get("libvirt_pci_storage_dev_name", "/dev/sdb")
    fdisk_list_before = None

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    pci_address = None
    if device_type == "NIC":
        # Find the pci device for given network device.
        result = virsh.nodedev_list("net")
        nodedev_nets = result.stdout.strip().splitlines()
        device = None
        for nodedev in nodedev_nets:
            netxml = NodedevXML.new_from_dumpxml(nodedev)
            if netxml.cap.interface == net_name:
                device = nodedev
                break
        if not device:
            raise error.TestError("There is no network device name of %s." %
                                  net_name)
        pci_dev = netxml.parent
        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()
    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()

        result = virsh.nodedev_list("storage")
        nodedev_storages = result.stdout.strip().splitlines()
        # Track the match explicitly: testing the loop variable after the
        # loop is wrong (it holds the last item, or is undefined when the
        # list is empty).
        device = None
        for nodedev in nodedev_storages:
            storage_xml = NodedevXML.new_from_dumpxml(nodedev)
            if storage_xml.cap.block == storage_dev_name:
                device = nodedev
                break
        if not device:
            raise error.TestError("There is no block device name of %s." %
                                  storage_dev_name)
        pci_xml = NodedevXML.new_from_dumpxml(storage_xml.parent)
        pci_address = pci_xml.cap.get_address_dict()

    vmxml.add_hostdev(pci_address)

    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        if device_type == "NIC":
            try:
                session.cmd("ping -c 4 %s" % server_ip)
            except aexpect.ShellError as detail:
                # Py3-compatible 'as' syntax; '%' actually interpolates the
                # failure detail (the old comma form passed it as a stray
                # second argument to TestFail).
                raise error.TestFail("Succeed to set ip on guest, but failed "
                                     "to ping server ip from guest.\n"
                                     "Detail: %s." % detail)
        elif device_type == "STORAGE":
            # Get the result of "fdisk -l" in guest, and compare the result
            # with fdisk_list_before.
            output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
            fdisk_list_after = output.splitlines()
            if fdisk_list_after == fdisk_list_before:
                raise error.TestFail("Didn't find the disk attached to guest.")
    finally:
        # NOTE(review): the 'try' here had no visible handler in the chunk;
        # restoring the conventional guest-XML cleanup.
        backup_xml.sync()
def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach pci device to guest.
        4. Start guest and set the ip to all the physical functions.
        5. Ping to server_ip from each physical function
           to verify the new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after
           attaching storage pci device to guest.
    """
    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # 'sriov' selects the virtual-function flow; otherwise every function
    # in the device's IOMMU group is passed through.
    sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no'))
    device_type = params.get("libvirt_pci_device_type", "NIC")
    pci_dev = None
    device_name = None
    pci_address = None
    bus_info = []
    if device_type == "NIC":
        pci_dev = params.get("libvirt_pci_net_dev_label")
        device_name = params.get("libvirt_pci_net_dev_name", "None")
    else:
        pci_dev = params.get("libvirt_pci_storage_dev_label")
    net_ip = params.get("libvirt_pci_net_ip", "ENTER.YOUR.IP")
    server_ip = params.get("libvirt_pci_server_ip",
                           "ENTER.YOUR.SERVER.IP")
    netmask = params.get("libvirt_pci_net_mask", "ENTER.YOUR.Mask")

    # Check the parameters from configuration file.
    # An "ENTER" placeholder means the tester has not customised the config.
    if (pci_dev.count("ENTER")):
        test.cancel("Please enter your device name for test.")
    if (device_type == "NIC" and
            (net_ip.count("ENTER") or server_ip.count("ENTER") or
             netmask.count("ENTER"))):
        test.cancel("Please enter the ips and netmask for NIC test in config file")
    fdisk_list_before = None

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    if device_type == "NIC":
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Baseline of guest PCI devices, compared again after attach.
        output = session.cmd_output("lspci -nn")
        nic_list_before = output.splitlines()
        if sriov:
            # set the parameter max_vfs of igb module to 7. Then we can use
            # the virtual function pci device for network device.
            # command 'modprobe -r igb' to unload igb module
            # command '&& modprobe igb max_vfs=7' to load it again
            #         with max_vfs=7
            # command '|| echo 'FAIL' > output_file' is a flag to mean
            #         modprobe igb with max_vfs=7 failed.
            # command '|| modprobe igb' is a handler of error occurred
            #         when we load igb again. If command 2 failed,
            #         this command will be executed to recover network.
            output_file = os.path.join(test.tmpdir, "output")
            if os.path.exists(output_file):
                os.remove(output_file)
            mod_cmd = ("modprobe -r igb && modprobe igb max_vfs=7 ||"
                       "echo 'FAIL' > %s && modprobe igb &" % output_file)
            result = process.run(mod_cmd, ignore_status=True, shell=True)
            if os.path.exists(output_file):
                test.error("Failed to modprobe igb with max_vfs=7.")
            # Get the virtual function pci device which was generated above.
            pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
            virt_functions = pci_xml.cap.virt_functions
            if not virt_functions:
                test.error("Init virtual function failed.")
            pci_address = virt_functions[0]
            pci_dev = utils_test.libvirt.pci_label_from_address(pci_address,
                                                                radix=16)

            # Find the network name (ethX) is using this pci device.
            # The network service name differs between Ubuntu and
            # RHEL-like distributions.
            distro_details = distro.detect()
            if distro_details.name == 'Ubuntu':
                network_service = service.Factory.create_service("networking")
            else:
                network_service = service.Factory.create_service("network")
            network_service.restart()
            result = virsh.nodedev_list("net")
            nodedev_nets = result.stdout.strip().splitlines()
            device = None
            for nodedev in nodedev_nets:
                netxml = NodedevXML.new_from_dumpxml(nodedev)
                if netxml.parent == pci_dev:
                    device = nodedev
                    break
            if not device:
                test.error("There is no network name is using "
                           "Virtual Function PCI device %s." % pci_dev)
            pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
            pci_address = pci_xml.cap.get_address_dict()
            vmxml.add_hostdev(pci_address)
        else:
            # Non-SRIOV: attach every function in the IOMMU group.
            # 'pci_0000_06_00_1' -> '0000:06:00.1'
            pci_id = pci_dev.replace("_", ".").strip("pci.").replace(".", ":", 2)
            obj = PciAssignable()
            # get all functions id's
            pci_ids = obj.get_same_group_devs(pci_id)
            pci_devs = []
            for val in pci_ids:
                temp = val.replace(":", "_")
                pci_devs.extend(["pci_" + temp])
            pci_id = re.sub('[:.]', '_', pci_id)
            for val in pci_devs:
                val = val.replace(".", "_")
                pci_xml = NodedevXML.new_from_dumpxml(val)
                pci_address = pci_xml.cap.get_address_dict()
                vmxml.add_hostdev(pci_address)

    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()

        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()
        vmxml.add_hostdev(pci_address)

    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        if device_type == "NIC":
            output = session.cmd_output("lspci -nn")
            nic_list_after = output.splitlines()
            if nic_list_after == nic_list_before:
                test.fail("passthrough Adapter not found in guest.")
            else:
                logging.debug("Adapter passthorughed to guest successfully")
            if sriov:
                try:
                    output = session.cmd_output("lspci -nn | grep %s"
                                                % device_name)
                    nic_id = str(output).split(' ', 1)[0]
                    nic_name = str(utils_misc.get_interface_from_pci_id(nic_id,
                                                                        session))
                    session.cmd("ip addr flush dev %s" % nic_name)
                    session.cmd("ip addr add %s/%s dev %s"
                                % (net_ip, netmask, nic_name))
                    session.cmd("ip link set %s up" % nic_name)
                    session.cmd("ping -I %s %s -c 5"
                                % (nic_name, server_ip))
                except aexpect.ShellError as detail:
                    test.error("Succeed to set ip on guest, but failed "
                               "to ping server ip from guest. %s \n" % detail)
            else:
                output = session.cmd_output("lspci -nn | grep %s" % device_name)
                nic_list = output.splitlines()
                # Collect full bus ids; truncating the function suffix
                # leaves the slot id shared by all functions of one device.
                for val in range(len(nic_list)):
                    bus_info.append(str(nic_list[val]).split(' ', 1)[0])
                    nic_list[val] = str(nic_list[val]).split(' ', 1)[0][:-2]
                # check all functions get same iommu group
                if len(set(nic_list)) != 1:
                    test.fail("Multifunction Device passthroughed but "
                              "functions are in different iommu group")
                # ping to server from each function
                bus_info.sort()
                for val in bus_info:
                    nic_name = str(utils_misc.get_interface_from_pci_id(val,
                                                                        session))
                    try:
                        session.cmd("ip addr flush dev %s" % nic_name)
                        session.cmd("ip addr add %s/%s dev %s"
                                    % (net_ip, netmask, nic_name))
                        session.cmd("ip link set %s up" % nic_name)
                        session.cmd("ping -I %s %s -c 5"
                                    % (nic_name, server_ip))
                    except aexpect.ShellError as detail:
                        test.error("Succeed to set ip on guest, but failed "
                                   "to ping server ip from guest. %s\n"
                                   % detail)
        elif device_type == "STORAGE":
            # Get the result of "fdisk -l" in guest, and compare the result
            # with fdisk_list_before.
            output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
            fdisk_list_after = output.splitlines()
            if fdisk_list_after == fdisk_list_before:
                test.fail("Didn't find the disk attached to guest.")
    finally:
        # Always restore the original guest definition.
        backup_xml.sync()
def run(test, params, env):
    """
    Test command: nodedev-list [--tree] [--cap <string>]

    1) Run nodedev-list command and check return code.
    2) If `cap_option == one`, results are also compared
       with devices get from sysfs.
    """
    def _check_result(cap, ref_list, result):
        """
        Check test result against a device list retrieved from sysfs.

        :param cap:      Capability being checked, current available caps are
                         defined in variable `caps`.
        :param ref_list: Reference device list retrieved from sysfs.
        :param result:   Stdout returned from virsh nodedev-list command.
        """
        check_list = result.strip().splitlines()
        # These capabilities have no reliable sysfs counterpart, so a
        # mismatch is not treated as a failure for them.
        uavail_caps = ['system', 'vports', 'fc_host']

        if set(ref_list) != set(check_list) and cap not in uavail_caps:
            logging.error('Difference in capability %s:', cap)
            logging.error('Expected devices: %s', ref_list)
            logging.error('Result devices : %s', check_list)
            return False
        return True

    all_caps = ['system', 'pci', 'usb_device', 'usb', 'net', 'scsi_host',
                'scsi_target', 'scsi', 'storage', 'fc_host', 'vports',
                'scsi_generic']
    expect_succeed = params.get('expect_succeed', 'yes')
    tree_option = params.get('tree_option', 'off')
    cap_option = params.get('cap_option', 'off')
    # Only capabilities actually present on this host are exercised.
    caps = get_avail_caps(all_caps)
    check_failed = False

    tree = (tree_option == 'on')
    if cap_option == 'one':
        # Query each capability separately and cross-check with sysfs.
        devices = {}
        for cap in caps:
            devices[cap] = get_devices_by_cap(cap)

        for cap in devices:
            logging.debug(cap + ':')
            for device in devices[cap]:
                logging.debug(' ' + device)

        for cap in caps:
            result = virsh.nodedev_list(tree=tree, cap=cap)
            # Stop on the first unexpected exit status; the checks after
            # the if/else report it from the last 'result'.
            if result.exit_status != 0 and expect_succeed == 'yes':
                break
            elif result.exit_status == 0 and expect_succeed == 'no':
                break
            if not _check_result(cap, devices[cap], result.stdout):
                check_failed = True
                break
    else:
        # Single invocation with an empty, multi, overlong or custom cap.
        cap = ''
        if cap_option != 'off':
            if cap_option == 'multi':
                cap = ','.join(caps)
            elif cap_option == 'long':
                cap = ','.join(['pci', 'usb', 'net', 'storage', 'scsi'] * 5000)
            else:
                cap = cap_option
        result = virsh.nodedev_list(tree=tree, cap=cap)

    logging.debug(result)
    if expect_succeed == 'yes':
        if result.exit_status != 0:
            raise error.TestFail(
                'Expected succeed, but failed with result:\n%s' % result)
    elif expect_succeed == 'no':
        if result.exit_status == 0:
            raise error.TestFail(
                'Expected fail, but succeed with result:\n%s' % result)
    if check_failed:
        raise error.TestFail('Check failed. result:\n%s' % result)
def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach pci device to guest.
        4. Start guest and set the ip of guest.
        5. Ping the server_ip of from guest to verify the new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after attaching
           storage pci device to guest.
    """
    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no'))
    device_type = params.get("libvirt_pci_device_type", "NIC")
    pci_dev = None
    if device_type == "NIC":
        pci_dev = params.get("libvirt_pci_net_dev_label")
    else:
        pci_dev = params.get("libvirt_pci_storage_dev_label")
    net_ip = params.get("libvirt_pci_net_ip", "ENTER.YOUR.IP")
    server_ip = params.get("libvirt_pci_server_ip", "ENTER.YOUR.SERVER.IP")

    # Check the parameters from configuration file.
    if (pci_dev.count("ENTER")):
        raise error.TestNAError("Please enter your device name for test.")
    if (device_type == "NIC" and
            (net_ip.count("ENTER") or server_ip.count("ENTER"))):
        raise error.TestNAError("Please enter the ips for NIC test.")
    fdisk_list_before = None

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    pci_address = None
    if device_type == "NIC":
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Baseline of guest NICs, compared again after attach.
        output = session.cmd_output("ifconfig -a|grep Ethernet")
        nic_list_before = output.splitlines()
        if sriov:
            # set the parameter max_vfs of igb module to 7. Then we can use
            # the virtual function pci device for network device.
            # command 'modprobe -r igb' to unload igb module
            # command '&& modprobe igb max_vfs=7' to load it again
            #         with max_vfs=7
            # command '|| echo 'FAIL' > output_file' is a flag to mean
            #         modprobe igb with max_vfs=7 failed.
            # command '|| modprobe igb' is a handler of error occurred
            #         when we load igb again. If command 2 failed,
            #         this command will be executed to recover network.
            output_file = os.path.join(test.tmpdir, "output")
            if os.path.exists(output_file):
                os.remove(output_file)
            mod_cmd = ("modprobe -r igb && modprobe igb max_vfs=7 ||"
                       "echo 'FAIL' > %s && modprobe igb &" % output_file)
            result = utils.run(mod_cmd, ignore_status=True)
            if os.path.exists(output_file):
                raise error.TestError("Failed to modprobe igb with max_vfs=7.")
            # Get the virtual function pci device which was generated above.
            pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
            virt_functions = pci_xml.cap.virt_functions
            if not virt_functions:
                raise error.TestError("Init virtual function failed.")
            pci_address = virt_functions[0]
            pci_dev = utils_test.libvirt.pci_label_from_address(pci_address,
                                                                radix=16)

            # Find the network name (ethX) is using this pci device.
            network_service = service.Factory.create_service("network")
            network_service.restart()
            result = virsh.nodedev_list("net")
            nodedev_nets = result.stdout.strip().splitlines()
            device = None
            for nodedev in nodedev_nets:
                netxml = NodedevXML.new_from_dumpxml(nodedev)
                if netxml.parent == pci_dev:
                    device = nodedev
                    break
            if not device:
                raise error.TestNAError("There is no network name is using "
                                        "Virtual Function PCI device %s." %
                                        pci_dev)
        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()
    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()

        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()

    vmxml.add_hostdev(pci_address)

    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        if device_type == "NIC":
            output = session.cmd_output("ifconfig -a|grep Ethernet")
            nic_list_after = output.splitlines()
            if nic_list_after == nic_list_before:
                raise error.TestFail(
                    "No Ethernet found for the pci device in guest.")
            # The newly appeared interface belongs to the attached NIC.
            nic_name = (list(set(nic_list_after) -
                             set(nic_list_before)))[0].split()[0]
            try:
                session.cmd("ifconfig %s %s" % (nic_name, net_ip))
                session.cmd("ping -c 4 %s" % server_ip)
            except aexpect.ShellError as detail:
                # Py3-compatible 'as' syntax; '%' actually interpolates the
                # failure detail (the old comma form passed it as a stray
                # second argument to TestFail).
                raise error.TestFail("Succeed to set ip on guest, but failed "
                                     "to ping server ip from guest.\n"
                                     "Detail: %s." % detail)
        elif device_type == "STORAGE":
            # Get the result of "fdisk -l" in guest, and compare the result
            # with fdisk_list_before.
            output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
            fdisk_list_after = output.splitlines()
            if fdisk_list_after == fdisk_list_before:
                raise error.TestFail("Didn't find the disk attached to guest.")
    finally:
        # NOTE(review): the 'try' here had no visible handler in the chunk;
        # restoring the conventional guest-XML cleanup.
        backup_xml.sync()
def run(test, params, env):
    """
    Test command: virsh nodedev-reset <device>

    When `device_option` is:
    1) resettable   : Reset specified device if it is resettable.
    2) non-exist    : Try to reset specified device which doesn't exist.
    3) non-pci      : Try to reset all local non-PCI devices.
    4) unresettable : Try to reset all unresettable PCI devices.
    """
    def get_pci_info():
        """
        Get information for all PCI devices including:
        1) whether device has reset under its sysfs dir.
        2) Whether device has driver dir under its sysfs dir.

        :return: A dict using libvirt canonical nodedev name as keys
                 and dicts like {'reset': True, 'driver': True} as values
        """
        devices = {}
        pci_path = '/sys/bus/pci/devices'
        for device in os.listdir(pci_path):
            # Generate a virsh nodedev format device name
            dev_name = re.sub(r'\W', '_', 'pci_' + device)

            dev_path = os.path.join(pci_path, device)

            # Check whether device has `reset` file
            reset_path = os.path.join(dev_path, 'reset')
            has_reset = os.path.isfile(reset_path)

            # Check whether device has `driver` dir
            driver_path = os.path.join(dev_path, 'driver')
            has_driver = os.path.isdir(driver_path)

            info = {'reset': has_reset, 'driver': has_driver}
            devices[dev_name] = info
        return devices

    def test_nodedev_reset(devices, expect_error, **virsh_dargs):
        """
        Test nodedev-reset command on a list of devices

        :param devices      : A list of node devices to be tested.
        :param expect_error : True when the command is expected to fail,
                              False when it is expected to succeed.
        :param virsh_dargs  : standardized virsh function API keywords
        """
        readonly = virsh_dargs.get('readonly', 'no')
        for device in devices:
            result = virsh.nodedev_reset(device, readonly=readonly, debug=True)
            # Check whether exit code match expectation.
            libvirt.check_exit_status(result, expect_error)

    # Retrieve parameters
    expect_error = params.get('expect_error', 'no') == 'yes'
    device_option = params.get('device_option', 'valid')
    unspecified = 'REPLACE_WITH_TEST_DEVICE'
    readonly = (params.get('nodedev_reset_readonly', 'no') == 'yes')

    # Backup original libvirtd status and prepare libvirtd status
    logging.debug('Preparing libvirtd')
    libvirtd = utils_libvirtd.Libvirtd()
    if params.get("libvirtd", "on") == "off":
        libvirtd.stop()

    # Get whether PCI devices are resettable from sysfs.
    devices = get_pci_info()

    # Divide PCI devices into two categories.
    resettable_nodes = []
    unresettable_nodes = []
    for device in devices:
        info = devices[device]
        if info['reset'] and info['driver']:
            resettable_nodes.append(device)
        if not info['reset'] and not info['driver']:
            unresettable_nodes.append(device)

    # Find out all non-PCI devices.
    all_devices = virsh.nodedev_list().stdout.strip().splitlines()
    non_pci_nodes = []
    for device in all_devices:
        if device not in devices:
            non_pci_nodes.append(device)

    try:
        if device_option == 'resettable':
            # Test the first resettable device found on the host. The old
            # code indexed resettable_nodes[0] unconditionally (IndexError
            # when the host has none) and then compared the sysfs-derived
            # name against the 'unspecified' sentinel, which could never
            # match, leaving dead error branches.
            if not resettable_nodes:
                test.cancel('No resettable device found!')
            test_nodedev_reset([resettable_nodes[0]], expect_error,
                               readonly=readonly)
        elif device_option == 'non-exist':
            specified_device = params.get('specified_device', unspecified)
            # Test specified non-exist device.
            if specified_device != unspecified:
                if specified_device not in all_devices:
                    test_nodedev_reset([specified_device], expect_error)
                else:
                    test.error('Specified device exists!')
            else:
                test.cancel('Param specified_device is not set!')
        elif device_option == 'non-pci':
            # Test all non-PCI device.
            if non_pci_nodes:
                test_nodedev_reset(non_pci_nodes, expect_error)
            else:
                test.cancel('No non-PCI device found!')
        elif device_option == 'unresettable':
            # Test all unresettable device.
            if unresettable_nodes:
                test_nodedev_reset(unresettable_nodes, expect_error)
            else:
                test.cancel('No unresettable device found!')
        else:
            test.error('Unrecognisable device option %s!' % device_option)
    finally:
        # Restore libvirtd status
        logging.debug('Restoring libvirtd')
        if not libvirtd.is_running():
            libvirtd.start()
def run(test, params, env):
    """
    Test command: virsh nodedev-reset <device>

    When `device_option` is:
    1) resettable   : Reset specified device if it is resettable.
    2) non-exist    : Try to reset specified device which doesn't exist.
    3) non-pci      : Try to reset all local non-PCI devices.
    4) active       : Try to reset specified device which is attached to VM.
    5) unresettable : Try to reset all unresettable PCI devices.
    """
    # Retrieve parameters
    expect_succeed = params.get('expect_succeed', 'yes')
    device_option = params.get('device_option', 'valid')
    unspecified = 'REPLACE_WITH_TEST_DEVICE'
    specified_device = params.get('specified_device', unspecified)

    # Backup original libvirtd status and prepare libvirtd status
    logging.debug('Preparing libvirtd')
    libvirtd = utils_libvirtd.Libvirtd()
    if params.get("libvirtd", "on") == "off":
        libvirtd.stop()

    # Get whether PCI devices are resettable from sysfs.
    # (get_pci_info and the test_* helpers are module-level functions
    # defined outside this chunk.)
    devices = get_pci_info()

    # Divide PCI devices into two categories: resettable (has both a
    # 'reset' file and a bound driver) and unresettable (has neither).
    resettable_nodes = []
    unresettable_nodes = []
    for device in devices:
        info = devices[device]
        if info['reset'] and info['driver']:
            resettable_nodes.append(device)
        if not info['reset'] and not info['driver']:
            unresettable_nodes.append(device)

    # Find out all non-PCI devices.
    all_devices = virsh.nodedev_list().stdout.strip().splitlines()
    non_pci_nodes = []
    for device in all_devices:
        if device not in devices:
            non_pci_nodes.append(device)

    try:
        if device_option == 'resettable':
            # Test specified resettable device.
            if specified_device != unspecified:
                if specified_device in resettable_nodes:
                    test_nodedev_reset([specified_device], expect_succeed)
                else:
                    raise error.TestNAError(
                        'Param specified_device is not set!')
            else:
                raise error.TestNAError('Param specified_device is not set!')
        elif device_option == 'non-exist':
            # Test specified non-exist device.
            if specified_device != unspecified:
                if specified_device not in all_devices:
                    test_nodedev_reset([specified_device], expect_succeed)
                else:
                    raise error.TestError('Specified device exists!')
            else:
                raise error.TestNAError('Param specified_device is not set!')
        elif device_option == 'non-pci':
            # Test all non-PCI device.
            if non_pci_nodes:
                test_nodedev_reset(non_pci_nodes, expect_succeed)
            else:
                raise error.TestNAError('No non-PCI device found!')
        elif device_option == 'active':
            # Test specified device if attached to VM.
            if specified_device != unspecified:
                vm_name = params.get('main_vm', 'avocado-vt-vm1')
                vm = env.get_vm(vm_name)
                test_active_nodedev_reset(
                    specified_device, vm, expect_succeed)
            else:
                raise error.TestNAError('Param specified_device is not set!')
        elif device_option == 'unresettable':
            # Test all unresettable device.
            if unresettable_nodes:
                test_nodedev_reset(unresettable_nodes, expect_succeed)
            else:
                raise error.TestNAError('No unresettable device found!')
        else:
            raise error.TestError(
                'Unrecognisable device option %s!' % device_option)
    finally:
        # Restore libvirtd status
        logging.debug('Restoring libvirtd')
        if not libvirtd.is_running():
            libvirtd.start()
def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach pci device to guest.
        4. Start guest and set the ip of guest.
        5. Ping the server_ip of from guest to verify the new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after attaching
           storage pci device to guest.
    """
    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no'))
    device_type = params.get("libvirt_pci_device_type", "NIC")
    pci_dev = None
    if device_type == "NIC":
        pci_dev = params.get("libvirt_pci_net_dev_label")
    else:
        pci_dev = params.get("libvirt_pci_storage_dev_label")
    net_ip = params.get("libvirt_pci_net_ip", "ENTER.YOUR.IP")
    server_ip = params.get("libvirt_pci_server_ip", "ENTER.YOUR.SERVER.IP")

    # Check the parameters from configuration file.
    if (pci_dev.count("ENTER")):
        raise error.TestNAError("Please enter your device name for test.")
    if (device_type == "NIC" and
            (net_ip.count("ENTER") or server_ip.count("ENTER"))):
        raise error.TestNAError("Please enter the ips for NIC test.")
    fdisk_list_before = None

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    pci_address = None
    if device_type == "NIC":
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Baseline of guest NICs, compared again after attach.
        output = session.cmd_output("ifconfig -a|grep Ethernet")
        nic_list_before = output.splitlines()
        if sriov:
            # set the parameter max_vfs of igb module to 7. Then we can use
            # the virtual function pci device for network device.
            # command 'modprobe -r igb' to unload igb module
            # command '&& modprobe igb max_vfs=7' to load it again
            #         with max_vfs=7
            # command '|| echo 'FAIL' > output_file' is a flag to mean
            #         modprobe igb with max_vfs=7 failed.
            # command '|| modprobe igb' is a handler of error occurred
            #         when we load igb again. If command 2 failed,
            #         this command will be executed to recover network.
            output_file = os.path.join(test.tmpdir, "output")
            if os.path.exists(output_file):
                os.remove(output_file)
            mod_cmd = ("modprobe -r igb && modprobe igb max_vfs=7 ||"
                       "echo 'FAIL' > %s && modprobe igb &" % output_file)
            result = utils.run(mod_cmd, ignore_status=True)
            if os.path.exists(output_file):
                raise error.TestError("Failed to modprobe igb with max_vfs=7.")
            # Get the virtual function pci device which was generated above.
            pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
            virt_functions = pci_xml.cap.virt_functions
            if not virt_functions:
                raise error.TestError("Init virtual function failed.")
            pci_address = virt_functions[0]
            pci_dev = utils_test.libvirt.pci_label_from_address(pci_address,
                                                                radix=16)

            # Find the network name (ethX) is using this pci device.
            network_service = service.Factory.create_service("network")
            network_service.restart()
            result = virsh.nodedev_list("net")
            nodedev_nets = result.stdout.strip().splitlines()
            device = None
            for nodedev in nodedev_nets:
                netxml = NodedevXML.new_from_dumpxml(nodedev)
                if netxml.parent == pci_dev:
                    device = nodedev
                    break
            if not device:
                raise error.TestNAError("There is no network name is using "
                                        "Virtual Function PCI device %s." %
                                        pci_dev)
        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()
    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()

        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()

    vmxml.add_hostdev(pci_address)

    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        if device_type == "NIC":
            output = session.cmd_output("ifconfig -a|grep Ethernet")
            nic_list_after = output.splitlines()
            if nic_list_after == nic_list_before:
                raise error.TestFail(
                    "No Ethernet found for the pci device in guest.")
            # The newly appeared interface belongs to the attached NIC.
            nic_name = (list(set(nic_list_after) -
                             set(nic_list_before)))[0].split()[0]
            try:
                session.cmd("ifconfig %s %s" % (nic_name, net_ip))
                session.cmd("ping -c 4 %s" % server_ip)
            except aexpect.ShellError as detail:
                # Py3-compatible 'as' syntax; '%' actually interpolates the
                # failure detail (the old comma form passed it as a stray
                # second argument to TestFail).
                raise error.TestFail("Succeed to set ip on guest, but failed "
                                     "to ping server ip from guest.\n"
                                     "Detail: %s." % detail)
        elif device_type == "STORAGE":
            # Get the result of "fdisk -l" in guest, and compare the result
            # with fdisk_list_before.
            output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
            fdisk_list_after = output.splitlines()
            if fdisk_list_after == fdisk_list_before:
                raise error.TestFail("Didn't find the disk attached to guest.")
    finally:
        # NOTE(review): the 'try' here had no visible handler in the chunk;
        # restoring the conventional guest-XML cleanup.
        backup_xml.sync()
def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach pci device to guest.
        4. Start guest and set the ip to all the physical functions.
        5. Ping to server_ip from each physical function to verify the
           new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after attaching
           storage pci device to guest.
    """
    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Whether to pass through an SR-IOV virtual function instead of the
    # whole (possibly multifunction) adapter.
    sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no'))
    device_type = params.get("libvirt_pci_device_type", "NIC")
    pci_dev = None
    device_name = None
    pci_address = None
    bus_info = []
    if device_type == "NIC":
        pci_dev = params.get("libvirt_pci_net_dev_label")
        device_name = params.get("libvirt_pci_net_dev_name", "None")
    else:
        pci_dev = params.get("libvirt_pci_storage_dev_label")
    net_ip = params.get("libvirt_pci_net_ip", "ENTER.YOUR.IP")
    server_ip = params.get("libvirt_pci_server_ip",
                           "ENTER.YOUR.SERVER.IP")
    netmask = params.get("libvirt_pci_net_mask", "ENTER.YOUR.Mask")

    # Check the parameters from configuration file: values still containing
    # the "ENTER" placeholder mean the user did not configure the test.
    if (pci_dev.count("ENTER")):
        test.cancel("Please enter your device name for test.")
    if (device_type == "NIC" and
            (net_ip.count("ENTER") or server_ip.count("ENTER") or
             netmask.count("ENTER"))):
        test.cancel("Please enter the ips and netmask for NIC test in config file")

    fdisk_list_before = None
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy of the domain XML for later restoration.
    backup_xml = vmxml.copy()

    if device_type == "NIC":
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        # Snapshot of guest PCI devices before attaching the hostdev.
        output = session.cmd_output("lspci -nn")
        nic_list_before = output.splitlines()
        if sriov:
            # Reload igb with max_vfs=7 so virtual function PCI devices
            # become available:
            #   'modprobe -r igb'              unloads the igb module
            #   '&& modprobe igb max_vfs=7'    reloads it with 7 VFs
            #   '|| echo FAIL > output_file'   flags that the reload failed
            #   '&& modprobe igb'              recovers networking on failure
            output_file = os.path.join(test.tmpdir, "output")
            if os.path.exists(output_file):
                os.remove(output_file)
            mod_cmd = ("modprobe -r igb && modprobe igb max_vfs=7 ||"
                       "echo 'FAIL' > %s && modprobe igb &" % output_file)
            result = process.run(mod_cmd, ignore_status=True, shell=True)
            if os.path.exists(output_file):
                test.error("Failed to modprobe igb with max_vfs=7.")
            # Get the virtual function pci device which was generated above.
            pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
            virt_functions = pci_xml.cap.virt_functions
            if not virt_functions:
                test.error("Init virtual function failed.")
            pci_address = virt_functions[0]
            pci_dev = utils_test.libvirt.pci_label_from_address(pci_address,
                                                                radix=16)

            # Find the network name (ethX) that is using this pci device.
            # Ubuntu names the service "networking"; other distros "network".
            distro_details = distro.detect()
            if distro_details.name == 'Ubuntu':
                network_service = service.Factory.create_service("networking")
            else:
                network_service = service.Factory.create_service("network")
            network_service.restart()
            result = virsh.nodedev_list("net")
            nodedev_nets = result.stdout.strip().splitlines()
            device = None
            for nodedev in nodedev_nets:
                netxml = NodedevXML.new_from_dumpxml(nodedev)
                if netxml.parent == pci_dev:
                    device = nodedev
                    break
            if not device:
                test.error("There is no network name is using "
                           "Virtual Function PCI device %s."
                           % pci_dev)
            pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
            pci_address = pci_xml.cap.get_address_dict()
            vmxml.add_hostdev(pci_address)
        else:
            # Whole-adapter passthrough: attach every function found in the
            # same IOMMU group as the configured device.
            # NOTE(review): str.strip("pci.") strips a *character set*, not
            # a prefix — it works for "pci_..." labels but is fragile.
            pci_id = pci_dev.replace("_", ".").strip("pci.").replace(".", ":", 2)
            obj = PciAssignable()
            # get all functions id's
            pci_ids = obj.get_same_group_devs(pci_id)
            pci_devs = []
            for val in pci_ids:
                temp = val.replace(":", "_")
                pci_devs.extend(["pci_"+temp])
            # NOTE(review): this reassigned pci_id is not read again in the
            # visible code — confirm whether it is used further down.
            pci_id = re.sub('[:.]', '_', pci_id)
            for val in pci_devs:
                val = val.replace(".", "_")
                pci_xml = NodedevXML.new_from_dumpxml(val)
                pci_address = pci_xml.cap.get_address_dict()
                vmxml.add_hostdev(pci_address)
    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest before attaching the disk.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()
        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()
        vmxml.add_hostdev(pci_address)

    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        if device_type == "NIC":
            output = session.cmd_output("lspci -nn")
            nic_list_after = output.splitlines()
            if nic_list_after == nic_list_before:
                test.fail("passthrough Adapter not found in guest.")
            else:
                logging.debug("Adapter passthorughed to guest successfully")
            if sriov:
                # Bring up the single passed-through VF and ping the server.
                try:
                    output = session.cmd_output("lspci -nn | grep %s"
                                                % device_name)
                    nic_id = str(output).split(' ', 1)[0]
                    nic_name = str(utils_misc.get_interface_from_pci_id(nic_id,
                                                                        session))
                    session.cmd("ip addr flush dev %s" % nic_name)
                    session.cmd("ip addr add %s/%s dev %s"
                                % (net_ip, netmask, nic_name))
                    session.cmd("ip link set %s up" % nic_name)
                    session.cmd("ping -I %s %s -c 5" % (nic_name, server_ip))
                # NOTE(review): Python-2-only except syntax; Python 3 needs
                # "except aexpect.ShellError as detail".
                except aexpect.ShellError, detail:
                    test.error("Succeed to set ip on guest, but failed "
                               "to ping server ip from guest. %s \n" % detail)
            else:
                # Multifunction case: collect the bus address of every
                # passed-through function; the IOMMU-group id is derived by
                # dropping the trailing ".function" ([:-2]) from the address.
                output = session.cmd_output("lspci -nn | grep %s" % device_name)
                nic_list = output.splitlines()
                for val in range(len(nic_list)):
                    bus_info.append(str(nic_list[val]).split(' ', 1)[0])
                    nic_list[val] = str(nic_list[val]).split(' ', 1)[0][:-2]
                # check all functions get same iommu group
                if len(set(nic_list)) != 1:
                    test.fail("Multifunction Device passthroughed but "
                              "functions are in different iommu group")
                # ping to server from each function
                bus_info.sort()
                for val in bus_info:
                    nic_name = str(utils_misc.get_interface_from_pci_id(val,
                                                                        session))
                    try:
                        session.cmd("ip addr flush dev %s" % nic_name)
                        session.cmd("ip addr add %s/%s dev %s"
                                    % (net_ip, netmask, nic_name))
                        session.cmd("ip link set %s up" % nic_name)
                        session.cmd("ping -I %s %s -c 5"
                                    % (nic_name, server_ip))
                    # NOTE(review): Python-2-only except syntax (see above).
                    except aexpect.ShellError, detail:
                        test.error("Succeed to set ip on guest, but failed "
                                   "to ping server ip from guest. %s\n"
                                   % detail)
    # NOTE(review): no except/finally for the try above is visible in this
    # chunk — the cleanup restoring backup_xml appears to be truncated here.
def run(test, params, env):
    """
    Test command: virsh nodedev-reset <device>

    When `device_option` is:
    1) resettable   : Reset specified device if it is resettable.
    2) non-exist    : Try to reset specified device which doesn't exist.
    3) non-pci      : Try to reset all local non-PCI devices.
    4) active       : Try to reset specified device which is attached to VM.
    5) unresettable : Try to reset all unresettable PCI devices.

    :param test:   avocado test object (provides TestError/TestNAError flow).
    :param params: test parameters; keys read: expect_succeed, device_option,
                   specified_device, libvirtd, main_vm.
    :param env:    test environment object used to look up the VM.
    """
    # Retrieve parameters
    expect_succeed = params.get("expect_succeed", "yes")
    device_option = params.get("device_option", "valid")
    # Sentinel meaning "the user did not supply a device name".
    unspecified = "REPLACE_WITH_TEST_DEVICE"
    specified_device = params.get("specified_device", unspecified)

    # Backup original libvirtd status and prepare libvirtd status
    logging.debug("Preparing libvirtd")
    libvirtd = utils_libvirtd.Libvirtd()
    if params.get("libvirtd", "on") == "off":
        libvirtd.stop()

    # Get whether PCI devices are resettable from sysfs.
    devices = get_pci_info()

    # Divide PCI devices into two categories: resettable means the device
    # has both a sysfs `reset` node and a bound driver; unresettable means
    # it has neither.
    resettable_nodes = []
    unresettable_nodes = []
    for device in devices:
        info = devices[device]
        if info["reset"] and info["driver"]:
            resettable_nodes.append(device)
        if not info["reset"] and not info["driver"]:
            unresettable_nodes.append(device)

    # Find out all non-PCI devices (nodedevs absent from the sysfs PCI map).
    all_devices = virsh.nodedev_list().stdout.strip().splitlines()
    non_pci_nodes = []
    for device in all_devices:
        if device not in devices:
            non_pci_nodes.append(device)

    try:
        if device_option == "resettable":
            # Test specified resettable device.
            if specified_device != unspecified:
                if specified_device in resettable_nodes:
                    test_nodedev_reset([specified_device], expect_succeed)
                else:
                    # Bug fix: this branch previously raised "Param
                    # specified_device is not set!", which is misleading —
                    # the param IS set, the device just is not resettable.
                    raise error.TestNAError(
                        "Specified device %s is not resettable!"
                        % specified_device)
            else:
                raise error.TestNAError("Param specified_device is not set!")
        elif device_option == "non-exist":
            # Test specified non-exist device.
            if specified_device != unspecified:
                if specified_device not in all_devices:
                    test_nodedev_reset([specified_device], expect_succeed)
                else:
                    raise error.TestError("Specified device exists!")
            else:
                raise error.TestNAError("Param specified_device is not set!")
        elif device_option == "non-pci":
            # Test all non-PCI device.
            if non_pci_nodes:
                test_nodedev_reset(non_pci_nodes, expect_succeed)
            else:
                raise error.TestNAError("No non-PCI device found!")
        elif device_option == "active":
            # Test specified device if attached to VM.
            if specified_device != unspecified:
                vm_name = params.get("main_vm", "virt-tests-vm1")
                vm = env.get_vm(vm_name)
                test_active_nodedev_reset(
                    specified_device, vm, expect_succeed)
            else:
                raise error.TestNAError("Param specified_device is not set!")
        elif device_option == "unresettable":
            # Test all unresettable device.
            if unresettable_nodes:
                test_nodedev_reset(unresettable_nodes, expect_succeed)
            else:
                raise error.TestNAError("No unresettable device found!")
        else:
            raise error.TestError("Unrecognisable device option %s!"
                                  % device_option)
    finally:
        # Restore libvirtd status
        logging.debug("Restoring libvirtd")
        if not libvirtd.is_running():
            libvirtd.start()
def run(test, params, env):
    """
    Test command: nodedev-list [--tree] [--cap <string>]

    1) Run nodedev-list command and check return code.
    2) If `cap_option == one`, results are also compared
       with devices get from sysfs.
    """
    def _check_result(cap, ref_list, result, mode):
        """
        Check test result against a device list retrieved from sysfs.

        :param cap:        Capability being checked, current available caps
                           are defined in variable `caps`.
        :param ref_list:   Reference device list retrieved from sysfs.
        :param result:     Stdout returned from virsh nodedev-list command.
        :param mode:       How to compare sysfs info with command output:
                           "exact" or "similar".
        """
        check_list = result.strip().splitlines()
        are_not_equivalent = True
        if mode == "similar":
            # "similar": every sysfs device name occurs somewhere in the
            # command output, and both lists have the same length.
            listed = [x for x in ref_list if x in result]
            all_sysfs_info_listed = len(ref_list) == len(listed)
            same_number_of_devices = len(ref_list) == len(check_list)
            are_not_equivalent = (not all_sysfs_info_listed or
                                  not same_number_of_devices)
        elif mode == "exact":
            are_not_equivalent = set(ref_list) != set(check_list)
        else:
            logging.error("Unknown comparison mode in result check: %s",
                          mode)
            return False

        # Mismatches for these capabilities are tolerated (sysfs has no
        # reliable reference list for them).
        uavail_caps = ['system', 'vports', 'fc_host']

        if are_not_equivalent and cap not in uavail_caps:
            logging.error('Difference in capability %s:', cap)
            logging.error('Expected devices: %s', ref_list)
            logging.error('Result devices : %s', check_list)
            return False
        return True

    mode = params.get("comparison_mode", "exact")
    all_caps = [
        'system', 'pci', 'usb_device', 'usb', 'net', 'scsi_host',
        'scsi_target', 'scsi', 'storage', 'fc_host', 'vports',
        'scsi_generic', 'ccw', 'css'
    ]
    expect_succeed = params.get('expect_succeed', 'yes')
    tree_option = params.get('tree_option', 'off')
    cap_option = params.get('cap_option', 'off')
    caps = get_avail_caps(all_caps)
    check_failed = False

    # acl polkit params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            # NOTE(review): '******' looks like a sanitized placeholder for
            # the real fallback user name — confirm against upstream source.
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_dargs = {}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri

    tree = (tree_option == 'on')
    if cap_option == 'one':
        # Query each available capability individually and compare the
        # listing with the reference device map built from sysfs.
        devices = {}
        for cap in caps:
            devices[cap] = get_devices_by_cap(cap)

        for cap in devices:
            logging.debug(cap + ':')
            for device in devices[cap]:
                logging.debug(' ' + device)

        for cap in caps:
            result = virsh.nodedev_list(tree=tree, cap=cap, **virsh_dargs)
            # Break out early on an unexpected exit status; the final
            # checks below then report the failure using this `result`.
            if result.exit_status != 0 and expect_succeed == 'yes':
                break
            elif result.exit_status == 0 and expect_succeed == 'no':
                break
            if not _check_result(cap, devices[cap], result.stdout.strip(), mode):
                check_failed = True
                break
    else:
        # Single invocation with the composed --cap value ('' means no cap).
        cap = ''
        if cap_option != 'off':
            if cap_option == 'multi':
                cap = ','.join(caps)
            elif cap_option == 'long':
                cap = ','.join(['pci', 'usb', 'net', 'storage', 'scsi'] * 5000)
            else:
                cap = cap_option

        result = virsh.nodedev_list(tree=tree, cap=cap, **virsh_dargs)
        logging.debug(result)

    # Final verdict is based on the last `result` seen in either branch.
    if expect_succeed == 'yes':
        if result.exit_status != 0:
            test.fail('Expected succeed, but failed with result:\n%s' % result)
    elif expect_succeed == 'no':
        if result.exit_status == 0:
            test.fail('Expected fail, but succeed with result:\n%s' % result)
    if check_failed:
        test.fail('Check failed. result:\n%s' % result)
def run(test, params, env):
    """
    Test command: virsh nodedev-reset <device>

    When `device_option` is:
    1) resettable   : Reset the first resettable PCI device found.
    2) non-exist    : Try to reset specified device which doesn't exist.
    3) non-pci      : Try to reset all local non-PCI devices.
    4) unresettable : Try to reset all unresettable PCI devices.
    """
    def get_pci_info():
        """
        Get information for all PCI devices including:
        1) whether device has reset under its sysfs dir.
        2) whether device has driver dir under its sysfs dir.

        :return: A dict using libvirt canonical nodedev name as keys
                 and dicts like {'reset': True, 'driver': True} as values
        """
        devices = {}
        pci_path = '/sys/bus/pci/devices'
        for device in os.listdir(pci_path):
            # Generate a virsh nodedev format device name
            dev_name = re.sub(r'\W', '_', 'pci_' + device)

            dev_path = os.path.join(pci_path, device)

            # Check whether device has `reset` file
            reset_path = os.path.join(dev_path, 'reset')
            has_reset = os.path.isfile(reset_path)

            # Check whether device has `driver` dir
            driver_path = os.path.join(dev_path, 'driver')
            has_driver = os.path.isdir(driver_path)

            info = {'reset': has_reset, 'driver': has_driver}
            devices[dev_name] = info
        return devices

    def test_nodedev_reset(devices, expect_error):
        """
        Test nodedev-reset command on a list of devices.

        :param devices:      A list of node devices to be tested.
        :param expect_error: True when `virsh nodedev-reset` is expected
                             to fail, False when it is expected to succeed.
                             (Doc fix: the original description was inverted.)
        """
        for device in devices:
            result = virsh.nodedev_reset(device)
            logging.debug(result)
            # Check whether exit code matches expectation.
            libvirt.check_exit_status(result, expect_error)

    # Retrieve parameters
    expect_error = params.get('expect_error', 'no') == 'yes'
    device_option = params.get('device_option', 'valid')
    # Sentinel meaning "the user did not supply a device name".
    unspecified = 'REPLACE_WITH_TEST_DEVICE'

    # Backup original libvirtd status and prepare libvirtd status
    logging.debug('Preparing libvirtd')
    libvirtd = utils_libvirtd.Libvirtd()
    if params.get("libvirtd", "on") == "off":
        libvirtd.stop()

    # Get whether PCI devices are resettable from sysfs.
    devices = get_pci_info()

    # Divide PCI devices into two categories: resettable means the device
    # has both a sysfs `reset` node and a bound driver; unresettable means
    # it has neither.
    resettable_nodes = []
    unresettable_nodes = []
    for device in devices:
        info = devices[device]
        if info['reset'] and info['driver']:
            resettable_nodes.append(device)
        if not info['reset'] and not info['driver']:
            unresettable_nodes.append(device)

    # Find out all non-PCI devices.
    all_devices = virsh.nodedev_list().stdout.strip().splitlines()
    non_pci_nodes = []
    for device in all_devices:
        if device not in devices:
            non_pci_nodes.append(device)

    try:
        if device_option == 'resettable':
            # Bug fix: `resettable_nodes[0]` previously raised an unhandled
            # IndexError on hosts with no resettable PCI device — cancel the
            # test instead. The former inner comparisons against
            # `unspecified` were dead code, since the device always came
            # from `resettable_nodes` itself.
            if not resettable_nodes:
                test.cancel('No resettable device found!')
            # Test the first resettable device.
            test_nodedev_reset([resettable_nodes[0]], expect_error)
        elif device_option == 'non-exist':
            specified_device = params.get('specified_device', unspecified)
            # Test specified non-exist device.
            if specified_device != unspecified:
                if specified_device not in all_devices:
                    test_nodedev_reset([specified_device], expect_error)
                else:
                    test.error('Specified device exists!')
            else:
                test.cancel('Param specified_device is not set!')
        elif device_option == 'non-pci':
            # Test all non-PCI device.
            if non_pci_nodes:
                test_nodedev_reset(non_pci_nodes, expect_error)
            else:
                test.cancel('No non-PCI device found!')
        elif device_option == 'unresettable':
            # Test all unresettable device.
            if unresettable_nodes:
                test_nodedev_reset(unresettable_nodes, expect_error)
            else:
                test.cancel('No unresettable device found!')
        else:
            test.error('Unrecognisable device option %s!' % device_option)
    finally:
        # Restore libvirtd status
        logging.debug('Restoring libvirtd')
        if not libvirtd.is_running():
            libvirtd.start()
def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach pci device to guest.
        4. Start guest and set the ip of guest.
        5. Ping the server_ip from guest to verify the new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after attaching
           storage pci device to guest.
    """
    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    device_type = params.get("libvirt_pci_device_type", "NIC")
    net_name = params.get("libvirt_pci_net_name", "eth0")
    server_ip = params.get("libvirt_pci_server_ip")
    storage_dev_name = params.get("libvirt_pci_storage_dev_name", "/dev/sdb")

    fdisk_list_before = None
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy of the domain XML for later restoration.
    backup_xml = vmxml.copy()
    pci_address = None

    if device_type == "NIC":
        # Find the pci device for given network device.
        result = virsh.nodedev_list("net")
        nodedev_nets = result.stdout.strip().splitlines()
        device = None
        for nodedev in nodedev_nets:
            netxml = NodedevXML.new_from_dumpxml(nodedev)
            if netxml.cap.interface == net_name:
                device = nodedev
                break
        if not device:
            raise error.TestError("There is no network device name of %s."
                                  % net_name)
        pci_dev = netxml.parent
        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()
    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest before attaching the disk.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()

        result = virsh.nodedev_list("storage")
        nodedev_storages = result.stdout.strip().splitlines()
        for nodedev in nodedev_storages:
            storage_xml = NodedevXML.new_from_dumpxml(nodedev)
            if storage_xml.cap.block == storage_dev_name:
                break
        # NOTE(review): when no storage device matches, `nodedev` still
        # holds the last list entry, so this check never fires (and with an
        # empty list `nodedev` is unbound). A found-flag, as used in the NIC
        # branch above, is needed to actually detect "no match".
        if not nodedev:
            raise error.TestError("There is no block device name of %s."
                                  % storage_dev_name)
        pci_xml = NodedevXML.new_from_dumpxml(storage_xml.parent)
        pci_address = pci_xml.cap.get_address_dict()

    vmxml.add_hostdev(pci_address)

    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        if device_type == "NIC":
            try:
                session.cmd("ping -c 4 %s" % server_ip)
            # NOTE(review): Python-2-only except syntax (Python 3 needs
            # "except aexpect.ShellError as detail"); also TestFail is
            # passed (fmt, detail) as two arguments instead of the
            # %-formatted string.
            except aexpect.ShellError, detail:
                raise error.TestFail("Succeed to set ip on guest, but failed "
                                     "to ping server ip from guest.\n"
                                     "Detail: %s.", detail)
        elif device_type == "STORAGE":
            # Get the result of "fdisk -l" in guest, and compare the result
            # with fdisk_list_before.
            output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
            fdisk_list_after = output.splitlines()
            if fdisk_list_after == fdisk_list_before:
                raise error.TestFail("Didn't find the disk attached to guest.")
    # NOTE(review): no except/finally for the try above is visible in this
    # chunk — the cleanup restoring backup_xml appears to be truncated here.
def run(test, params, env):
    """
    Test for PCI device passthrough to libvirt guest.

    a). NIC:
        1. Get params.
        2. Get the pci device for specific net_name.
        3. Attach pci device to guest.
        4. Start guest and set the ip of guest.
        5. Ping the server_ip from guest to verify the new network device.
    b). STORAGE:
        1. Get params.
        2. Get the pci device for specific storage_dev_name.
        3. Store the result of 'fdisk -l' on guest.
        4. Attach pci device to guest.
        5. Start guest and get the result of 'fdisk -l' on guest.
        6. Compare the result of 'fdisk -l' before and after attaching
           storage pci device to guest.
    """
    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Whether to pass through an SR-IOV virtual function of the adapter.
    sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no'))
    device_type = params.get("libvirt_pci_device_type", "NIC")
    net_name = params.get("libvirt_pci_net_name", "eth0")
    server_ip = params.get("libvirt_pci_server_ip")
    storage_dev_name = params.get("libvirt_pci_storage_dev_name", "/dev/sdb")

    fdisk_list_before = None
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a pristine copy of the domain XML for later restoration.
    backup_xml = vmxml.copy()
    pci_address = None

    if device_type == "NIC":
        # Find the pci device for given network device.
        result = virsh.nodedev_list(cap="net")
        nodedev_nets = result.stdout.strip().splitlines()
        device = None
        for nodedev in nodedev_nets:
            netxml = NodedevXML.new_from_dumpxml(nodedev)
            if netxml.cap.interface == net_name:
                device = nodedev
                break
        if not device:
            raise error.TestNAError("There is no network device name of %s."
                                    % net_name)
        pci_dev = netxml.parent
        if sriov:
            # Reload igb with max_vfs=7 so virtual function PCI devices
            # become available:
            #   'modprobe -r igb'              unloads the igb module
            #   '&& modprobe igb max_vfs=7'    reloads it with 7 VFs
            #   '|| echo FAIL > output_file'   flags that the reload failed
            #   '&& modprobe igb'              recovers networking on failure
            output_file = os.path.join(test.tmpdir, "output")
            if os.path.exists(output_file):
                os.remove(output_file)
            mod_cmd = ("modprobe -r igb && modprobe igb max_vfs=7 ||"
                       "echo 'FAIL' > %s && modprobe igb &" % output_file)
            result = utils.run(mod_cmd, ignore_status=True)
            if os.path.exists(output_file):
                raise error.TestError("Failed to modprobe igb with max_vfs=7.")
            # Get the virtual function pci device which was generated above.
            pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
            virt_functions = pci_xml.cap.virt_functions
            if not virt_functions:
                raise error.TestError("Init virtual function failed.")
            pci_address = virt_functions[0]
            pci_dev = utils_test.libvirt.pci_label_from_address(pci_address,
                                                                radix=16)

            # Find the network name (ethX) that is using this pci device.
            network_service = service.Factory.create_service("network")
            network_service.restart()
            result = virsh.nodedev_list("net")
            nodedev_nets = result.stdout.strip().splitlines()
            device = None
            for nodedev in nodedev_nets:
                netxml = NodedevXML.new_from_dumpxml(nodedev)
                if netxml.parent == pci_dev:
                    device = nodedev
                    break
            if not device:
                raise error.TestNAError("There is no network name is using "
                                        "Virtual Function PCI device %s."
                                        % pci_dev)
        pci_xml = NodedevXML.new_from_dumpxml(pci_dev)
        pci_address = pci_xml.cap.get_address_dict()
    elif device_type == "STORAGE":
        # Store the result of "fdisk -l" in guest before attaching the disk.
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
        fdisk_list_before = output.splitlines()

        result = virsh.nodedev_list(cap="storage")
        nodedev_storages = result.stdout.strip().splitlines()
        device = None
        for nodedev in nodedev_storages:
            storage_xml = NodedevXML.new_from_dumpxml(nodedev)
            if storage_xml.cap.block == storage_dev_name:
                device = nodedev
                break
        if not device:
            raise error.TestNAError("There is no block device name of %s."
                                    % storage_dev_name)
        pci_xml = NodedevXML.new_from_dumpxml(storage_xml.parent)
        # In some cases, the parent of target storage device might not be
        # a PCI device, but is of type 'scsi' for example.
        # SKIP these tests with a proper message.
        if pci_xml.cap_type != 'pci':
            raise error.TestNAError("The parent node device of the storage "
                                    "device need to be a PCI device. "
                                    "But parent of %s is a %s device."
                                    % (storage_dev_name, pci_xml.cap_type))
        pci_address = pci_xml.cap.get_address_dict()

    vmxml.add_hostdev(pci_address)

    try:
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        if device_type == "NIC":
            try:
                session.cmd("ping -c 4 %s" % server_ip)
            # NOTE(review): Python-2-only except syntax (Python 3 needs
            # "except aexpect.ShellError as detail"); also TestFail is
            # passed (fmt, detail) as two arguments instead of the
            # %-formatted string.
            except aexpect.ShellError, detail:
                raise error.TestFail("Succeed to set ip on guest, but failed "
                                     "to ping server ip from guest.\n"
                                     "Detail: %s.", detail)
        elif device_type == "STORAGE":
            # Get the result of "fdisk -l" in guest, and compare the result
            # with fdisk_list_before.
            output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"")
            fdisk_list_after = output.splitlines()
            if fdisk_list_after == fdisk_list_before:
                raise error.TestFail("Didn't find the disk attached to guest.")
    # NOTE(review): no except/finally for the try above is visible in this
    # chunk — the cleanup restoring backup_xml appears to be truncated here.