def run(test, params, env): """ Test for PCI device passthrough to libvirt guest. a). NIC: 1. Get params. 2. Get the pci device for specific net_name. 3. Attach Physical Function's/Virtual Function's to single guest 4. Start guest and set the ip to all the functions. 5. Ping to server_ip from each function to verify the new network device. b). STORAGE: 1. Get params. 2. Get the pci device for specific storage_dev_name. 3. Store the result of 'fdisk -l' on guest. 3. Attach pci device to guest. 4. Start guest and get the result of 'fdisk -l' on guest. 5. Compare the result of 'fdisk -l' before and after attaching storage pci device to guest. """ # get the params from params vm_name = params.get("main_vm") vm = env.get_vm(vm_name) sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no')) device_type = params.get("libvirt_pci_device_type", "NIC") vm_vfs = int(params.get("number_vfs", 2)) pci_dev = None pci_address = None bus_info = [] if device_type == "NIC": pf_filter = params.get("pf_filter", "0000:01:00.0") vf_filter = params.get("vf_filter", "Virtual Function") else: pci_dev = params.get("libvirt_pci_storage_dev_label") net_ip = params.get("libvirt_pci_net_ip", "ENTER.YOUR.IP") server_ip = params.get("libvirt_pci_server_ip", "ENTER.YOUR.SERVER.IP") netmask = params.get("libvirt_pci_net_mask", "ENTER.YOUR.Mask") # Check the parameters from configuration file. if (device_type == "NIC"): if (pf_filter.count("ENTER")): test.cancel("Please enter your NIC Adapter details for test.") if (net_ip.count("ENTER") or server_ip.count("ENTER") or netmask.count("ENTER")): test.cancel("Please enter the ips and netmask for NIC " "test in config file") elif (pci_dev.count("ENTER")): test.cancel("Please enter your Storage Adapter details for test.") fdisk_list_before = None vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() if device_type == "NIC": if not vm.is_alive(): vm.start() session = vm.wait_for_login() nic_list_before = vm.get_pci_devices() obj = PciAssignable(pf_filter_re=pf_filter, vf_filter_re=vf_filter) # get all functions id's pci_ids = obj.get_same_group_devs(pf_filter) pci_devs = [] for val in pci_ids: temp = val.replace(":", "_") pci_devs.extend(["pci_" + temp]) if sriov: # The SR-IOV setup of the VF's should be done by test_setup # PciAssignable class. for pf in pci_ids: obj.set_vf(pf, vm_vfs) cont = obj.get_controller_type() if cont == "Infiniband controller": obj.set_linkvf_ib() for val in pci_devs: val = val.replace(".", "_") # Get the virtual functions of the pci devices # which was generated above. pci_xml = NodedevXML.new_from_dumpxml(val) virt_functions = pci_xml.cap.virt_functions if not virt_functions: test.fail("No Virtual Functions found.") for val in virt_functions: pci_dev = utils_test.libvirt.pci_label_from_address( val, radix=16) pci_xml = NodedevXML.new_from_dumpxml(pci_dev) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) else: for val in pci_devs: val = val.replace(".", "_") pci_xml = NodedevXML.new_from_dumpxml(val) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) elif device_type == "STORAGE": # Store the result of "fdisk -l" in guest. 
if not vm.is_alive(): vm.start() session = vm.wait_for_login() output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"") fdisk_list_before = output.splitlines() pci_xml = NodedevXML.new_from_dumpxml(pci_dev) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) try: vmxml.sync() vm.start() session = vm.wait_for_login() # The Network configuration is generic irrespective of PF or SRIOV VF if device_type == "NIC": nic_list_after = vm.get_pci_devices() net_ip = netaddr.IPAddress(net_ip) if sorted(nic_list_after) == sorted(nic_list_before): test.fail("Passthrough Adapter not found in guest.") else: logging.debug("Adapter passthroughed to guest successfully") nic_list = list( set(nic_list_after).difference(set(nic_list_before))) for val in range(len(nic_list)): bus_info.append(str(nic_list[val]).split(' ', 1)[0]) nic_list[val] = str(nic_list[val]).split(' ', 1)[0][:-2] bus_info.sort() if not sriov: # check all functions get same iommu group if len(set(nic_list)) != 1: test.fail("Multifunction Device passthroughed but " "functions are in different iommu group") # ping to server from each function for val in bus_info: nic_name = str( utils_misc.get_interface_from_pci_id(val, session)) session.cmd("ip addr flush dev %s" % nic_name) session.cmd("ip addr add %s/%s dev %s" % (net_ip, netmask, nic_name)) session.cmd("ip link set %s up" % nic_name) # Pinging using nic_name is having issue, # hence replaced with IPAddress s_ping, o_ping = utils_test.ping(server_ip, count=5, interface=net_ip, timeout=30, session=session) logging.info(o_ping) if s_ping != 0: err_msg = "Ping test fails, error info: '%s'" test.fail(err_msg % o_ping) # Each interface should have unique IP net_ip = net_ip + 1 elif device_type == "STORAGE": # Get the result of "fdisk -l" in guest, and # compare the result with fdisk_list_before. output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"") fdisk_list_after = output.splitlines() if fdisk_list_after == fdisk_list_before: test.fail("Didn't find the disk attached to guest.") finally: backup_xml.sync() # For SR-IOV , VF's should be cleaned up in the post-processing. if sriov: if obj.get_vfs_count() != 0: for pci_pf in pci_ids: obj.set_vf(pci_pf, vf_no="0")
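
# The parameters read above come from the test's cfg file.  A minimal, purely
# illustrative snippet (every value below is a placeholder for this sketch,
# not a shipped default) might look like:
#
#     libvirt_pci_device_type = NIC
#     libvirt_pci_SRIOV = yes
#     number_vfs = 2
#     pf_filter = 0000:01:00.0
#     vf_filter = Virtual Function
#     libvirt_pci_net_ip = 192.168.100.10
#     libvirt_pci_server_ip = 192.168.100.1
#     libvirt_pci_net_mask = 24
#
# For the STORAGE case, libvirt_pci_storage_dev_label would instead carry the
# libvirt nodedev label of the storage adapter (e.g. pci_0000_03_00_0).
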
def run(test, params, env): """ Test for PCI single function device(NIC or Infiniband) passthrough to libvirt guest in hotplug mode. a). NIC Or Infiniband: 1. Get params. 2. Get the pci device function. 3. Start guest 4. prepare device xml to be attached 5. hotplug the device 6. check device hotplugged or not 7. Ping to server_ip from guest 8. test flood ping 9. test guest life cycle 10. test virsh dumpxml 11. hotunplug the device 12. test stress to verify the new network device. """ # get the params from params vm_name = params.get("main_vm") vm = env.get_vm(vm_name) device_name = params.get("libvirt_pci_net_dev_name", "ENTER_YOUR.DEV.NAME") pci_id = params.get("libvirt_pci_net_dev_label", "ENTER_YOUR.DEV.LABEL") net_ip = params.get("libvirt_pci_net_ip", "ENTER_YOUR.IP") server_ip = params.get("libvirt_pci_server_ip", "ENTER_YOUR.SERVER.IP") netmask = params.get("libvirt_pci_net_mask", "ENTER_YOUR.MASK") stress_val = params.get("stress_val", "1") stress = params.get("stress", "no") timeout = int(params.get("timeout", "ENTER_YOUR.TIMEOUT.VALUE")) suspend_operation = params.get("suspend_operation", "no") reboot_operation = params.get("reboot_operation", "no") virsh_dumpxml = params.get("virsh_dumpxml", "no") virsh_dump = params.get("virsh_dump", "no") flood_ping = params.get("flood_ping", "no") # Check the parameters from configuration file. for each_param in params.itervalues(): if "ENTER_YOUR" in each_param: test.cancel("Please enter the configuration details of %s." % each_param) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() devices = vmxml.get_devices() pci_devs = [] dargs = {'debug': True, 'ignore_status': True} controller = Controller("controller") controller.type = "pci" controller.index = params.get("index", "1") controller.model = params.get("model", "pci-root") devices.append(controller) vmxml.set_devices(devices) vmxml.sync() if not vm.is_alive(): vm.start() session = vm.wait_for_login() if not utils_package.package_install(["ppc64-diag", "librtas", "powerpc-utils"], session, 360): test.cancel('Fail on dependencies installing') if virsh_dump == "yes": dump_file = os.path.join(data_dir.get_tmp_dir(), "virshdump.xml") output = session.cmd_output("ip link") logging.debug("checking for output - %s", output) nic_list_before = str(output.splitlines()) logging.debug("nic_list before hotplug %s", nic_list_before) obj = PciAssignable() # get all functions id's pci_ids = obj.get_same_group_devs(pci_id) for val in pci_ids: temp = val.replace(":", "_") pci_devs.extend(["pci_"+temp]) pci_val = pci_devs[0].replace(".", "_") pci_xml = NodedevXML.new_from_dumpxml(pci_val) pci_address = pci_xml.cap.get_address_dict() dev = VMXML.get_device_class('hostdev')() dev.mode = 'subsystem' dev.type = 'pci' dev.managed = 'no' dev.source = dev.new_source(**pci_address) def detach_device(pci_devs, pci_ids): # detaching the device from host for pci_value, pci_node in map(None, pci_devs, pci_ids): pci_value = pci_value.replace(".", "_") cmd = "lspci -ks %s | grep 'Kernel driver in use' |\ awk '{print $5}'" % pci_node driver_name = process.run(cmd, shell=True).stdout_text.strip() if driver_name == "vfio-pci": logging.debug("device alreay detached") else: if virsh.nodedev_detach(pci_value).exit_status: test.error("Hostdev node detach failed") driver_name = process.run(cmd, shell=True).stdout_text.strip() if driver_name != "vfio-pci": test.error("driver bind failed after detach") def reattach_device(pci_devs, pci_ids): # reattach the device to host for pci_value, pci_node in map(None, 
pci_devs, pci_ids): pci_value = pci_value.replace(".", "_") cmd = "lspci -ks %s | grep 'Kernel driver in use' |\ awk '{print $5}'" % pci_node driver_name = process.run(cmd, shell=True).stdout_text.strip() if driver_name != "vfio-pci": logging.debug("device alreay attached") else: if virsh.nodedev_reattach(pci_value).exit_status: test.fail("Hostdev node reattach failed") driver_name = process.run(cmd, shell=True).stdout_text.strip() if driver_name == "vfio-pci": test.error("driver bind failed after reattach") def check_attach_pci(): session = vm.wait_for_login() output = session.cmd_output("ip link") nic_list_after = str(output.splitlines()) logging.debug(nic_list_after) return nic_list_after != nic_list_before def device_hotplug(): if not libvirt_version.version_compare(3, 10, 0): detach_device(pci_devs, pci_ids) # attach the device in hotplug mode result = virsh.attach_device(vm_name, dev.xml, flagstr="--live", debug=True) if result.exit_status: test.error(result.stdout.strip()) else: logging.debug(result.stdout.strip()) if not utils_misc.wait_for(check_attach_pci, timeout): test.fail("timeout value is not sufficient") # detach hot plugged device def device_hotunplug(): result = virsh.detach_device(vm_name, dev.xml, flagstr="--live", debug=True) if result.exit_status: test.fail(result.stdout.strip()) else: logging.debug(result.stdout.strip()) # Fix me # the purpose of waiting here is after detach the device from # guest it need time to perform any other operation on the device time.sleep(timeout) if not libvirt_version.version_compare(3, 10, 0): pci_devs.sort() reattach_device(pci_devs, pci_ids) def test_ping(): try: output = session.cmd_output("lspci -nn | grep %s" % device_name) nic_id = str(output).split(' ', 1)[0] nic_name = str(utils_misc.get_interface_from_pci_id(nic_id, session)) session.cmd("ip addr flush dev %s" % nic_name) session.cmd("ip addr add %s/%s dev %s" % (net_ip, netmask, nic_name)) session.cmd("ip link set %s up" % nic_name) s_ping, o_ping = utils_net.ping(dest=server_ip, count=5, interface=net_ip) logging.info(s_ping) logging.info(o_ping) if s_ping: test.fail("Ping test failed") except aexpect.ShellError, detail: test.error("Succeed to set ip on guest, but failed " "to bring up interface.\n" "Detail: %s." % detail)
def run(test, params, env): """ Test for PCI device passthrough to libvirt guest. a). NIC: 1. Get params. 2. Get the pci device for specific net_name. 3. Attach pci device to guest. 4. Start guest and set the ip to all the physical functions. 5. Ping to server_ip from each physical function to verify the new network device. b). STORAGE: 1. Get params. 2. Get the pci device for specific storage_dev_name. 3. Store the result of 'fdisk -l' on guest. 3. Attach pci device to guest. 4. Start guest and get the result of 'fdisk -l' on guest. 5. Compare the result of 'fdisk -l' before and after attaching storage pci device to guest. """ # get the params from params vm_name = params.get("main_vm") vm = env.get_vm(vm_name) sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no')) device_type = params.get("libvirt_pci_device_type", "NIC") pci_dev = None device_name = None pci_address = None bus_info = [] if device_type == "NIC": pci_dev = params.get("libvirt_pci_net_dev_label") device_name = params.get("libvirt_pci_net_dev_name", "None") else: pci_dev = params.get("libvirt_pci_storage_dev_label") net_ip = params.get("libvirt_pci_net_ip", "ENTER.YOUR.IP") server_ip = params.get("libvirt_pci_server_ip", "ENTER.YOUR.SERVER.IP") netmask = params.get("libvirt_pci_net_mask", "ENTER.YOUR.Mask") # Check the parameters from configuration file. if (pci_dev.count("ENTER")): test.cancel("Please enter your device name for test.") if (device_type == "NIC" and (net_ip.count("ENTER") or server_ip.count("ENTER") or netmask.count("ENTER"))): test.cancel("Please enter the ips and netmask for NIC " "test in config file") fdisk_list_before = None vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() if device_type == "NIC": if not vm.is_alive(): vm.start() session = vm.wait_for_login() output = session.cmd_output("lspci -nn") nic_list_before = output.splitlines() if sriov: # The SR-IOV setup of the VF's should be done by test_setup # based on the driver options. # Usage of the PciAssignable for setting up of the VF's # is generic, and eliminates the need to hardcode the driver # and number of VF's to be created. sriov_setup = PciAssignable( driver=params.get("driver"), driver_option=params.get("driver_option"), host_set_flag=params.get("host_set_flag", 1), vf_filter_re=params.get("vf_filter_re"), pf_filter_re=params.get("pf_filter_re"), pa_type=params.get("pci_assignable")) # For Infiniband Controllers, we have to set the link # for the VF's before pass-through. cont = sriov_setup.get_controller_type() if cont == "Infiniband controller": sriov_setup.set_linkvf_ib() # Based on the PF Device specified, all the VF's # belonging to the same iommu group, will be # pass-throughed to the guest. pci_id = pci_dev.replace("_", ".").strip("pci.").replace(".", ":", 2) pci_ids = sriov_setup.get_same_group_devs(pci_id) pci_devs = [] for val in pci_ids: temp = val.replace(":", "_") pci_devs.extend(["pci_"+temp]) pci_id = re.sub('[:.]', '_', pci_id) for val in pci_devs: val = val.replace(".", "_") # Get the virtual functions of the pci devices # which was generated above. 
pci_xml = NodedevXML.new_from_dumpxml(val) virt_functions = pci_xml.cap.virt_functions if not virt_functions: test.fail("No Virtual Functions found.") for val in virt_functions: pci_dev = utils_test.libvirt.pci_label_from_address(val, radix=16) pci_xml = NodedevXML.new_from_dumpxml(pci_dev) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) else: pci_id = pci_dev.replace("_", ".").strip("pci.").replace(".", ":", 2) obj = PciAssignable() # get all functions id's pci_ids = obj.get_same_group_devs(pci_id) pci_devs = [] for val in pci_ids: temp = val.replace(":", "_") pci_devs.extend(["pci_"+temp]) pci_id = re.sub('[:.]', '_', pci_id) for val in pci_devs: val = val.replace(".", "_") pci_xml = NodedevXML.new_from_dumpxml(val) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) elif device_type == "STORAGE": # Store the result of "fdisk -l" in guest. if not vm.is_alive(): vm.start() session = vm.wait_for_login() output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"") fdisk_list_before = output.splitlines() pci_xml = NodedevXML.new_from_dumpxml(pci_dev) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) try: vmxml.sync() vm.start() session = vm.wait_for_login() # The Network configuration is generic irrespective of PF or SRIOV VF if device_type == "NIC": output = session.cmd_output("lspci -nn") nic_list_after = output.splitlines() net_ip = netaddr.IPAddress(net_ip) if nic_list_after == nic_list_before: test.fail("passthrough Adapter not found in guest.") else: logging.debug("Adapter passthorughed to guest successfully") output = session.cmd_output("lspci -nn | grep %s" % device_name) nic_list = output.splitlines() for val in range(len(nic_list)): bus_info.append(str(nic_list[val]).split(' ', 1)[0]) nic_list[val] = str(nic_list[val]).split(' ', 1)[0][:-2] bus_info.sort() if not sriov: # check all functions get same iommu group if len(set(nic_list)) != 1: test.fail("Multifunction Device passthroughed but " "functions are in different iommu group") # ping to server from each function for val in bus_info: nic_name = str(utils_misc.get_interface_from_pci_id(val, session)) session.cmd("ip addr flush dev %s" % nic_name) session.cmd("ip addr add %s/%s dev %s" % (net_ip, netmask, nic_name)) session.cmd("ip link set %s up" % nic_name) # Pinging using nic_name is having issue, # hence replaced with IPAddress s_ping, o_ping = utils_test.ping(server_ip, count=5, interface=net_ip, timeout=30, session=session) logging.info(o_ping) if s_ping != 0: err_msg = "Ping test fails, error info: '%s'" test.fail(err_msg % o_ping) # Each interface should have unique IP net_ip = net_ip + 1 elif device_type == "STORAGE": # Get the result of "fdisk -l" in guest, and # compare the result with fdisk_list_before. output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"") fdisk_list_after = output.splitlines() if fdisk_list_after == fdisk_list_before: test.fail("Didn't find the disk attached to guest.") finally: backup_xml.sync() # For SR-IOV , VF's should be cleaned up in the post-processing. if sriov: sriov_setup.release_devs()
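
# For reference, each vmxml.add_hostdev(pci_address) call above adds a
# <hostdev> entry to the guest XML built from the node device's address.
# A rough, illustrative sketch (address values are placeholders; whether the
# device ends up 'managed' depends on add_hostdev's defaults):
#
#     <hostdev mode='subsystem' type='pci'>
#       <source>
#         <address domain='0x0000' bus='0x01' slot='0x00' function='0x1'/>
#       </source>
#     </hostdev>
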
def run(test, params, env): """ Test for PCI device passthrough to libvirt guest. a). NIC: 1. Get params. 2. Get the pci device for specific net_name. 3. Attach pci device to guest. 4. Start guest and set the ip to all the physical functions. 5. Ping to server_ip from each physical function to verify the new network device. b). STORAGE: 1. Get params. 2. Get the pci device for specific storage_dev_name. 3. Store the result of 'fdisk -l' on guest. 3. Attach pci device to guest. 4. Start guest and get the result of 'fdisk -l' on guest. 5. Compare the result of 'fdisk -l' before and after attaching storage pci device to guest. """ # get the params from params vm_name = params.get("main_vm") vm = env.get_vm(vm_name) sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no')) device_type = params.get("libvirt_pci_device_type", "NIC") pci_dev = None device_name = None pci_address = None bus_info = [] if device_type == "NIC": pci_dev = params.get("libvirt_pci_net_dev_label") device_name = params.get("libvirt_pci_net_dev_name", "None") else: pci_dev = params.get("libvirt_pci_storage_dev_label") net_ip = params.get("libvirt_pci_net_ip", "ENTER.YOUR.IP") server_ip = params.get("libvirt_pci_server_ip", "ENTER.YOUR.SERVER.IP") netmask = params.get("libvirt_pci_net_mask", "ENTER.YOUR.Mask") # Check the parameters from configuration file. if (pci_dev.count("ENTER")): test.cancel("Please enter your device name for test.") if (device_type == "NIC" and (net_ip.count("ENTER") or server_ip.count("ENTER") or netmask.count("ENTER"))): test.cancel("Please enter the ips and netmask for NIC test in config file") fdisk_list_before = None vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() if device_type == "NIC": if not vm.is_alive(): vm.start() session = vm.wait_for_login() output = session.cmd_output("lspci -nn") nic_list_before = output.splitlines() if sriov: # set the parameter max_vfs of igb module to 7. Then we can use # the virtual function pci device for network device. # command 'modprobe -r igb' to unload igb module # command '&& modprobe igb max_vfs=7' to load it again # with max_vfs=7 # command '|| echo 'FAIL' > output_file' is a flag to mean # modprobe igb with max_vfs=7 failed. # command '|| modprobe igb' is a handler of error occured # when we load igb again. If command 2 failed, # this command will be executed to recover network. output_file = os.path.join(test.tmpdir, "output") if os.path.exists(output_file): os.remove(output_file) mod_cmd = ("modprobe -r igb && modprobe igb max_vfs=7 ||" "echo 'FAIL' > %s && modprobe igb &" % output_file) result = process.run(mod_cmd, ignore_status=True, shell=True) if os.path.exists(output_file): test.error("Failed to modprobe igb with max_vfs=7.") # Get the virtual function pci device which was generated above. pci_xml = NodedevXML.new_from_dumpxml(pci_dev) virt_functions = pci_xml.cap.virt_functions if not virt_functions: test.error("Init virtual function failed.") pci_address = virt_functions[0] pci_dev = utils_test.libvirt.pci_label_from_address(pci_address, radix=16) # Find the network name (ethX) is using this pci device. 
distro_details = distro.detect() if distro_details.name == 'Ubuntu': network_service = service.Factory.create_service("networking") else: network_service = service.Factory.create_service("network") network_service.restart() result = virsh.nodedev_list("net") nodedev_nets = result.stdout.strip().splitlines() device = None for nodedev in nodedev_nets: netxml = NodedevXML.new_from_dumpxml(nodedev) if netxml.parent == pci_dev: device = nodedev break if not device: test.error("There is no network name is using " "Virtual Function PCI device %s." % pci_dev) pci_xml = NodedevXML.new_from_dumpxml(pci_dev) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) else: pci_id = pci_dev.replace("_", ".").strip("pci.").replace(".", ":", 2) obj = PciAssignable() # get all functions id's pci_ids = obj.get_same_group_devs(pci_id) pci_devs = [] for val in pci_ids: temp = val.replace(":", "_") pci_devs.extend(["pci_"+temp]) pci_id = re.sub('[:.]', '_', pci_id) for val in pci_devs: val = val.replace(".", "_") pci_xml = NodedevXML.new_from_dumpxml(val) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) elif device_type == "STORAGE": # Store the result of "fdisk -l" in guest. if not vm.is_alive(): vm.start() session = vm.wait_for_login() output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"") fdisk_list_before = output.splitlines() pci_xml = NodedevXML.new_from_dumpxml(pci_dev) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) try: vmxml.sync() vm.start() session = vm.wait_for_login() if device_type == "NIC": output = session.cmd_output("lspci -nn") nic_list_after = output.splitlines() if nic_list_after == nic_list_before: test.fail("passthrough Adapter not found in guest.") else: logging.debug("Adapter passthorughed to guest successfully") if sriov: try: output = session.cmd_output("lspci -nn | grep %s" % device_name) nic_id = str(output).split(' ', 1)[0] nic_name = str(utils_misc.get_interface_from_pci_id(nic_id, session)) session.cmd("ip addr flush dev %s" % nic_name) session.cmd("ip addr add %s/%s dev %s" % (net_ip, netmask, nic_name)) session.cmd("ip link set %s up" % nic_name) session.cmd("ping -I %s %s -c 5" % (nic_name, server_ip)) except aexpect.ShellError, detail: test.error("Succeed to set ip on guest, but failed " "to ping server ip from guest. %s \n" % detail) else: output = session.cmd_output("lspci -nn | grep %s" % device_name) nic_list = output.splitlines() for val in range(len(nic_list)): bus_info.append(str(nic_list[val]).split(' ', 1)[0]) nic_list[val] = str(nic_list[val]).split(' ', 1)[0][:-2] # check all functions get same iommu group if len(set(nic_list)) != 1: test.fail("Multifunction Device passthroughed but " "functions are in different iommu group") # ping to server from each function bus_info.sort() for val in bus_info: nic_name = str(utils_misc.get_interface_from_pci_id(val, session)) try: session.cmd("ip addr flush dev %s" % nic_name) session.cmd("ip addr add %s/%s dev %s" % (net_ip, netmask, nic_name)) session.cmd("ip link set %s up" % nic_name) session.cmd("ping -I %s %s -c 5" % (nic_name, server_ip)) except aexpect.ShellError, detail: test.error("Succeed to set ip on guest, but failed " "to ping server ip from guest. %s\n" % detail)
def run(test, params, env): """ Test for PCI device passthrough to libvirt guest. a). NIC: 1. Get params. 2. Get the pci device for specific net_name. 3. Attach pci device to guest. 4. Start guest and set the ip to all the physical functions. 5. Ping to server_ip from each physical function to verify the new network device. b). STORAGE: 1. Get params. 2. Get the pci device for specific storage_dev_name. 3. Store the result of 'fdisk -l' on guest. 3. Attach pci device to guest. 4. Start guest and get the result of 'fdisk -l' on guest. 5. Compare the result of 'fdisk -l' before and after attaching storage pci device to guest. """ # get the params from params vm_name = params.get("main_vm") vm = env.get_vm(vm_name) sriov = ('yes' == params.get("libvirt_pci_SRIOV", 'no')) device_type = params.get("libvirt_pci_device_type", "NIC") pci_dev = None device_name = None pci_address = None bus_info = [] if device_type == "NIC": pci_dev = params.get("libvirt_pci_net_dev_label") device_name = params.get("libvirt_pci_net_dev_name", "None") else: pci_dev = params.get("libvirt_pci_storage_dev_label") net_ip = params.get("libvirt_pci_net_ip", "ENTER.YOUR.IP") server_ip = params.get("libvirt_pci_server_ip", "ENTER.YOUR.SERVER.IP") netmask = params.get("libvirt_pci_net_mask", "ENTER.YOUR.Mask") # Check the parameters from configuration file. if (pci_dev.count("ENTER")): test.cancel("Please enter your device name for test.") if (device_type == "NIC" and (net_ip.count("ENTER") or server_ip.count("ENTER") or netmask.count("ENTER"))): test.cancel("Please enter the ips and netmask for NIC test in config file") fdisk_list_before = None vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() if device_type == "NIC": if not vm.is_alive(): vm.start() session = vm.wait_for_login() output = session.cmd_output("lspci -nn") nic_list_before = output.splitlines() if sriov: # set the parameter max_vfs of igb module to 7. Then we can use # the virtual function pci device for network device. # command 'modprobe -r igb' to unload igb module # command '&& modprobe igb max_vfs=7' to load it again # with max_vfs=7 # command '|| echo 'FAIL' > output_file' is a flag to mean # modprobe igb with max_vfs=7 failed. # command '|| modprobe igb' is a handler of error occured # when we load igb again. If command 2 failed, # this command will be executed to recover network. output_file = os.path.join(test.tmpdir, "output") if os.path.exists(output_file): os.remove(output_file) mod_cmd = ("modprobe -r igb && modprobe igb max_vfs=7 ||" "echo 'FAIL' > %s && modprobe igb &" % output_file) result = process.run(mod_cmd, ignore_status=True, shell=True) if os.path.exists(output_file): test.error("Failed to modprobe igb with max_vfs=7.") # Get the virtual function pci device which was generated above. pci_xml = NodedevXML.new_from_dumpxml(pci_dev) virt_functions = pci_xml.cap.virt_functions if not virt_functions: test.error("Init virtual function failed.") pci_address = virt_functions[0] pci_dev = utils_test.libvirt.pci_label_from_address(pci_address, radix=16) # Find the network name (ethX) is using this pci device. 
distro_details = distro.detect() if distro_details.name == 'Ubuntu': network_service = service.Factory.create_service("networking") else: network_service = service.Factory.create_service("network") network_service.restart() result = virsh.nodedev_list("net") nodedev_nets = result.stdout.strip().splitlines() device = None for nodedev in nodedev_nets: netxml = NodedevXML.new_from_dumpxml(nodedev) if netxml.parent == pci_dev: device = nodedev break if not device: test.error("There is no network name is using " "Virtual Function PCI device %s." % pci_dev) pci_xml = NodedevXML.new_from_dumpxml(pci_dev) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) else: pci_id = pci_dev.replace("_", ".").strip("pci.").replace(".", ":", 2) obj = PciAssignable() # get all functions id's pci_ids = obj.get_same_group_devs(pci_id) pci_devs = [] for val in pci_ids: temp = val.replace(":", "_") pci_devs.extend(["pci_"+temp]) pci_id = re.sub('[:.]', '_', pci_id) for val in pci_devs: val = val.replace(".", "_") pci_xml = NodedevXML.new_from_dumpxml(val) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) elif device_type == "STORAGE": # Store the result of "fdisk -l" in guest. if not vm.is_alive(): vm.start() session = vm.wait_for_login() output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"") fdisk_list_before = output.splitlines() pci_xml = NodedevXML.new_from_dumpxml(pci_dev) pci_address = pci_xml.cap.get_address_dict() vmxml.add_hostdev(pci_address) try: vmxml.sync() vm.start() session = vm.wait_for_login() if device_type == "NIC": output = session.cmd_output("lspci -nn") nic_list_after = output.splitlines() if nic_list_after == nic_list_before: test.fail("passthrough Adapter not found in guest.") else: logging.debug("Adapter passthorughed to guest successfully") if sriov: try: output = session.cmd_output("lspci -nn | grep %s" % device_name) nic_id = str(output).split(' ', 1)[0] nic_name = str(utils_misc.get_interface_from_pci_id(nic_id, session)) session.cmd("ip addr flush dev %s" % nic_name) session.cmd("ip addr add %s/%s dev %s" % (net_ip, netmask, nic_name)) session.cmd("ip link set %s up" % nic_name) session.cmd("ping -I %s %s -c 5" % (nic_name, server_ip)) except aexpect.ShellError as detail: test.error("Succeed to set ip on guest, but failed " "to ping server ip from guest. %s \n" % detail) else: output = session.cmd_output("lspci -nn | grep %s" % device_name) nic_list = output.splitlines() for val in range(len(nic_list)): bus_info.append(str(nic_list[val]).split(' ', 1)[0]) nic_list[val] = str(nic_list[val]).split(' ', 1)[0][:-2] # check all functions get same iommu group if len(set(nic_list)) != 1: test.fail("Multifunction Device passthroughed but " "functions are in different iommu group") # ping to server from each function bus_info.sort() for val in bus_info: nic_name = str(utils_misc.get_interface_from_pci_id(val, session)) try: session.cmd("ip addr flush dev %s" % nic_name) session.cmd("ip addr add %s/%s dev %s" % (net_ip, netmask, nic_name)) session.cmd("ip link set %s up" % nic_name) session.cmd("ping -I %s %s -c 5" % (nic_name, server_ip)) except aexpect.ShellError as detail: test.error("Succeed to set ip on guest, but failed " "to ping server ip from guest. %s\n" % detail) elif device_type == "STORAGE": # Get the result of "fdisk -l" in guest, and compare the result with # fdisk_list_before. 
output = session.cmd_output("fdisk -l|grep \"Disk identifier:\"") fdisk_list_after = output.splitlines() if fdisk_list_after == fdisk_list_before: test.fail("Didn't find the disk attached to guest.") finally: backup_xml.sync()
def run(test, params, env): """ Test for PCI single function device(NIC or Infiniband) passthrough to libvirt guest in hotplug mode. a). NIC Or Infiniband: 1. Get params. 2. Get the pci device function. 3. Start guest 4. prepare device xml to be attached 5. hotplug the device 6. check device hotplugged or not 7. Ping to server_ip from guest 8. test flood ping 9. test guest life cycle 10. test virsh dumpxml 11. hotunplug the device 12. test stress to verify the new network device. """ # get the params from params vm_name = params.get("main_vm") vm = env.get_vm(vm_name) device_name = params.get("libvirt_pci_net_dev_name", "ENTER_YOUR.DEV.NAME") pci_id = params.get("libvirt_pci_net_dev_label", "ENTER_YOUR.DEV.LABEL") net_ip = params.get("libvirt_pci_net_ip", "ENTER_YOUR.IP") server_ip = params.get("libvirt_pci_server_ip", "ENTER_YOUR.SERVER.IP") netmask = params.get("libvirt_pci_net_mask", "ENTER_YOUR.MASK") stress_val = params.get("stress_val", "1") stress = params.get("stress", "no") timeout = int(params.get("timeout", "ENTER_YOUR.TIMEOUT.VALUE")) suspend_operation = params.get("suspend_operation", "no") reboot_operation = params.get("reboot_operation", "no") virsh_dumpxml = params.get("virsh_dumpxml", "no") virsh_dump = params.get("virsh_dump", "no") flood_ping = params.get("flood_ping", "no") # Check the parameters from configuration file. for each_param in params.itervalues(): if "ENTER_YOUR" in each_param: test.cancel("Please enter the configuration details of %s." % each_param) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() devices = vmxml.get_devices() pci_devs = [] dargs = {'debug': True, 'ignore_status': True} controller = Controller("controller") controller.type = "pci" controller.index = params.get("index", "1") controller.model = params.get("model", "pci-root") devices.append(controller) vmxml.set_devices(devices) vmxml.sync() if not vm.is_alive(): vm.start() session = vm.wait_for_login() if not utils_package.package_install(["ppc64-diag", "librtas", "powerpc-utils"], session, 360): test.cancel('Fail on dependencies installing') if virsh_dump == "yes": dump_file = os.path.join(data_dir.get_tmp_dir(), "virshdump.xml") output = session.cmd_output("ip link") logging.debug("checking for output - %s", output) nic_list_before = str(output.splitlines()) logging.debug("nic_list before hotplug %s", nic_list_before) obj = PciAssignable() # get all functions id's pci_ids = obj.get_same_group_devs(pci_id) for val in pci_ids: temp = val.replace(":", "_") pci_devs.extend(["pci_"+temp]) pci_val = pci_devs[0].replace(".", "_") pci_xml = NodedevXML.new_from_dumpxml(pci_val) pci_address = pci_xml.cap.get_address_dict() dev = VMXML.get_device_class('hostdev')() dev.mode = 'subsystem' dev.hostdev_type = 'pci' dev.managed = 'no' dev.source = dev.new_source(**pci_address) def detach_device(pci_devs, pci_ids): # detaching the device from host for pci_value, pci_node in map(None, pci_devs, pci_ids): pci_value = pci_value.replace(".", "_") cmd = "lspci -ks %s | grep 'Kernel driver in use' |\ awk '{print $5}'" % pci_node driver_name = to_text(process.system_output(cmd, shell=True).strip()) if driver_name == "vfio-pci": logging.debug("device alreay detached") else: if virsh.nodedev_detach(pci_value).exit_status: test.error("Hostdev node detach failed") driver_name = to_text(process.system_output(cmd, shell=True).strip()) if driver_name != "vfio-pci": test.error("driver bind failed after detach") def reattach_device(pci_devs, pci_ids): # reattach the device to host for pci_value, 
pci_node in map(None, pci_devs, pci_ids): pci_value = pci_value.replace(".", "_") cmd = "lspci -ks %s | grep 'Kernel driver in use' |\ awk '{print $5}'" % pci_node driver_name = to_text(process.system_output(cmd, shell=True).strip()) if driver_name != "vfio-pci": logging.debug("device alreay attached") else: if virsh.nodedev_reattach(pci_value).exit_status: test.fail("Hostdev node reattach failed") driver_name = to_text(process.system_output(cmd, shell=True).strip()) if driver_name == "vfio-pci": test.error("driver bind failed after reattach") def check_attach_pci(): session = vm.wait_for_login() output = session.cmd_output("ip link") nic_list_after = str(output.splitlines()) logging.debug(nic_list_after) return nic_list_after != nic_list_before def device_hotplug(): if not libvirt_version.version_compare(3, 10, 0): detach_device(pci_devs, pci_ids) # attach the device in hotplug mode result = virsh.attach_device(vm_name, dev.xml, flagstr="--live", debug=True) if result.exit_status: test.error(result.stdout.strip()) else: logging.debug(result.stdout.strip()) if not utils_misc.wait_for(check_attach_pci, timeout): test.fail("timeout value is not sufficient") # detach hot plugged device def device_hotunplug(): result = virsh.detach_device(vm_name, dev.xml, flagstr="--live", debug=True) if result.exit_status: test.fail(result.stdout.strip()) else: logging.debug(result.stdout.strip()) # Fix me # the purpose of waiting here is after detach the device from # guest it need time to perform any other operation on the device time.sleep(timeout) if not libvirt_version.version_compare(3, 10, 0): pci_devs.sort() reattach_device(pci_devs, pci_ids) def test_ping(): try: output = session.cmd_output("lspci -nn | grep %s" % device_name) nic_id = str(output).split(' ', 1)[0] nic_name = str(utils_misc.get_interface_from_pci_id(nic_id, session)) session.cmd("ip addr flush dev %s" % nic_name) session.cmd("ip addr add %s/%s dev %s" % (net_ip, netmask, nic_name)) session.cmd("ip link set %s up" % nic_name) s_ping, o_ping = utils_net.ping(dest=server_ip, count=5, interface=net_ip) logging.info(s_ping) logging.info(o_ping) if s_ping: test.fail("Ping test failed") except aexpect.ShellError, detail: test.error("Succeed to set ip on guest, but failed " "to bring up interface.\n" "Detail: %s." % detail)
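
# The hotplug variant above expects its own set of cfg parameters; any value
# still containing "ENTER_YOUR" causes the test to be cancelled.  A purely
# illustrative configuration (all values are placeholders) could be:
#
#     libvirt_pci_net_dev_name = Ethernet
#     libvirt_pci_net_dev_label = 0000:01:00.0
#     libvirt_pci_net_ip = 192.168.100.10
#     libvirt_pci_server_ip = 192.168.100.1
#     libvirt_pci_net_mask = 24
#     timeout = 240
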