def trigger_nodedev_event(nodedev_event_name, event_amount):
    """
    Trigger nodedevice detach/reattach event_amount times.

    Each detach/reattach pair yields a "Deleted" then a "Created"
    lifecycle event; an odd amount ends with one final, unpaired
    detach ("Deleted").

    :param nodedev_event_name: Event category; only 'lifecycle' is handled
    :param event_amount: number of event loop times
    :return: Ordered list of the expected event names
    """
    event_list = []
    if nodedev_event_name == 'lifecycle':
        # Full detach/reattach cycles - two events per cycle.  This
        # replaces the original duplicated even/odd while-loops.
        for _ in range(event_amount // 2):
            virsh.nodedev_detach(device_address)
            event_list.append("Deleted")
            virsh.nodedev_reattach(device_address)
            event_list.append("Created")
            # Give libvirt time to deliver the events before the next cycle
            time.sleep(2)
        # Odd amount: finish with a single detach (no matching reattach)
        if event_amount % 2 == 1:
            virsh.nodedev_detach(device_address)
            event_list.append("Deleted")
    return event_list
def trigger_nodedev_event(nodedev_event_name, event_amount):
    """
    Trigger nodedevice detach/reattach event_amount times.

    Each detach/reattach pair yields a "Deleted" then a "Created"
    lifecycle event; an odd amount ends with one final, unpaired
    detach ("Deleted").  This variant does not sleep between cycles.

    :param nodedev_event_name: Event category; only 'lifecycle' is handled
    :param event_amount: number of event loop times
    :return: Ordered list of the expected event names
    """
    event_list = []
    if nodedev_event_name == 'lifecycle':
        # Full detach/reattach cycles - two events per cycle.  This
        # replaces the original duplicated even/odd while-loops.
        for _ in range(event_amount // 2):
            virsh.nodedev_detach(device_address)
            event_list.append("Deleted")
            virsh.nodedev_reattach(device_address)
            event_list.append("Created")
        # Odd amount: finish with a single detach (no matching reattach)
        if event_amount % 2 == 1:
            virsh.nodedev_detach(device_address)
            event_list.append("Deleted")
    return event_list
def test_networks():
    """
    Start vm with VF from VF Pool with "managed=no" or default setting

    1) Create VF pool
    2) Prepare device xml and hot-plug to the guest
    3) Detach the device from host
    4) Check the driver of device
    5) Start VM
    6) Destroy vm then check the driver
    7) Reattach the device to the host and check the driver
    """
    create_vf_pool()
    libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)
    net_name = params.get("net_name")
    net_iface_attrs = {"type": "network",
                       "source": "{'network': '%s'}" % net_name}
    libvirt.modify_vm_iface(vm.name, "update_iface", net_iface_attrs)
    # Starting the guest is expected to fail while the VF is still
    # bound to the host driver
    start_result = virsh.start(vm.name, debug=True)
    libvirt.check_exit_status(start_result, True)
    # Detach the VF from the host, then the guest can be started
    virsh.nodedev_detach(dev_name, debug=True, ignore_status=False)
    libvirt_vfio.check_vfio_pci(vf_pci)
    start_vm(vm, True, True)
    libvirt_vfio.check_vfio_pci(vf_pci)
    # Hand the device back to the host and verify the driver reset
    virsh.nodedev_reattach(dev_name, debug=True, ignore_status=False)
    libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)
def nodedev_rollback(nodedev_event_name, event_amount):
    """
    Undo a dangling detach left behind by the event trigger.

    An odd event amount ends with an unpaired nodedev-detach, so the
    device must be reattached to restore the host state.

    :param nodedev_event_name: Event category; only 'lifecycle' is handled
    :param event_amount: number of event loop times that were triggered
    """
    if nodedev_event_name != 'lifecycle':
        return
    if event_amount % 2 == 1:
        virsh.nodedev_reattach(device_address)
def detach_reattach_nodedev(device_address, params, options=""):
    """
    Do the detach and reattach.

    Step1.Do detach.
    Step2.Check the result of detach.
    Step3.Do reattach.
    Step4.Check the result of reattach

    :param device_address: Name of the node device (eg. pci_0000_05_00_1)
    :param params: Test parameters (libvirt acl polkit related)
    :param options: Extra options for nodedev-detach/reattach
    """
    # Libvirt acl polkit related params
    uri = params.get("virsh_uri")
    # Nodedev-detach/reattach are special, the connect driver is still qemu
    # with split daemon, and the connect_driver in polkit rule
    # should be 'QEMU' for detach, 'nodedev' for read. update the polkit
    # rule to include both QEMU and nodedev in such situation.
    set_polkit = 'yes' == params.get('setup_libvirt_polkit', 'no')
    if utils_split_daemons.is_modular_daemon() and set_polkit:
        rule_path = '/etc/polkit-1/rules.d/500-libvirt-acl-virttest.rules'
        cmd = '''sed -i "s/'nodedev'/'nodedev'||'QEMU'/g" %s''' % rule_path
        process.run(cmd)
        process.run('cat /etc/polkit-1/rules.d/500-libvirt-acl-virttest.rules')
    unprivileged_user = params.get('unprivileged_user')
    readonly = (params.get('nodedev_detach_readonly', 'no') == 'yes')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Expected stub driver after a successful detach; computed once so the
    # detach and reattach checks agree on the same name.
    if libvirt_version.version_compare(1, 1, 1):
        device_driver_name = 'vfio-pci'
    else:
        device_driver_name = 'pci-stub'

    # Do the detach
    logging.debug('Node device name is %s.', device_address)
    CmdResult = virsh.nodedev_detach(device_address, options,
                                     unprivileged_user=unprivileged_user,
                                     uri=uri, readonly=readonly, debug=True)
    # Check the exit_status.
    libvirt.check_exit_status(CmdResult)
    # Check the driver: a detached device must be bound to the stub driver.
    driver = get_driver_readlink(device_address)
    logging.debug('Driver after detach is %s.', driver)
    if (driver is None) or (not driver.endswith(device_driver_name)):
        test.fail("Driver for %s is not %s "
                  "after nodedev-detach" % (device_address,
                                            device_driver_name))

    # Do the reattach.
    CmdResult = virsh.nodedev_reattach(device_address, options)
    # Check the exit_status.
    libvirt.check_exit_status(CmdResult)
    # Check the driver: reattach must unbind the device from the stub
    # driver.  (The original failure message wrongly said "after
    # nodedev-detach" here.)
    driver = get_driver_readlink(device_address)
    if driver and driver.endswith(device_driver_name):
        test.fail("Driver for %s was not reset from %s "
                  "after nodedev-reattach" % (device_address,
                                              device_driver_name))
def test_device_hotplug():
    """
    Hotplug/unplug VF with managed='no'

    1) Prepare a running guest
    2) Check the driver of vf on host
    3) Prepare a xml with "managed=no"and attach to guest
    4) Detach the device from host
    5) Check the driver of vf on host
    6) Attach the device to guest
    7) Check the interface of the guest
    8) Detach the device from guest and check the driver
    9) Reattach the device to the host and check the driver
    """
    libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
    start_vm(vm)
    # Before the detach the VF must NOT be bound to vfio-pci
    libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)
    # NOTE(review): mac_addr appears unused, but the eval'd 'iface_dict'
    # param below may reference it from local scope - confirm before removing.
    mac_addr = utils_net.generate_mac_address_simple()
    iface_dict = eval(
        params.get('iface_dict',
                   '{"hostdev_addr": "%s"}') % utils_sriov.pci_to_addr(vf_pci))
    iface = interface.Interface("hostdev")
    iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", iface_dict)
    # First attach is checked with expect_error=True: the device is still
    # bound to the host driver at this point
    res = virsh.attach_device(vm_name, iface.xml, debug=True)
    libvirt.check_exit_status(res, True)
    # Detach from host, then the hot-plug is expected to succeed
    virsh.nodedev_detach(dev_name, debug=True, ignore_status=False)
    libvirt_vfio.check_vfio_pci(vf_pci)
    virsh.attach_device(vm_name, iface.xml,
                        debug=True, ignore_status=False)
    check_vm_iface_managed(vm_name, iface_dict)
    vm.wait_for_serial_login().close()
    virsh.detach_device(vm_name, iface.xml, wait_remove_event=True,
                        debug=True, ignore_status=False)
    # With managed='no' the VF stays on vfio-pci until reattached manually
    libvirt_vfio.check_vfio_pci(vf_pci)
    virsh.nodedev_reattach(dev_name, debug=True, ignore_status=False)
    libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)
def detach_reattach_nodedev(device_address, params, options=""):
    """
    Do the detach and reattach.

    Step1.Do detach.
    Step2.Check the result of detach.
    Step3.Do reattach.
    Step4.Check the result of reattach

    :param device_address: Name of the node device (eg. pci_0000_05_00_1)
    :param params: Test parameters (libvirt acl polkit related)
    :param options: Extra options for nodedev-detach/reattach
    """
    # Libvirt acl polkit related params
    uri = params.get("virsh_uri")
    if uri and not utils_split_daemons.is_modular_daemon():
        uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    readonly = (params.get('nodedev_detach_readonly', 'no') == 'yes')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Expected stub driver after a successful detach; computed once so the
    # detach and reattach checks agree on the same name.
    if libvirt_version.version_compare(1, 1, 1):
        device_driver_name = 'vfio-pci'
    else:
        device_driver_name = 'pci-stub'

    # Do the detach
    logging.debug('Node device name is %s.', device_address)
    CmdResult = virsh.nodedev_detach(device_address, options,
                                     unprivileged_user=unprivileged_user,
                                     uri=uri, readonly=readonly)
    # Check the exit_status.
    libvirt.check_exit_status(CmdResult)
    # Check the driver: a detached device must be bound to the stub driver.
    driver = get_driver_readlink(device_address)
    logging.debug('Driver after detach is %s.', driver)
    if (driver is None) or (not driver.endswith(device_driver_name)):
        test.fail("Driver for %s is not %s "
                  "after nodedev-detach" % (device_address,
                                            device_driver_name))

    # Do the reattach.
    CmdResult = virsh.nodedev_reattach(device_address, options)
    # Check the exit_status.
    libvirt.check_exit_status(CmdResult)
    # Check the driver: reattach must unbind the device from the stub
    # driver.  (The original failure message wrongly said "after
    # nodedev-detach" here.)
    driver = get_driver_readlink(device_address)
    if driver and driver.endswith(device_driver_name):
        test.fail("Driver for %s was not reset from %s "
                  "after nodedev-reattach" % (device_address,
                                              device_driver_name))
def nodedev_test(dev_name, status_error=False):
    """
    Execute virsh nodedev-* commands

    :param dev_name: Name of a device(eg. pci_0000_05_00_1)
    :param status_error: Whether the command should be failed
    """
    # Run reset, detach and reattach in turn; each result is checked
    # against the same expected status.
    for nodedev_cmd in (virsh.nodedev_reset,
                        virsh.nodedev_detach,
                        virsh.nodedev_reattach):
        res = nodedev_cmd(dev_name, debug=True)
        libvirt.check_exit_status(res, status_error)
def do_nodedev_detach_reattach(device_name, params, options=""):
    """
    do the detach and reattach.

    (1).do detach.
    (2).check the result of detach.
    (3).do reattach.
    (4).check the result of reattach

    :param device_name: Name of the node device (eg. pci_0000_00_19_0)
    :param params: Test parameters (libvirt acl polkit related)
    :param options: Extra options for nodedev-detach/reattach
    :raise error.TestFail: If a command fails or the driver check fails
    """
    # libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # do the detach
    logging.debug('Node device name is %s.', device_name)
    CmdResult = virsh.nodedev_detach(device_name, options,
                                     unprivileged_user=unprivileged_user,
                                     uri=uri)
    # check the exit_status.
    if CmdResult.exit_status:
        raise error.TestFail("Failed to detach %s.\n"
                             "Detail: %s." % (device_name, CmdResult.stderr))
    # check the driver: a detached device must be bound to pci-stub.
    driver = driver_readlink(device_name)
    logging.debug('Driver after detach is %s.', driver)
    if (driver is None) or (not driver.endswith('pci-stub')):
        raise error.TestFail("Driver for %s is not pci-stub "
                             "after nodedev-detach" % (device_name))
    logging.debug('Nodedev-detach %s succeeded.', device_name)

    # do the reattach.
    CmdResult = virsh.nodedev_reattach(device_name, options)
    # check the exit_status.
    if CmdResult.exit_status:
        raise error.TestFail("Failed to reattach %s.\n"
                             "Detail: %s." % (device_name, CmdResult.stderr))
    # check the driver: reattach must unbind the device from pci-stub.
    # (The original expressed this as an inverted check with a dead
    # 'else: pass' branch.)
    driver = driver_readlink(device_name)
    if driver and driver.endswith('pci-stub'):
        raise error.TestFail("Driver for %s was not reset after "
                             "nodedev-reattach" % (device_name))
    logging.debug('Nodedev-reattach %s succeeded.', device_name)
def detach_interface():
    """
    Detach interface:

    1.Detach interface from xml;
    2.Check the live xml after detach interface;
    3.Check the vf driver after detach interface.
    """
    def _detach_completed():
        # The interface is gone once its MAC no longer shows in domiflist
        result = virsh.domiflist(vm_name, "", ignore_status=True)
        return result.stdout.find(mac_addr) == -1

    result = virsh.detach_device(vm_name, new_iface.xml)
    utils_test.libvirt.check_exit_status(result, expect_error=False)
    utils_misc.wait_for(_detach_completed, timeout=60)
    live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    device = live_xml.devices
    logging.debug("Domain xml after detach interface:\n %s", live_xml)
    # Loop variable renamed from 'interface' to 'iface' so it no longer
    # shadows the imported 'interface' module.
    if vf_type in ("vf", "vf_pool"):
        for iface in device.by_device_tag("interface"):
            if iface.type_name == "hostdev":
                if iface.hostdev_address.attrs == vf_addr_attrs:
                    test.fail(
                        "The hostdev interface still in the guest xml after detach\n"
                    )
                break
        driver = os.readlink(
            os.path.join(pci_device_dir, vf_addr,
                         "driver")).split('/')[-1]
        logging.debug("The driver after vf detached from guest is %s\n",
                      driver)
        if managed == "no":
            if driver != "vfio-pci":
                test.fail(
                    "The vf pci driver is not vfio-pci after detached from guest with managed as no\n"
                )
            # With managed='no' the host keeps vfio-pci until we reattach
            result = virsh.nodedev_reattach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        elif driver != origin_driver:
            test.fail(
                "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n"
                % (driver, origin_driver))
    else:
        for iface in device.by_device_tag("interface"):
            if iface.type_name == "direct":
                if iface.source["dev"] == vf_name:
                    test.fail(
                        "The macvtap interface still exist in the guest xml after detach\n"
                    )
                break
def reattach_device(pci_devs, pci_ids):
    """
    Reattach the given PCI devices to the host.

    :param pci_devs: Node device names (eg. pci_0000_05_00_1)
    :param pci_ids: Matching lspci addresses, paired positionally
    """
    # reattach the device to host
    # BUG FIX: map(None, a, b) is Python-2-only and raises TypeError on
    # Python 3; zip() pairs the two equal-length lists portably.
    for pci_value, pci_node in zip(pci_devs, pci_ids):
        pci_value = pci_value.replace(".", "_")
        cmd = "lspci -ks %s | grep 'Kernel driver in use' |\
           awk '{print $5}'" % pci_node
        driver_name = to_text(process.system_output(cmd, shell=True).strip())
        if driver_name != "vfio-pci":
            logging.debug("device already attached")
        else:
            if virsh.nodedev_reattach(pci_value).exit_status:
                test.fail("Hostdev node reattach failed")
            # Re-read the driver to confirm the device left vfio-pci
            driver_name = to_text(process.system_output(cmd,
                                                        shell=True).strip())
            if driver_name == "vfio-pci":
                test.error("driver bind failed after reattach")
def reattach_device(pci_devs, pci_ids):
    """
    Reattach the given PCI devices to the host.

    :param pci_devs: Node device names (eg. pci_0000_05_00_1)
    :param pci_ids: Matching lspci addresses, paired positionally
    """
    # reattach the device to host
    # BUG FIX: map(None, a, b) is Python-2-only and raises TypeError on
    # Python 3; zip() pairs the two equal-length lists portably.
    for pci_value, pci_node in zip(pci_devs, pci_ids):
        pci_value = pci_value.replace(".", "_")
        cmd = "lspci -ks %s | grep 'Kernel driver in use' |\
           awk '{print $5}'" % pci_node
        driver_name = process.run(cmd, shell=True).stdout_text.strip()
        if driver_name != "vfio-pci":
            logging.debug("device already attached")
        else:
            if virsh.nodedev_reattach(pci_value).exit_status:
                test.fail("Hostdev node reattach failed")
            # Re-read the driver to confirm the device left vfio-pci
            driver_name = process.run(cmd, shell=True).stdout_text.strip()
            if driver_name == "vfio-pci":
                test.error("driver bind failed after reattach")
def detach_reattach_nodedev(device_address, params, options=""):
    """
    Do the detach and reattach.

    Step1.Do detach.
    Step2.Check the result of detach.
    Step3.Do reattach.
    Step4.Check the result of reattach

    :param device_address: Name of the node device (eg. pci_0000_05_00_1)
    :param params: Test parameters (libvirt acl polkit related)
    :param options: Extra options for nodedev-detach/reattach
    """
    # Libvirt acl polkit related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    readonly = (params.get('nodedev_detach_readonly', 'no') == 'yes')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Expected stub driver after a successful detach; computed once so the
    # detach and reattach checks agree on the same name.
    if libvirt_version.version_compare(1, 1, 1):
        device_driver_name = 'vfio-pci'
    else:
        device_driver_name = 'pci-stub'

    # Do the detach
    logging.debug('Node device name is %s.', device_address)
    CmdResult = virsh.nodedev_detach(device_address, options,
                                     unprivileged_user=unprivileged_user,
                                     uri=uri, readonly=readonly)
    # Check the exit_status.
    libvirt.check_exit_status(CmdResult)
    # Check the driver: a detached device must be bound to the stub driver.
    driver = get_driver_readlink(device_address)
    logging.debug('Driver after detach is %s.', driver)
    if (driver is None) or (not driver.endswith(device_driver_name)):
        test.fail("Driver for %s is not %s "
                  "after nodedev-detach" % (device_address,
                                            device_driver_name))

    # Do the reattach.
    CmdResult = virsh.nodedev_reattach(device_address, options)
    # Check the exit_status.
    libvirt.check_exit_status(CmdResult)
    # Check the driver: reattach must unbind the device from the stub
    # driver.  (The original failure message wrongly said "after
    # nodedev-detach" here.)
    driver = get_driver_readlink(device_address)
    if driver and driver.endswith(device_driver_name):
        test.fail("Driver for %s was not reset from %s "
                  "after nodedev-reattach" % (device_address,
                                              device_driver_name))
def detach_interface():
    """
    Detach interface:

    1.Detach interface from xml;
    2.Check the live xml after detach interface;
    3.Check the vf driver after detach interface.
    """
    result = virsh.detach_device(vm_name, new_iface.xml)
    utils_test.libvirt.check_exit_status(result, expect_error=False)
    live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    device = live_xml.devices
    # Loop variable renamed from 'interface' to 'iface' so it no longer
    # shadows the imported 'interface' module.
    if vf_type in ("vf", "vf_pool"):
        for iface in device.by_device_tag("interface"):
            if iface.type_name == "hostdev":
                if iface.hostdev_address.attrs == vf_addr_attrs:
                    test.fail(
                        "The hostdev interface still in the guest xml after detach\n"
                    )
                break
        driver = os.readlink("%s/%s/driver"
                             % (pci_device_dir, vf_addr)).split('/')[-1]
        # BUG FIX: logging.log() requires a level as its first argument and
        # would raise TypeError here; use logging.debug() instead.
        logging.debug("The driver after vf detached from guest is %s\n",
                      driver)
        if managed == "no":
            if driver != "vfio-pci":
                test.fail(
                    "The vf pci driver is not vfio-pci after detached from guest with managed as no\n"
                )
            # With managed='no' the host keeps vfio-pci until we reattach
            result = virsh.nodedev_reattach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        elif driver != origin_driver:
            test.fail(
                "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n"
                % (driver, origin_driver))
    else:
        for iface in device.by_device_tag("interface"):
            if iface.type_name == "direct":
                if iface.source["dev"] == vf_name:
                    test.fail(
                        "The macvtap interface still exist in the guest xml after detach\n"
                    )
                break
def do_nodedev_detach_reattach(device_name, options=""):
    """
    do the detach and reattach.

    (1).do detach.
    (2).check the result of detach.
    (3).do reattach.
    (4).check the result of reattach

    :param device_name: Name of the node device (eg. pci_0000_00_19_0)
    :param options: Extra options for nodedev-detach/reattach
    :raise error.TestFail: If a command fails or the driver check fails
    """
    # do the detach
    logging.debug('Node device name is %s.', device_name)
    CmdResult = virsh.nodedev_detach(device_name, options)
    # check the exit_status.
    if CmdResult.exit_status:
        raise error.TestFail("Failed to detach %s.\n"
                             "Detail: %s." % (device_name, CmdResult.stderr))
    # check the driver: a detached device must be bound to pci-stub.
    driver = driver_readlink(device_name)
    logging.debug('Driver after detach is %s.', driver)
    if (driver is None) or (not driver.endswith('pci-stub')):
        raise error.TestFail("Driver for %s is not pci-stub "
                             "after nodedev-detach" % (device_name))
    logging.debug('Nodedev-detach %s succeeded.', device_name)

    # do the reattach.
    CmdResult = virsh.nodedev_reattach(device_name, options)
    # check the exit_status.
    if CmdResult.exit_status:
        raise error.TestFail("Failed to reattach %s.\n"
                             "Detail: %s." % (device_name, CmdResult.stderr))
    # check the driver: reattach must unbind the device from pci-stub.
    # (The original expressed this as an inverted check with a dead
    # 'else: pass' branch.)
    driver = driver_readlink(device_name)
    if driver and driver.endswith('pci-stub'):
        raise error.TestFail("Driver for %s was not reset after "
                             "nodedev-reattach" % (device_name))
    logging.debug('Nodedev-reattach %s succeeded.', device_name)
def detach_interface():
    """
    Detach interface:

    1.Detach interface from xml;
    2.Check the live xml after detach interface;
    3.Check the vf driver after detach interface.
    """
    def _detach_completed():
        # The interface is gone once its MAC no longer shows in domiflist
        result = virsh.domiflist(vm_name, "", ignore_status=True)
        return result.stdout.find(mac_addr) == -1

    result = virsh.detach_device(vm_name, new_iface.xml)
    utils_test.libvirt.check_exit_status(result, expect_error=False)
    utils_misc.wait_for(_detach_completed, timeout=60)
    live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    device = live_xml.devices
    logging.debug("Domain xml after detach interface:\n %s", live_xml)
    # Loop variable renamed from 'interface' to 'iface' so it no longer
    # shadows the imported 'interface' module.
    if vf_type in ("vf", "vf_pool"):
        for iface in device.by_device_tag("interface"):
            if iface.type_name == "hostdev":
                if iface.hostdev_address.attrs == vf_addr_attrs:
                    test.fail("The hostdev interface still in the guest xml after detach\n")
                break
        driver = os.readlink(os.path.join(pci_device_dir, vf_addr,
                                          "driver")).split('/')[-1]
        logging.debug("The driver after vf detached from guest is %s\n",
                      driver)
        if managed == "no":
            if driver != "vfio-pci":
                test.fail("The vf pci driver is not vfio-pci after detached from guest with managed as no\n")
            # With managed='no' the host keeps vfio-pci until we reattach
            result = virsh.nodedev_reattach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        elif driver != origin_driver:
            test.fail("The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n" % (driver, origin_driver))
    else:
        for iface in device.by_device_tag("interface"):
            if iface.type_name == "direct":
                if iface.source["dev"] == vf_name:
                    test.fail("The macvtap interface still exist in the guest xml after detach\n")
                break
def test_active_nodedev_reset(device, vm, expect_succeed):
    """
    Test nodedev-reset when the specified device is attached to a VM

    :param devices        : Specified node device to be tested.
    :param vm             : VM the device is to be attached to.
    :param expect_succeed : 'yes' for expect command run successfully
                            and 'no' for fail.
    :raise TestFail       : If result doesn't meet expectation.
    :raise TestError      : If failed to recover environment.
    """
    # Split device name such as `pci_0000_00_19_0` and fill the XML.
    hostdev_xml = """
<hostdev mode='subsystem' type='%s' managed='yes'>
    <source>
        <address domain='0x%s' bus='0x%s' slot='0x%s' function='0x%s'/>
    </source>
</hostdev>""" % tuple(device.split('_'))
    try:
        # The device need to be detached before attach to VM.
        virsh.nodedev_detach(device)
        try:
            # Backup VM XML.
            vmxml = VMXML.new_from_inactive_dumpxml(vm.name)

            # Generate a temp file to store host device XML.
            dev_fd, dev_fname = tempfile.mkstemp(dir=data_dir.get_tmp_dir())
            os.close(dev_fd)
            # Context manager guarantees the file is closed even if the
            # write fails (the original used bare open/write/close).
            with open(dev_fname, 'w') as dev_file:
                dev_file.write(hostdev_xml)

            # Only live VM allows attach device.
            if not vm.is_alive():
                vm.start()
            try:
                result = virsh.attach_device(vm.name, dev_fname)
                logging.debug(result)
                test_nodedev_reset([device], expect_succeed)
            finally:
                # Detach device from VM.
                result = virsh.detach_device(vm.name, dev_fname)
                # Raise error when detach failed.
                if result.exit_status:
                    raise error.TestError(
                        'Failed to dettach device %s from %s. Result:\n %s'
                        % (device, vm.name, result))
        finally:
            # Cleanup temp XML file and recover test VM.
            os.remove(dev_fname)
            vmxml.sync()
    finally:
        # Reattach node device
        result = virsh.nodedev_reattach(device)
        # Raise error when reattach failed.
        if result.exit_status:
            raise error.TestError(
                'Failed to reattach nodedev %s. Result:\n %s'
                % (device, result))
def run(test, params, env):
    """
    SR-IOV: managed related test.

    :param test: Test object provided by the framework
    :param params: Test parameter dict
    :param env: Test environment object (provides the VM)
    """
    def start_vm(vm, test_login=False, destroy_vm=False):
        """
        Start up VM

        :param vm: The vm object
        :param test_login: Whether to login VM
        :param destroy_vm: Whether to destroy VM
        """
        if vm.is_alive():
            vm.destroy()
        vm.start()
        if test_login:
            vm.wait_for_serial_login(timeout=180).close()
        if destroy_vm:
            vm.destroy()

    def create_vf_pool():
        """
        Create VF pool
        """
        net_hostdev_dict = {
            "net_name": params.get("net_name"),
            "net_forward": params.get("net_forward"),
            "vf_list_attrs": "[%s]" % utils_sriov.pci_to_addr(vf_pci)}
        libvirt_network.create_or_del_network(net_hostdev_dict)

    def check_vm_iface_managed(vm_name, iface_dict):
        """
        Check 'managed' in VM's iface

        :param vm_name: Name of VM
        :param iface_dict: The parameters dict
        :raise: TestFail if not match
        """
        # Read 'managed' from the first interface in the live domain XML
        vm_iface_managed = [iface.get("managed") for iface in
                            vm_xml.VMXML.new_from_dumpxml(
                                vm_name).devices.by_device_tag(
                                "interface")][0]
        expr_managed = "yes" if iface_dict.get(
            "managed", "") == "yes" else None
        if vm_iface_managed != expr_managed:
            test.fail("Unable to get the expected managed! Actual: %s, "
                      "Expected: %s." % (vm_iface_managed, expr_managed))

    def test_networks():
        """
        Start vm with VF from VF Pool with "managed=no" or default setting

        1) Create VF pool
        2) Prepare device xml and hot-plug to the guest
        3) Detach the device from host
        4) Check the driver of device
        5) Start VM
        6) Destroy vm then check the driver
        7) Reattach the device to the host and check the driver
        """
        create_vf_pool()
        libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)
        iface_dict = {"type": "network",
                      "source": "{'network': '%s'}"
                      % params.get("net_name")}
        libvirt.modify_vm_iface(vm.name, "update_iface", iface_dict)
        # Starting the guest is checked with expect_error=True: the VF is
        # still bound to the host driver at this point
        res = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(res, True)
        virsh.nodedev_detach(dev_name, debug=True, ignore_status=False)
        libvirt_vfio.check_vfio_pci(vf_pci)
        start_vm(vm, True, True)
        libvirt_vfio.check_vfio_pci(vf_pci)
        virsh.nodedev_reattach(dev_name, debug=True, ignore_status=False)
        libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)

    def test_device_hotplug():
        """
        Hotplug/unplug VF with managed='no'

        1) Prepare a running guest
        2) Check the driver of vf on host
        3) Prepare a xml with "managed=no"and attach to guest
        4) Detach the device from host
        5) Check the driver of vf on host
        6) Attach the device to guest
        7) Check the interface of the guest
        8) Detach the device from guest and check the driver
        9) Reattach the device to the host and check the driver
        """
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        start_vm(vm)
        libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)
        # NOTE(review): mac_addr appears unused, but the eval'd
        # 'iface_dict' param may reference it - confirm before removing.
        mac_addr = utils_net.generate_mac_address_simple()
        iface_dict = eval(
            params.get('iface_dict',
                       '{"hostdev_addr": "%s"}')
            % utils_sriov.pci_to_addr(vf_pci))
        iface = interface.Interface("hostdev")
        iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", iface_dict)
        # First attach is checked with expect_error=True: the device is
        # still bound to the host driver
        res = virsh.attach_device(vm_name, iface.xml, debug=True)
        libvirt.check_exit_status(res, True)
        virsh.nodedev_detach(dev_name, debug=True, ignore_status=False)
        libvirt_vfio.check_vfio_pci(vf_pci)
        virsh.attach_device(vm_name, iface.xml,
                            debug=True, ignore_status=False)
        check_vm_iface_managed(vm_name, iface_dict)
        vm.wait_for_serial_login().close()
        virsh.detach_device(vm_name, iface.xml, wait_remove_event=True,
                            debug=True, ignore_status=False)
        libvirt_vfio.check_vfio_pci(vf_pci)
        virsh.nodedev_reattach(dev_name, debug=True, ignore_status=False)
        libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)

    # Resolve the concrete sub-test from the 'test_case' parameter
    # (eval selects one of the nested test_* functions above)
    test_case = params.get("test_case", "")
    run_test = eval("test_%s" % test_case)
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    pf_pci = utils_sriov.get_pf_pci()
    if not pf_pci:
        test.cancel("NO available pf found.")
    default_vf = sriov_base.setup_vf(pf_pci, params)
    vf_pci = utils_sriov.get_vf_pci_id(pf_pci)
    dev_name = utils_sriov.get_device_name(vf_pci)
    # Backup the inactive domain XML so it can be restored in cleanup
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        run_test()
    finally:
        # Best-effort recovery: VF teardown, VM XML restore, network and
        # node device cleanup
        logging.info("Recover test enviroment.")
        sriov_base.recover_vf(pf_pci, params, default_vf)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        orig_config_xml.sync()
        libvirt_network.create_or_del_network(
            {"net_name": params.get("net_name")}, True)
        virsh.nodedev_reattach(dev_name, debug=True)
def run(test, params, env):
    """
    Test when the PCI configuration file is in read-only mode

    :param test: Test object provided by the framework
    :param params: Test parameter dict
    :param env: Test environment object (provides the VM)
    """
    def test_vf_hotplug():
        """
        Hot-plug VF to VM
        """
        logging.info("Preparing a running guest...")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vm.start()
        vm_session = vm.wait_for_serial_login(timeout=180)

        logging.info("Attaching VF to the guest...")
        # NOTE(review): mac_addr appears unused, but the eval'd
        # 'iface_dict' param may reference it - confirm before removing.
        mac_addr = utils_net.generate_mac_address_simple()
        iface_dict = eval(
            params.get('iface_dict',
                       '{"hostdev_addr": "%s"}')
            % utils_sriov.pci_to_addr(vf_pci))
        iface = interface.Interface("hostdev")
        iface.xml = libvirt.modify_vm_iface(vm.name, "get_xml", iface_dict)
        virsh.attach_device(vm_name, iface.xml, debug=True,
                            ignore_status=False)

        logging.info("Checking VF in the guest...")
        vm_iface_types = [iface.get_type_name() for iface in
                          vm_xml.VMXML.new_from_dumpxml(
                              vm_name).devices.by_device_tag("interface")]
        if 'hostdev' not in vm_iface_types:
            test.fail('Unable to get hostdev interface!')
        if cmd_in_vm:
            # Poll until the command succeeds inside the guest
            if not utils_misc.wait_for(
                    lambda: not vm_session.cmd_status(cmd_in_vm), 30, 10):
                test.fail("Can not get the Virtual Function info on vm!")
        vm_session.close()

    libvirt_version.is_libvirt_feature_supported(params)
    # Resolve the concrete sub-test from the 'test_case' parameter
    test_case = params.get("test_case", "")
    run_test = eval("test_%s" % test_case)
    cmd_in_vm = params.get("cmd_in_vm")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    pf_pci = utils_sriov.get_pf_pci()
    if not pf_pci:
        test.cancel("NO available pf found.")
    default_vf = sriov_base.setup_vf(pf_pci, params)
    vf_pci = utils_sriov.get_vf_pci_id(pf_pci)
    dev_name = utils_sriov.get_device_name(vf_pci)
    # Backup the inactive domain XML so it can be restored in cleanup
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()
    libvirtd = utils_libvirtd.Libvirtd('virtqemud')
    try:
        # Detach the VF first, then make sysfs read-only and restart the
        # daemon so the test runs against a read-only PCI config space
        virsh.nodedev_detach(dev_name, debug=True, ignore_status=False)
        logging.info("Re-mounting sysfs with ro mode...")
        utils_misc.mount('/sys', '', None, 'remount,ro')
        libvirtd.restart()
        run_test()
    finally:
        # Restore writable sysfs before any other recovery step
        logging.info("Recover test enviroment.")
        utils_misc.mount('/sys', '', None, 'remount,rw')
        sriov_base.recover_vf(pf_pci, params, default_vf)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        orig_config_xml.sync()
        virsh.nodedev_reattach(dev_name, debug=True)