def create_interface():
    """
    Call the appropriate helper to create the interface xml
    according to vf_type.
    """
    new_iface = Interface('network')
    if vf_type == "vf":
        new_iface = create_hostdev_interface(vf_addr, managed, model)
    if vf_type == "vf_pool":
        netxml = create_hostdev_network()
        virsh.net_define(netxml.xml, ignore_status=True)
        if not inactive_pool:
            virsh.net_start(netxml.name)
        new_iface = create_network_interface(netxml.name)
    if vf_type == "macvtap":
        new_iface = Interface('direct')
        new_iface.source = {"dev": vf_name, "mode": "passthrough"}
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.model = "virtio"
        if vlan_id:
            new_iface.vlan = new_iface.new_vlan(**vlan_id)
    if vf_type == "macvtap_network":
        netxml = create_macvtap_network()
        result = virsh.net_define(netxml.xml, ignore_status=True)
        virsh.net_start(netxml.name)
        new_iface = create_network_interface(netxml.name)
    return new_iface
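# Illustrative usage sketch (not part of the original tests): attach the
# interface built by create_interface() to a guest.  vm_name and the
# closure variables read by create_interface() (vf_type, vf_addr, ...)
# are assumed to be provided by an enclosing run(), as in the sriov
# test below; "--live" is just one plausible virsh flag.
def _example_attach_created_interface(vm_name, option="--live"):
    new_iface = create_interface()
    result = virsh.attach_device(vm_name, file_opt=new_iface.xml,
                                 flagstr=option, debug=True)
    utils_test.libvirt.check_exit_status(result, expect_error=False)
    return new_iface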
def prepare_network(net_name, **kwargs):
    """
    Prepare a new network for testing: create the net xml, define the
    network and start it.

    :param net_name: name of the network to be created
    :param kwargs: params of the network
    :return: None
    """
    new_net = libvirt.create_net_xml(net_name, kwargs)
    virsh.net_define(new_net.xml, debug=True, ignore_status=False)
    virsh.net_start(net_name, debug=True, ignore_status=False)
    logging.debug(virsh.net_dumpxml(net_name).stdout_text)
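# Illustrative call sketch (not part of the original test).  The key
# names in net_params are assumptions about what libvirt.create_net_xml
# consumes, not a verified schema.
def _example_prepare_network():
    net_params = {"net_forward": "{'mode': 'nat'}",
                  "net_bridge": "{'name': 'virbr-test'}"}
    prepare_network("test-net", **net_params)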
def ensure_default_network():
    """
    Ensure the 'default' network exists on the host and is active.

    :return: None
    """
    net_state = virsh.net_state_dict()
    if 'default' not in net_state:
        # define the default network from the stock xml shipped by libvirt
        virsh.net_define("/usr/share/libvirt/networks/default.xml",
                         debug=True, ignore_status=False)
        # refresh the state dict so the newly defined network shows up
        net_state = virsh.net_state_dict()
    if not net_state["default"].get("active"):
        virsh.net_start("default", debug=True, ignore_status=False)
    virsh.net_autostart("default", debug=True, ignore_status=False)
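# Illustrative sketch of how ensure_default_network() is typically used
# before a test, together with the shape of virsh.net_state_dict() the
# helper relies on (values shown are only an example):
#   {'default': {'active': True, 'autostart': True, 'persistent': True}}
def _example_ensure_default_network():
    ensure_default_network()
    assert virsh.net_state_dict()['default']['active']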
def restore(self, name):
    """
    Restore a network from a backed-up state dict.

    :param name: state dict of the target net to be restored (despite
                 the parameter name, a dict is expected here).
    :raise Exception: when restore failed.
    """
    net = name
    name = net['name']
    nets = self.current_state
    if name in nets:
        self.remove(nets[name])

    # Dump the backed-up inactive xml to a temp file for virsh
    # (text mode is needed, since the xml is a str).
    netfile = tempfile.NamedTemporaryFile(mode='w', delete=False)
    fname = netfile.name
    netfile.writelines(net['inactive xml'])
    netfile.close()

    try:
        if net['persistent'] == 'yes':
            res = virsh.net_define(fname)
            if res.exit_status:
                raise Exception(str(res))
            if net['active'] == 'yes':
                res = virsh.net_start(name)
                if res.exit_status:
                    # retry once; net-start occasionally fails transiently
                    res = virsh.net_start(name)
                    if res.exit_status:
                        raise Exception(str(res))
        else:
            res = virsh.net_create(fname)
            if res.exit_status:
                raise Exception(str(res))
    finally:
        os.remove(fname)

    if net['autostart'] == 'yes':
        res = virsh.net_autostart(name)
        if res.exit_status:
            raise Exception(str(res))
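# Shape of the state dict consumed by restore(), reconstructed from the
# keys the method reads; the xml payload is shortened for illustration
# and 'manager' stands in for whatever object owns restore().
def _example_restore(manager):
    backed_up = {
        'name': 'default',
        'persistent': 'yes',
        'active': 'yes',
        'autostart': 'yes',
        'inactive xml': "<network><name>default</name>...</network>",
    }
    manager.restore(backed_up)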
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters & environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record the current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)

    if remove_existing:
        for netxml in list(backup.values()):
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, so collect info
    # on both of them for the result check.
    # When something is wrong with the network, set it to 1
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    try:
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # Check network states
        if check_states and not define_status:
            net_state = virsh_instance.net_state_dict()
            if (net_state[net_name]['active']
                    or net_state[net_name]['autostart']
                    or not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "defined network: %s" % str(net_state))

        # If defining the network succeeded, then try to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command "
                                   "failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Check network states for normal test
        if check_states and not status_error:
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active']
                    or net_state[net_name]['autostart']
                    or not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "started network: %s" % str(net_state))
            # Try to set autostart
            virsh.net_autostart(net_name, **virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if not net_state[net_name]['autostart']:
                fail_flag = 1
                result_info.append("Failed to set autostart for network %s"
                                   % net_name)
            # Restart libvirtd and check state
            # Close down persistent virsh session before libvirtd restart
            if hasattr(virsh_instance, 'close_session'):
                virsh_instance.close_session()
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            # Need to redefine virsh_instance after libvirtd restart
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active']
                    or not net_state[net_name]['autostart']):
                fail_flag = 1
                result_info.append("Found wrong network state after "
                                   "restarting libvirtd: %s" % str(net_state))
            logging.debug("undefine network:")
            # prepare the network status
            if not net_persistent:
                virsh.net_undefine(net_name, ignore_status=False)
            if not net_active:
                virsh.net_destroy(net_name, ignore_status=False)
            undefine_status = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs).exit_status
            net_state = virsh_instance.net_state_dict()
            if net_persistent:
                if undefine_status:
                    fail_flag = 1
                    result_info.append("undefine should succeed but failed")
                if net_active:
                    if (not net_state[net_name]['active']
                            or net_state[net_name]['autostart']
                            or net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "undefined network: %s"
                                           % str(net_state))
                else:
                    if net_name in net_state:
                        fail_flag = 1
                        result_info.append("Transient network should not "
                                           "exist after undefine: %s"
                                           % str(net_state))
            else:
                if not undefine_status:
                    fail_flag = 1
                    result_info.append("undefine of a transient network "
                                       "should fail but succeeded: %s"
                                       % str(net_state))

        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network
        if not check_states:
            undefine_result = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs)
            if trans_ref != "define":
                logging.debug(undefine_result)
            undefine_status = undefine_result.exit_status

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])
        # Close down persistent virsh session (including for all netxml
        # copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml

    # Check status_error
    # If fail_flag is set, it must be a transaction test.
    if fail_flag:
        test.fail("Define network for transaction test "
                  "failed: %s" % result_info)

    # The logic to check the result:
    # status_error & (only undefine): negative undefine test only
    # status_error & (no undefine): negative define test only
    # (not status_error) & (only undefine): positive transaction test
    # (not status_error) & (no undefine): positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Define an unexpected network, "
                              "and start it successfully.")
                else:
                    test.fail("Define an unexpected network, "
                              "but starting it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Define network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with the right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but starting it failed.")
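# The branches in run() above are driven entirely by the params object.
# A hypothetical positive define/undefine variant could be expressed as
# the following values; every key below is read by a params.get() call
# in run(), the values are just one plausible combination.
_EXAMPLE_PARAMS = {
    "net_define_undefine_options_ref": "correct_arg",
    "net_define_undefine_trans_ref": "trans",
    "status_error": "no",
    "check_states": "no",
    "net_persistent": "yes",
    "net_active": "yes",
}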
def run(test, params, env):
    """
    Sriov basic test:

    1.create max vfs;
    2.Check the nodedev info;
    3.Start a guest with vf;
    4.Reboot a guest with vf;
    5.suspend/resume a guest with vf
    """
    def find_pf():
        pci_address = ""
        for pci in pci_dirs:
            temp_iface_name = os.listdir("%s/net" % pci)[0]
            operstate = utils_net.get_net_if_operstate(temp_iface_name)
            if operstate == "up":
                pf_iface_name = temp_iface_name
                pci_address = pci
                break
        if pci_address == "":
            return False
        else:
            return pci_address

    def create_address_dict(pci_id):
        """
        Use pci_xxxx_xx_xx_x to create an address dict.
        """
        device_domain = pci_id.split(':')[0]
        device_domain = "0x%s" % device_domain
        device_bus = pci_id.split(':')[1]
        device_bus = "0x%s" % device_bus
        device_slot = pci_id.split(':')[-1].split('.')[0]
        device_slot = "0x%s" % device_slot
        device_function = pci_id.split('.')[-1]
        device_function = "0x%s" % device_function
        attrs = {'type': 'pci', 'domain': device_domain,
                 'slot': device_slot, 'bus': device_bus,
                 'function': device_function}
        return attrs

    def addr_to_pci(addr):
        """
        Convert an address dict to a pci address: xxxxx:xx.x.
        """
        pci_domain = re.findall(r"0x(.+)", addr['domain'])[0]
        pci_bus = re.findall(r"0x(.+)", addr['bus'])[0]
        pci_slot = re.findall(r"0x(.+)", addr['slot'])[0]
        pci_function = re.findall(r"0x(.+)", addr['function'])[0]
        pci_addr = (pci_domain + ":" + pci_bus + ":" + pci_slot + "."
                    + pci_function)
        return pci_addr

    def create_hostdev_interface(pci_id, managed, model):
        """
        Create a hostdev type interface xml.
        """
        attrs = create_address_dict(pci_id)
        new_iface = Interface('hostdev')
        new_iface.managed = managed
        if model != "":
            new_iface.model = model
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.hostdev_address = new_iface.new_iface_address(
            **{"attrs": attrs})
        chars = string.ascii_letters + string.digits + '-_'
        alias_name = 'ua-' + ''.join(random.choice(chars)
                                     for _ in list(range(64)))
        new_iface.alias = {'name': alias_name}
        return new_iface

    def create_vfs(vf_num):
        """
        Create max vfs.
        """
        net_device = []
        net_name = []
        # Reset the vf count before creating the requested number of vfs
        test_res = process.run("echo 0 > %s/sriov_numvfs" % pci_address,
                               shell=True)
        pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
        net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        pci_list_before = set(pci_list)
        net_list_before = set(net_list)
        test_res = process.run("echo %d > %s/sriov_numvfs"
                               % (vf_num, pci_address), shell=True)
        if test_res.exit_status != 0:
            test.fail("Fail to create vfs")
        pci_list_sriov = virsh.nodedev_list(
            cap='pci').stdout.strip().splitlines()

        def _vf_init_completed():
            try:
                net_list_sriov = virsh.nodedev_list(
                    cap='net').stdout.strip().splitlines()
                net_list_sriov = set(net_list_sriov)
                net_diff = list(net_list_sriov.difference(net_list_before))
                if len(net_diff) != int(vf_num):
                    net_diff = []
                    return False
                return net_diff
            except process.CmdError:
                # test.fail() raises by itself, no 'raise' needed
                test.fail("Get net list with 'virsh nodedev-list' failed\n")

        pci_list_sriov = set(pci_list_sriov)
        pci_diff = list(pci_list_sriov.difference(pci_list_before))
        net_diff = utils_misc.wait_for(_vf_init_completed, timeout=60)
        if not net_diff:
            test.fail("Get net list with 'virsh nodedev-list' failed\n")
        for net in net_diff:
            net = net.split('_')
            length = len(net)
            net = '_'.join(net[1:length - 6])
            net_name.append(net)
        for pci_addr in pci_diff:
            temp_addr = pci_addr.split("_")
            pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
            vf_net_name = os.listdir("%s/%s/net"
                                     % (pci_device_dir, pci_addr))[0]
            net_device.append(vf_net_name)
        logging.debug(sorted(net_name))
        logging.debug(sorted(net_device))
        if sorted(net_name) != sorted(net_device):
            test.fail("The net name got from nodedev-list is wrong\n")

    def get_ip_by_mac(mac_addr, timeout=120):
        """
        Get the interface IP address by the given MAC address.
        """
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)

        def get_ip():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = ""
            iface_name = utils_net.get_linux_ifname(session, mac_addr)
            if iface_name is None:
                test.fail("no interface with MAC address %s found"
                          % mac_addr)
            session.cmd("pkill -9 dhclient", ignore_all_errors=True)
            session.cmd("dhclient %s " % iface_name, ignore_all_errors=True)
            ip_addr = utils_misc.wait_for(get_ip, 20)
            logging.debug("The ip addr is %s", ip_addr)
        except Exception:
            logging.warning("Found %s with MAC address %s but no ip for it"
                            % (iface_name, mac_addr))
        finally:
            session.close()
        return ip_addr

    def create_nodedev_pci(pci_address):
        """
        Convert xxxx:xx.x to pci_xxxx_xx_xx_x.
        """
        nodedev_addr = pci_address.split(':')[0:2]
        slot_function = pci_address.split(':')[2]
        nodedev_addr.append(slot_function.split('.')[0])
        nodedev_addr.append(slot_function.split('.')[1])
        nodedev_addr.insert(0, "pci")
        nodedev_addr = "_".join(nodedev_addr)
        return nodedev_addr

    def create_network_interface(name):
        """
        Create a network type interface xml.
        """
        new_iface = Interface('network')
        new_iface.source = {'network': name}
        new_iface.model = "virtio"
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        return new_iface

    def create_hostdev_network():
        """
        Create a hostdev type network xml with a vf pool.
        """
        vf_addr_list = []
        netxml = network_xml.NetworkXML()
        if vf_pool_source == "vf_list":
            for vf in vf_list:
                attrs = create_address_dict(vf)
                new_vf = netxml.new_vf_address(**{'attrs': attrs})
                vf_addr_list.append(new_vf)
            netxml.driver = {'name': 'vfio'}
            netxml.forward = {"mode": "hostdev", "managed": managed}
            netxml.vf_list = vf_addr_list
        else:
            netxml.pf = {"dev": pf_name}
            netxml.forward = {"mode": "hostdev", "managed": managed}
        netxml.name = net_name
        logging.debug(netxml)
        return netxml

    def create_macvtap_network():
        """
        Create a macvtap type network xml.
        """
        forward_interface_list = []
        for vf_name in vf_name_list:
            forward_interface = {'dev': vf_name}
            forward_interface_list.append(forward_interface)
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'}
        netxml.forward_interface = forward_interface_list
        logging.debug(netxml)
        return netxml

    def do_operation():
        """
        Do an operation on the guest os with the vf and check the os
        behavior after the operation.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                # fix: the original call misspelled this as 'ignore_statue'
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = ("Suspend-Resume %s with vf failed: %s"
                           % (vm_name, detail))
                test.fail(err_msg)
        if operation == "reboot":
            try:
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            result = virsh.managedsave(vm_name, ignore_status=True,
                                       debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

    def check_info():
        """
        Check the pf or vf info after creating the vfs.
        """
        if info_type == "pf_info" or info_type == "vf_order":
            nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1])
            xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            if info_type == "pf_info":
                product_info = xml.cap.product_info
                max_count = xml.max_count
                if pci_info.find(product_info) == -1:
                    test.fail("The product_info shown in nodedev-dumpxml "
                              "is wrong\n")
                if int(max_count) != max_vfs:
                    test.fail("The maxCount shown in nodedev-dumpxml "
                              "is wrong\n")
            if info_type == "vf_order":
                vf_addr_list = xml.cap.virt_functions
                if len(vf_addr_list) != max_vfs:
                    test.fail("The number of vfs shown in nodedev-dumpxml "
                              "is wrong\n")
                addr_list = []
                for vf_addr in vf_addr_list:
                    addr = (vf_addr.domain + ":" + vf_addr.bus + ":"
                            + vf_addr.slot + "." + vf_addr.function)
                    addr_list.append(addr)
                logging.debug("The vf addr list shown in nodedev-dumpxml "
                              "is %s\n", addr_list)
                if sorted(addr_list) != addr_list:
                    test.fail("The vf addr list shown in nodedev-dumpxml "
                              "is not sorted correctly\n")
        elif info_type == "vf_info":
            vf_addr = vf_list[0]
            nodedev_pci = create_nodedev_pci(vf_addr)
            vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            vf_bus_slot = ':'.join(vf_addr.split(':')[1:])
            res = process.run("lspci -s %s -vv" % vf_bus_slot)
            vf_pci_info = res.stdout_text
            vf_product_info = vf_xml.cap.product_info
            if vf_pci_info.find(vf_product_info) == -1:
                test.fail("The product_info shown in nodedev-dumpxml "
                          "is wrong\n")
            pf_addr = vf_xml.cap.virt_functions[0]
            pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0]
            pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0]
            pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0]
            pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0]
            pf_pci = (pf_addr_domain + ":" + pf_addr_bus + ":"
                      + pf_addr_slot + "." + pf_addr_function)
            if pf_pci != pci_id:
                test.fail("The pf address shown in vf nodedev-dumpxml "
                          "is wrong\n")

    def create_interface():
        """
        Call the appropriate helper to create the interface xml
        according to vf_type.
        """
        new_iface = Interface('network')
        if vf_type == "vf":
            new_iface = create_hostdev_interface(vf_addr, managed, model)
        if vf_type == "vf_pool":
            netxml = create_hostdev_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            if not inactive_pool:
                virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        if vf_type == "macvtap":
            new_iface = Interface('direct')
            new_iface.source = {"dev": vf_name, "mode": "passthrough"}
            new_iface.mac_address = utils_net.generate_mac_address_simple()
        if vf_type == "macvtap_network":
            netxml = create_macvtap_network()
            result = virsh.net_define(netxml.xml, ignore_status=True)
            virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        return new_iface

    def detach_interface():
        """
        Detach interface:

        1.Detach interface from xml;
        2.Check the live xml after detach interface;
        3.Check the vf driver after detach interface.
        """
        def _detach_completed():
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)

        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail("The hostdev interface is still in the "
                                  "guest xml after detach\n")
                    break
            driver = os.readlink(os.path.join(pci_device_dir, vf_addr,
                                              "driver")).split('/')[-1]
            logging.debug("The driver after the vf is detached from the "
                          "guest is %s\n", driver)
            if managed == "no":
                if driver != "vfio-pci":
                    test.fail("The vf pci driver is not vfio-pci after "
                              "detach from the guest with managed=no\n")
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            elif driver != origin_driver:
                test.fail("The vf pci driver is not reset to the origin "
                          "driver after detach from the guest: %s vs %s\n"
                          % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail("The macvtap interface still exists in "
                                  "the guest xml after detach\n")
                    break

    def attach_interface():
        """
        Attach interface:

        1.Attach interface from xml;
        2.Check the vf driver after attach interface;
        3.Check the live xml after attach interface;
        """
        if managed == "no":
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name, file_opt=new_iface.xml,
                                     flagstr=option, debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        # For option == "--persistent", after the VM is destroyed and then
        # started again, the device should still be there.
        if option == "--persistent":
            virsh.destroy(vm_name)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)
        get_ip_by_mac(mac_addr, timeout=60)
        device = live_xml.devices

        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail("The driver of the hostdev interface is "
                                  "not vfio\n")
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(os.path.join(pci_device_dir, vf_addr,
                                                  "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not "
                          "vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            # fix: 'match' could be referenced before assignment when no
            # direct interface is found, so initialize it explicitly
            match = "no"
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if (interface.source["dev"]
                                == new_iface.source["dev"]):
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
            if match != "yes":
                test.fail("The dev name or mode of the macvtap interface "
                          "is wrong after attach\n")
        return interface

    def setup_controller(nic_num, controller_index, ctl_models):
        """
        Create controllers bound to a numa node in the guest xml

        :param nic_num: number of nic cards bound to the numa node
        :param controller_index: index num used to create controllers
        :param ctl_models: controller topology for the numa binding
        """
        index = controller_index
        if nic_num == 2:
            ctl_models.append('pcie-switch-upstream-port')
            ctl_models.append('pcie-switch-downstream-port')
            ctl_models.append('pcie-switch-downstream-port')
        for i in range(index):
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = i
            if i == 0:
                controller.model = 'pcie-root'
            else:
                controller.model = 'pcie-root-port'
            vmxml.add_device(controller)
        set_address = False
        for model in ctl_models:
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = index
            controller.model = model
            if set_address or model == "pcie-switch-upstream-port":
                attrs = {'type': 'pci', 'domain': '0', 'slot': '0',
                         'bus': index - 1, 'function': '0'}
                controller.address = controller.new_controller_address(
                    **{"attrs": attrs})
                logging.debug(controller)
            if controller.model == "pcie-expander-bus":
                controller.node = "0"
                controller.target = {'busNr': '100'}
                set_address = True
            else:
                set_address = False
            logging.debug(controller)
            vmxml.add_device(controller)
            index += 1
        return index - 1

    def add_numa(vmxml):
        """
        Add a numa node in the guest xml

        :param vmxml: The instance of the VMXML class
        """
        vcpu = vmxml.vcpu
        max_mem = vmxml.max_mem
        max_mem_unit = vmxml.max_mem_unit
        numa_dict = {}
        numa_dict_list = []
        # Compute the memory size for each numa node
        if vcpu == 1:
            numa_dict['id'] = '0'
            numa_dict['cpus'] = '0'
            numa_dict['memory'] = str(max_mem)
            numa_dict['unit'] = str(max_mem_unit)
            numa_dict_list.append(numa_dict)
        else:
            for index in range(2):
                numa_dict['id'] = str(index)
                numa_dict['memory'] = str(max_mem // 2)
                numa_dict['unit'] = str(max_mem_unit)
                if vcpu == 2:
                    numa_dict['cpus'] = str(index)
                else:
                    if index == 0:
                        if vcpu == 3:
                            numa_dict['cpus'] = str(index)
                        if vcpu > 3:
                            numa_dict['cpus'] = "%s-%s" % (index,
                                                           vcpu // 2 - 1)
                    else:
                        numa_dict['cpus'] = "%s-%s" % (vcpu // 2,
                                                       str(vcpu - 1))
                numa_dict_list.append(numa_dict)
                numa_dict = {}
        # Add a cpu device with the numa node setting in the domain xml
        vmxml_cpu = vm_xml.VMCPUXML()
        vmxml_cpu.xml = "<cpu><numa/></cpu>"
        vmxml_cpu.numa_cell = numa_dict_list
        vmxml.cpu = vmxml_cpu

    def create_iface_list(bus_id, nic_num, vf_list):
        """
        Create a list of hostdev interfaces bound to the numa node

        :param bus_id: bus_id in the pci address, which decides the
                       controller the interface is attached to
        :param nic_num: number of nic cards bound to the numa node
        :param vf_list: sriov vf list
        """
        iface_list = []
        for num in range(nic_num):
            vf_addr = vf_list[num]
            iface = create_hostdev_interface(vf_addr, managed, model)
            bus_id -= num
            attrs = {'type': 'pci', 'domain': '0', 'slot': '0',
                     'bus': bus_id, 'function': '0'}
            iface.address = iface.new_iface_address(**{"attrs": attrs})
            iface_list.append(iface)
        return iface_list

    def check_guestos(iface_list):
        """
        Check whether a vf bound to the numa node can get an ip
        successfully in the guest os

        :param iface_list: hostdev interface list
        """
        for iface in iface_list:
            mac_addr = iface.mac_address
            get_ip_by_mac(mac_addr, timeout=60)

    def check_numa(vf_driver):
        """
        Check whether the vf is bound to the correct numa node in the
        guest os

        :param vf_driver: vf driver
        """
        if vm.serial_console:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        vf_pci = "/sys/bus/pci/drivers/%s" % vf_driver
        vf_dir = session.cmd_output("ls -d %s/00*"
                                    % vf_pci).strip().split('\n')
        for vf in vf_dir:
            numa_node = session.cmd_output('cat %s/numa_node'
                                           % vf).strip().split('\n')[-1]
            logging.debug("The vf is attached to numa node %s\n", numa_node)
            if numa_node != "0":
                test.fail("The vf is not attached to numa node 0\n")
        session.close()

    def remove_devices(vmxml, device_type):
        """
        Remove all addresses for all devices that have one.

        :param vmxml: The VM XML to be modified
        :param device_type: The device type for removing
        :return: True if success, otherwise, False
        """
        if device_type not in ['address', 'usb']:
            return
        type_dict = {'address': '/devices/*/address',
                     'usb': '/devices/*'}
        try:
            for elem in vmxml.xmltreefile.findall(type_dict[device_type]):
                if device_type == 'usb':
                    if elem.get('bus') == 'usb':
                        vmxml.xmltreefile.remove(elem)
                else:
                    vmxml.xmltreefile.remove(elem)
        except (AttributeError, TypeError) as details:
            test.error("Fail to remove '%s': %s" % (device_type, details))
        vmxml.xmltreefile.write()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    machine_type = params.get("machine_type", "pc")
    operation = params.get("operation")
    driver = params.get("driver", "ixgbe")
    status_error = params.get("status_error", "no") == "yes"
    model = params.get("model", "")
    managed = params.get("managed", "yes")
    attach = params.get("attach", "")
    option = params.get("option", "")
    vf_type = params.get("vf_type", "")
    info_check = params.get("info_check", "no")
    info_type = params.get("info_type", "")
    vf_pool_source = params.get("vf_pool_source", "vf_list")
    loop_times = int(params.get("loop_times", "1"))
    start_vm = "yes" == params.get("start_vm", "yes")
    including_pf = "yes" == params.get("including_pf", "no")
    max_vfs_attached = "yes" == params.get("max_vfs_attached", "no")
    inactive_pool = "yes" == params.get("inactive_pool", "no")
    duplicate_vf = "yes" == params.get("duplicate_vf", "no")
    expected_error = params.get("error_msg", "")
    nic_num = int(params.get("nic_num", "1"))
    nfv = params.get("nfv", "no") == "yes"
    ctl_models = params.get("ctl_models", "").split(' ')
    controller_index = int(params.get("controller_index", "12"))

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()
    if max_vfs_attached:
        controller_devices = vmxml.get_devices("controller")
        pci_bridge_controllers = []
        for device in controller_devices:
            logging.debug(device)
            if device.type == 'pci' and device.model == "pci-bridge":
                pci_bridge_controllers.append(device)
        if not pci_bridge_controllers:
            pci_bridge_controller = Controller("controller")
            pci_bridge_controller.type = "pci"
            pci_bridge_controller.index = "1"
            pci_bridge_controller.model = "pci-bridge"
            vmxml.add_device(pci_bridge_controller)
            vmxml.sync()

    if start_vm:
        if not vm.is_dead():
            vm.destroy()
        vm.start()
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        session.close()
    else:
        if not vm.is_dead():
            vm.destroy()

    driver_dir = "/sys/bus/pci/drivers/%s" % driver
    pci_dirs = glob.glob("%s/0000*" % driver_dir)
    pci_device_dir = "/sys/bus/pci/devices"
    pci_address = ""
    net_name = "test-net"

    # Prepare interface xml
    try:
        pf_iface_name = ""
        pci_address = utils_misc.wait_for(find_pf, timeout=60)
        if not pci_address:
            test.cancel("no up pf found in the test machine")
        pci_id = pci_address.split("/")[-1]
        pf_name = os.listdir('%s/net' % pci_address)[0]
        bus_slot = ':'.join(pci_address.split(':')[1:])
        pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text
        logging.debug("The pci info of the sriov card is:\n %s", pci_info)
        max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1
        if info_check == 'yes' or max_vfs < 32:
            vf_num = max_vfs
            create_vfs(vf_num)
        else:
            vf_num = max_vfs // 2 + 1
            create_vfs(vf_num)

        vf_list = []
        vf_name_list = []
        for i in range(vf_num):
            vf = os.readlink("%s/virtfn%s" % (pci_address, str(i)))
            vf = os.path.split(vf)[1]
            vf_list.append(vf)
            vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0]
            vf_name_list.append(vf_name)

        if attach == "yes" and not nfv:
            vf_addr = vf_list[0]
            new_iface = create_interface()
            if inactive_pool:
                result = virsh.attach_device(vm_name,
                                             file_opt=new_iface.xml,
                                             flagstr=option,
                                             ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result, expected_error)
            else:
                mac_addr = new_iface.mac_address
                nodedev_pci_addr = create_nodedev_pci(vf_addr)
                origin_driver = os.readlink(os.path.join(
                    pci_device_dir, vf_addr, "driver")).split('/')[-1]
                logging.debug("The driver of the vf before attaching to "
                              "the guest is %s\n", origin_driver)
                count = 0
                while count < loop_times:
                    interface = attach_interface()
                    if vf_type in ["vf", "vf_pool"]:
                        vf_addr_attrs = interface.hostdev_address.attrs
                    if operation != "":
                        do_operation()
                    detach_interface()
                    count += 1
            if max_vfs_attached:
                interface_list = []
                for vf_addr in vf_list:
                    new_iface = create_interface()
                    mac_addr = new_iface.mac_address
                    nodedev_pci_addr = create_nodedev_pci(vf_addr)
                    attach_interface()
                    interface_list.append(new_iface)
                count = 0
                for new_iface in interface_list:
                    vf_addr = vf_list[count]
                    vf_addr_attrs = new_iface.hostdev_address.attrs
                    detach_interface()
                    count += 1

        if info_check == "yes":
            check_info()

        if including_pf:
            vf_list = []
            pf_addr = pci_id
            vf_list.append(pf_addr)
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml, ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)

        if duplicate_vf:
            vf_list.append(vf_list[0])
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml, ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
            result = virsh.net_create(netxml.xml, ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)

        if nfv:
            for os_machine_type in (machine_type, vmxml.os.machine):
                if 'q35' not in os_machine_type:
                    test.cancel("nfv only runs with the q35 machine type")
            vf_driver = os.readlink(os.path.join(pci_device_dir, vf_list[0],
                                                 "driver")).split('/')[-1]
            vmxml.remove_all_device_by_type('controller')
            remove_devices(vmxml, 'address')
            remove_devices(vmxml, 'usb')
            add_numa(vmxml)
            bus_id = setup_controller(nic_num, controller_index, ctl_models)
            vmxml.sync()
            logging.debug(vmxml)
            iface_list = create_iface_list(bus_id, nic_num, vf_list)
            for iface in iface_list:
                process.run("cat %s" % iface.xml, shell=True).stdout_text
                result = virsh.attach_device(vm_name, file_opt=iface.xml,
                                             flagstr=option,
                                             ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(live_xml)
            check_guestos(iface_list)
            check_numa(vf_driver)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        if vf_type == "vf_pool" or vf_type == "macvtap_network":
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name, ignore_status=True)
        backup_xml.sync()
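# Worked example (illustrative) of the three PCI-address helpers defined
# inside run() above, assuming they were lifted to module scope.  The
# concrete address 0000:3b:10.1 is made up for the demonstration.
def _example_pci_address_roundtrip():
    pci_id = "0000:3b:10.1"
    attrs = create_address_dict(pci_id)
    # attrs == {'type': 'pci', 'domain': '0x0000', 'bus': '0x3b',
    #           'slot': '0x10', 'function': '0x1'}
    assert addr_to_pci(attrs) == pci_id
    # virsh nodedev name for the same device
    assert create_nodedev_pci(pci_id) == "pci_0000_3b_10_1"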
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters & environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")
    expect_msg = params.get("net_define_undefine_err_msg")

    # define multiple ip/dhcp sections in the network
    multi_ip = "yes" == params.get("multi_ip", "no")
    netmask = params.get("netmask")
    prefix_v6 = params.get("prefix_v6")
    single_v6_range = "yes" == params.get("single_v6_range", "no")
    # Get the 2nd ipv4 dhcp range
    dhcp_ranges_start = params.get("dhcp_ranges_start", None)
    dhcp_ranges_end = params.get("dhcp_ranges_end", None)

    # Get 2 groups of ipv6 addresses and dhcp sections
    address_v6_1 = params.get("address_v6_1")
    dhcp_ranges_v6_start_1 = params.get("dhcp_ranges_v6_start_1", None)
    dhcp_ranges_v6_end_1 = params.get("dhcp_ranges_v6_end_1", None)
    address_v6_2 = params.get("address_v6_2")
    dhcp_ranges_v6_start_2 = params.get("dhcp_ranges_v6_start_2", None)
    dhcp_ranges_v6_end_2 = params.get("dhcp_ranges_v6_end_2", None)

    # Edit the net xml forward/ip part, then define/start to check
    # invalid settings
    edit_xml = "yes" == params.get("edit_xml", "no")
    address_v4 = params.get("address_v4")
    nat_port_start = params.get("nat_port_start")
    nat_port_end = params.get("nat_port_end")
    test_port = "yes" == params.get("test_port", "no")
    loop = int(params.get("loop", 1))

    # Get params about creating a bridge
    bridge = params.get('bridge', None)
    create_bridge = "yes" == params.get('create_bridge', 'no')
    ovs_bridge = "yes" == params.get('ovs_bridge', 'no')
    iface_name = utils_net.get_net_if(state="UP")[0]

    # Get params about creating a network
    create_netxml = "yes" == params.get("create_netxml", "no")
    domain = params.get('domain', None)
    forward = params.get("forward", None)
    net_dns_txt = params.get("net_dns_txt", None)
    net_bandwidth_inbound = params.get("net_bandwidth_inbound", None)
    net_bandwidth_outbound = params.get("net_bandwidth_outbound", None)
    mac = params.get("mac")

    # Edit the created network xml to get the xml to be tested
    del_mac = "yes" == params.get('del_mac', 'no')
    del_ip = "yes" == params.get('del_ip', 'no')
    add_dev = "yes" == params.get('add_dev', 'no')
    virtualport = 'yes' == params.get("virtualport", "no")
    virtualport_type = params.get("virtualport_type")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_uri = params.get("virsh_uri")
    if virsh_uri and not utils_split_daemons.is_modular_daemon():
        virsh_uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record the current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)
    logging.debug("Get network xml as testnet_xml: %s" % testnet_xml)

    if remove_existing:
        for netxml in list(backup.values()):
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, so collect info
    # on both of them for the result check.
    # When something is wrong with the network, set it to 1
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    if params.get('net_define_undefine_readonly', 'no') == 'yes':
        virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True,
                       'readonly': True}

    try:
        if edit_xml:
            ipxml_v4 = network_xml.IPXML()
            ipxml_v4.address = address_v4
            ipxml_v4.netmask = netmask
            range_4 = network_xml.RangeXML()
            range_4.attrs = {"start": dhcp_ranges_start,
                             "end": dhcp_ranges_end}
            ipxml_v4.dhcp_ranges = range_4
            testnet_xml.del_ip()
            testnet_xml.set_ip(ipxml_v4)
            if test_port:
                nat_port = {"start": nat_port_start, "end": nat_port_end}
                testnet_xml.nat_port = nat_port
            testnet_xml.debug_xml()

        if multi_ip:
            # Enabling IPv6 forwarding with RA routes without accept_ra
            # set to 2 is likely to cause route loss
            sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra'
            original_accept_ra = process.run(sysctl_cmd + ' -n').stdout_text
            if original_accept_ra != '2':
                process.system(sysctl_cmd + '=2')
            # add another ipv4 address and dhcp range
            set_ip_section(testnet_xml, address_v4, ipv6=False,
                           netmask=netmask,
                           dhcp_ranges_start=dhcp_ranges_start,
                           dhcp_ranges_end=dhcp_ranges_end)
            # add an ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_1, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_1,
                           dhcp_ranges_end=dhcp_ranges_v6_end_1)
            # 2nd ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_2, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_2,
                           dhcp_ranges_end=dhcp_ranges_v6_end_2)

        if create_netxml:
            net_dict = {'del_nat_attrs': True,
                        'del_ip': del_ip,
                        'dns_txt': net_dns_txt,
                        'domain': domain,
                        'bridge': bridge,
                        'forward': forward,
                        'interface_dev': iface_name,
                        'virtualport': virtualport,
                        'virtualport_type': virtualport_type,
                        'mac': mac,
                        'net_bandwidth_inbound': net_bandwidth_inbound,
                        'net_bandwidth_outbound': net_bandwidth_outbound}
            logging.debug("net_dict is %s" % net_dict)
            testnet_xml = libvirt_network.modify_network_xml(net_dict,
                                                             testnet_xml)
            testnet_xml.debug_xml()

        if create_bridge:
            if ovs_bridge:
                utils_net.create_ovs_bridge(bridge, ignore_status=False)
            else:
                utils_net.create_linux_bridge_tmux(bridge, iface_name,
                                                   ignore_status=False)

        # Run test case
        while loop:
            try:
                define_result = virsh.net_define(define_options,
                                                 define_extra, **virsh_dargs)
                logging.debug(define_result)
                define_status = define_result.exit_status

                # Check network states after define
                if check_states and not define_status:
                    net_state = virsh_instance.net_state_dict()
                    if (net_state[net_name]['active']
                            or net_state[net_name]['autostart']
                            or not net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "defined network: %s"
                                           % str(net_state))

                if define_status == 1 and status_error and expect_msg:
                    logging.debug("check result is %s, expect_msg is %s"
                                  % (define_result, expect_msg))
                    libvirt.check_result(define_result,
                                         expect_msg.split(';'))

                # If defining the network succeeded, then try to start it.
                if define_status == 0:
                    start_result = virsh.net_start(net_name, extra="",
                                                   **virsh_dargs)
                    logging.debug(start_result)
                    start_status = start_result.exit_status

                if trans_ref == "trans":
                    if define_status:
                        fail_flag = 1
                        result_info.append("Define network with right "
                                           "command failed.")
                    else:
                        if start_status:
                            fail_flag = 1
                            result_info.append("Network is defined as "
                                               "expected, but failed to "
                                               "start it.")

                # Check network states after start
                if check_states and not status_error:
                    net_state = virsh_instance.net_state_dict()
                    if (not net_state[net_name]['active']
                            or net_state[net_name]['autostart']
                            or not net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "started network: %s"
                                           % str(net_state))
                    # Try to set autostart
                    virsh.net_autostart(net_name, **virsh_dargs)
                    net_state = virsh_instance.net_state_dict()
                    if not net_state[net_name]['autostart']:
                        fail_flag = 1
                        result_info.append("Failed to set autostart for "
                                           "network %s" % net_name)
                    # Restart libvirtd and check state
                    # Close down persistent virsh session before restart
                    if hasattr(virsh_instance, 'close_session'):
                        virsh_instance.close_session()
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    # Need to redefine virsh_instance after libvirtd restart
                    virsh_instance = virsh.VirshPersistent(**virsh_dargs)
                    net_state = virsh_instance.net_state_dict()
                    if (not net_state[net_name]['active']
                            or not net_state[net_name]['autostart']):
                        fail_flag = 1
                        result_info.append("Found wrong network state after "
                                           "restarting libvirtd: %s"
                                           % str(net_state))
                    logging.debug("undefine network:")
                    # prepare the network status
                    if not net_persistent:
                        virsh.net_undefine(net_name, ignore_status=False)
                    if not net_active:
                        virsh.net_destroy(net_name, ignore_status=False)
                    undefine_status = virsh.net_undefine(
                        undefine_options, undefine_extra,
                        **virsh_dargs).exit_status
                    net_state = virsh_instance.net_state_dict()
                    if net_persistent:
                        if undefine_status:
                            fail_flag = 1
                            result_info.append("undefine should succeed "
                                               "but failed")
                        if net_active:
                            if (not net_state[net_name]['active']
                                    or net_state[net_name]['autostart']
                                    or net_state[net_name]['persistent']):
                                fail_flag = 1
                                result_info.append(
                                    "Found wrong network states for "
                                    "undefined network: %s"
                                    % str(net_state))
                        else:
                            if net_name in net_state:
                                fail_flag = 1
                                result_info.append(
                                    "Transient network should not exist "
                                    "after undefine: %s" % str(net_state))
                    else:
                        if not undefine_status:
                            fail_flag = 1
                            result_info.append(
                                "undefine of a transient network should "
                                "fail but succeeded: %s" % str(net_state))

                # Stop network for undefine test anyway
                destroy_result = virsh.net_destroy(net_name, extra="",
                                                   **virsh_dargs)
                logging.debug(destroy_result)

                # Undefine network
                if not check_states:
                    undefine_result = virsh.net_undefine(undefine_options,
                                                         undefine_extra,
                                                         **virsh_dargs)
                    if trans_ref != "define":
                        logging.debug(undefine_result)
                    undefine_status = undefine_result.exit_status
            except Exception:
                logging.debug("The define and undefine operation in loop "
                              "%s failed.", loop)
            finally:
                loop = loop - 1

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])
        # Close down persistent virsh session (including for all netxml
        # copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml
        if create_bridge:
            if ovs_bridge:
                utils_net.delete_ovs_bridge(bridge, ignore_status=False)
            else:
                utils_net.delete_linux_bridge_tmux(bridge, iface_name,
                                                   ignore_status=False)

    # Check status_error
    # If fail_flag is set, it must be a transaction test.
    if fail_flag:
        test.fail("Define network for transaction test "
                  "failed: %s" % result_info)

    # The logic to check the result:
    # status_error & (only undefine): negative undefine test only
    # status_error & (no undefine): negative define test only
    # (not status_error) & (only undefine): positive transaction test
    # (not status_error) & (no undefine): positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Define an unexpected network, "
                              "and start it successfully.")
                else:
                    test.fail("Define an unexpected network, "
                              "but starting it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Define network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with the right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but starting it failed.")
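# Minimal sketch (illustrative addresses) of adding an ipv4 <ip> section
# with a dhcp range to a network xml, mirroring the edit_xml branch in
# run() above; set_ip_section is assumed to wrap these same calls.
def _example_add_ipv4_section(testnet_xml):
    ipxml_v4 = network_xml.IPXML()
    ipxml_v4.address = "192.168.100.1"
    ipxml_v4.netmask = "255.255.255.0"
    range_4 = network_xml.RangeXML()
    range_4.attrs = {"start": "192.168.100.2", "end": "192.168.100.254"}
    ipxml_v4.dhcp_ranges = range_4
    testnet_xml.del_ip()
    testnet_xml.set_ip(ipxml_v4)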
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroyed.
    5.Recover network environment.
    6.Confirm the test result.
    """
    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file",
                              "/usr/share/libvirt/networks/default.xml")
    check_libvirtd = "yes" == params.get("check_libvirtd")
    vm_defined = "yes" == params.get("vm_defined")
    check_vm = "yes" == params.get("check_vm")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        if net_persistent:
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            # create a transient network (fix: the original called
            # virsh.create(), which creates a domain, not a network)
            virsh.net_create(net_cfg_file, ignore_status=False)

    # Backup the current network xml
    net_xml_bk = os.path.join(data_dir.get_tmp_dir(),
                              "%s.xml" % network_name)
    virsh.net_dumpxml(network_name, to_file=net_xml_bk)
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("make the network persistent...")
            virsh.net_define(net_xml_bk)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug("destroy network as we need to test an inactive "
                          "network...")
            virsh.net_destroy(network_name, ignore_status=False)
    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    if check_libvirtd or check_vm:
        vm_name = params.get("main_vm")
        if virsh.is_alive(vm_name):
            virsh.destroy(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml
        # make sure there is an interface with the source network 'default'
        iface_devices = vmxml.get_devices(device_type="interface")
        has_default_net = False
        for iface in iface_devices:
            source = iface.get_source()
            if 'network' in source.keys() and source['network'] == 'default':
                has_default_net = True
                break
            elif 'bridge' in source.keys() and source['bridge'] == 'virbr0':
                has_default_net = True
                break
        if not has_default_net:
            options = "network default --current"
            virsh.attach_interface(vm_name, options, ignore_status=False)
        try:
            if vm_defined:
                ret = virsh.start(vm_name)
            else:
                logging.debug("undefine the vm, then create the vm...")
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                virsh.undefine(vm_name)
                ret = virsh.create(vmxml.xml)
                logging.debug(ret.stdout)
            # check the create or start cmd status
            utils_test.libvirt.check_exit_status(
                ret, expect_error=(network_status != 'active'))
            status = 1

            if status_error != 'yes':
                libvirtd = utils_libvirtd.Libvirtd("virtqemud")
                daemon_name = libvirtd.service_name
                pid_before_run = utils_misc.get_pid(daemon_name)
                ret = virsh.net_destroy(net_ref, extra, uri=uri, debug=True,
                                        unprivileged_user=unprivileged_user,
                                        ignore_status=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                # check the libvirtd pid did not change
                pid_after_run = utils_misc.get_pid(daemon_name)
                if pid_after_run != pid_before_run:
                    test.fail("libvirtd crash after destroy network!")
                    status = 1
                else:
                    logging.debug("libvirtd did not crash after destroy "
                                  "network!")
                    status = 0
                if check_libvirtd:
                    # destroy vm, check the libvirtd pid did not change
                    ret = virsh.destroy(vm_name)
                    utils_test.libvirt.check_exit_status(ret,
                                                         expect_error=False)
                    pid_after_run2 = utils_misc.get_pid(daemon_name)
                    if pid_after_run2 != pid_before_run:
                        test.fail("libvirtd crash after destroy vm!")
                        status = 1
                    else:
                        logging.debug("libvirtd did not crash after destroy "
                                      "vm!")
                        status = 0
                elif check_vm:
                    # restart libvirtd and check the vm is still running
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    if not virsh.is_alive(vm_name):
                        test.fail("vm shut down when the transient network "
                                  "was destroyed and libvirtd restarted")
                    else:
                        status = 0
        finally:
            if not vm_defined:
                vmxml_backup.define()
            vmxml_backup.sync()
    else:
        readonly = (params.get("net_destroy_readonly", "no") == "yes")
        status = virsh.net_destroy(net_ref, extra, uri=uri,
                                   readonly=readonly, debug=True,
                                   unprivileged_user=unprivileged_user,
                                   ignore_status=True).exit_status
        # Confirm the network has been destroyed.
        if net_persistent:
            if virsh.net_state_dict()[network_name]['active']:
                status = 1
        else:
            output_all = virsh.net_list("--all").stdout.strip()
            if re.search(network_name, output_all):
                status = 1
                logging.debug("transient network should not exist after "
                              "destroy")

    # Recover network status to the system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")

    # Clean up the backup network xml file
    if os.path.isfile(net_xml_bk):
        data_dir.clean_tmp_files()
        logging.debug("Cleaning up the network backup xml")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with the right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
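# Compressed sketch of the core positive check the net-destroy test
# performs, assuming an active, persistent 'default' network exists.
def _example_net_destroy_check():
    result = virsh.net_destroy("default", debug=True, ignore_status=True)
    assert result.exit_status == 0
    state = virsh.net_state_dict()["default"]
    assert not state["active"]          # stopped by net-destroy
    assert state["persistent"]          # but still defined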
+ pf_addr_function if pf_pci != pci_id: test.fail( "The pf address show in vf nodedev-dumpxml is wrong\n") def create_interface(): """ Call different function to create interface according to the type """ new_iface = Interface('network') if vf_type == "vf": new_iface = create_hostdev_interface(vf_addr, managed, model) if vf_type == "vf_pool": netxml = create_hostdev_network() virsh.net_define(netxml.xml, ignore_status=True) if not inactive_pool: virsh.net_start(netxml.name) new_iface = create_network_interface(netxml.name) if vf_type == "macvtap": new_iface = Interface('direct') new_iface.source = {"dev": vf_name, "mode": "passthrough"} new_iface.mac_address = utils_net.generate_mac_address_simple() if vf_type == "macvtap_network": netxml = create_macvtap_network() result = virsh.net_define(netxml.xml, ignore_status=True) virsh.net_start(netxml.name) new_iface = create_network_interface(netxml.name) return new_iface def detach_interface(): """ Detach interface: 1.Detach interface from xml; 2.Check the live xml after detach interface; 3.Check the vf driver after detach interface. """ def _detach_completed(): result = virsh.domiflist(vm_name, "", ignore_status=True) return result.stdout.find(mac_addr) == -1 result = virsh.detach_device(vm_name, new_iface.xml) utils_test.libvirt.check_exit_status(result, expect_error=False) utils_misc.wait_for(_detach_completed, timeout=60) live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) device = live_xml.devices logging.debug("Domain xml after detach interface:\n %s", live_xml) if vf_type == "vf" or vf_type == "vf_pool": for interface in device.by_device_tag("interface"): if interface.type_name == "hostdev": if interface.hostdev_address.attrs == vf_addr_attrs: test.fail( "The hostdev interface still in the guest xml after detach\n" ) break driver = os.readlink( os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1] logging.debug("The driver after vf detached from guest is %s\n", driver) if managed == "no": if driver != "vfio-pci": test.fail( "The vf pci driver is not vfio-pci after detached from guest with managed as no\n" ) result = virsh.nodedev_reattach(nodedev_pci_addr) utils_test.libvirt.check_exit_status(result, expect_error=False) elif driver != origin_driver: test.fail( "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n" % (driver, origin_driver)) else: for interface in device.by_device_tag("interface"): if interface.type_name == "direct": if interface.source["dev"] == vf_name: test.fail( "The macvtap interface still exist in the guest xml after detach\n" ) break def attach_interface(): """ Attach interface: 1.Attach interface from xml; 2.Check the vf driver after attach interface; 3.Check the live xml after attach interface; """ if managed == "no": result = virsh.nodedev_detach(nodedev_pci_addr) utils_test.libvirt.check_exit_status(result, expect_error=False) logging.debug("attach interface xml:\n %s", new_iface) result = virsh.attach_device(vm_name, file_opt=new_iface.xml, flagstr=option, debug=True) utils_test.libvirt.check_exit_status(result, expect_error=False) if option == "--config": result = virsh.start(vm_name) utils_test.libvirt.check_exit_status(result, expect_error=False) live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug(live_xml) get_ip_by_mac(mac_addr, timeout=60) device = live_xml.devices if vf_type == "vf" or vf_type == "vf_pool": for interface in device.by_device_tag("interface"): if interface.type_name == "hostdev": if interface.driver.driver_attr['name'] != 'vfio': 
test.fail( "The driver of the hostdev interface is not vfio\n" ) break vf_addr_attrs = interface.hostdev_address.attrs pci_addr = addr_to_pci(vf_addr_attrs) nic_driver = os.readlink( os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1] if nic_driver != "vfio-pci": test.fail("The driver of the hostdev interface is not vfio\n") elif vf_type == "macvtap" or vf_type == "macvtap_network": for interface in device.by_device_tag("interface"): if interface.type_name == "direct": if vf_type == "macvtap": if interface.source["dev"] == new_iface.source["dev"]: match = "yes" vf_name = interface.source["dev"] elif interface.source['dev'] in vf_name_list: match = "yes" vf_name = interface.source["dev"] if match != "yes": test.fail( "The dev name or mode of macvtap interface is wrong after attach\n" ) return interface vm_name = params.get("main_vm", "avocado-vt-vm1") vm = env.get_vm(params["main_vm"]) operation = params.get("operation") driver = params.get("driver", "ixgbe") status_error = params.get("status_error", "no") == "yes" model = params.get("model", "") managed = params.get("managed", "yes") attach = params.get("attach", "") option = params.get("option", "") vf_type = params.get("vf_type", "") info_check = params.get("info_check", "no") info_type = params.get("info_type", "") vf_pool_source = params.get("vf_pool_source", "vf_list") loop_times = int(params.get("loop_times", "1")) start_vm = "yes" == params.get("start_vm", "yes") including_pf = "yes" == params.get("including_pf", "no") max_vfs_attached = "yes" == params.get("max_vfs_attached", "no") inactive_pool = "yes" == params.get("inactive_pool", "no") duplicate_vf = "yes" == params.get("duplicate_vf", "no") expected_error = params.get("error_msg", "") vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() vmxml.remove_all_device_by_type('interface') vmxml.sync() if max_vfs_attached: controller_devices = vmxml.get_devices("controller") pci_bridge_controllers = [] for device in controller_devices: logging.debug(device) if device.type == 'pci' and device.model == "pci-bridge": pci_bridge_controllers.append(device) if not pci_bridge_controllers: pci_bridge_controller = Controller("controller") pci_bridge_controller.type = "pci" pci_bridge_controller.index = "1" pci_bridge_controller.model = "pci-bridge" vmxml.add_device(pci_bridge_controller) vmxml.sync() if start_vm: if not vm.is_dead(): vm.destroy() vm.start() if vm.serial_console is not None: vm.cleanup_serial_console() vm.create_serial_console() session = vm.wait_for_serial_login(timeout=240) session.close() else: if not vm.is_dead(): vm.destroy() driver_dir = "/sys/bus/pci/drivers/%s" % driver pci_dirs = glob.glob("%s/0000*" % driver_dir) pci_device_dir = "/sys/bus/pci/devices" pci_address = "" net_name = "test-net" # Prepare interface xml try: pf_iface_name = "" pci_address = utils_misc.wait_for(find_pf, timeout=60) if not pci_address: test.cancel("no up pf found in the test machine") pci_id = pci_address.split("/")[-1] pf_name = os.listdir('%s/net' % pci_address)[0] bus_slot = ':'.join(pci_address.split(':')[1:]) pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text logging.debug("The pci info of the sriov card is:\n %s", pci_info) max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1 if info_check == 'yes' or max_vfs < 32: vf_num = max_vfs create_vfs(vf_num) else: vf_num = max_vfs // 2 + 1 create_vfs(vf_num) vf_list = [] vf_name_list = [] for i in range(vf_num): vf = os.readlink("%s/virtfn%s" % (pci_address, str(i))) vf = 
os.path.split(vf)[1] vf_list.append(vf) vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0] vf_name_list.append(vf_name) if attach == "yes": vf_addr = vf_list[0] new_iface = create_interface() if inactive_pool: result = virsh.attach_device(vm_name, file_opt=new_iface.xml, flagstr=option, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) else: mac_addr = new_iface.mac_address nodedev_pci_addr = create_nodedev_pci(vf_addr) origin_driver = os.readlink( os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1] logging.debug( "The driver of vf before attaching to guest is %s\n", origin_driver) count = 0 while count < loop_times: interface = attach_interface() if vf_type in ["vf", "vf_pool"]: vf_addr_attrs = interface.hostdev_address.attrs if operation != "": do_operation() detach_interface() count += 1 if max_vfs_attached: interface_list = [] for vf_addr in vf_list: new_iface = create_interface() mac_addr = new_iface.mac_address nodedev_pci_addr = create_nodedev_pci(vf_addr) attach_interface() interface_list.append(new_iface) count = 0 for new_iface in interface_list: vf_addr = vf_list[count] vf_addr_attrs = new_iface.hostdev_address.attrs detach_interface() count += 1 if info_check == "yes": check_info() if including_pf: vf_list = [] pf_addr = pci_id vf_list.append(pf_addr) netxml = create_hostdev_network() result = virsh.net_define(netxml.xml, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) if duplicate_vf: vf_list.append(vf_list[0]) netxml = create_hostdev_network() result = virsh.net_define(netxml.xml, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) result = virsh.net_create(netxml.xml, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) finally: if vm.is_alive(): vm.destroy(gracefully=False) process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True) if vf_type == "vf_pool" or vf_type == "macvtap_network": virsh.net_destroy(net_name) virsh.net_undefine(net_name, ignore_status=True) backup_xml.sync()
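# The VF-creation step above hinges on the sysfs sriov_numvfs knob. A
# minimal sketch of just that step, assuming a PF device directory such as
# /sys/bus/pci/devices/0000:3b:00.0 (path and count are illustrative).
from avocado.utils import process

def set_numvfs(pf_sysfs_dir, vf_num):
    """Reset then create vf_num virtual functions on the given PF."""
    # The kernel refuses a new non-zero numvfs value unless it is 0 first.
    process.run("echo 0 > %s/sriov_numvfs" % pf_sysfs_dir, shell=True)
    res = process.run("echo %d > %s/sriov_numvfs" % (vf_num, pf_sysfs_dir),
                      shell=True, ignore_status=True)
    return res.exit_status == 0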
def run_virsh_net_define_undefine(test, params, env): """ Test command: virsh net-define/net-undefine. 1) Collect parameters&environment info before test 2) Prepare options for command 3) Execute command for test 4) Check state of defined network 5) Recover environment 6) Check result """ uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri", "default")) net_name = params.get("net_define_undefine_net_name", "default") net_uuid = params.get("net_define_undefine_net_uuid", "") options_ref = params.get("net_define_undefine_options_ref", "default") trans_ref = params.get("net_define_undefine_trans_ref", "trans") extra_args = params.get("net_define_undefine_extra", "") remove_existing = params.get("net_define_undefine_remove_existing", "yes") status_error = "yes" == params.get("status_error", "no") virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True} virsh_instance = virsh.VirshPersistent(**virsh_dargs) # Prepare environment and record current net_state_dict backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance) backup_state = virsh_instance.net_state_dict() logging.debug("Backed up network(s): %s", backup_state) # Make some XML to use for testing, for now we just copy 'default' test_xml = xml_utils.TempXMLFile() # temporary file try: # LibvirtXMLBase.__str__ returns XML content test_xml.write(str(backup['default'])) test_xml.flush() except (KeyError, AttributeError): raise error.TestNAError("Test requires default network to exist") testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name, net_uuid, bridge=None) if remove_existing: for netxml in backup.values(): netxml.orbital_nuclear_strike() # Test both define and undefine, So collect info # both of them for result check. # When something wrong with network, set it to 1 fail_flag = 0 result_info = [] if options_ref == "correct_arg": define_options = testnet_xml.xml undefine_options = net_name elif options_ref == "no_option": define_options = "" undefine_options = "" elif options_ref == "not_exist_option": define_options = "/not/exist/file" undefine_options = "NOT_EXIST_NETWORK" define_extra = undefine_extra = extra_args if trans_ref != "define": define_extra = "" try: # Run test case define_result = virsh.net_define(define_options, define_extra, **virsh_dargs) logging.debug(define_result) define_status = define_result.exit_status # If defining network succeed, then trying to start it. 
if define_status == 0: start_result = virsh.net_start(net_name, extra="", **virsh_dargs) logging.debug(start_result) start_status = start_result.exit_status if trans_ref == "trans": if define_status: fail_flag = 1 result_info.append("Define network with right command failed.") else: if start_status: fail_flag = 1 result_info.append("Network is defined as expected, " "but failed to start it.") # Stop network for undefine test anyway destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs) logging.debug(destroy_result) # Undefine network undefine_result = virsh.net_undefine(undefine_options, undefine_extra, **virsh_dargs) if trans_ref != "define": logging.debug(undefine_result) undefine_status = undefine_result.exit_status finally: # Recover environment leftovers = network_xml.NetworkXML.new_all_networks_dict( virsh_instance) for netxml in leftovers.values(): netxml.orbital_nuclear_strike() # Recover from backup for netxml in backup.values(): # If network is transient if ((not backup_state[netxml.name]['persistent']) and backup_state[netxml.name]['active']): netxml.create() continue # autostart = True requires persistent = True first! for state in ['persistent', 'autostart', 'active']: try: netxml[state] = backup_state[netxml.name][state] except xcepts.LibvirtXMLError as detail: fail_flag = 1 result_info.append(str(detail)) # Close down persistent virsh session (including for all netxml copies) if hasattr(virsh_instance, 'close_session'): virsh_instance.close_session() # Done with file, cleanup del test_xml del testnet_xml
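# The define/undefine test above walks a network through the persistent
# lifecycle. A condensed sketch of the happy path it expects, assuming an
# XML file path and network name supplied by the caller:
from virttest import virsh

def persistent_net_lifecycle(xml_path, net_name):
    """Define, start, stop and undefine a network, raising on any failure."""
    virsh.net_define(xml_path, ignore_status=False)    # persistent, inactive
    virsh.net_start(net_name, ignore_status=False)     # persistent, active
    virsh.net_destroy(net_name, ignore_status=False)   # inactive, still defined
    virsh.net_undefine(net_name, ignore_status=False)  # removed entirely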
def make_net_persistent(net_name): """ Make a transient network persistent by re-defining its live XML. """ net_xml = virsh.net_dumpxml(net_name).stdout logging.debug(net_xml) with open("/tmp/default.xml", "w") as f: f.write(net_xml) virsh.net_define("/tmp/default.xml", ignore_status=False)
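# Usage sketch for the helper above, assuming virsh is imported as in the
# rest of this suite: re-defining a live transient network's XML flips it
# to persistent without interrupting it. The network name is illustrative.
make_net_persistent("default")
assert virsh.net_state_dict()["default"]["persistent"]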
def run(test, params, env): """ Test command: virsh net-destroy. The command can forcefully stop a given network. 1.Make sure the network exists. 2.Prepare network status. 3.Perform virsh net-destroy operation. 4.Check if the network has been destroyed. 5.Recover network environment. 6.Confirm the test result. """ net_ref = params.get("net_destroy_net_ref") extra = params.get("net_destroy_extra", "") network_name = params.get("net_destroy_network", "default") network_status = params.get("net_destroy_status", "active") status_error = params.get("status_error", "no") net_persistent = "yes" == params.get("net_persistent", "yes") net_cfg_file = params.get("net_cfg_file", "/usr/share/libvirt/networks/default.xml") # libvirt acl polkit related params if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': test.cancel("API acl test not supported in current" " libvirt version.") uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' output_all = virsh.net_list("--all").stdout.strip() # prepare the network status: active, persistent if not re.search(network_name, output_all): if net_persistent: virsh.net_define(net_cfg_file, ignore_status=False) virsh.net_start(network_name, ignore_status=False) else: virsh.net_create(net_cfg_file, ignore_status=False) if net_persistent: if not virsh.net_state_dict()[network_name]['persistent']: logging.debug("Making the network persistent") make_net_persistent(network_name) else: if virsh.net_state_dict()[network_name]['persistent']: virsh.net_undefine(network_name, ignore_status=False) if not virsh.net_state_dict()[network_name]['active']: if network_status == "active": virsh.net_start(network_name, ignore_status=False) else: if network_status == "inactive": logging.debug("Destroying the network to test the inactive case") virsh.net_destroy(network_name, ignore_status=False) logging.debug("After prepare: %s" % virsh.net_state_dict()) # Run test case if net_ref == "uuid": net_ref = virsh.net_uuid(network_name).stdout.strip() elif net_ref == "name": net_ref = network_name status = virsh.net_destroy(net_ref, extra, uri=uri, debug=True, unprivileged_user=unprivileged_user, ignore_status=True).exit_status # Confirm the network has been destroyed. if net_persistent: if virsh.net_state_dict()[network_name]['active']: status = 1 else: output_all = virsh.net_list("--all").stdout.strip() if re.search(network_name, output_all): status = 1 logging.debug("transient network should not exist after destroy") # Recover network status to system default status try: if network_name not in virsh.net_state_dict(): virsh.net_define(net_cfg_file, ignore_status=False) if not virsh.net_state_dict()[network_name]['active']: virsh.net_start(network_name, ignore_status=False) if not virsh.net_state_dict()[network_name]['persistent']: make_net_persistent(network_name) if not virsh.net_state_dict()[network_name]['autostart']: virsh.net_autostart(network_name, ignore_status=False) except process.CmdError: test.error("Failed to recover network status!") # Check status_error if status_error == "yes": if status == 0: test.fail("Run successfully with wrong command!") elif status_error == "no": if status != 0: test.fail("Run failed with right command") else: test.error("The status_error must be 'yes' or 'no'!")
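# The prepare/recover logic above keys off virsh.net_state_dict(), which
# maps each network name to its 'active', 'persistent' and 'autostart'
# flags (booleans, judging by the checks above). A minimal probe, with
# "default" as an illustrative network name:
import logging
from virttest import virsh

state = virsh.net_state_dict()
if "default" in state:
    # e.g. {'active': True, 'persistent': True, 'autostart': False}
    logging.debug("default network state: %s", state["default"])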
def run(test, params, env): """ Test command: virsh net-list. The command returns list of networks. 1.Get all parameters from configuration. 2.Get current network's status (State, Autostart). 3.Do some preparation work for testing. 4.Perform virsh net-list operation. 5.Recover network status. 6.Confirm the result. """ option = params.get("net_list_option", "") extra = params.get("net_list_extra", "") status_error = params.get("status_error", "no") net_name = params.get("net_list_name", "default") persistent = params.get("net_list_persistent", "yes") net_status = params.get("net_list_error", "active") tmp_xml = os.path.join(test.tmpdir, "tmp.xml") net_current_status = "active" autostart_status = "yes" if not virsh.net_state_dict()[net_name]['active']: net_current_status = "inactive" if not virsh.net_state_dict()[net_name]['autostart']: autostart_status = "no" # acl polkit params uri = params.get("virsh_uri") unprivileged_user = params.get('unprivileged_user') if unprivileged_user: if unprivileged_user.count('EXAMPLE'): unprivileged_user = '******' if not libvirt_version.version_compare(1, 1, 1): if params.get('setup_libvirt_polkit') == 'yes': raise error.TestNAError("API acl test not supported in current" " libvirt version.") # Create a transient network. try: if persistent == "no": virsh.net_dumpxml(net_name, to_file=tmp_xml, ignore_status=False) if net_current_status == "inactive": virsh.net_destroy(net_name, ignore_status=False) virsh.net_undefine(net_name, ignore_status=False) virsh.net_create(tmp_xml, ignore_status=False) except error.CmdError: raise error.TestFail("Transient network test failed!") # Prepare network's status for testing. if net_status == "active": try: if not virsh.net_state_dict()[net_name]['active']: virsh.net_start(net_name, ignore_status=False) except error.CmdError: raise error.TestFail("Active network test failed!") else: try: if virsh.net_state_dict()[net_name]['active']: virsh.net_destroy(net_name, ignore_status=False) except error.CmdError: raise error.TestFail("Inactive network test failed!") virsh_dargs = {'ignore_status': True} if params.get('setup_libvirt_polkit') == 'yes': virsh_dargs['unprivileged_user'] = unprivileged_user virsh_dargs['uri'] = uri result = virsh.net_list(option, extra, **virsh_dargs) status = result.exit_status output = result.stdout.strip() # Recover network try: if persistent == "no": virsh.net_destroy(net_name, ignore_status=False) virsh.net_define(tmp_xml, ignore_status=False) if net_current_status == "active": virsh.net_start(net_name, ignore_status=False) if autostart_status == "yes": virsh.net_autostart(net_name, ignore_status=False) else: if net_current_status == "active" and net_status == "inactive": virsh.net_start(net_name, ignore_status=False) elif net_current_status == "inactive" and net_status == "active": virsh.net_destroy(net_name, ignore_status=False) except error.CmdError: raise error.TestFail("Recover network failed!") # check result if status_error == "yes": if status == 0: raise error.TestFail("Run successfully with wrong command!") elif status_error == "no": if status != 0: raise error.TestFail("Run failed with right command") if option == "--inactive": if net_status == "active": if re.search(net_name, output): raise error.TestFail("Found an active network with" " --inactive option") else: if persistent == "yes": if not re.search(net_name, output): raise error.TestFail("Found no inactive networks with" " --inactive option") else: # If network is transient, after net-destroy it, # it will disappear. if re.search(net_name, output): raise error.TestFail( "Found transient inactive networks" " with --inactive option") elif option == "": if net_status == "active": if not re.search(net_name, output): raise error.TestFail("Can't find active network with no" " option") else: if re.search(net_name, output): raise error.TestFail("Found inactive network with" " no option") elif option == "--all": if net_status == "active": if not re.search(net_name, output): raise error.TestFail("Can't find active network with" " --all option") else: if persistent == "yes": if not re.search(net_name, output): raise error.TestFail("Can't find inactive network with" " --all option") else: # If network is transient, after net-destroy it, # it will disappear. if re.search(net_name, output): raise error.TestFail("Found transient inactive network" " with --all option")
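# Every branch above reduces to a regex search over the net-list output.
# A small helper capturing that pattern; unlike the plain
# re.search(net_name, output) used above, the word-boundary pattern below
# avoids matching names such as "default2" when looking for "default".
import re
from virttest import virsh

def net_listed(net_name, option=""):
    """Return True if net_name appears in `virsh net-list <option>`."""
    output = virsh.net_list(option).stdout.strip()
    return bool(re.search(r"\b%s\b" % re.escape(net_name), output))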
def run(test, params, env): """ Sriov basic test: 1.create max vfs; 2.Check the nodedev info; 3.Start a guest with vf; 4.Reboot a guest with vf; 5.suspend/resume a guest with vf """ def find_pf(): pci_address = "" for pci in pci_dirs: temp_iface_name = os.listdir("%s/net" % pci)[0] operstate = utils_net.get_net_if_operstate(temp_iface_name) if operstate == "up": pf_iface_name = temp_iface_name pci_address = pci break if pci_address == "": return False else: return pci_address def create_address_dict(pci_id): """ Use pci_xxxx_xx_xx_x to create address dict. """ device_domain = pci_id.split(':')[0] device_domain = "0x%s" % device_domain device_bus = pci_id.split(':')[1] device_bus = "0x%s" % device_bus device_slot = pci_id.split(':')[-1].split('.')[0] device_slot = "0x%s" % device_slot device_function = pci_id.split('.')[-1] device_function = "0x%s" % device_function attrs = { 'type': 'pci', 'domain': device_domain, 'slot': device_slot, 'bus': device_bus, 'function': device_function } return attrs def addr_to_pci(addr): """ Convert address dict to pci address: xxxxx:xx.x. """ pci_domain = re.findall(r"0x(.+)", addr['domain'])[0] pci_bus = re.findall(r"0x(.+)", addr['bus'])[0] pci_slot = re.findall(r"0x(.+)", addr['slot'])[0] pci_function = re.findall(r"0x(.+)", addr['function'])[0] pci_addr = pci_domain + ":" + pci_bus + ":" + pci_slot + "." + pci_function return pci_addr def create_hostdev_interface(pci_id, managed, model): """ Create hostdev type interface xml. """ attrs = create_address_dict(pci_id) new_iface = Interface('hostdev') new_iface.managed = managed if model != "": new_iface.model = model new_iface.mac_address = utils_net.generate_mac_address_simple() new_iface.hostdev_address = new_iface.new_iface_address( **{"attrs": attrs}) chars = string.ascii_letters + string.digits + '-_' alias_name = 'ua-' + ''.join( random.choice(chars) for _ in list(range(64))) new_iface.alias = {'name': alias_name} return new_iface def create_vfs(vf_num): """ Create max vfs. 
""" net_device = [] net_name = [] # cleanup env and create vfs cmd = "echo 0 > %s/sriov_numvfs" % pci_address if driver == "mlx4_core": cmd = "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core" process.run(cmd, shell=True) pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines() net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines() pci_list_before = set(pci_list) net_list_before = set(net_list) cmd = "echo %d > %s/sriov_numvfs" % (vf_num, pci_address) if driver == "mlx4_core": cmd = "modprobe -v mlx4_core num_vfs=%d port_type_array=2,2 probe_vf=%d" \ % (vf_num, vf_num) test_res = process.run(cmd, shell=True) if test_res.exit_status != 0: test.fail("Fail to create vfs") def _vf_init_completed(): try: net_list_sriov = virsh.nodedev_list( cap='net').stdout.strip().splitlines() net_list_sriov = set(net_list_sriov) net_diff = list(net_list_sriov.difference(net_list_before)) net_count = len(net_diff) if ((driver != "mlx4_core" and net_count != vf_num) or (driver == "mlx4_core" and net_count != 2 * (vf_num + 1))): net_diff = [] return False return net_diff except process.CmdError: raise test.fail( "Get net list with 'virsh nodedev-list' failed\n") net_diff = utils_misc.wait_for(_vf_init_completed, timeout=300) pci_list_sriov = virsh.nodedev_list( cap='pci').stdout.strip().splitlines() pci_list_sriov = set(pci_list_sriov) pci_diff = list(pci_list_sriov.difference(pci_list_before)) if not net_diff: test.fail("Get net list with 'virsh nodedev-list' failed\n") for net in net_diff: net = net.split('_') length = len(net) net = '_'.join(net[1:length - 6]) mac = ':'.join(net[length - 6:]) net_name.append(net) for pci_addr in pci_diff: temp_addr = pci_addr.split("_") pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4] vf_net_name = os.listdir("%s/%s/net" % (pci_device_dir, pci_addr))[0] net_device.append(vf_net_name) logging.debug(sorted(net_name)) logging.debug(sorted(net_device)) if driver != "mlx4_core" and sorted(net_name) != sorted(net_device): test.fail("The net name get from nodedev-list is wrong\n") def get_ip_by_mac(mac_addr, timeout=120): """ Get interface IP address by given MAC address. """ if vm.serial_console is not None: vm.cleanup_serial_console() vm.create_serial_console() session = vm.wait_for_serial_login(timeout=240) def get_ip(): return utils_net.get_guest_ip_addr(session, mac_addr) try: ip_addr = "" iface_name = utils_net.get_linux_ifname(session, mac_addr) if iface_name is None: test.fail("no interface with MAC address %s found" % mac_addr) session.cmd("pkill -9 dhclient", ignore_all_errors=True) session.cmd("dhclient %s " % iface_name, ignore_all_errors=True) ip_addr = utils_misc.wait_for(get_ip, 20) logging.debug("The ip addr is %s", ip_addr) except Exception: logging.warning("Find %s with MAC address %s but no ip for it" % (iface_name, mac_addr)) finally: session.close() return ip_addr def create_nodedev_pci(pci_address): """ Convert xxxx:xx.x to pci_xxxx_xx_xx_x. """ nodedev_addr = pci_address.split(':')[0:2] slot_function = pci_address.split(':')[2] nodedev_addr.append(slot_function.split('.')[0]) nodedev_addr.append(slot_function.split('.')[1]) nodedev_addr.insert(0, "pci") nodedev_addr = "_".join(nodedev_addr) return nodedev_addr def create_network_interface(name): """ Create network type interface xml. 
""" new_iface = Interface('network') new_iface.source = {'network': name} new_iface.model = "virtio" new_iface.mac_address = utils_net.generate_mac_address_simple() return new_iface def create_hostdev_network(): """ Create hostdev type with vf pool network xml. """ vf_addr_list = [] netxml = network_xml.NetworkXML() if vf_pool_source == "vf_list": for vf in vf_list: attrs = create_address_dict(vf) new_vf = netxml.new_vf_address(**{'attrs': attrs}) vf_addr_list.append(new_vf) netxml.driver = {'name': 'vfio'} netxml.forward = {"mode": "hostdev", "managed": managed} netxml.vf_list = vf_addr_list else: netxml.pf = {"dev": pf_name} netxml.forward = {"mode": "hostdev", "managed": managed} netxml.name = net_name logging.debug(netxml) return netxml def create_macvtap_network(): """ Create macvtap type network xml. """ forward_interface_list = [] for vf_name in vf_name_list: forward_interface = {'dev': vf_name} forward_interface_list.append(forward_interface) netxml = network_xml.NetworkXML() netxml.name = net_name netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'} netxml.forward_interface = forward_interface_list logging.debug(netxml) return netxml def do_operation(): """ Do operation in guest os with vf and check the os behavior after operation. """ if operation == "resume_suspend": try: virsh.suspend(vm.name, debug=True, ignore_status=False) virsh.resume(vm.name, debug=True, ignore_statue=False) get_ip_by_mac(mac_addr, timeout=120) except process.CmdError as detail: err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name, detail) test.fail(err_msg) if operation == "reboot": try: if vm.serial_console is not None: vm.cleanup_serial_console() vm.create_serial_console() virsh.reboot(vm.name, ignore_status=False) get_ip_by_mac(mac_addr, timeout=120) except process.CmdError as detail: err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail) test.fail(err_msg) if operation == "save": result = virsh.managedsave(vm_name, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expect_error=True) if operation == "restart_libvirtd": detach_interface() utils_libvirtd.libvirtd_restart() interface = attach_interface() def check_info(): """ Check the pf or vf info after create vfs. """ if info_type == "pf_info" or info_type == "vf_order": nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1]) xml = NodedevXML.new_from_dumpxml(nodedev_pci) if info_type == "pf_info": product_info = xml.cap.product_info max_count = xml.max_count if pci_info.find(product_info) == -1: test.fail( "The product_info show in nodedev-dumpxml is wrong\n") if int(max_count) != max_vfs: test.fail( "The maxCount show in nodedev-dumpxml is wrong\n") if info_type == "vf_order": vf_addr_list = xml.cap.virt_functions if len(vf_addr_list) != max_vfs: test.fail( "The num of vf list show in nodedev-dumpxml is wrong\n" ) addr_list = [] for vf_addr in vf_addr_list: addr = vf_addr.domain + ":" + vf_addr.bus + ":" + vf_addr.slot + "." 
+ vf_addr.function addr_list.append(addr) logging.debug( "The vf addr list show in nodedev-dumpxml is %s\n", addr_list) if sorted(addr_list) != addr_list: test.fail( "The vf addr list show in nodedev-dumpxml is not sorted correctly\n" ) elif info_type == "vf_info": vf_addr = vf_list[0] nodedev_pci = create_nodedev_pci(vf_addr) vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci) vf_bus_slot = ':'.join(vf_addr.split(':')[1:]) res = process.run("lspci -s %s -vv" % vf_bus_slot) vf_pci_info = res.stdout_text vf_product_info = vf_xml.cap.product_info if vf_pci_info.find(vf_product_info) == -1: test.fail( "The product_info show in nodedev-dumpxml is wrong\n") pf_addr = vf_xml.cap.virt_functions[0] pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0] pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0] pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0] pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0] pf_pci = pf_addr_domain + ":" + pf_addr_bus + ":" + pf_addr_slot + "." + pf_addr_function if pf_pci != pci_id: test.fail( "The pf address show in vf nodedev-dumpxml is wrong\n") def create_interface(): """ Call different function to create interface according to the type """ new_iface = Interface('network') if vf_type == "vf": new_iface = create_hostdev_interface(vf_addr, managed, model) if vf_type == "vf_pool": netxml = create_hostdev_network() virsh.net_define(netxml.xml, ignore_status=True) if not inactive_pool: virsh.net_start(netxml.name) new_iface = create_network_interface(netxml.name) if vf_type == "macvtap": new_iface = Interface('direct') new_iface.source = {"dev": vf_name, "mode": "passthrough"} new_iface.mac_address = utils_net.generate_mac_address_simple() new_iface.model = "virtio" if vf_type == "macvtap_network": netxml = create_macvtap_network() result = virsh.net_define(netxml.xml, ignore_status=True) virsh.net_start(netxml.name) new_iface = create_network_interface(netxml.name) return new_iface def detach_interface(): """ Detach interface: 1.Detach interface from xml; 2.Check the live xml after detach interface; 3.Check the vf driver after detach interface. 
""" def _detach_completed(): result = virsh.domiflist(vm_name, "", ignore_status=True) return result.stdout.find(mac_addr) == -1 def check_addr_attrs(): live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) device = live_xml.devices hostdev_list = device.by_device_tag("hostdev") for hostdev in hostdev_list: addr = hostdev.source.untyped_address hostdev_addr_attrs = { "domain": addr.domain, "bus": addr.bus, "slot": addr.slot, "function": addr.function } if hostdev_addr_attrs == vf_addr_attrs: return False return True result = virsh.detach_device(vm_name, new_iface.xml) utils_test.libvirt.check_exit_status(result, expect_error=False) if vf_type == "hostdev": check_ret = utils_misc.wait_for(check_addr_attrs, timeout=60) if not check_ret: test.fail("The hostdev device detach failed from xml\n") else: utils_misc.wait_for(_detach_completed, timeout=60) live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) device = live_xml.devices logging.debug("Domain xml after detach interface:\n %s", live_xml) if vf_type == "vf" or vf_type == "vf_pool": for interface in device.by_device_tag("interface"): if interface.type_name == "hostdev": if interface.hostdev_address.attrs == vf_addr_attrs: test.fail( "The hostdev interface still in the guest xml after detach\n" ) break driver = os.readlink( os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1] logging.debug("The driver after vf detached from guest is %s\n", driver) if managed == "no": if driver != "vfio-pci": test.fail( "The vf pci driver is not vfio-pci after detached from guest with managed as no\n" ) result = virsh.nodedev_reattach(nodedev_pci_addr) utils_test.libvirt.check_exit_status(result, expect_error=False) elif driver != origin_driver: test.fail( "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n" % (driver, origin_driver)) else: for interface in device.by_device_tag("interface"): if interface.type_name == "direct": if interface.source["dev"] == vf_name: test.fail( "The macvtap interface still exist in the guest xml after detach\n" ) break def attach_interface(): """ Attach interface: 1.Attach interface from xml; 2.Check the vf driver after attach interface; 3.Check the live xml after attach interface; """ if managed == "no": result = virsh.nodedev_detach(nodedev_pci_addr) utils_test.libvirt.check_exit_status(result, expect_error=False) logging.debug("attach interface xml:\n %s", new_iface) result = virsh.attach_device(vm_name, new_iface.xml, flagstr=option, debug=True) utils_test.libvirt.check_exit_status(result, expect_error=False) if option == "--config": result = virsh.start(vm_name) utils_test.libvirt.check_exit_status(result, expect_error=False) # For option == "--persistent", after VM destroyed and then start, the device should still be there. 
if option == "--persistent": virsh.destroy(vm_name) result = virsh.start(vm_name, debug=True) utils_test.libvirt.check_exit_status(result, expect_error=False) live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug(live_xml) if vf_type != "hostdev": get_ip_by_mac(mac_addr, timeout=60) device = live_xml.devices if vf_type == "hostdev": hostdev_list = device.by_device_tag("hostdev") if len(hostdev_list) == 0: test.fail("The hostdev device attach failed from xml\n") else: for hostdev in hostdev_list: if hostdev.type == "pci": break interface = hostdev if vf_type == "vf" or vf_type == "vf_pool": for interface in device.by_device_tag("interface"): if interface.type_name == "hostdev": if interface.driver.driver_attr['name'] != 'vfio': test.fail( "The driver of the hostdev interface is not vfio\n" ) break vf_addr_attrs = interface.hostdev_address.attrs pci_addr = addr_to_pci(vf_addr_attrs) nic_driver = os.readlink( os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1] if nic_driver != "vfio-pci": test.fail("The driver of the hostdev interface is not vfio\n") elif vf_type == "macvtap" or vf_type == "macvtap_network": for interface in device.by_device_tag("interface"): if interface.type_name == "direct": if vf_type == "macvtap": if interface.source["dev"] == new_iface.source["dev"]: match = "yes" vf_name = interface.source["dev"] elif interface.source['dev'] in vf_name_list: match = "yes" vf_name = interface.source["dev"] if match != "yes": test.fail( "The dev name or mode of macvtap interface is wrong after attach\n" ) return interface def setup_controller(nic_num, controller_index, ctl_models): """ Create controllers bond to numa node in the guest xml :param nic_num: number of nic card bond to numa node :param controller_index: index num used to create controllers :param ctl_models: contoller topo for numa bond """ index = controller_index if nic_num == 2: ctl_models.append('pcie-switch-upstream-port') ctl_models.append('pcie-switch-downstream-port') ctl_models.append('pcie-switch-downstream-port') for i in range(index): controller = Controller("controller") controller.type = "pci" controller.index = i if i == 0: controller.model = 'pcie-root' else: controller.model = 'pcie-root-port' vmxml.add_device(controller) set_address = False for model in ctl_models: controller = Controller("controller") controller.type = "pci" controller.index = index controller.model = model if set_address or model == "pcie-switch-upstream-port": attrs = { 'type': 'pci', 'domain': '0', 'slot': '0', 'bus': index - 1, 'function': '0' } controller.address = controller.new_controller_address( **{"attrs": attrs}) logging.debug(controller) if controller.model == "pcie-expander-bus": controller.node = "0" controller.target = {'busNr': '100'} set_address = True else: set_address = False logging.debug(controller) vmxml.add_device(controller) index += 1 return index - 1 def add_numa(vmxml): """ Add numa node in the guest xml :param vmxml: The instance of VMXML clas """ vcpu = vmxml.vcpu max_mem = vmxml.max_mem max_mem_unit = vmxml.max_mem_unit numa_dict = {} numa_dict_list = [] # Compute the memory size for each numa node if vcpu == 1: numa_dict['id'] = '0' numa_dict['cpus'] = '0' numa_dict['memory'] = str(max_mem) numa_dict['unit'] = str(max_mem_unit) numa_dict_list.append(numa_dict) else: for index in range(2): numa_dict['id'] = str(index) numa_dict['memory'] = str(max_mem // 2) numa_dict['unit'] = str(max_mem_unit) if vcpu == 2: numa_dict['cpus'] = str(index) else: if index == 0: if vcpu == 3: 
numa_dict['cpus'] = str(index) if vcpu > 3: numa_dict['cpus'] = "%s-%s" % (index, vcpu // 2 - 1) else: numa_dict['cpus'] = "%s-%s" % (vcpu // 2, str(vcpu - 1)) numa_dict_list.append(numa_dict) numa_dict = {} # Add cpu device with numa node setting in domain xml vmxml_cpu = vm_xml.VMCPUXML() vmxml_cpu.xml = "<cpu><numa/></cpu>" vmxml_cpu.numa_cell = numa_dict_list vmxml.cpu = vmxml_cpu def create_iface_list(bus_id, nic_num, vf_list): """ Create hostdev interface list bond to numa node :param bus_id: bus_id in pci address which decides the controller attached to :param nic_num: number of nic card bond to numa node :param vf_list: sriov vf list """ iface_list = [] for num in range(nic_num): vf_addr = vf_list[num] iface = create_hostdev_interface(vf_addr, managed, model) bus_id -= num attrs = { 'type': 'pci', 'domain': '0', 'slot': '0', 'bus': bus_id, 'function': '0' } iface.address = iface.new_iface_address(**{"attrs": attrs}) iface_list.append(iface) return iface_list def check_guestos(iface_list): """ Check whether vf bond to numa node can get ip successfully in guest os :param iface_list: hostdev interface list """ for iface in iface_list: mac_addr = iface.mac_address get_ip_by_mac(mac_addr, timeout=60) def check_numa(vf_driver): """ Check whether vf bond to correct numa node in guest os :param vf_driver: vf driver """ if vm.serial_console: vm.cleanup_serial_console() vm.create_serial_console() session = vm.wait_for_serial_login(timeout=240) vf_pci = "/sys/bus/pci/drivers/%s" % vf_driver vf_dir = session.cmd_output("ls -d %s/00*" % vf_pci).strip().split('\n') for vf in vf_dir: numa_node = session.cmd_output('cat %s/numa_node' % vf).strip().split('\n')[-1] logging.debug("The vf is attached to numa node %s\n", numa_node) if numa_node != "0": test.fail("The vf is not attached to numa node 0\n") session.close() def remove_devices(vmxml, device_type): """ Remove all addresses for all devices who has one. 
:param vm_xml: The VM XML to be modified :param device_type: The device type for removing :return: True if success, otherwise, False """ if device_type not in ['address', 'usb']: return type_dict = {'address': '/devices/*/address', 'usb': '/devices/*'} try: for elem in vmxml.xmltreefile.findall(type_dict[device_type]): if device_type == 'usb': if elem.get('bus') == 'usb': vmxml.xmltreefile.remove(elem) else: vmxml.xmltreefile.remove(elem) except (AttributeError, TypeError) as details: test.error("Fail to remove '%s': %s" % (device_type, details)) vmxml.xmltreefile.write() vm_name = params.get("main_vm", "avocado-vt-vm1") vm = env.get_vm(params["main_vm"]) machine_type = params.get("machine_type", "pc") operation = params.get("operation") driver = params.get("driver", "ixgbe") status_error = params.get("status_error", "no") == "yes" model = params.get("model", "") managed = params.get("managed", "yes") attach = params.get("attach", "") option = params.get("option", "") vf_type = params.get("vf_type", "") dev_type = params.get("dev_type", "") info_check = params.get("info_check", "no") info_type = params.get("info_type", "") vf_pool_source = params.get("vf_pool_source", "vf_list") loop_times = int(params.get("loop_times", "1")) start_vm = "yes" == params.get("start_vm", "yes") including_pf = "yes" == params.get("including_pf", "no") max_vfs_attached = "yes" == params.get("max_vfs_attached", "no") inactive_pool = "yes" == params.get("inactive_pool", "no") duplicate_vf = "yes" == params.get("duplicate_vf", "no") expected_error = params.get("error_msg", "") nic_num = int(params.get("nic_num", "1")) nfv = params.get("nfv", "no") == "yes" ctl_models = params.get("ctl_models", "").split(' ') controller_index = int(params.get("controller_index", "12")) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() vmxml.remove_all_device_by_type('interface') vmxml.sync() if max_vfs_attached: controller_devices = vmxml.get_devices("controller") pci_bridge_controllers = [] for device in controller_devices: logging.debug(device) if device.type == 'pci' and device.model in ("pci-bridge", "pcie-root-port"): pci_bridge_controllers.append(device) if not pci_bridge_controllers: pci_bridge_controller = Controller("controller") pci_bridge_controller.type = "pci" pci_bridge_controller.index = "1" pci_bridge_controller.model = "pci-bridge" vmxml.add_device(pci_bridge_controller) vmxml.sync() if start_vm: if not vm.is_dead(): vm.destroy() vm.start() if vm.serial_console is not None: vm.cleanup_serial_console() vm.create_serial_console() session = vm.wait_for_serial_login(timeout=240) session.close() else: if not vm.is_dead(): vm.destroy() driver_dir = "/sys/bus/pci/drivers/%s" % driver pci_dirs = glob.glob("%s/000*" % driver_dir) pci_device_dir = "/sys/bus/pci/devices" pci_address = "" net_name = "test-net" # Prepare interface xml try: pf_iface_name = "" pci_address = utils_misc.wait_for(find_pf, timeout=60) if not pci_address: test.cancel("no up pf found in the test machine") pci_id = pci_address.split("/")[-1] pf_name = os.listdir('%s/net' % pci_address)[0] bus_slot = ':'.join(pci_address.split(':')[1:]) pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text logging.debug("The pci info of the sriov card is:\n %s", pci_info) max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1 if info_check == 'yes' or max_vfs < 32: vf_num = max_vfs create_vfs(vf_num) else: vf_num = int(max_vfs // 2 + 1) create_vfs(vf_num) vf_list = [] vf_name_list = [] vf_mac_list = [] for i in 
range(vf_num): vf = os.readlink("%s/virtfn%s" % (pci_address, str(i))) vf = os.path.split(vf)[1] vf_list.append(vf) vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0] with open('%s/%s/net/%s/address' % (pci_device_dir, vf, vf_name), 'r') as f: vf_mac = f.readline().strip() vf_name_list.append(vf_name) vf_mac_list.append(vf_mac) if attach == "yes" and not nfv: vf_addr = vf_list[0] if dev_type: mac_addr = vf_mac_list[0] new_iface = utils_test.libvirt.create_hostdev_xml( vf_addr, managed=managed, xmlfile=False) else: new_iface = create_interface() mac_addr = new_iface.mac_address if inactive_pool: result = virsh.attach_device(vm_name, new_iface.xml, flagstr=option, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) else: nodedev_pci_addr = create_nodedev_pci(vf_addr) origin_driver = os.readlink( os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1] logging.debug( "The driver of vf before attaching to guest is %s\n", origin_driver) count = 0 while count < loop_times: interface = attach_interface() if vf_type in ["vf", "vf_pool"]: vf_addr_attrs = interface.hostdev_address.attrs if vf_type == "hostdev": addr = interface.source.untyped_address vf_addr_attrs = { "domain": addr.domain, "bus": addr.bus, "slot": addr.slot, "function": addr.function } if operation != "": do_operation() detach_interface() count += 1 if max_vfs_attached: interface_list = [] for vf_addr in vf_list: new_iface = create_interface() mac_addr = new_iface.mac_address nodedev_pci_addr = create_nodedev_pci(vf_addr) attach_interface() interface_list.append(new_iface) count = 0 for new_iface in interface_list: vf_addr = vf_list[count] vf_addr_attrs = new_iface.hostdev_address.attrs detach_interface() count += 1 if info_check == "yes": check_info() if including_pf: vf_list = [] pf_addr = pci_id vf_list.append(pf_addr) netxml = create_hostdev_network() result = virsh.net_define(netxml.xml, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) if duplicate_vf: vf_list.append(vf_list[0]) netxml = create_hostdev_network() result = virsh.net_define(netxml.xml, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) result = virsh.net_create(netxml.xml, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) if nfv: vf_driver = os.readlink( os.path.join(pci_device_dir, vf_list[0], "driver")).split('/')[-1] vmxml.remove_all_device_by_type('controller') remove_devices(vmxml, 'address') remove_devices(vmxml, 'usb') osxml = vmxml.os if "i440fx" in vmxml.os.machine: osxml.machine = "q35" vmxml.os = osxml add_numa(vmxml) bus_id = setup_controller(nic_num, controller_index, ctl_models) vmxml.sync() logging.debug(vmxml) iface_list = create_iface_list(bus_id, nic_num, vf_list) for iface in iface_list: process.run("cat %s" % iface.xml, shell=True).stdout_text result = virsh.attach_device(vm_name, iface.xml, flagstr=option, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expect_error=False) result = virsh.start(vm_name, debug=True) utils_test.libvirt.check_exit_status(result, expect_error=False) live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug(live_xml) check_guestos(iface_list) check_numa(vf_driver) finally: if vm.is_alive(): vm.destroy(gracefully=False) backup_xml.sync() if driver == "mlx4_core": # Reload mlx4 driver to default setting process.run( "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core", shell=True) 
process.run( "modprobe mlx4_core; modprobe mlx4_ib; modprobe mlx4_en", shell=True) else: process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True) if vf_type == "vf_pool" or vf_type == "macvtap_network": virsh.net_destroy(net_name) virsh.net_undefine(net_name, ignore_status=True)
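# The SR-IOV tests above juggle two PCI address spellings: the sysfs/lspci
# form "0000:3b:10.0" and the virsh nodedev form "pci_0000_3b_10_0" (the
# example address is illustrative). A self-contained round-trip of the
# conversion performed by create_nodedev_pci:
def pci_to_nodedev(pci_address):
    """Convert dddd:bb:ss.f to pci_dddd_bb_ss_f."""
    domain, bus, slot_function = pci_address.split(":")
    slot, function = slot_function.split(".")
    return "_".join(["pci", domain, bus, slot, function])

assert pci_to_nodedev("0000:3b:10.0") == "pci_0000_3b_10_0"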
def run(test, params, env):
    """
    Test the MTU feature of virtual networks
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')

    def set_network(size, net='default'):
        """
        Set the MTU size of a certain network
        """
        logging.info('Set MTU size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))

    def set_interface(mtu_size='', source_network='default',
                      iface_type='network', iface_model='virtio'):
        """
        Set the MTU size of a certain interface
        """
        interface_type = 'bridge' if iface_type in (
            'bridge', 'openvswitch') else iface_type
        iface_dict = {'type': interface_type,
                      'source': "{'%s': '%s'}" % (interface_type,
                                                  source_network),
                      'model': iface_model}
        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})
        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})
        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)

    def get_default_if():
        """
        Get the default interface that is used by the vm
        """
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run(
                    'ethtool %s' % iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on the host for testing
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        return m_net.xml

    def create_iface(iface_type, **kwargs):
        """
        Create an interface to be attached to the vm
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'],
                              'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface

    def check_mtu(mtu_size, qemu=False):
        """
        Check whether the MTU meets expectations on the host
        """
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        ifconfig_info = process.run('ifconfig|grep mtu|grep %s' % dev,
                                    shell=True, verbose=True).stdout_text
        if 'mtu %s' % mtu_size in ifconfig_info:
            logging.info('PASS on ifconfig check for vnet.')
        else:
            error += 'Fail on ifconfig check for vnet.'
        if qemu:
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True,
                                        verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size):
        """
        Check whether the MTU meets expectations inside the vm
        """
        session = fn_login()
        check_cmd = 'ifconfig'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
        if add_pkg:
            add_pkg = add_pkg.split()
            if 'openvswitch' in add_pkg and shutil.which('ovs-vsctl'):
                # openvswitch is already installed, only install the rest
                new_pkg = add_pkg.copy()
                new_pkg.remove('openvswitch')
                utils_package.package_install(new_pkg)
            else:
                # install all required packages
                utils_package.package_install(add_pkg)
            if 'openvswitch' in add_pkg:
                br = 'ovsbr0' + utils_misc.generate_random_string(3)
                process.run('systemctl start openvswitch.service',
                            shell=True, verbose=True)
                process.run('ovs-vsctl add-br %s' % br, shell=True,
                            verbose=True)
                process.run('ovs-vsctl show', shell=True, verbose=True)

        if not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(bridge_name, net_type,
                                                  bridge_name=br)
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)

            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
            if mtu_type == 'network' and with_iface:
                mtu_size = str(int(mtu_size) // 2)
                iface_mtu = mtu_size

            source_net = bridge_name if net_type in (
                'bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu, source_network=source_net,
                          iface_type=iface_type, iface_model=model)
            vm.start()
            vm_login = vm.wait_for_serial_login if net_type in (
                'bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = mtu_type == 'interface'

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network', source_net='default',
                                         mtu=mtu_size, model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + '.save')
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            if mtu_type == 'interface' or with_iface:
                check_mtu_in_vm(vm_login, mtu_size)
                vm_login(timeout=60).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name, 'network %s' % params['mac'],
                                       debug=True)
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail('Failed to detach interface with mtu after '
                              'save-restore')

        else:
            hotplug = 'yes' == params.get('hotplug', 'no')
            if check == 'net_update':
                result = virsh.net_update(
                    DEFAULT_NET, 'modify', 'mtu',
                    '''"<mtu size='%s'/>"''' % mtu_size, debug=True)
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = ('direct-macvtap' +
                             utils_misc.generate_random_string(3))

                # Test mtu in different types of network
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if, mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(bridge_name, 'bridge',
                                                      mtu=mtu_size,
                                                      bridge_name=br)
                    if check == 'ovswitch_net':
                        test_net = create_network_xml(bridge_name,
                                                      'openvswitch',
                                                      mtu=mtu_size,
                                                      bridge_name=br)
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network', source_net=macv_name,
                                             mtu=mtu_size, model_net=model)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct', base_if=base_if,
                                             mtu=mtu_size, model_net=model)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network', source_net='default',
                                     mtu=mtu_size, model_net=model)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])

    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            if 'openvswitch' in add_pkg:
                process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)
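# For reference (not part of the original test): the two placements of the
# libvirt <mtu> element the test above toggles between. A network-level MTU
# applies to the network's bridge and the interfaces connected to it, while
# an interface-level MTU overrides it for a single guest NIC. The names and
# sizes below are illustrative only.
NETWORK_MTU_XML = """
<network>
  <name>mtu-net</name>
  <forward mode='nat'/>
  <bridge name='virbr-mtu' stp='on' delay='0'/>
  <mtu size='9000'/>
</network>
"""

IFACE_MTU_XML = """
<interface type='network'>
  <source network='mtu-net'/>
  <model type='virtio'/>
  <mtu size='1400'/>
</interface>
"""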
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters & environment info before the test
    2) Prepare options for the command
    3) Execute the command for the test
    4) Check the state of the defined network
    5) Recover the environment
    6) Check the result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = "yes" == params.get("net_define_undefine_remove_existing",
                                          "yes")
    status_error = "yes" == params.get("status_error", "no")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        raise error.TestNAError("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)

    if remove_existing:
        for netxml in backup.values():
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, so collect info
    # on both of them for the result check.
    # When something is wrong with the network, set it to 1
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    try:
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # If defining the network succeeded, try to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Defining the network with the right "
                                   "command failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network
        undefine_result = virsh.net_undefine(undefine_options,
                                             undefine_extra,
                                             **virsh_dargs)
        if trans_ref != "define":
            logging.debug(undefine_result)
            undefine_status = undefine_result.exit_status

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in leftovers.values():
            netxml.orbital_nuclear_strike()

        # Recover from backup
        for netxml in backup.values():
            # If network is transient
            if ((not backup_state[netxml.name]['persistent'])
                    and backup_state[netxml.name]['active']):
                netxml.create()
                continue
            # autostart = True requires persistent = True first!
            for state in ['persistent', 'autostart', 'active']:
                try:
                    netxml[state] = backup_state[netxml.name][state]
                except xcepts.LibvirtXMLError as detail:
                    fail_flag = 1
                    result_info.append(str(detail))

        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()

        # Done with file, cleanup
        del test_xml
        del testnet_xml
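# Not part of the original module: the tests above and below index
# virsh.net_state_dict() with ['active'], ['autostart'] and ['persistent'].
# For reference, a sketch of the structure that call returns, keyed by
# network name (the values here are illustrative):
EXAMPLE_NET_STATE = {
    'default': {
        'active': True,       # network is currently running
        'autostart': True,    # network is started automatically
        'persistent': True,   # network has a stored definition
    },
}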
def check_unix_sock(group, perms, path, readonly=False):
    """
    Check the validity of one libvirt socket file, including existence,
    group name, access permission and usability of the virsh command.

    :param group: Expected group of the file.
    :param perms: Expected permission string of the file.
    :param path: Absolute path of the target file.
    :param readonly: Whether the socket is expected to be read-only.
    :return: True on success, False if any check fails.
    """
    mode = os.stat(path).st_mode
    gid = os.stat(path).st_gid

    # Check that the file exists and is a socket file.
    if not stat.S_ISSOCK(mode):
        logging.error("File %s is not a socket file." % path)
        return False

    # Check file group ID.
    try:
        expected_gid = grp.getgrnam(group).gr_gid
        logging.debug('Group ID of %s is %s' % (group, expected_gid))
        if gid != expected_gid:
            logging.error('File group gid expected to be %s, but %s found'
                          % (expected_gid, gid))
            return False
    except KeyError:
        logging.error('Can not find group "%s"' % group)
        return False

    # Check file permissions.
    mode_str = mode_bits_to_str(stat.S_IMODE(mode))
    logging.debug('Permission of file %s is %s' % (path, mode_str))
    # Zero-pad perms to 4 digits.
    expected_perms = perms.zfill(4)
    if mode_str != expected_perms:
        logging.error('Expected file permission is %s, but %s found'
                      % (expected_perms, mode_str))
        return False

    # Check virsh connection.
    uri = 'qemu+unix:///system?socket=%s' % path

    # Prepare test XML file.
    net_name = "unix_sock_test"
    xml_cont = "<network><name>%s</name></network>" % net_name
    xml_path = os.path.join(data_dir.get_tmp_dir(), net_name + '.xml')
    with open(xml_path, 'w') as xml_file:
        xml_file.write(xml_cont)

    result = virsh.net_define(xml_path, uri=uri, ignore_status=True)
    logging.debug('Result of virsh test run is:\n%s' % result)
    try:
        if result.exit_status and not readonly:
            logging.error('Error encountered when running virsh net-define '
                          'on socket file %s' % path)
            return False
        elif readonly and not result.exit_status:
            logging.error('Expected failure when running virsh net-define '
                          'on read-only socket file %s, but it succeeded.'
                          % path)
            return False
    finally:
        # Cleanup network and temp file
        virsh.net_undefine(net_name, uri=uri, ignore_status=True)
        if os.path.exists(xml_path):
            os.remove(xml_path)

    # All checks passed
    return True
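# mode_bits_to_str() is called above but defined elsewhere in the original
# module. This is only a sketch of a compatible implementation, assuming it
# renders the permission bits as a zero-padded octal string, which is what
# the perms.zfill(4) comparison above implies:
def mode_bits_to_str(mode):
    """Convert permission bits (e.g. 0o777) to a 4-digit string ('0777')."""
    return "%04o" % mode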
def run_virsh_net_list(test, params, env):
    """
    Test command: virsh net-list.

    The command returns a list of networks.
    1. Get all parameters from the configuration.
    2. Get the current network's status (State, Autostart).
    3. Do some preparation work for testing.
    4. Perform the virsh net-list operation.
    5. Recover the network status.
    6. Confirm the result.
    """
    option = params.get("net_list_option", "")
    extra = params.get("net_list_extra", "")
    status_error = params.get("status_error", "no")
    net_name = params.get("net_list_name", "default")
    persistent = params.get("net_list_persistent", "yes")
    net_status = params.get("net_list_error", "active")
    tmp_xml = os.path.join(test.tmpdir, "tmp.xml")
    net_current_status = "active"
    autostart_status = "yes"
    if not virsh.net_state_dict()[net_name]["active"]:
        net_current_status = "inactive"
    if not virsh.net_state_dict()[net_name]["autostart"]:
        autostart_status = "no"

    # Create a transient network.
    try:
        if persistent == "no":
            virsh.net_dumpxml(net_name, to_file=tmp_xml, ignore_status=False)
            if net_current_status == "inactive":
                virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_undefine(net_name, ignore_status=False)
            virsh.net_create(tmp_xml, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Transient network test failed!")

    # Prepare network's status for testing.
    if net_status == "active":
        try:
            if not virsh.net_state_dict()[net_name]["active"]:
                virsh.net_start(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Active network test failed!")
    else:
        try:
            if virsh.net_state_dict()[net_name]["active"]:
                virsh.net_destroy(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Inactive network test failed!")

    result = virsh.net_list(option, extra, ignore_status=True)
    status = result.exit_status
    output = result.stdout.strip()

    # Recover network
    try:
        if persistent == "no":
            virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_define(tmp_xml, ignore_status=False)
            if net_current_status == "active":
                virsh.net_start(net_name, ignore_status=False)
            if autostart_status == "yes":
                virsh.net_autostart(net_name, ignore_status=False)
        else:
            if net_current_status == "active" and net_status == "inactive":
                virsh.net_start(net_name, ignore_status=False)
            elif net_current_status == "inactive" and net_status == "active":
                virsh.net_destroy(net_name, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Recover network failed!")

    # check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        if option == "--inactive":
            if net_status == "active":
                if re.search(net_name, output):
                    raise error.TestFail("Found an active network with"
                                         " --inactive option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Found no inactive networks with"
                                             " --inactive option")
                else:
                    # If the network is transient, it will disappear
                    # after net-destroy.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive"
                                             " networks with --inactive"
                                             " option")
        elif option == "":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with no"
                                         " option")
            else:
                if re.search(net_name, output):
                    raise error.TestFail("Found inactive network with"
                                         " no option")
        elif option == "--all":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with"
                                         " --all option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Can't find inactive network"
                                             " with --all option")
                else:
                    # If the network is transient, it will disappear
                    # after net-destroy.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive"
                                             " network with --all option")
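# Not part of the original test: a condensed sketch of the "create a
# transient network" recipe used in the prepare step above, assuming the
# avocado-vt virsh wrappers used throughout this module. The helper name
# make_net_transient is an assumption.
from virttest import virsh


def make_net_transient(net_name, xml_path):
    """Recreate net_name as a transient network from its own XML dump."""
    virsh.net_dumpxml(net_name, to_file=xml_path, ignore_status=False)
    if virsh.net_state_dict()[net_name]['active']:
        virsh.net_destroy(net_name, ignore_status=False)
    virsh.net_undefine(net_name, ignore_status=False)
    # net-create starts a network from XML without defining it,
    # which makes the network transient.
    virsh.net_create(xml_path, ignore_status=False)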
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1. Make sure the network exists.
    2. Prepare the network status.
    3. Perform the virsh net-destroy operation.
    4. Check whether the network has been destroyed.
    5. Recover the network environment.
    6. Confirm the test result.
    """
    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file",
                              "/usr/share/libvirt/networks/default.xml")
    check_libvirtd = "yes" == params.get("check_libvirtd")
    vm_defined = "yes" == params.get("vm_defined")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        if net_persistent:
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            virsh.net_create(net_cfg_file, ignore_status=False)
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("make the network persistent...")
            make_net_persistent(network_name)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug("destroy the network as we need to test an "
                          "inactive network...")
            virsh.net_destroy(network_name, ignore_status=False)
    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    if check_libvirtd:
        vm_name = params.get("main_vm")
        if virsh.is_alive(vm_name):
            virsh.destroy(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml
        # make sure there is an interface with source network 'default'
        iface_devices = vmxml.get_devices(device_type="interface")
        has_default_net = False
        for iface in iface_devices:
            source = iface.get_source()
            if 'network' in source.keys() and source['network'] == 'default':
                has_default_net = True
                break
            elif 'bridge' in source.keys() and source['bridge'] == 'virbr0':
                has_default_net = True
                break
        if not has_default_net:
            options = "network default --current"
            virsh.attach_interface(vm_name, options, ignore_status=False)
        try:
            if vm_defined:
                ret = virsh.start(vm_name)
            else:
                logging.debug("undefine the vm, then create the vm...")
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                virsh.undefine(vm_name)
                ret = virsh.create(vmxml.xml)
                logging.debug(ret.stdout)
            # check the create or start cmd status
            utils_test.libvirt.check_exit_status(
                ret, expect_error=(network_status != 'active'))
            status = 1

            if status_error != 'yes':
                cmd = "ps -ef | grep /usr/sbin/libvirtd | grep -v grep"
                # record the libvirtd pid, then destroy the network
                libvirtd_pid = process.run(
                    cmd, shell=True).stdout_text.strip().split()[1]
                ret = virsh.net_destroy(net_ref, extra, uri=uri, debug=True,
                                        unprivileged_user=unprivileged_user,
                                        ignore_status=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                # check that the libvirtd pid did not change
                result = check_libvirtd_restart(libvirtd_pid, cmd)
                if result:
                    test.fail("libvirtd crashed after destroying the "
                              "network!")
                    status = 1
                else:
                    logging.debug("libvirtd did not crash after destroying "
                                  "the network!")
                    status = 0
                # destroy the vm, then check that the libvirtd pid
                # did not change
                ret = virsh.destroy(vm_name)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                result = check_libvirtd_restart(libvirtd_pid, cmd)
                if result:
                    test.fail("libvirtd crashed after destroying the vm!")
                    status = 1
                else:
                    logging.debug("libvirtd did not crash after destroying "
                                  "the vm!")
                    status = 0
        finally:
            if not vm_defined:
                vmxml_backup.define()
            vmxml_backup.sync()
    else:
        readonly = (params.get("net_destroy_readonly", "no") == "yes")
        status = virsh.net_destroy(net_ref, extra, uri=uri,
                                   readonly=readonly, debug=True,
                                   unprivileged_user=unprivileged_user,
                                   ignore_status=True).exit_status

    # Confirm the network has been destroyed.
    if net_persistent:
        if virsh.net_state_dict()[network_name]['active']:
            status = 1
    else:
        output_all = virsh.net_list("--all").stdout.strip()
        if re.search(network_name, output_all):
            status = 1
            logging.debug("a transient network should not exist after "
                          "being destroyed")

    # Recover network status to system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_cfg_file, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            make_net_persistent(network_name)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
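# make_net_persistent() and check_libvirtd_restart() are called above but
# defined elsewhere in the original module. The sketches below only
# illustrate the behavior the test relies on; they are assumptions, not the
# original helpers.
import os

from avocado.utils import process
from virttest import data_dir, virsh


def make_net_persistent(net_name):
    """Define a transient network from its own XML so it becomes persistent."""
    xml_path = os.path.join(data_dir.get_tmp_dir(), net_name + '-persist.xml')
    virsh.net_dumpxml(net_name, to_file=xml_path, ignore_status=False)
    virsh.net_define(xml_path, ignore_status=False)


def check_libvirtd_restart(old_pid, cmd):
    """Return True if the libvirtd pid changed, i.e. the daemon restarted."""
    new_pid = process.run(cmd, shell=True).stdout_text.strip().split()[1]
    return old_pid != new_pid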