def destroy_net(net_name):
    """
    Stop ``net_name`` on the conversion server and drop its autostart flag.

    The full network list is logged afterwards so the resulting state is
    visible in the test log.
    """
    # Deactivate the network only when it is currently running.
    if virsh.net_state_dict()[net_name]['active']:
        logging.info("Remove network %s in conversion server", net_name)
        virsh.net_destroy(net_name)
    # Re-query the state: destroying does not touch the autostart flag,
    # which has to be disabled with an explicit net-autostart call.
    if virsh.net_state_dict()[net_name]['autostart']:
        virsh.net_autostart(net_name, "--disable")
    logging.info(virsh.net_list("--all").stdout.strip())
def run_invalid_mac_net(params, libvirtd, vm):
    """
    Create, start and immediately tear down a NAT network whose MAC
    address is all zeros.

    ``params``, ``libvirtd`` and ``vm`` are unused here but are part of
    the common sub-test callback signature.
    """
    zero_mac_net = NetworkXML()
    zero_mac_net.name = 'invalid_mac'
    zero_mac_net.forward = {'mode': 'nat'}
    zero_mac_net.mac = "00:00:00:00:00:00"
    zero_mac_net.ip = IPXML(address='192.168.123.1')
    # Start the transient network, then remove it again.
    virsh.create(zero_mac_net.xml)
    virsh.net_destroy(zero_mac_net.name)
def trigger_net_event(event_amount=1):
    """
    Trigger ``event_amount`` network start/stop events on ``net_name``.

    Events are generated as start/stop pairs; an odd amount ends with a
    final start.  Returns the list of event labels that were triggered.
    """
    events = []
    try:
        # Each iteration produces one "Started"/"Stopped" pair.
        for _ in range(event_amount // 2):
            virsh.net_start(net_name, **virsh_dargs)
            events.append("Started")
            virsh.net_destroy(net_name, **virsh_dargs)
            events.append("Stopped")
        # Odd amount: one trailing start without a matching stop.
        if event_amount % 2:
            virsh.net_start(net_name, **virsh_dargs)
            events.append("Started")
    finally:
        # NOTE(review): returning from ``finally`` suppresses any exception
        # raised above — presumably intentional so callers still get the
        # events triggered so far; confirm before "fixing".
        return events
def network_hook():
    """
    Check that libvirt network hook entries are written for network
    stop/start and for interface plug/unplug events.

    Uses the closure-provided ``params``, ``vm``, ``vm_name``,
    ``hook_script``/``hook_file``/``hook_log`` and helper functions.
    Raises ``error.TestFail`` when an expected hook log entry is missing.
    """
    # Set interface to use default network
    net_name = params.get("net_name", "default")
    edit_iface(net_name)
    prepare_hook_file(hook_script % (net_name, hook_log))

    def _assert_hook(tail):
        # Hook log lines have the form "<hook_file> <net_name> <action> <phase> -";
        # factored out to avoid repeating the string construction six times.
        assert check_hooks(hook_file + " " + net_name + " " + tail)

    try:
        # destroy the network
        ret = virsh.net_destroy(net_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        _assert_hook("stopped end -")
        # start network
        ret = virsh.net_start(net_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        _assert_hook("start begin -")
        _assert_hook("started begin -")
        # plug a interface
        if vm.is_dead():
            vm.start()
        vm.wait_for_login().close()
        mac_addr = "52:54:00:9a:53:a9"
        ret = virsh.attach_interface(vm_name,
                                     ("network %s --mac %s"
                                      % (net_name, mac_addr)))
        libvirt.check_exit_status(ret)
        _assert_hook("plugged begin -")
        ret = virsh.detach_interface(vm_name,
                                     "network --mac %s" % mac_addr)
        libvirt.check_exit_status(ret)
        _assert_hook("unplugged begin -")
        # remove the log file so the next check only sees fresh entries
        if os.path.exists(hook_log):
            os.remove(hook_log)
        # destroy the domain; tearing it down should unplug the interface
        # and fire the hook again
        vm.destroy()
        _assert_hook("unplugged begin -")
    except AssertionError:
        utils.log_last_traceback()
        raise error.TestFail("Failed to check network hooks")
def network_hook():
    """
    Check that libvirt network hook entries are written for network
    stop/start and for interface plug/unplug events.

    Uses the closure-provided ``params``, ``vm``, ``vm_name``,
    ``hook_script``/``hook_file``/``hook_log`` and helper functions.
    Calls ``test.fail`` when an expected hook log entry is missing.
    """
    # Set interface to use default network
    net_name = params.get("net_name", "default")
    edit_iface(net_name)
    prepare_hook_file(hook_script % (net_name, hook_log))

    def _assert_hook(tail):
        # Hook log lines have the form "<hook_file> <net_name> <action> <phase> -";
        # factored out to avoid repeating the string construction six times.
        assert check_hooks(hook_file + " " + net_name + " " + tail)

    try:
        # destroy the network
        ret = virsh.net_destroy(net_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        _assert_hook("stopped end -")
        # start network
        ret = virsh.net_start(net_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        _assert_hook("start begin -")
        _assert_hook("started begin -")
        # plug a interface
        if vm.is_dead():
            vm.start()
        vm.wait_for_login().close()
        mac_addr = "52:54:00:9a:53:a9"
        ret = virsh.attach_interface(vm_name,
                                     ("network %s --mac %s"
                                      % (net_name, mac_addr)))
        libvirt.check_exit_status(ret)
        _assert_hook("plugged begin -")
        ret = virsh.detach_interface(vm_name,
                                     "network --mac %s" % mac_addr)
        libvirt.check_exit_status(ret)
        _assert_hook("unplugged begin -")
        # remove the log file so the next check only sees fresh entries
        if os.path.exists(hook_log):
            os.remove(hook_log)
        # destroy the domain; tearing it down should unplug the interface
        # and fire the hook again
        vm.destroy()
        _assert_hook("unplugged begin -")
    except AssertionError:
        utils_misc.log_last_traceback()
        test.fail("Failed to check network hooks")
def remove(self, name):
    """
    Remove the given network from the host.

    Destroys the network if it is active and undefines it if it is
    persistent.

    :param name: dict describing the target network; must provide the
                 keys 'name', 'active' and 'persistent' (flag values are
                 the strings 'yes'/'no').
    :raise Exception: if virsh net-destroy or net-undefine returns a
                      non-zero exit status.
    """
    net = name
    if net['active'] == 'yes':
        res = virsh.net_destroy(net['name'])
        if res.exit_status:
            raise Exception(str(res))
    if net['persistent'] == 'yes':
        res = virsh.net_undefine(net['name'])
        if res.exit_status:
            raise Exception(str(res))
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1. Make sure the network exists.
    2. Prepare network status (active/inactive, persistent/transient).
    3. Perform virsh net-destroy operation.
    4. Check if the network has been destroyed.
    5. Recover network environment.
    6. Confirm the test result.

    :param test: avocado test object (used for fail/error/cancel).
    :param params: test parameter dict.
    :param env: test environment object holding the VMs.
    """
    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file",
                              "/usr/share/libvirt/networks/default.xml")
    check_libvirtd = "yes" == params.get("check_libvirtd")
    vm_defined = "yes" == params.get("vm_defined")
    check_vm = "yes" == params.get("check_vm")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        # Network does not exist yet: define+start (persistent) or
        # create (transient) it from the default config file.
        if net_persistent:
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            virsh.create(net_cfg_file, ignore_status=False)
    # Backup the current network xml so it can be restored at the end.
    net_xml_bk = os.path.join(data_dir.get_tmp_dir(), "%s.xml" % network_name)
    virsh.net_dumpxml(network_name, to_file=net_xml_bk)
    # Force the persistent/transient state requested by the test matrix.
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("make the network persistent...")
            virsh.net_define(net_xml_bk)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    # Force the active/inactive state requested by the test matrix.
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug(
                "destroy network as we need to test inactive network...")
            virsh.net_destroy(network_name, ignore_status=False)
    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case: resolve the net reference (name or uuid) to pass
    # to net-destroy.
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    if check_libvirtd or check_vm:
        vm_name = params.get("main_vm")
        if virsh.is_alive(vm_name):
            virsh.destroy(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml
        # make sure there is interface with source network as default
        iface_devices = vmxml.get_devices(device_type="interface")
        has_default_net = False
        for iface in iface_devices:
            source = iface.get_source()
            if 'network' in source.keys() and source['network'] == 'default':
                has_default_net = True
                break
            elif 'bridge' in source.keys() and source['bridge'] == 'virbr0':
                has_default_net = True
                break
        if not has_default_net:
            options = "network default --current"
            virsh.attach_interface(vm_name, options, ignore_status=False)
        try:
            if vm_defined:
                ret = virsh.start(vm_name)
            else:
                # Transient VM case: undefine, then create from the
                # dumped XML.
                logging.debug("undefine the vm, then create the vm...")
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                virsh.undefine(vm_name)
                ret = virsh.create(vmxml.xml)
                logging.debug(ret.stdout)
            # check the create or start cmd status
            utils_test.libvirt.check_exit_status(
                ret, expect_error=(network_status != 'active'))
            status = 1
            if status_error != 'yes':
                # Record the daemon pid before net-destroy so a crash
                # (pid change) can be detected afterwards.
                libvirtd = utils_libvirtd.Libvirtd("virtqemud")
                daemon_name = libvirtd.service_name
                pid_before_run = utils_misc.get_pid(daemon_name)
                ret = virsh.net_destroy(net_ref, extra, uri=uri,
                                        debug=True,
                                        unprivileged_user=unprivileged_user,
                                        ignore_status=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                # check_libvirtd pid no change
                pid_after_run = utils_misc.get_pid(daemon_name)
                if pid_after_run != pid_before_run:
                    test.fail("libvirtd crash after destroy network!")
                    # NOTE(review): test.fail presumably raises, which would
                    # make this assignment dead code — confirm.
                    status = 1
                else:
                    logging.debug(
                        "libvirtd do not crash after destroy network!")
                    status = 0
                if check_libvirtd:
                    # destroy vm, check libvirtd pid no change
                    ret = virsh.destroy(vm_name)
                    utils_test.libvirt.check_exit_status(ret,
                                                         expect_error=False)
                    pid_after_run2 = utils_misc.get_pid(daemon_name)
                    if pid_after_run2 != pid_before_run:
                        test.fail("libvirtd crash after destroy vm!")
                        status = 1
                    else:
                        logging.debug(
                            "libvirtd do not crash after destroy vm!")
                        status = 0
                elif check_vm:
                    # restart libvirtd and check vm is running
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    if not virsh.is_alive(vm_name):
                        test.fail(
                            "vm shutdown when transient network destroyed then libvirtd restart"
                        )
                    else:
                        status = 0
        finally:
            # Restore the guest definition that was backed up above.
            if not vm_defined:
                vmxml_backup.define()
            vmxml_backup.sync()
    else:
        readonly = (params.get("net_destroy_readonly", "no") == "yes")
        status = virsh.net_destroy(net_ref, extra, uri=uri,
                                   readonly=readonly, debug=True,
                                   unprivileged_user=unprivileged_user,
                                   ignore_status=True).exit_status

    # Confirm the network has been destroyed.
    if net_persistent:
        # A destroyed persistent network must be inactive but still listed.
        if virsh.net_state_dict()[network_name]['active']:
            status = 1
    else:
        # A destroyed transient network must disappear entirely.
        output_all = virsh.net_list("--all").stdout.strip()
        if re.search(network_name, output_all):
            status = 1
            logging.debug(
                "transient network should not exists after destroy")

    # Recover network status to system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_define(net_xml_bk, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")

    # Clean up the backup network xml file
    if os.path.isfile(net_xml_bk):
        data_dir.clean_tmp_files()
        logging.debug("Cleaning up the network backup xml")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test openvswitch support for network.

    1. Prepare test environment, destroy or suspend a VM.
    2. Edit xml and start the domain.
    3. Perform test operation.
    4. Recover test environment.
    5. Confirm the test result.

    :param test: avocado test object.
    :param params: test parameter dict.
    :param env: test environment object holding the VMs.

    Fixes applied: Python-2-only ``except E, v`` syntax and
    ``dict.has_key()`` calls replaced with their Python 3 equivalents.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def modify_iface_xml():
        """
        Modify the first interface of the guest XML according to the
        iface_* test parameters, then sync the definition.
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        iface_type = params.get("iface_type")
        if iface_type:
            iface.type_name = iface_type
        source = eval(iface_source)
        if source:
            # Replace the existing source element wholesale.
            del iface.source
            iface.source = source
        iface_model = params.get("iface_model", "virtio")
        iface.model = iface_model
        iface_virtualport = params.get("iface_virtualport")
        if iface_virtualport:
            iface.virtualport_type = iface_virtualport
        logging.debug("New interface xml file: %s", iface)
        vmxml.devices = xml_devices
        vmxml.xmltreefile.write()
        vmxml.sync()

    def check_ovs_port(ifname, brname):
        """
        Check OVS port that created by libvirt: bridge membership and,
        when a portgroup is used, the vlan tag/mode reported by ovs-vsctl.
        """
        # NOTE(review): "porgroup_name" is a typo, but it is a runtime
        # params key mirrored in the test configuration — do not rename
        # here without updating the cfg files.
        pg_name = params.get("porgroup_name", "").split()
        pg_vlan = params.get("portgroup_vlan", "").split()
        if_source = eval(iface_source)
        port_vlan = {}
        if "portgroup" in if_source:
            pg = if_source["portgroup"]
            for (name, vlan) in zip(pg_name, pg_vlan):
                if pg == name:
                    port_vlan = eval(vlan)
        # Check bridge name by port name
        _, bridge = utils_net.find_current_bridge(ifname)
        assert bridge == brname
        # Get port info from ovs-vsctl output
        cmd = "ovs-vsctl list port %s" % ifname
        output = utils.run(cmd).stdout
        logging.debug("ovs port output: %s", output)
        for line in output.splitlines():
            if line.count("tag"):
                tag_info = line.rsplit(":")
                if "id" in port_vlan and tag_info[0] == "tag":
                    assert port_vlan["id"] == tag_info[1]
            elif line.count("vlan_mode"):
                mode_info = line.rsplit(":")
                if "nativeMode" in port_vlan and mode_info[0] == "vlan_mode":
                    assert (port_vlan["nativeMode"]
                            == "native-%s" % mode_info[1])

    start_error = "yes" == params.get("start_error", "no")

    # network specific attributes.
    net_name = params.get("net_name", "default")
    net_bridge = params.get("net_bridge", "{'name':'virbr0'}")
    iface_source = params.get("iface_source", "{}")
    create_network = "yes" == params.get("create_network", "no")
    change_iface_option = "yes" == params.get("change_iface_option", "no")
    test_ovs_port = "yes" == params.get("test_ovs_port", "no")

    # Destroy the guest first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    netxml_backup = NetworkXML.new_from_net_dumpxml("default")
    iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    params["guest_mac"] = iface_mac
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bridge_name = eval(net_bridge)["name"]

    # Build the xml and run test.
    try:
        # Edit the network xml or create a new one.
        if create_network:
            # Try to add ovs bridge first
            if not utils_net.ovs_br_exists(bridge_name):
                utils_net.add_ovs_bridge(bridge_name)
            netxml = libvirt.create_net_xml(net_name, params)
            netxml.sync()
        # Edit the interface xml.
        if change_iface_option:
            # Try to add bridge if needed
            source = eval(iface_source)
            if source:
                if "bridge" in source:
                    if not utils_net.ovs_br_exists(source["bridge"]):
                        utils_net.add_ovs_bridge(source["bridge"])
            modify_iface_xml()
        try:
            # Start the VM.
            vm.start()
            if start_error:
                raise error.TestFail("VM started unexpectedly")
            iface_name = libvirt.get_ifname_host(vm_name, iface_mac)
            if test_ovs_port:
                check_ovs_port(iface_name, bridge_name)
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not start_error:
                raise error.TestFail("VM Failed to start for some reason!")
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring network...")
        if net_name == "default":
            netxml_backup.sync()
        else:
            # Destroy and undefine new created network
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name)
        # Try to recover the ovs bridge
        if utils_net.ovs_br_exists(bridge_name):
            utils_net.del_ovs_bridge(bridge_name)
        vmxml_backup.sync()
def run(test, params, env):
    """
    Since 3.3.0, coalesce setting is supported.

    Set interface <coalesce><rx><frames max='N'/></rx></coalesce> for four
    host network types (default, bridge, ovsbridge, macvtap) combined with
    the guest interface types that apply (network/bridge/direct), then
    verify via 'ethtool -c <iface>' that the rx-frames value took effect.
    For the macvtap network type the guest starts but querying coalesce is
    expected to fail with "Operation not supported".

    Steps:
    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare the host network (bridge / ovs bridge / virtual network)
       if necessary.
    3. Edit guest interface with the chosen network and set coalesce
       (cold-plug via XML edit or hot-plug via attach-device).
    4. Start guest and check whether the coalesce setting took effect.
    5. Recover network and guest.

    :param test: avocado test object.
    :param params: test parameter dict.
    :param env: test environment object holding the VMs.
    """
    if not libvirt_version.version_compare(3, 3, 0):
        test.skip(
            "Coalesce setting is only supported by libvirt3.3.0 and above")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def get_first_phy_iface():
        """
        Get first physical network interface from output of
        'ls /sys/class/net' (skipping 'lo' and vir* devices).

        :return: interface name
        """
        interface = ''
        lines = process.run('ls /sys/class/net').stdout_text.splitlines()
        interfaces = lines[0].split()
        for iface in interfaces:
            if iface != 'lo' and 'vir' not in iface:
                interface = iface
                break
        if interface == '':
            test.fail("There is no physical network interface")
        return interface

    def modify_iface_xml():
        """
        Build/modify the guest interface and apply it.

        Two methods to modify the domain interface:
        1. modify guest xml, define it (cold-plug)
        2. attach one interface for running guest (hot-plug)

        :return: 0 for successful negative case,
                 test.fail for failed positive/negative case,
                 None for successful positive case
        """
        if hotplug_iface:
            # Hot-plug path: build a fresh interface element.
            iface = Interface(iface_type)
        else:
            # Cold-plug path: edit the first interface of the guest XML.
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            xml_devices = vmxml.devices
            iface_index = xml_devices.index(
                xml_devices.by_device_tag("interface")[0])
            iface = xml_devices[iface_index]
        if iface_type == 'network':
            iface.type_name = iface_type
            source = {iface_type: net_name}
        elif iface_type == 'bridge' and bridge_name:
            iface.type_name = iface_type
            source = {iface_type: bridge_name}
        elif iface_type == 'direct':
            iface.type_name = iface_type
            source = {'dev': interface, 'mode': 'bridge'}
        if source:
            del iface.source
            iface.source = source
        iface_model = params.get("iface_model", "virtio")
        iface.model = iface_model
        # The value under test: <coalesce><rx><frames max='...'/></rx></coalesce>
        iface.coalesce = {'max': coalesce_value}
        if network_type == "ovsbridge" and iface_type == "bridge":
            iface.virtualport_type = "openvswitch"
        if not hotplug_iface:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            try:
                vmxml.sync()
            except xcepts.LibvirtXMLError as details:
                if status_error:
                    # Expect error for negetive test
                    return 0
                else:
                    test.fail("Define guest: FAIL")
        else:
            if not vm.is_alive():
                vm.start()
                # Wait guest boot completely
                time.sleep(2)
            try:
                ret = virsh.attach_device(vm_name, iface.xml,
                                          ignore_status=False,
                                          debug=True)
            except process.CmdError as error:
                if status_error:
                    # Expect error for negetive test
                    return 0
                else:
                    test.fail("Define guest: FAIL")

    start_error = "yes" == params.get("start_error", "no")
    status_error = "yes" == params.get("status_error", "no")

    # Get coalesce value.
    expect_coalesce = params.get("expect_coalesce", "")
    coalesce_value = params.get("coalesce", "15")
    if expect_coalesce == '':
        expect_coalesce = coalesce_value

    # Network specific attributes.
    network_type = params.get("network_type", "default")
    net_name = params.get("net_name", "default")
    net_bridge = params.get("net_bridge", "{'name':'virbr0'}")
    # Get guest interface type
    iface_type = params.get("iface_type", "network")
    # Whether attach interface
    hotplug_iface = "yes" == params.get("hotplug_iface", "no")
    error_info = params.get("error_info", "")

    # Destroy the guest first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    netxml_backup = NetworkXML.new_from_net_dumpxml("default")
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Build the xml and run test.
    try:
        interface = get_first_phy_iface()

        # Prepare network for specific network type.
        # Create bridge/ovsbridge for host bridge/ovsbridge network type
        if network_type == "default" and iface_type == "bridge":
            bridge_name = "virbr0"
        elif network_type == "bridge":
            bridge_name = eval(net_bridge)['name']
            bridge = utils_net.Bridge()
            # Try to add bridge if not exist
            if bridge_name not in bridge.list_br():
                bridge.add_bridge(bridge_name)
        elif network_type == 'ovsbridge':
            bridge_name = eval(net_bridge)['name']
            # Try to add ovs bridge if not exist
            if not utils_net.ovs_br_exists(bridge_name):
                utils_net.add_ovs_bridge(bridge_name)

        if iface_type == "network":
            # Define virtual network if not exist for 'network' type of
            # guest interface
            network = NetworkXML()
            network.name = net_name
            # Prepare virtual network required parameters
            params['net_forward'] = "{'mode':'bridge'}"
            if network_type == "ovsbridge":
                params['net_virtualport'] = "openvswitch"
            if network_type == "macvtap":
                # For bridge type of macvtap network, one true physical
                # interface shold be added.  Check whether the physical
                # interface has been added into one bridge; if yes, skip
                # the macvtap test.  If the interface is already added to
                # a bridge, the output of the nmcli command will include
                # "connection.slave-type: bridge".
                out = process.run(
                    'nmcli dev show %s' % interface).stdout_text
                con_l = re.findall(r'GENERAL.CONNECTION:(.+?)\n', out)
                if not con_l:
                    test.cancel("no connection for the interface")
                else:
                    con = con_l[0].strip()
                    if "bridge" not in process.run(
                            'nmcli con show "%s"' % con).stdout_text:
                        params['forward_iface'] = interface
                        params['net_forward'] = \
                            "{'mode':'bridge', 'dev': '%s'}" % interface
                    else:
                        test.cancel(
                            "interface %s has been added into one brige, but macvtap"
                            "need also add this interface, so current network can't"
                            "suit macvtap testing" % interface)
            if not network.exists():
                netxml = libvirt.create_net_xml(net_name, params)
                netxml.define()
                netxml.start()
                virsh.net_dumpxml(network.name, debug=True)

        # Edit the interface xml.
        # For successful negative case, return 0 to specify PASS result
        # directly
        ret = modify_iface_xml()
        if ret == 0:
            return 0
        try:
            # Get all interface
            link_before = set(
                process.run('ls /sys/class/net').stdout_text.splitlines())
            # Start the VM.
            vm.start()
            if start_error:
                raise test.fail("VM started unexpectedly")
            # Get guest virtual network interface
            link_after = set(
                process.run('ls /sys/class/net').stdout_text.splitlines())
            newinterface = (link_after - link_before).pop()
            out = process.run('ethtool -c %s' % newinterface,
                              ignore_status=True)
            if network_type == 'macvtap':
                # Currently, output coalesce for macvtap is not supported
                err_msg = "Cannot get device coalesce settings: Operation not supported"
                std_msg = "Coalesce parameters for %s:" % newinterface
                if err_msg not in out.stderr_text or std_msg not in out.stdout_text:
                    test.fail("coalesce setting on %s failed."
                              % network_type)
            else:
                # Get coalesce value and check it is true
                if out.exit_status == 0:
                    for line in out.stdout_text.splitlines():
                        if 'rx-frames:' in line:
                            coalesce = line.split(':')[1].strip()
                            if expect_coalesce != coalesce:
                                test.fail("coalesce setting failed for %s"
                                          % network_type)
                            break
                else:
                    test.fail("coalesce setting on %s failed."
                              % network_type)
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not start_error:
                test.fail('VM failed to start:\n%s' % details)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring network...")
        if net_name == "default":
            netxml_backup.sync()
        else:
            # Destroy and undefine new created network
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name)
        # Try to recovery bridge
        if network_type == "bridge" and bridge_name:
            if bridge_name in bridge.list_br():
                bridge.del_bridge(bridge_name)
        elif network_type == "ovsbridge" and bridge_name:
            if utils_net.ovs_br_exists(bridge_name):
                utils_net.del_ovs_bridge(bridge_name)
        vmxml_backup.sync()
def run(test, params, env): """ Sriov basic test: 1.create max vfs; 2.Check the nodedev info; 3.Start a guest with vf; 4.Reboot a guest with vf; 5.suspend/resume a guest with vf """ def find_pf(): pci_address = "" for pci in pci_dirs: temp_iface_name = os.listdir("%s/net" % pci)[0] operstate = utils_net.get_net_if_operstate(temp_iface_name) if operstate == "up": pf_iface_name = temp_iface_name pci_address = pci break if pci_address == "": return False else: return pci_address def create_address_dict(pci_id): """ Use pci_xxxx_xx_xx_x to create address dict. """ device_domain = pci_id.split(':')[0] device_domain = "0x%s" % device_domain device_bus = pci_id.split(':')[1] device_bus = "0x%s" % device_bus device_slot = pci_id.split(':')[-1].split('.')[0] device_slot = "0x%s" % device_slot device_function = pci_id.split('.')[-1] device_function = "0x%s" % device_function attrs = {'type': 'pci', 'domain': device_domain, 'slot': device_slot, 'bus': device_bus, 'function': device_function} return attrs def addr_to_pci(addr): """ Convert address dict to pci address: xxxxx:xx.x. """ pci_domain = re.findall(r"0x(.+)", addr['domain'])[0] pci_bus = re.findall(r"0x(.+)", addr['bus'])[0] pci_slot = re.findall(r"0x(.+)", addr['slot'])[0] pci_function = re.findall(r"0x(.+)", addr['function'])[0] pci_addr = pci_domain + ":" + pci_bus + ":" + pci_slot + "." + pci_function return pci_addr def create_hostdev_interface(pci_id, managed, model): """ Create hostdev type interface xml. """ attrs = create_address_dict(pci_id) new_iface = Interface('hostdev') new_iface.managed = managed if model != "": new_iface.model = model new_iface.mac_address = utils_net.generate_mac_address_simple() new_iface.hostdev_address = new_iface.new_iface_address(**{"attrs": attrs}) chars = string.ascii_letters + string.digits + '-_' alias_name = 'ua-' + ''.join(random.choice(chars) for _ in list(range(64))) new_iface.alias = {'name': alias_name} return new_iface def create_vfs(vf_num): """ Create max vfs. 
""" net_device = [] net_name = [] test_res = process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True) pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines() net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines() pci_list_before = set(pci_list) net_list_before = set(net_list) test_res = process.run("echo %d > %s/sriov_numvfs" % (vf_num, pci_address), shell=True) if test_res.exit_status != 0: test.fail("Fail to create vfs") pci_list_sriov = virsh.nodedev_list(cap='pci').stdout.strip().splitlines() def _vf_init_completed(): try: net_list_sriov = virsh.nodedev_list(cap='net').stdout.strip().splitlines() net_list_sriov = set(net_list_sriov) net_diff = list(net_list_sriov.difference(net_list_before)) if len(net_diff) != int(vf_num): net_diff = [] return False return net_diff except process.CmdError: raise test.fail("Get net list with 'virsh list' failed\n") pci_list_sriov = set(pci_list_sriov) pci_diff = list(pci_list_sriov.difference(pci_list_before)) net_diff = utils_misc.wait_for(_vf_init_completed, timeout=60) if not net_diff: test.fail("Get net list with 'virsh list' failed\n") for net in net_diff: net = net.split('_') length = len(net) net = '_'.join(net[1:length-6]) net_name.append(net) for pci_addr in pci_diff: temp_addr = pci_addr.split("_") pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4] vf_net_name = os.listdir("%s/%s/net" % (pci_device_dir, pci_addr))[0] net_device.append(vf_net_name) logging.debug(sorted(net_name)) logging.debug(sorted(net_device)) if sorted(net_name) != sorted(net_device): test.fail("The net name get from nodedev-list is wrong\n") def get_ip_by_mac(mac_addr, timeout=120): """ Get interface IP address by given MAC address. 
""" if vm.serial_console is not None: vm.cleanup_serial_console() vm.create_serial_console() session = vm.wait_for_serial_login(timeout=240) def get_ip(): return utils_net.get_guest_ip_addr(session, mac_addr) try: ip_addr = "" iface_name = utils_net.get_linux_ifname(session, mac_addr) if iface_name is None: test.fail("no interface with MAC address %s found" % mac_addr) session.cmd("pkill -9 dhclient", ignore_all_errors=True) session.cmd("dhclient %s " % iface_name, ignore_all_errors=True) ip_addr = utils_misc.wait_for(get_ip, 20) logging.debug("The ip addr is %s", ip_addr) except Exception: logging.warning("Find %s with MAC address %s but no ip for it" % (iface_name, mac_addr)) finally: session.close() return ip_addr def create_nodedev_pci(pci_address): """ Convert xxxx:xx.x to pci_xxxx_xx_xx_x. """ nodedev_addr = pci_address.split(':')[0:2] slot_function = pci_address.split(':')[2] nodedev_addr.append(slot_function.split('.')[0]) nodedev_addr.append(slot_function.split('.')[1]) nodedev_addr.insert(0, "pci") nodedev_addr = "_".join(nodedev_addr) return nodedev_addr def create_network_interface(name): """ Create network type interface xml. """ new_iface = Interface('network') new_iface.source = {'network': name} new_iface.model = "virtio" new_iface.mac_address = utils_net.generate_mac_address_simple() return new_iface def create_hostdev_network(): """ Create hostdev type with vf pool network xml. """ vf_addr_list = [] netxml = network_xml.NetworkXML() if vf_pool_source == "vf_list": for vf in vf_list: attrs = create_address_dict(vf) new_vf = netxml.new_vf_address(**{'attrs': attrs}) vf_addr_list.append(new_vf) netxml.driver = {'name': 'vfio'} netxml.forward = {"mode": "hostdev", "managed": managed} netxml.vf_list = vf_addr_list else: netxml.pf = {"dev": pf_name} netxml.forward = {"mode": "hostdev", "managed": managed} netxml.name = net_name logging.debug(netxml) return netxml def create_macvtap_network(): """ Create macvtap type network xml. 
""" forward_interface_list = [] for vf_name in vf_name_list: forward_interface = {'dev': vf_name} forward_interface_list.append(forward_interface) netxml = network_xml.NetworkXML() netxml.name = net_name netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'} netxml.forward_interface = forward_interface_list logging.debug(netxml) return netxml def do_operation(): """ Do operation in guest os with vf and check the os behavior after operation. """ if operation == "resume_suspend": try: virsh.suspend(vm.name, debug=True, ignore_status=False) virsh.resume(vm.name, debug=True, ignore_statue=False) get_ip_by_mac(mac_addr, timeout=120) except process.CmdError as detail: err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name, detail) test.fail(err_msg) if operation == "reboot": try: if vm.serial_console is not None: vm.cleanup_serial_console() vm.create_serial_console() virsh.reboot(vm.name, ignore_status=False) get_ip_by_mac(mac_addr, timeout=120) except process.CmdError as detail: err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail) test.fail(err_msg) if operation == "save": result = virsh.managedsave(vm_name, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expect_error=True) def check_info(): """ Check the pf or vf info after create vfs. 
""" if info_type == "pf_info" or info_type == "vf_order": nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1]) xml = NodedevXML.new_from_dumpxml(nodedev_pci) if info_type == "pf_info": product_info = xml.cap.product_info max_count = xml.max_count if pci_info.find(product_info) == -1: test.fail("The product_info show in nodedev-dumpxml is wrong\n") if int(max_count) != max_vfs: test.fail("The maxCount show in nodedev-dumpxml is wrong\n") if info_type == "vf_order": vf_addr_list = xml.cap.virt_functions if len(vf_addr_list) != max_vfs: test.fail("The num of vf list show in nodedev-dumpxml is wrong\n") addr_list = [] for vf_addr in vf_addr_list: addr = vf_addr.domain+":"+vf_addr.bus+":"+vf_addr.slot+"."+vf_addr.function addr_list.append(addr) logging.debug("The vf addr list show in nodedev-dumpxml is %s\n", addr_list) if sorted(addr_list) != addr_list: test.fail("The vf addr list show in nodedev-dumpxml is not sorted correctly\n") elif info_type == "vf_info": vf_addr = vf_list[0] nodedev_pci = create_nodedev_pci(vf_addr) vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci) vf_bus_slot = ':'.join(vf_addr.split(':')[1:]) res = process.run("lspci -s %s -vv" % vf_bus_slot) vf_pci_info = res.stdout_text vf_product_info = vf_xml.cap.product_info if vf_pci_info.find(vf_product_info) == -1: test.fail("The product_info show in nodedev-dumpxml is wrong\n") pf_addr = vf_xml.cap.virt_functions[0] pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0] pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0] pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0] pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0] pf_pci = pf_addr_domain+":"+pf_addr_bus+":"+pf_addr_slot+"."+pf_addr_function if pf_pci != pci_id: test.fail("The pf address show in vf nodedev-dumpxml is wrong\n") def create_interface(): """ Call different function to create interface according to the type """ new_iface = Interface('network') if vf_type == "vf": new_iface = create_hostdev_interface(vf_addr, 
managed, model) if vf_type == "vf_pool": netxml = create_hostdev_network() virsh.net_define(netxml.xml, ignore_status=True) if not inactive_pool: virsh.net_start(netxml.name) new_iface = create_network_interface(netxml.name) if vf_type == "macvtap": new_iface = Interface('direct') new_iface.source = {"dev": vf_name, "mode": "passthrough"} new_iface.mac_address = utils_net.generate_mac_address_simple() if vf_type == "macvtap_network": netxml = create_macvtap_network() result = virsh.net_define(netxml.xml, ignore_status=True) virsh.net_start(netxml.name) new_iface = create_network_interface(netxml.name) return new_iface def detach_interface(): """ Detach interface: 1.Detach interface from xml; 2.Check the live xml after detach interface; 3.Check the vf driver after detach interface. """ def _detach_completed(): result = virsh.domiflist(vm_name, "", ignore_status=True) return result.stdout.find(mac_addr) == -1 result = virsh.detach_device(vm_name, new_iface.xml) utils_test.libvirt.check_exit_status(result, expect_error=False) utils_misc.wait_for(_detach_completed, timeout=60) live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) device = live_xml.devices logging.debug("Domain xml after detach interface:\n %s", live_xml) if vf_type == "vf" or vf_type == "vf_pool": for interface in device.by_device_tag("interface"): if interface.type_name == "hostdev": if interface.hostdev_address.attrs == vf_addr_attrs: test.fail("The hostdev interface still in the guest xml after detach\n") break driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1] logging.debug("The driver after vf detached from guest is %s\n", driver) if managed == "no": if driver != "vfio-pci": test.fail("The vf pci driver is not vfio-pci after detached from guest with managed as no\n") result = virsh.nodedev_reattach(nodedev_pci_addr) utils_test.libvirt.check_exit_status(result, expect_error=False) elif driver != origin_driver: test.fail("The vf pci driver is not reset to the origin 
driver after detach from guest: %s vs %s\n" % (driver, origin_driver)) else: for interface in device.by_device_tag("interface"): if interface.type_name == "direct": if interface.source["dev"] == vf_name: test.fail("The macvtap interface still exist in the guest xml after detach\n") break def attach_interface(): """ Attach interface: 1.Attach interface from xml; 2.Check the vf driver after attach interface; 3.Check the live xml after attach interface; """ if managed == "no": result = virsh.nodedev_detach(nodedev_pci_addr) utils_test.libvirt.check_exit_status(result, expect_error=False) logging.debug("attach interface xml:\n %s", new_iface) result = virsh.attach_device(vm_name, file_opt=new_iface.xml, flagstr=option, debug=True) utils_test.libvirt.check_exit_status(result, expect_error=False) if option == "--config": result = virsh.start(vm_name) utils_test.libvirt.check_exit_status(result, expect_error=False) # For option == "--persistent", after VM destroyed and then start, the device should still be there. 
if option == "--persistent": virsh.destroy(vm_name) result = virsh.start(vm_name, debug=True) utils_test.libvirt.check_exit_status(result, expect_error=False) live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug(live_xml) get_ip_by_mac(mac_addr, timeout=60) device = live_xml.devices if vf_type == "vf" or vf_type == "vf_pool": for interface in device.by_device_tag("interface"): if interface.type_name == "hostdev": if interface.driver.driver_attr['name'] != 'vfio': test.fail("The driver of the hostdev interface is not vfio\n") break vf_addr_attrs = interface.hostdev_address.attrs pci_addr = addr_to_pci(vf_addr_attrs) nic_driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1] if nic_driver != "vfio-pci": test.fail("The driver of the hostdev interface is not vfio\n") elif vf_type == "macvtap" or vf_type == "macvtap_network": for interface in device.by_device_tag("interface"): if interface.type_name == "direct": if vf_type == "macvtap": if interface.source["dev"] == new_iface.source["dev"]: match = "yes" vf_name = interface.source["dev"] elif interface.source['dev'] in vf_name_list: match = "yes" vf_name = interface.source["dev"] if match != "yes": test.fail("The dev name or mode of macvtap interface is wrong after attach\n") return interface def setup_controller(nic_num, controller_index, ctl_models): """ Create controllers bond to numa node in the guest xml :param nic_num: number of nic card bond to numa node :param controller_index: index num used to create controllers :param ctl_models: contoller topo for numa bond """ index = controller_index if nic_num == 2: ctl_models.append('pcie-switch-upstream-port') ctl_models.append('pcie-switch-downstream-port') ctl_models.append('pcie-switch-downstream-port') for i in range(index): controller = Controller("controller") controller.type = "pci" controller.index = i if i == 0: controller.model = 'pcie-root' else: controller.model = 'pcie-root-port' vmxml.add_device(controller) 
set_address = False for model in ctl_models: controller = Controller("controller") controller.type = "pci" controller.index = index controller.model = model if set_address or model == "pcie-switch-upstream-port": attrs = {'type': 'pci', 'domain': '0', 'slot': '0', 'bus': index - 1, 'function': '0'} controller.address = controller.new_controller_address(**{"attrs": attrs}) logging.debug(controller) if controller.model == "pcie-expander-bus": controller.node = "0" controller.target = {'busNr': '100'} set_address = True else: set_address = False logging.debug(controller) vmxml.add_device(controller) index += 1 return index - 1 def add_numa(vmxml): """ Add numa node in the guest xml :param vmxml: The instance of VMXML clas """ vcpu = vmxml.vcpu max_mem = vmxml.max_mem max_mem_unit = vmxml.max_mem_unit numa_dict = {} numa_dict_list = [] # Compute the memory size for each numa node if vcpu == 1: numa_dict['id'] = '0' numa_dict['cpus'] = '0' numa_dict['memory'] = str(max_mem) numa_dict['unit'] = str(max_mem_unit) numa_dict_list.append(numa_dict) else: for index in range(2): numa_dict['id'] = str(index) numa_dict['memory'] = str(max_mem // 2) numa_dict['unit'] = str(max_mem_unit) if vcpu == 2: numa_dict['cpus'] = str(index) else: if index == 0: if vcpu == 3: numa_dict['cpus'] = str(index) if vcpu > 3: numa_dict['cpus'] = "%s-%s" % (index, vcpu // 2 - 1) else: numa_dict['cpus'] = "%s-%s" % (vcpu // 2, str(vcpu - 1)) numa_dict_list.append(numa_dict) numa_dict = {} # Add cpu device with numa node setting in domain xml vmxml_cpu = vm_xml.VMCPUXML() vmxml_cpu.xml = "<cpu><numa/></cpu>" vmxml_cpu.numa_cell = numa_dict_list vmxml.cpu = vmxml_cpu def create_iface_list(bus_id, nic_num, vf_list): """ Create hostdev interface list bond to numa node :param bus_id: bus_id in pci address which decides the controller attached to :param nic_num: number of nic card bond to numa node :param vf_list: sriov vf list """ iface_list = [] for num in range(nic_num): vf_addr = vf_list[num] iface = 
create_hostdev_interface(vf_addr, managed, model) bus_id -= num attrs = {'type': 'pci', 'domain': '0', 'slot': '0', 'bus': bus_id, 'function': '0'} iface.address = iface.new_iface_address(**{"attrs": attrs}) iface_list.append(iface) return iface_list def check_guestos(iface_list): """ Check whether vf bond to numa node can get ip successfully in guest os :param iface_list: hostdev interface list """ for iface in iface_list: mac_addr = iface.mac_address get_ip_by_mac(mac_addr, timeout=60) def check_numa(vf_driver): """ Check whether vf bond to correct numa node in guest os :param vf_driver: vf driver """ if vm.serial_console: vm.cleanup_serial_console() vm.create_serial_console() session = vm.wait_for_serial_login(timeout=240) vf_pci = "/sys/bus/pci/drivers/%s" % vf_driver vf_dir = session.cmd_output("ls -d %s/00*" % vf_pci).strip().split('\n') for vf in vf_dir: numa_node = session.cmd_output('cat %s/numa_node' % vf).strip().split('\n')[-1] logging.debug("The vf is attached to numa node %s\n", numa_node) if numa_node != "0": test.fail("The vf is not attached to numa node 0\n") session.close() def remove_devices(vmxml, device_type): """ Remove all addresses for all devices who has one. 
:param vm_xml: The VM XML to be modified :param device_type: The device type for removing :return: True if success, otherwise, False """ if device_type not in ['address', 'usb']: return type_dict = {'address': '/devices/*/address', 'usb': '/devices/*'} try: for elem in vmxml.xmltreefile.findall(type_dict[device_type]): if device_type == 'usb': if elem.get('bus') == 'usb': vmxml.xmltreefile.remove(elem) else: vmxml.xmltreefile.remove(elem) except (AttributeError, TypeError) as details: test.error("Fail to remove '%s': %s" % (device_type, details)) vmxml.xmltreefile.write() vm_name = params.get("main_vm", "avocado-vt-vm1") vm = env.get_vm(params["main_vm"]) machine_type = params.get("machine_type", "pc") operation = params.get("operation") driver = params.get("driver", "ixgbe") status_error = params.get("status_error", "no") == "yes" model = params.get("model", "") managed = params.get("managed", "yes") attach = params.get("attach", "") option = params.get("option", "") vf_type = params.get("vf_type", "") info_check = params.get("info_check", "no") info_type = params.get("info_type", "") vf_pool_source = params.get("vf_pool_source", "vf_list") loop_times = int(params.get("loop_times", "1")) start_vm = "yes" == params.get("start_vm", "yes") including_pf = "yes" == params.get("including_pf", "no") max_vfs_attached = "yes" == params.get("max_vfs_attached", "no") inactive_pool = "yes" == params.get("inactive_pool", "no") duplicate_vf = "yes" == params.get("duplicate_vf", "no") expected_error = params.get("error_msg", "") nic_num = int(params.get("nic_num", "1")) nfv = params.get("nfv", "no") == "yes" ctl_models = params.get("ctl_models", "").split(' ') controller_index = int(params.get("controller_index", "12")) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) backup_xml = vmxml.copy() vmxml.remove_all_device_by_type('interface') vmxml.sync() if max_vfs_attached: controller_devices = vmxml.get_devices("controller") pci_bridge_controllers = [] for device in 
controller_devices: logging.debug(device) if device.type == 'pci' and device.model == "pci-bridge": pci_bridge_controllers.append(device) if not pci_bridge_controllers: pci_bridge_controller = Controller("controller") pci_bridge_controller.type = "pci" pci_bridge_controller.index = "1" pci_bridge_controller.model = "pci-bridge" vmxml.add_device(pci_bridge_controller) vmxml.sync() if start_vm: if not vm.is_dead(): vm.destroy() vm.start() if vm.serial_console is not None: vm.cleanup_serial_console() vm.create_serial_console() session = vm.wait_for_serial_login(timeout=240) session.close() else: if not vm.is_dead(): vm.destroy() driver_dir = "/sys/bus/pci/drivers/%s" % driver pci_dirs = glob.glob("%s/0000*" % driver_dir) pci_device_dir = "/sys/bus/pci/devices" pci_address = "" net_name = "test-net" # Prepare interface xml try: pf_iface_name = "" pci_address = utils_misc.wait_for(find_pf, timeout=60) if not pci_address: test.cancel("no up pf found in the test machine") pci_id = pci_address.split("/")[-1] pf_name = os.listdir('%s/net' % pci_address)[0] bus_slot = ':'.join(pci_address.split(':')[1:]) pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text logging.debug("The pci info of the sriov card is:\n %s", pci_info) max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1 if info_check == 'yes' or max_vfs < 32: vf_num = max_vfs create_vfs(vf_num) else: vf_num = max_vfs // 2 + 1 create_vfs(vf_num) vf_list = [] vf_name_list = [] for i in range(vf_num): vf = os.readlink("%s/virtfn%s" % (pci_address, str(i))) vf = os.path.split(vf)[1] vf_list.append(vf) vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0] vf_name_list.append(vf_name) if attach == "yes" and not nfv: vf_addr = vf_list[0] new_iface = create_interface() if inactive_pool: result = virsh.attach_device(vm_name, file_opt=new_iface.xml, flagstr=option, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) else: mac_addr = new_iface.mac_address 
nodedev_pci_addr = create_nodedev_pci(vf_addr) origin_driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1] logging.debug("The driver of vf before attaching to guest is %s\n", origin_driver) count = 0 while count < loop_times: interface = attach_interface() if vf_type in ["vf", "vf_pool"]: vf_addr_attrs = interface.hostdev_address.attrs if operation != "": do_operation() detach_interface() count += 1 if max_vfs_attached: interface_list = [] for vf_addr in vf_list: new_iface = create_interface() mac_addr = new_iface.mac_address nodedev_pci_addr = create_nodedev_pci(vf_addr) attach_interface() interface_list.append(new_iface) count = 0 for new_iface in interface_list: vf_addr = vf_list[count] vf_addr_attrs = new_iface.hostdev_address.attrs detach_interface() count += 1 if info_check == "yes": check_info() if including_pf: vf_list = [] pf_addr = pci_id vf_list.append(pf_addr) netxml = create_hostdev_network() result = virsh.net_define(netxml.xml, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) if duplicate_vf: vf_list.append(vf_list[0]) netxml = create_hostdev_network() result = virsh.net_define(netxml.xml, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) result = virsh.net_create(netxml.xml, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expected_error) if nfv: for os_machine_type in (machine_type, vmxml.os.machine): 'q35' in os_machine_type or test.cancel("nfv only run with q35 machine type") vf_driver = os.readlink(os.path.join(pci_device_dir, vf_list[0], "driver")).split('/')[-1] vmxml.remove_all_device_by_type('controller') remove_devices(vmxml, 'address') remove_devices(vmxml, 'usb') add_numa(vmxml) bus_id = setup_controller(nic_num, controller_index, ctl_models) vmxml.sync() logging.debug(vmxml) iface_list = create_iface_list(bus_id, nic_num, vf_list) for iface in iface_list: process.run("cat %s" % iface.xml, 
shell=True).stdout_text result = virsh.attach_device(vm_name, file_opt=iface.xml, flagstr=option, ignore_status=True, debug=True) utils_test.libvirt.check_exit_status(result, expect_error=False) result = virsh.start(vm_name, debug=True) utils_test.libvirt.check_exit_status(result, expect_error=False) live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name) logging.debug(live_xml) check_guestos(iface_list) check_numa(vf_driver) finally: if vm.is_alive(): vm.destroy(gracefully=False) process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True) if vf_type == "vf_pool" or vf_type == "macvtap_network": virsh.net_destroy(net_name) virsh.net_undefine(net_name, ignore_status=True) backup_xml.sync()
def run(test, params, env):
    """
    Test command: virsh net-list.

    The command returns list of networks.
    1.Get all parameters from configuration.
    2.Get current network's status(State, Autostart).
    3.Do some prepare works for testing.
    4.Perform virsh net-list operation.
    5.Recover network status.
    6.Confirm the result.
    """
    option = params.get("net_list_option", "")
    extra = params.get("net_list_extra", "")
    status_error = params.get("status_error", "no")
    net_name = params.get("net_list_name", "default")
    persistent = params.get("net_list_persistent", "yes")
    net_status = params.get("net_list_error", "active")
    tmp_xml = os.path.join(test.tmpdir, "tmp.xml")
    # Record the network's current state so it can be restored afterwards.
    net_current_status = "active"
    autostart_status = "yes"
    if not virsh.net_state_dict()[net_name]['active']:
        net_current_status = "inactive"
    if not virsh.net_state_dict()[net_name]['autostart']:
        autostart_status = "no"

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Create a transient network: dump the persistent definition to a file,
    # undefine it, then re-create it from the file.
    try:
        if persistent == "no":
            virsh.net_dumpxml(net_name, to_file=tmp_xml, ignore_status=False)
            if net_current_status == "inactive":
                virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_undefine(net_name, ignore_status=False)
            virsh.net_create(tmp_xml, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Transient network test failed!")

    # Prepare network's status for testing.
    if net_status == "active":
        try:
            if not virsh.net_state_dict()[net_name]['active']:
                virsh.net_start(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Active network test failed!")
    else:
        try:
            if virsh.net_state_dict()[net_name]['active']:
                virsh.net_destroy(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Inactive network test failed!")

    # Run net-list, optionally as an unprivileged user over the given URI.
    virsh_dargs = {'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri
    result = virsh.net_list(option, extra, **virsh_dargs)
    status = result.exit_status
    output = result.stdout.strip()

    # Recover network
    try:
        if persistent == "no":
            # Re-define the network from the dumped XML and restore state.
            virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_define(tmp_xml, ignore_status=False)
            if net_current_status == "active":
                virsh.net_start(net_name, ignore_status=False)
            if autostart_status == "yes":
                virsh.net_autostart(net_name, ignore_status=False)
        else:
            if net_current_status == "active" and net_status == "inactive":
                virsh.net_start(net_name, ignore_status=False)
            elif net_current_status == "inactive" and net_status == "active":
                virsh.net_destroy(net_name, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Recover network failed!")

    # check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        if option == "--inactive":
            if net_status == "active":
                if re.search(net_name, output):
                    raise error.TestFail("Found an active network with"
                                         " --inactive option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Found no inactive networks with"
                                             " --inactive option")
                else:
                    # If network is transient, after net-destroy it,
                    # it will disapear.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive networks"
                                             " with --inactive option")
        elif option == "":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with no"
                                         " option")
            else:
                if re.search(net_name, output):
                    raise error.TestFail("Found inactive network with"
                                         " no option")
        elif option == "--all":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with"
                                         " --all option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Can't find inactive network with"
                                             " --all option")
                else:
                    # If network is transient, after net-destroy it,
                    # it will disapear.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive network"
                                             " with --all option")
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroied.
    5.Recover network environment.
    6.Confirm the test result.
    """
    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Confirm the network exists.
    output_all = virsh.net_list("--all").stdout.strip()
    if not re.search(network_name, output_all):
        raise error.TestNAError("Make sure the network exists!!")

    # Run test case: resolve the reference the command will use.
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    # Get status of network and prepare network status.
    network_current_status = "active"
    try:
        if not virsh.net_state_dict()[network_name]['active']:
            network_current_status = "inactive"
            if network_status == "active":
                virsh.net_start(network_name)
        else:
            if network_status == "inactive":
                virsh.net_destroy(network_name)
    except process.CmdError:
        raise error.TestError("Prepare network status failed!")

    status = virsh.net_destroy(net_ref, extra, uri=uri, debug=True,
                               unprivileged_user=unprivileged_user,
                               ignore_status=True).exit_status

    # Confirm the network has been destroied: still-active means failure,
    # regardless of the command's exit code.
    if virsh.net_state_dict()[network_name]['active']:
        status = 1

    # Recover network status
    try:
        if (network_current_status == "active" and
                not virsh.net_state_dict()[network_name]['active']):
            virsh.net_start(network_name)
        if (network_current_status == "inactive" and
                virsh.net_state_dict()[network_name]['active']):
            virsh.net_destroy(network_name)
    except process.CmdError:
        raise error.TestError("Recover network status failed!")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
    else:
        raise error.TestError("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test command: virsh net-event

    1. Prepare a new network.
    2. Running virsh net-event with different options, and start/stop
       the network if needed, then check the output of net-event.
    3. Clean the environment.
    """
    prepare_net = "yes" == params.get("prepare_net", "yes")
    net_addr = params.get("net_addr")
    net_name = params.get("net_name")
    net_event_list = "yes" == params.get("net_event_list", "no")
    net_event_loop = "yes" == params.get("net_event_loop", "no")
    net_event_timestamp = "yes" == params.get("net_event_timestamp", "no")
    net_event_name = params.get("net_event_name")
    net_event_timeout = params.get("net_event_timeout")
    net_event_amount = int(params.get("net_event_amount", 1))
    status_error = "yes" == params.get("status_error", "no")
    net_event_option = params.get("net_event_option", "")
    virsh_dargs = {'debug': True, 'ignore_status': True}
    net_event_interrupt = False
    libv_net = None
    expected_event_list = []
    # Interactive virsh session used to run net-event in the background.
    virsh_session = aexpect.ShellSession(virsh.VIRSH_EXEC, auto_close=True)

    def trigger_net_event(event_amount=1):
        """
        Trigger network start/stop actions in event_number times.

        :param event_amount: total number of lifecycle events to generate
        :return: list of event names ("Started"/"Stopped") actually triggered
        """
        i = event_amount // 2
        event_list = []
        # NOTE(review): the bare "finally: return" swallows any exception
        # raised while triggering events; presumably deliberate best-effort
        # so partially-generated events are still returned -- confirm.
        try:
            while i > 0:
                virsh.net_start(net_name, **virsh_dargs)
                event_list.append("Started")
                virsh.net_destroy(net_name, **virsh_dargs)
                event_list.append("Stopped")
                i -= 1
            if event_amount % 2:
                # Odd amount: one extra start without a matching stop.
                virsh.net_start(net_name, **virsh_dargs)
                event_list.append("Started")
        finally:
            return event_list

    def check_output(output, expected_event_list):
        """
        Check received net-event in output.

        :param output: The virsh shell output, e.g.:
            Welcome to virsh, the virtualization interactive terminal.
            Type:  'help' for help with commands
                   'quit' to quit
            virsh # event 'lifecycle' for network virttest_net: Started
            events received: 1
            virsh #
        :param expected_event_list: A list of expected events
            ['Started', 'Stopped', ..]
        """
        event_match_str = "event 'lifecycle' for network %s: %s"
        # Drop the virsh banner; keep the trailing summary lines only when
        # the session was interrupted (no "events received" footer then).
        if net_event_interrupt:
            output = output.strip().splitlines()[5:]
        else:
            output = output.strip().splitlines()[5:-2]
        output = [o.replace("virsh #", "").strip() for o in output]
        # Both order and content should match
        index = 0
        for event_str in expected_event_list:
            match_str = event_match_str % (net_name, event_str)
            logging.debug("Expected output: %s", match_str)
            logging.debug("Actual output: %s", output[index])
            if not output[index].count(match_str):
                test.fail("Event received not match")
            if net_event_timestamp:
                if not re.match(r"(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})",
                                output[index]):
                    test.fail("Can not get timestamp in output")
            index += 1

    try:
        if prepare_net:
            libv_net = utlv.LibvirtNetwork("vnet", address=net_addr,
                                           net_name=net_name, persistent=True)
            # Destroy the network
            if virsh.net_state_dict()[net_name]['active']:
                virsh.net_destroy(net_name)
            logging.info("Defined network %s", net_name)
        if net_event_list:
            net_event_option += " --list"
        if net_event_loop:
            net_event_option += " --loop"
        if net_event_timestamp:
            net_event_option += " --timestamp"
        if not status_error and not net_event_list:
            # Assemble the net-event command
            net_event_cmd = "net-event %s" % net_event_option
            if net_name:
                net_event_cmd += " --network %s" % net_name
            if net_event_name:
                net_event_cmd += " --event %s" % net_event_name
            if net_event_timeout:
                net_event_cmd += " --timeout %s" % net_event_timeout
                if not status_error:
                    net_event_timeout = int(net_event_timeout)
            # Run the command in a new virsh session, then waiting for
            # 'lifecycle' events
            logging.info("Sending '%s' to virsh shell", net_event_cmd)
            virsh_session.sendline(net_event_cmd)
        else:
            # Error cases and --list run synchronously via the API wrapper.
            result = virsh.net_event(network=net_name, event=net_event_name,
                                     event_timeout=net_event_timeout,
                                     options=net_event_option, **virsh_dargs)
            utlv.check_exit_status(result, status_error)
        if not status_error:
            # Verify 'lifecycle' events
            if not net_event_list and net_event_name == 'lifecycle':
                expected_event_list = trigger_net_event(net_event_amount)
                if net_event_timeout:
                    # Make sure net-event will timeout on time
                    time.sleep(net_event_timeout)
                elif net_event_loop:
                    # Interrupt a --loop session so it returns output.
                    virsh_session.send_ctrl("^C")
                    net_event_interrupt = True
                ret_output = virsh_session.get_stripped_output()
                check_output(ret_output, expected_event_list)
    finally:
        virsh_session.close()
        if libv_net:
            libv_net.cleanup()
if test_ipv4_address: check_ipt_rules(check_ipv4=True) run_ip_test(session, "ipv4") if test_guest_libvirt: run_guest_libvirt(session) session.close() except virt_vm.VMStartError as details: logging.info(str(details)) if not (start_error or restart_error): test.fail('VM failed to start:\n%s' % details) finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) for vms in vms_list: virsh.remove_domain(vms.name, "--remove-all-storage") logging.info("Restoring network...") if net_name == "default": netxml_backup.sync() else: # Destroy and undefine new created network virsh.net_destroy(net_name) virsh.net_undefine(net_name) vmxml_backup.sync() if test_ipv6_address and original_accept_ra != '2': process.system(sysctl_cmd + "=%s" % original_accept_ra)
def run_virsh_net_list(test, params, env):
    """
    Test command: virsh net-list.

    The command returns list of networks.
    1.Get all parameters from configuration.
    2.Get current network's status(State, Autostart).
    3.Do some prepare works for testing.
    4.Perform virsh net-list operation.
    5.Recover network status.
    6.Confirm the result.
    """
    option = params.get("net_list_option", "")
    extra = params.get("net_list_extra", "")
    status_error = params.get("status_error", "no")
    net_name = params.get("net_list_name", "default")
    persistent = params.get("net_list_persistent", "yes")
    net_status = params.get("net_list_error", "active")
    tmp_xml = os.path.join(test.tmpdir, "tmp.xml")
    # Record the network's current state so it can be restored afterwards.
    net_current_status = "active"
    autostart_status = "yes"
    if not virsh.net_state_dict()[net_name]['active']:
        net_current_status = "inactive"
    if not virsh.net_state_dict()[net_name]['autostart']:
        autostart_status = "no"

    # Create a transient network: dump the persistent definition to a file,
    # undefine it, then re-create it from the file.
    try:
        if persistent == "no":
            virsh.net_dumpxml(net_name, to_file=tmp_xml, ignore_status=False)
            if net_current_status == "inactive":
                virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_undefine(net_name, ignore_status=False)
            virsh.net_create(tmp_xml, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Transient network test failed!")

    # Prepare network's status for testing.
    if net_status == "active":
        try:
            if not virsh.net_state_dict()[net_name]['active']:
                virsh.net_start(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Active network test failed!")
    else:
        try:
            if virsh.net_state_dict()[net_name]['active']:
                virsh.net_destroy(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Inactive network test failed!")

    result = virsh.net_list(option, extra, ignore_status=True)
    status = result.exit_status
    output = result.stdout.strip()

    # Recover network
    try:
        if persistent == "no":
            # Re-define the network from the dumped XML and restore state.
            virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_define(tmp_xml, ignore_status=False)
            if net_current_status == "active":
                virsh.net_start(net_name, ignore_status=False)
            if autostart_status == "yes":
                virsh.net_autostart(net_name, ignore_status=False)
        else:
            if net_current_status == "active" and net_status == "inactive":
                virsh.net_start(net_name, ignore_status=False)
            elif net_current_status == "inactive" and net_status == "active":
                virsh.net_destroy(net_name, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Recover network failed!")

    # check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        if option == "--inactive":
            if net_status == "active":
                if re.search(net_name, output):
                    raise error.TestFail("Found an active network with"
                                         " --inactive option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Found no inactive networks with"
                                             " --inactive option")
                else:
                    # If network is transient, after net-destroy it,
                    # it will disapear.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive networks"
                                             " with --inactive option")
        elif option == "":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with no"
                                         " option")
            else:
                if re.search(net_name, output):
                    raise error.TestFail("Found inactive network with"
                                         " no option")
        elif option == "--all":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with"
                                         " --all option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Can't find inactive network with"
                                             " --all option")
                else:
                    # If network is transient, after net-destroy it,
                    # it will disapear.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive network"
                                             " with --all option")
def run(test, params, env):
    """
    Test command: virsh net-dhcp-leases

    1. Create a new network and run virsh command to check dhcp leases info.
    2. Attach an interface before or after start the domain, then check the
       dhcp leases info.
    3. Clean the environment.

    :param test: test object
    :param params: dict with the test parameters
    :param env: environment object holding the VMs under test
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    net_name = params.get("net_name", "default")
    nic_mac = params.get("nic_mac", "")
    net_option = params.get("net_option", "")
    status_error = "yes" == params.get("status_error", "no")
    prepare_net = "yes" == params.get("prepare_net", "yes")
    hotplug_iface = "yes" == params.get("hotplug_interface", "no")
    filter_by_mac = "yes" == params.get("filter_by_mac", "no")
    exist_bug = params.get("exist_bug")

    def create_network():
        """
        Create a NAT network with a dhcp range built from the test params.
        """
        net_ip_addr = params.get("net_ip_addr", "192.168.200.1")
        net_ip_netmask = params.get("net_ip_netmask", "255.255.255.0")
        net_dhcp_start = params.get("net_dhcp_start", "192.168.200.2")
        net_dhcp_end = params.get("net_dhcp_end", "192.168.200.254")
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'mode': "nat"}
        ipxml = network_xml.IPXML()
        ipxml.address = net_ip_addr
        ipxml.netmask = net_ip_netmask
        ipxml.dhcp_ranges = {'start': net_dhcp_start, "end": net_dhcp_end}
        netxml.set_ip(ipxml)
        netxml.create()

    def get_net_dhcp_leases(output):
        """
        Parse 'virsh net-dhcp-leases' output into a list of dicts.

        The first output line holds the column headers and the second line
        is a separator, so real lease rows start at the third line.

        :param output: stripped stdout of the virsh command
        :return: list of {column header: value} dicts, one per lease
        :raise error.TestError: if the output cannot be parsed
        """
        leases = []
        lines = output.splitlines()
        if not lines:
            return leases
        try:
            # A header may be up to four space-separated words (e.g.
            # "IP address"), hence the multi-word alternative first.
            pat = r"\S+\ ?\S+\ ?\S+\ ?\S+|\S+"
            keys = re.findall(pat, lines[0])
            for line in lines[2:]:
                values = re.findall(pat, line)
                leases.append(dict(zip(keys, values)))
            return leases
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt and
            # SystemExit are no longer swallowed here.
            raise error.TestError("Fail to parse output: %s" % output)

    def get_ip_by_mac(mac_addr, try_dhclint=False):
        """
        Get interface IP address by given MAC address.

        :param mac_addr: MAC address of the guest interface
        :param try_dhclint: when True, run dhclient in the guest to try to
                            allocate an IP address for the interface
        :return: the IP address, or None if the interface has none
        """
        session = vm.wait_for_login()

        def f():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = utils_misc.wait_for(f, 10)
            if ip_addr is None:
                iface_name = utils_net.get_linux_ifname(session, mac_addr)
                if try_dhclint:
                    session.cmd("dhclient %s" % iface_name)
                    ip_addr = utils_misc.wait_for(f, 10)
                else:
                    # No IP for the interface, just print the interface name
                    logging.warn("Find '%s' with MAC address '%s', "
                                 "but which has no IP address",
                                 iface_name, mac_addr)
        finally:
            session.close()
        return ip_addr

    def check_net_lease(net_leases, expected_find=True):
        """
        Check the parsed dhcp lease info against the domain XML.

        :param net_leases: list of lease dicts from get_net_dhcp_leases()
        :param expected_find: whether lease entries are expected at all
        :raise error.TestFail: on any mismatch
        """
        if not net_leases:
            if expected_find:
                raise error.TestFail("Lease info is empty")
            else:
                logging.debug("No dhcp lease info find as expected")
        else:
            if not expected_find:
                raise error.TestFail("Find unexpected dhcp lease info: %s"
                                     % net_leases)
            find_mac = False
            for net_lease in net_leases:
                net_mac = net_lease['MAC address']
                # Drop the prefix length from "IP address" (e.g.
                # "192.168.200.2/24" -> "192.168.200.2").  Splitting on '/'
                # replaces the old '[:-3]' slice, which silently corrupted
                # addresses whose prefix was not exactly two digits.
                net_ip = net_lease['IP address'].split('/')[0]
                if vm_xml.VMXML.get_iface_by_mac(vm_name, net_mac):
                    find_mac = True
                    logging.debug("Find '%s' in domain XML", net_mac)
                else:
                    logging.debug("Not find '%s' in domain XML", net_mac)
                    continue
                iface_ip = get_ip_by_mac(net_mac)
                if iface_ip and iface_ip != net_ip:
                    raise error.TestFail("Address '%s' is not expected"
                                         % iface_ip)
            if expected_find and not find_mac:
                raise error.TestFail("No matched MAC address")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    # Remove all interfaces of the VM so only the interface attached below
    # can show up in the dhcp leases.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    vm.free_mac_address(0)
    vmxml.remove_all_device_by_type("interface")
    # Create new network
    if prepare_net:
        create_network()
    nets = virsh.net_state_dict()
    if net_name not in nets and not status_error:
        raise error.TestError("Not find network '%s'" % net_name)
    expected_find = False
    try:
        # First query: no interface attached yet, so no lease is expected.
        result = virsh.net_dhcp_leases(net_name, mac=nic_mac,
                                       options=net_option, debug=True,
                                       ignore_status=True)
        utlv.check_exit_status(result, status_error)
        lease = get_net_dhcp_leases(result.stdout.strip())
        check_net_lease(lease, expected_find)
        if not status_error:
            iface_mac = utils_net.generate_mac_address_simple()
            if filter_by_mac:
                nic_mac = iface_mac
            op = "--type network --source %s --mac %s" % (net_name, iface_mac)
            if not hotplug_iface:
                # Cold-plug: attach persistently, then boot.
                op += " --config"
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
                vm.start()
            else:
                # Hot-plug: boot first, then attach to the live domain.
                vm.start()
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
            new_interface_ip = get_ip_by_mac(iface_mac, try_dhclint=True)
            # Allocate IP address for the new interface may fail, so only
            # check the result if get new IP address
            if new_interface_ip:
                expected_find = True
            result = virsh.net_dhcp_leases(net_name, mac=nic_mac,
                                           debug=False, ignore_status=True)
            utlv.check_exit_status(result, status_error)
            lease = get_net_dhcp_leases(result.stdout.strip())
            check_net_lease(lease, expected_find)
    finally:
        if exist_bug:
            logging.warn("Case may failed as bug: %s", BUG_URL % exist_bug)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        if prepare_net:
            virsh.net_destroy(net_name)
def run_virsh_net_destroy(test, params, env):
    """
    Test command: virsh net-destroy.

    Forcefully stop a given network and verify the outcome:
    1. Make sure the network exists.
    2. Move the network into the state requested by the configuration.
    3. Perform the virsh net-destroy operation.
    4. Check whether the network really went down.
    5. Recover the network environment.
    6. Confirm the test result against the expectation.
    """
    target = params.get("net_destroy_net_ref")
    extra_args = params.get("net_destroy_extra", "")
    net_name = params.get("net_destroy_network", "default")
    desired_state = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")

    # The network must be known to libvirt before we try to destroy it.
    listed = virsh.net_list("--all").stdout.strip()
    if not re.search(net_name, listed):
        raise error.TestNAError("Make sure the network exists!!")

    # Resolve the reference passed on the virsh command line.
    if target == "uuid":
        target = virsh.net_uuid(net_name).stdout.strip()
    elif target == "name":
        target = net_name

    # Remember the original state, then drive the network into the state
    # the test configuration asks for.
    original_state = "active"
    try:
        if virsh.net_state_dict()[net_name]['active']:
            if desired_state == "inactive":
                virsh.net_destroy(net_name)
        else:
            original_state = "inactive"
            if desired_state == "active":
                virsh.net_start(net_name)
    except error.CmdError:
        raise error.TestError("Prepare network status failed!")

    status = virsh.net_destroy(target, extra_args,
                               ignore_status=True).exit_status

    # A network still marked active was not destroyed; force a failure code.
    if virsh.net_state_dict()[net_name]['active']:
        status = 1

    # Put the network back into the state it had before the test ran.
    try:
        if (original_state == "active"
                and not virsh.net_state_dict()[net_name]['active']):
            virsh.net_start(net_name)
        if (original_state == "inactive"
                and virsh.net_state_dict()[net_name]['active']):
            virsh.net_destroy(net_name)
    except error.CmdError:
        raise error.TestError("Recover network status failed!")

    # Compare the outcome against the expectation encoded in status_error.
    if status_error == "yes" and status == 0:
        raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no" and status != 0:
        raise error.TestFail("Run failed with right command")
    elif status_error not in ("yes", "no"):
        raise error.TestError("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Since 3.3.0, the coalesce setting is supported.

    This case sets coalesce and checks it for 4 host network types and each
    guest interface type.
    Only the network/bridge guest interface types take effect for setting
    interface coalesce. For each host network type, a guest can use the
    bridge/network interface type to set coalesce, except for the macvtap
    network type.
    Execute 'ethtool -c ${interface}|grep "rx-frames"' and analyze the
    output to check whether the coalesce setting took effect or not.
    For the macvtap network type the guest can start, but querying coalesce
    will fail.

    1. For the default host network, the network definition is:
    <network>
       <name>default</name>
       <bridge name="virbr0"/>
       <forward/>
       <ip address="192.168.122.1" netmask="255.255.255.0">
          <dhcp>
             <range start="192.168.122.2" end="192.168.122.254"/>
          </dhcp>
       </ip>
    </network>
    1) guest interface type 'bridge' with coalesce:
    <interface type='bridge'>
       <source bridge='virbr0'/>
       <model type='virtio'/>
       <coalesce><rx><frames max='64'/></rx></coalesce>
    </interface>
    2) guest interface type 'network' with coalesce:
    <interface type='network'>
       <source network='default'/>
       <model type='virtio'/>
       <coalesce><rx><frames max='32'/></rx></coalesce>
    </interface>

    2. For a bridge host network, a real bridge is needed on the host:
    'nmcli con add type bridge con-name br0 ifname br0'
    1) guest interface type 'bridge': <source bridge='br0'/>
    2) guest interface type 'network': first define a virtual network for
       bridge br0:
    <network>
       <name>net-br0</name>
       <forward mode='bridge'/>
       <bridge name='br0'/>
    </network>
       then use <source network='net-br0'/> in the guest interface.

    3. For an openvswitch bridge host network, a real ovs bridge is needed:
    'ovs-vsctl add-br ovsbr0'
    1) guest interface type 'bridge': <source bridge='ovsbr0'/> with
       <virtualport type='openvswitch'/>
    2) guest interface type 'network': first define a virtual network for
       ovs bridge ovsbr0:
    <network>
       <name>net-ovs0</name>
       <forward mode='bridge'/>
       <bridge name='ovsbr0'/>
       <virtualport type='openvswitch'/>
    </network>
       then use <source network='net-ovs0'/> in the guest interface.

    4. For macvtap bridge mode, first create a virtual network whose dev is
       a true physical interface:
    <network>
       <name>net-br-macvtap</name>
       <forward dev='eno1' mode='bridge'>
          <interface dev='eno1'/>
       </forward>
    </network>
       then set the guest to use this macvtap network and set coalesce.

    Steps in this test:
    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare network if necessary.
    3. Edit guest interface with the chosen network and set coalesce.
    4. Start guest and check whether the coalesce setting took effect.
    5. Recover network and guest.

    :param test: test object
    :param params: dict with the test parameters
    :param env: environment object holding the VMs under test
    """
    if not libvirt_version.version_compare(3, 3, 0):
        test.skip("Coalesce setting is only supported by libvirt3.3.0 and above")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def get_first_phy_iface():
        """
        Get the first physical network interface from the output of
        'ls /sys/class/net', e.g.:

        #ls /sys/class/net
        eno1  lo  virbr0  virbr0-nic

        :return: interface name
        """
        interface = ''
        lines = process.run('ls /sys/class/net').stdout_text.splitlines()
        interfaces = lines[0].split()
        for iface in interfaces:
            # Skip loopback and libvirt-created devices ("vir*").
            if iface != 'lo' and 'vir' not in iface:
                interface = iface
                break
        if interface == '':
            test.fail("There is no physical network interface")
        return interface

    def modify_iface_xml():
        """
        Modify interface xml options.

        Two methods to modify the domain interface:
        1. modify guest xml and define it (cold-plug)
        2. attach one interface to the running guest (hot-plug)

        :return: 0 for a successful negative case,
                 test.fail() for a failed positive/negative case,
                 None for a successful positive case
        """
        if hotplug_iface:
            iface = Interface(iface_type)
        else:
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            xml_devices = vmxml.devices
            iface_index = xml_devices.index(
                xml_devices.by_device_tag("interface")[0])
            iface = xml_devices[iface_index]
        # Initialize so the 'if source' check below cannot raise NameError
        # when none of the interface-type branches matches.
        source = None
        if iface_type == 'network':
            iface.type_name = iface_type
            source = {iface_type: net_name}
        elif iface_type == 'bridge' and bridge_name:
            iface.type_name = iface_type
            source = {iface_type: bridge_name}
        elif iface_type == 'direct':
            iface.type_name = iface_type
            source = {'dev': interface, 'mode': 'bridge'}

        if source:
            del iface.source
            iface.source = source
        iface_model = params.get("iface_model", "virtio")
        iface.model = iface_model
        iface.coalesce = {'max': coalesce_value}
        if network_type == "ovsbridge" and iface_type == "bridge":
            iface.virtualport_type = "openvswitch"

        if not hotplug_iface:
            vmxml.devices = xml_devices
            vmxml.xmltreefile.write()
            try:
                vmxml.sync()
            except xcepts.LibvirtXMLError as details:
                if status_error:
                    # Expect error for negative test
                    return 0
                else:
                    test.fail("Define guest: FAIL")
        else:
            if not vm.is_alive():
                vm.start()
                # Wait for the guest to boot completely
                time.sleep(2)
            try:
                # Return value unused; exception variable renamed from
                # 'error' so it no longer shadows that name.
                virsh.attach_device(vm_name, iface.xml,
                                    ignore_status=False, debug=True)
            except process.CmdError as details:
                if status_error:
                    # Expect error for negative test
                    return 0
                else:
                    test.fail("Define guest: FAIL")

    start_error = "yes" == params.get("start_error", "no")
    status_error = "yes" == params.get("status_error", "no")

    # Get coalesce value.
    expect_coalesce = params.get("expect_coalesce", "")
    coalesce_value = params.get("coalesce", "15")
    if expect_coalesce == '':
        expect_coalesce = coalesce_value

    # Network specific attributes.
    network_type = params.get("network_type", "default")
    net_name = params.get("net_name", "default")
    net_bridge = params.get("net_bridge", "{'name':'virbr0'}")

    # Get guest interface type
    iface_type = params.get("iface_type", "network")

    # Whether attach interface
    hotplug_iface = "yes" == params.get("hotplug_iface", "no")
    error_info = params.get("error_info", "")

    # Destroy the guest first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    netxml_backup = NetworkXML.new_from_net_dumpxml("default")
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Pre-initialize so the finally block cannot raise NameError (masking
    # the original failure) when an exception fires before assignment.
    bridge_name = None
    bridge = None

    # Build the xml and run test.
    try:
        interface = get_first_phy_iface()

        # Prepare network for specific network type.
        # Create bridge/ovsbridge for host bridge/ovsbridge network type
        if network_type == "default" and iface_type == "bridge":
            bridge_name = "virbr0"
        elif network_type == "bridge":
            # NOTE: net_bridge comes from trusted test configuration;
            # eval() on it mirrors the rest of the framework.
            bridge_name = eval(net_bridge)['name']
            bridge = utils_net.Bridge()
            # Try to add bridge if not exist
            if bridge_name not in bridge.list_br():
                bridge.add_bridge(bridge_name)
        elif network_type == 'ovsbridge':
            bridge_name = eval(net_bridge)['name']
            # Try to add ovs bridge if not exist
            if not utils_net.ovs_br_exists(bridge_name):
                utils_net.add_ovs_bridge(bridge_name)

        if iface_type == "network":
            # Define virtual network if it does not exist for 'network'
            # type of guest interface
            network = NetworkXML()
            network.name = net_name
            # Prepare virtual network required parameters
            params['net_forward'] = "{'mode':'bridge'}"
            if network_type == "ovsbridge":
                params['net_virtualport'] = "openvswitch"
            if network_type == "macvtap":
                # For bridge type of macvtap network, one true physical
                # interface should be added.  Check whether the physical
                # interface has already been added into one bridge; if
                # yes, skip the macvtap test.
                # If the interface was already added to a bridge, the
                # output of the nmcli command will include
                # "connection.slave-type: bridge"
                out = process.run('nmcli dev show %s' % interface).stdout_text
                con_l = re.findall(r'GENERAL.CONNECTION:(.+?)\n', out)
                if not con_l:
                    test.cancel("no connection for the interface")
                else:
                    con = con_l[0].strip()
                if "bridge" not in process.run('nmcli con show "%s"' % con).stdout_text:
                    params['forward_iface'] = interface
                    params['net_forward'] = "{'mode':'bridge', 'dev': '%s'}" % interface
                else:
                    test.cancel("interface %s has been added into one brige, but macvtap"
                                "need also add this interface, so current network can't"
                                "suit macvtap testing" % interface)
            if not network.exists():
                netxml = libvirt.create_net_xml(net_name, params)
                netxml.define()
                netxml.start()
                virsh.net_dumpxml(network.name, debug=True)

        # Edit the interface xml.
        # For successful negative case, return 0 to specify PASS result directly
        ret = modify_iface_xml()
        if ret == 0:
            return 0

        try:
            # Snapshot host interfaces so the new guest tap can be found.
            link_before = set(
                process.run('ls /sys/class/net').stdout_text.splitlines())
            # Start the VM.
            vm.start()
            if start_error:
                # test.fail() raises by itself; the previous
                # 'raise test.fail(...)' was redundant.
                test.fail("VM started unexpectedly")

            # Get guest virtual network interface
            link_after = set(
                process.run('ls /sys/class/net').stdout_text.splitlines())
            newinterface = (link_after - link_before).pop()
            out = process.run('ethtool -c %s' % newinterface,
                              ignore_status=True)
            if network_type == 'macvtap':
                # Currently, output coalesce for macvtap is not supported
                err_msg = "Cannot get device coalesce settings: Operation not supported"
                std_msg = "Coalesce parameters for %s:" % newinterface
                if err_msg not in out.stderr_text or std_msg not in out.stdout_text:
                    test.fail("coalesce setting on %s failed." % network_type)
            else:
                # Get coalesce value and check it is true
                if out.exit_status == 0:
                    for line in out.stdout_text.splitlines():
                        if 'rx-frames:' in line:
                            coalesce = line.split(':')[1].strip()
                            if expect_coalesce != coalesce:
                                test.fail("coalesce setting failed for %s"
                                          % network_type)
                            break
                else:
                    test.fail("coalesce setting on %s failed." % network_type)
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not start_error:
                test.fail('VM failed to start:\n%s' % details)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring network...")
        if net_name == "default":
            netxml_backup.sync()
        else:
            # Destroy and undefine the newly created network
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name)
        # Try to recover the bridge; the extra None guards keep cleanup
        # from raising when setup failed early.
        if network_type == "bridge" and bridge_name and bridge:
            if bridge_name in bridge.list_br():
                bridge.del_bridge(bridge_name)
        elif network_type == "ovsbridge" and bridge_name:
            if utils_net.ovs_br_exists(bridge_name):
                utils_net.del_ovs_bridge(bridge_name)
        vmxml_backup.sync()
def run(test, params, env): """ Test interafce xml options. 1.Prepare test environment,destroy or suspend a VM. 2.Edit xml and start the domain. 3.Perform test operation. 4.Recover test environment. 5.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) def prepare_pxe_boot(): """ Prepare tftp server and pxe boot files """ pkg_list = ["syslinux", "tftp-server", "tftp", "ipxe-roms-qemu", "wget"] # Try to install required packages if not utils_misc.yum_install(pkg_list): raise error.TestNAError("Failed ot install " "required packages") boot_initrd = params.get("boot_initrd") boot_vmlinuz = params.get("boot_vmlinuz") # Download pxe boot images utils.run("wget %s -O %s/initrd.img" % (boot_initrd, tftp_root)) utils.run("wget %s -O %s/vmlinuz" % (boot_vmlinuz, tftp_root)) utils.run("cp -f /usr/share/syslinux/pxelinux.0 {0};" " mkdir -m 777 -p {0}/pxelinux.cfg".format(tftp_root)) pxe_file = "%s/pxelinux.cfg/default" % tftp_root boot_txt = """ DISPLAY boot.txt DEFAULT rhel LABEL rhel kernel vmlinuz append initrd=initrd.img PROMPT 1 TIMEOUT 3""" with open(pxe_file, 'w') as p_file: p_file.write(boot_txt) def modify_iface_xml(): """ Modify interface xml options """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) if pxe_boot: # Config boot console for pxe boot osxml = vm_xml.VMOSXML() osxml.type = vmxml.os.type osxml.arch = vmxml.os.arch osxml.machine = vmxml.os.machine osxml.loader = "/usr/share/seabios/bios.bin" osxml.bios_useserial = "yes" osxml.bios_reboot_timeout = "-1" osxml.boots = ['network'] del vmxml.os vmxml.os = osxml xml_devices = vmxml.devices iface_index = xml_devices.index( xml_devices.by_device_tag("interface")[0]) iface = xml_devices[iface_index] iface_bandwidth = {} iface_inbound = eval(iface_bandwidth_inbound) iface_outbound = eval(iface_bandwidth_outbound) if iface_inbound: iface_bandwidth["inbound"] = iface_inbound if iface_outbound: iface_bandwidth["outbound"] = iface_outbound if iface_bandwidth: bandwidth = 
iface.new_bandwidth(**iface_bandwidth) iface.bandwidth = bandwidth iface_source = params.get("iface_source") if iface_source: source = eval(iface_source) if source: iface.source = source logging.debug("New interface xml file: %s", iface) vmxml.devices = xml_devices vmxml.xmltreefile.write() vmxml.sync() def run_dnsmasq_default_test(key, value=None, exists=True): """ Test dnsmasq configuration. """ conf_file = "/var/lib/libvirt/dnsmasq/default.conf" configs = "" with open(conf_file) as f: configs = f.read() logging.debug("configs in file %s: %s", conf_file, configs) if value: config = "%s=%s" % (key, value) else: config = key if not configs.count(config): if exists: raise error.TestFail("Can't find %s=%s in configuration" " file" % (key, value)) else: if not exists: raise error.TestFail("Found %s=%s in configuration" " file" % (key, value)) def run_dnsmasq_addnhosts_test(hostip, hostnames): """ Test host ip and names configuration """ conf_file = "/var/lib/libvirt/dnsmasq/default.addnhosts" hosts_re = ".*".join(hostnames) configs = "" with open(conf_file) as f: configs = f.read() logging.debug("configs in file %s: %s", conf_file, configs) if not re.search(r"%s.*%s" % (hostip, hosts_re), configs, re.M): raise error.TestFail("Can't find '%s' in configuration" " file" % hostip) def run_dnsmasq_host_test(iface_mac, guest_ip, guest_name): """ Test host name and ip configuration for dnsmasq """ conf_file = "/var/lib/libvirt/dnsmasq/default.hostsfile" config = "%s,%s,%s" % (iface_mac, guest_ip, guest_name) configs = "" with open(conf_file) as f: configs = f.read() logging.debug("configs in file %s: %s", conf_file, configs) if not configs.count(config): raise error.TestFail("Can't find host configuration" " in file %s" % conf_file) def check_class_rules(ifname, rule_id, bandwidth): """ Check bandwidth settings via 'tc class' output """ cmd = "tc class show dev %s" % ifname class_output = utils.run(cmd).stdout logging.debug("Bandwidth class output: %s", class_output) 
class_pattern = (r"class htb %s.*rate (\d+)Kbit ceil" " (\d+)Kbit burst (\d+)(K?M?)b.*" % rule_id) se = re.search(class_pattern, class_output, re.M) if not se: raise error.TestFail("Can't find outbound setting" " for htb %s" % rule_id) logging.debug("bandwidth from tc output:%s" % str(se.groups())) ceil = None if bandwidth.has_key("floor"): ceil = int(bandwidth["floor"]) * 8 elif bandwidth.has_key("average"): ceil = int(bandwidth["average"]) * 8 if ceil: assert int(se.group(1)) == ceil if bandwidth.has_key("peak"): assert int(se.group(2)) == int(bandwidth["peak"]) * 8 if bandwidth.has_key("burst"): if se.group(4) == 'M': tc_burst = int(se.group(3)) * 1024 else: tc_burst = int(se.group(3)) assert tc_burst == int(bandwidth["burst"]) def check_filter_rules(ifname, bandwidth): """ Check bandwidth settings via 'tc filter' output """ cmd = "tc -d filter show dev %s parent ffff:" % ifname filter_output = utils.run(cmd).stdout logging.debug("Bandwidth filter output: %s", filter_output) if not filter_output.count("filter protocol all pref"): raise error.TestFail("Can't find 'protocol all' settings" " in filter rules") filter_pattern = ".*police.*rate (\d+)Kbit burst (\d+)Kb.*" se = re.search(r"%s" % filter_pattern, filter_output, re.M) if not se: raise error.TestFail("Can't find any filter policy") logging.debug("bandwidth from tc output:%s" % str(se.groups())) if bandwidth.has_key("average"): assert int(se.group(1)) == int(bandwidth["average"]) * 8 if bandwidth.has_key("burst"): assert int(se.group(2)) == int(bandwidth["burst"]) def run_bandwidth_test(check_net=False, check_iface=False): """ Test bandwidth option for network or interface by tc command. 
""" iface_inbound = eval(iface_bandwidth_inbound) iface_outbound = eval(iface_bandwidth_outbound) net_inbound = eval(net_bandwidth_inbound) net_outbound = eval(net_bandwidth_outbound) net_bridge_name = eval(net_bridge)["name"] iface_name = libvirt.get_ifname_host(vm_name, iface_mac) try: if check_net and net_inbound: # Check qdisc rules cmd = "tc -d qdisc show dev %s" % net_bridge_name qdisc_output = utils.run(cmd).stdout logging.debug("Bandwidth qdisc output: %s", qdisc_output) if not qdisc_output.count("qdisc ingress ffff:"): raise error.TestFail("Can't find ingress setting") check_class_rules(net_bridge_name, "1:1", {"average": net_inbound["average"], "peak": net_inbound["peak"]}) check_class_rules(net_bridge_name, "1:2", net_inbound) # Check filter rules on bridge interface if check_net and net_outbound: check_filter_rules(net_bridge_name, net_outbound) # Check class rules on interface inbound settings if check_iface and iface_inbound: check_class_rules(iface_name, "1:1", {'average': iface_inbound['average'], 'peak': iface_inbound['peak'], 'burst': iface_inbound['burst']}) if iface_inbound.has_key("floor"): check_class_rules(net_bridge_name, "1:3", {'floor': iface_inbound["floor"]}) # Check filter rules on interface outbound settings if check_iface and iface_outbound: check_filter_rules(iface_name, iface_outbound) except AssertionError: utils.log_last_traceback() raise error.TestFail("Failed to check network bandwidth") def check_name_ip(session): """ Check dns resolving on guest """ # Check if bind-utils is installed if not utils_misc.yum_install(['bind-utils'], session): raise error.TestNAError("Failed to install bind-utils" " on guest") # Run host command to check if hostname can be resolved if not guest_ipv4 and not guest_ipv6: raise error.TestFail("No ip address found from parameters") guest_ip = guest_ipv4 if guest_ipv4 else guest_ipv6 cmd = "host %s | grep %s" % (guest_name, guest_ip) if session.cmd_status(cmd): raise error.TestFail("Can't resolve name 
%s on guest" % guest_name) def check_ipt_rules(check_ipv4=True, check_ipv6=False): """ Check iptables for network/interface """ br_name = eval(net_bridge)["name"] net_forward = eval(params.get("net_forward", "{}")) net_ipv4 = params.get("net_ipv4") net_ipv6 = params.get("net_ipv6") ipt_rules = ("FORWARD -i {0} -o {0} -j ACCEPT".format(br_name), "FORWARD -o %s -j REJECT --reject-with icmp" % br_name, "FORWARD -i %s -j REJECT --reject-with icmp" % br_name) net_dev_in = "" net_dev_out = "" if net_forward.has_key("dev"): net_dev_in = " -i %s" % net_forward["dev"] net_dev_out = " -o %s" % net_forward["dev"] if check_ipv4: ipv4_rules = list(ipt_rules) ctr_rule = "" nat_rules = [] if net_forward.has_key("mode") and net_forward["mode"] == "nat": nat_port = eval(params.get("nat_port")) p_start = nat_port["start"] p_end = nat_port["end"] ctr_rule = " -m conntrack --ctstate RELATED,ESTABLISHED" nat_rules = ["POSTROUTING -s %s -d 224.0.0.0/24 -j RETURN" % net_ipv4, "POSTROUTING -s %s -d 255.255.255.255/32 -j RETURN" % net_ipv4, ("POSTROUTING -s {0} ! -d {0} -p tcp -j MASQUERADE" " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)), ("POSTROUTING -s {0} ! -d {0} -p udp -j MASQUERADE" " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)), ("POSTROUTING -s {0} ! 
-d {0} -p udp" " -j MASQUERADE".format(net_ipv4))] if nat_rules: ipv4_rules.extend(nat_rules) if (net_ipv4 and net_forward.has_key("mode") and net_forward["mode"] in ["nat", "route"]): rules = [("FORWARD -d %s%s -o %s%s -j ACCEPT" % (net_ipv4, net_dev_in, br_name, ctr_rule)), ("FORWARD -s %s -i %s%s -j ACCEPT" % (net_ipv4, br_name, net_dev_out))] ipv4_rules.extend(rules) output = utils.run("iptables-save").stdout.strip() logging.debug("iptables: %s", output) for ipt in ipv4_rules: if not output.count(ipt): raise error.TestFail("Can't find iptable rule:\n%s" % ipt) if check_ipv6: ipv6_rules = list(ipt_rules) if (net_ipv6 and net_forward.has_key("mode") and net_forward["mode"] in ["nat", "route"]): rules = [("FORWARD -d %s%s -o %s -j ACCEPT" % (net_ipv6, net_dev_in, br_name)), ("FORWARD -s %s -i %s%s -j ACCEPT" % (net_ipv6, br_name, net_dev_out))] ipv6_rules.extend(rules) output = utils.run("ip6tables-save").stdout.strip() logging.debug("iptables: %s", output) for ipt in ipv6_rules: if not output.count(ipt): raise error.TestFail("Can't find ipbtable rule:\n%s" % ipt) def run_ip_test(session, ip_ver): """ Check iptables on host and ipv6 address on guest """ if ip_ver == "ipv6": # Clean up iptables rules for guest to get ipv6 address session.cmd_status("ip6tables -F") utils_net.restart_guest_network(session, iface_mac, ip_version=ip_ver) # It may take some time to get the ip address def get_ip_func(): return utils_net.get_guest_ip_addr(session, iface_mac, ip_version=ip_ver) utils_misc.wait_for(get_ip_func, 10) vm_ip = get_ip_func() logging.debug("Guest has ip: %s", vm_ip) if not vm_ip: raise error.TestFail("Can't find ip address on guest") ping_cmd = "ping -c 5" ip_gateway = net_ip_address if ip_ver == "ipv6": ping_cmd = "ping6 -c 5" ip_gateway = net_ipv6_address if ip_gateway: if utils.system("%s %s" % (ping_cmd, ip_gateway), ignore_status=True): raise error.TestFail("Failed to ping gateway address: %s" % ip_gateway) def run_guest_libvirt(session): """ Check guest 
libvirt network """ # Try to install required packages if not utils_misc.yum_install(['libvirt'], session): raise error.TestNAError("Failed ot install libvirt" " package on guest") result = True # Check network state on guest cmd = ("service libvirtd restart; virsh net-info default" " | grep 'Active:.*no'") if session.cmd_status(cmd): result = False logging.error("Default network isn't in inactive state") # Try to start default network on guest, check error messages if result: cmd = "virsh net-start default" status, output = session.cmd_status_output(cmd) logging.debug("Run command on guest exit %s, output %s" % (status, output)) if not status or not output.count("already in use"): result = False logging.error("Failed to see network messges on guest") if session.cmd_status("rpm -e libvirt"): logging.error("Failed to remove libvirt packages on guest") if not result: raise error.TestFail("Check libvirt network on guest failed") start_error = "yes" == params.get("start_error", "no") restart_error = "yes" == params.get("restart_error", "no") # network specific attributes. 
net_name = params.get("net_name", "default") net_bridge = params.get("net_bridge", "{'name':'virbr0'}") net_domain = params.get("net_domain") net_ip_address = params.get("net_ip_address") net_ipv6_address = params.get("net_ipv6_address") net_dns_forward = params.get("net_dns_forward") net_dns_txt = params.get("net_dns_txt") net_dns_srv = params.get("net_dns_srv") net_dns_hostip = params.get("net_dns_hostip") net_dns_hostnames = params.get("net_dns_hostnames", "").split() dhcp_start_ipv4 = params.get("dhcp_start_ipv4") dhcp_end_ipv4 = params.get("dhcp_end_ipv4") guest_name = params.get("guest_name") guest_ipv4 = params.get("guest_ipv4") guest_ipv6 = params.get("guest_ipv6") tftp_root = params.get("tftp_root") pxe_boot = "yes" == params.get("pxe_boot", "no") net_bandwidth_inbound = params.get("net_bandwidth_inbound", "{}") net_bandwidth_outbound = params.get("net_bandwidth_outbound", "{}") iface_bandwidth_inbound = params.get("iface_bandwidth_inbound", "{}") iface_bandwidth_outbound = params.get("iface_bandwidth_outbound", "{}") multiple_guests = params.get("multiple_guests") create_network = "yes" == params.get("create_network", "no") serial_login = "******" == params.get("serial_login", "no") change_iface_option = "yes" == params.get("change_iface_option", "no") test_bridge = "yes" == params.get("test_bridge", "no") test_dnsmasq = "yes" == params.get("test_dnsmasq", "no") test_dhcp_range = "yes" == params.get("test_dhcp_range", "no") test_dns_host = "yes" == params.get("test_dns_host", "no") test_qos_bandwidth = "yes" == params.get("test_qos_bandwidth", "no") test_qos_remove = "yes" == params.get("test_qos_remove", "no") test_ipv4_address = "yes" == params.get("test_ipv4_address", "no") test_ipv6_address = "yes" == params.get("test_ipv6_address", "no") test_guest_libvirt = "yes" == params.get("test_guest_libvirt", "no") if serial_login: # Set serial console for serial login if vm.is_dead(): vm.start() session = vm.wait_for_login() # Set console option 
vm.set_kernel_console("ttyS0", "115200") # Shutdown here for sync fs vm.shutdown() else: if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. netxml_backup = NetworkXML.new_from_net_dumpxml("default") iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name) params["guest_mac"] = iface_mac vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vms_list = [] # Build the xml and run test. try: if test_dnsmasq: # Check the settings before modifying network xml if net_dns_forward == "no": run_dnsmasq_default_test("domain-needed", exists=False) run_dnsmasq_default_test("local", "//", exists=False) if net_domain: run_dnsmasq_default_test("domain", net_domain, exists=False) run_dnsmasq_default_test("expand-hosts", exists=False) # Prepare pxe boot directory if pxe_boot: prepare_pxe_boot() # Edit the network xml or create a new one. if create_network: libvirt.create_net_xml(net_name, params) # Edit the interface xml. if change_iface_option: modify_iface_xml() if multiple_guests: # Clone more vms for testing for i in range(int(multiple_guests)): guest_name = "%s_%s" % (vm_name, i) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True) vms_list.append(vm.clone(guest_name)) if test_bridge: bridge = eval(net_bridge) br_if = utils_net.Interface(bridge['name']) if not br_if.is_up(): raise error.TestFail("Bridge interface isn't up") if test_dnsmasq: # Check the settings in dnsmasq config file if net_dns_forward == "no": run_dnsmasq_default_test("domain-needed") run_dnsmasq_default_test("local", "//") if net_domain: run_dnsmasq_default_test("domain", net_domain) run_dnsmasq_default_test("expand-hosts") if net_bridge: bridge = eval(net_bridge) run_dnsmasq_default_test("interface", bridge['name']) if bridge.has_key('stp') and bridge['stp'] == 'on': if bridge.has_key('delay'): br_delay = float(bridge['delay']) cmd = ("brctl showstp %s | grep 'bridge forward delay'" % bridge['name']) out = utils.run(cmd, ignore_status=False).stdout.strip() logging.debug("brctl 
showstp output: %s", out) pattern = (r"\s*forward delay\s+(\d+.\d+)\s+bridge" " forward delay\s+(\d+.\d+)") match_obj = re.search(pattern, out, re.M) if not match_obj or len(match_obj.groups()) != 2: raise error.TestFail("Can't see forward delay" " messages from command") elif (float(match_obj.groups()[0]) != br_delay or float(match_obj.groups()[1]) != br_delay): raise error.TestFail("Foward delay setting" " can't take effect") if dhcp_start_ipv4 and dhcp_end_ipv4: run_dnsmasq_default_test("dhcp-range", "%s,%s" % (dhcp_start_ipv4, dhcp_end_ipv4)) if guest_name and guest_ipv4: run_dnsmasq_host_test(iface_mac, guest_ipv4, guest_name) if test_dns_host: if net_dns_txt: dns_txt = eval(net_dns_txt) run_dnsmasq_default_test("txt-record", "%s,%s" % (dns_txt["name"], dns_txt["value"])) if net_dns_srv: dns_srv = eval(net_dns_srv) run_dnsmasq_default_test("srv-host", "_%s._%s.%s,%s,%s,%s,%s" % (dns_srv["service"], dns_srv["protocol"], dns_srv["domain"], dns_srv["target"], dns_srv["port"], dns_srv["priority"], dns_srv["weight"])) if net_dns_hostip and net_dns_hostnames: run_dnsmasq_addnhosts_test(net_dns_hostip, net_dns_hostnames) # Run bandwidth test for network if test_qos_bandwidth: run_bandwidth_test(check_net=True) try: # Start the VM. 
vm.start() if start_error: raise error.TestFail("VM started unexpectedly") if pxe_boot: # Just check network boot messages here vm.serial_console.read_until_output_matches( ["Loading vmlinuz", "Loading initrd.img"], utils_misc.strip_console_codes) output = vm.serial_console.get_stripped_output() logging.debug("Boot messages: %s", output) else: if serial_login: session = vm.wait_for_serial_login() else: session = vm.wait_for_login() if test_dhcp_range: # First vm should have a valid ip address utils_net.restart_guest_network(session, iface_mac) vm_ip = utils_net.get_guest_ip_addr(session, iface_mac) logging.debug("Guest has ip: %s", vm_ip) if not vm_ip: raise error.TestFail("Guest has invalid ip address") # Other vms cloudn't get the ip address for vms in vms_list: # Start other VMs. vms.start() sess = vms.wait_for_serial_login() vms_mac = vms.get_virsh_mac_address() # restart guest network to get ip addr utils_net.restart_guest_network(sess, vms_mac) vms_ip = utils_net.get_guest_ip_addr(sess, vms_mac) if vms_ip: # Get IP address on guest should return Null raise error.TestFail("Guest has ip address: %s" % vms_ip) sess.close() # Check dnsmasq settings if take affect in guest if guest_ipv4: check_name_ip(session) # Run bandwidth test for interface if test_qos_bandwidth: run_bandwidth_test(check_iface=True) if test_qos_remove: # Remove the bandwidth settings in network xml logging.debug("Removing network bandwidth settings...") netxml_backup.sync() vm.destroy(gracefully=False) # Should fail to start vm vm.start() if restart_error: raise error.TestFail("VM started unexpectedly") if test_ipv4_address: check_ipt_rules(check_ipv4=True) run_ip_test(session, "ipv4") if test_ipv6_address: check_ipt_rules(check_ipv6=True) run_ip_test(session, "ipv6") if test_guest_libvirt: run_guest_libvirt(session) session.close() except virt_vm.VMStartError, details: logging.info(str(details)) if start_error or restart_error: pass else: raise error.TestFail('VM Failed to start for some 
reason!') finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) for vms in vms_list: virsh.remove_domain(vms.name, "--remove-all-storage") logging.info("Restoring network...") if net_name == "default": netxml_backup.sync() else: # Destroy and undefine new created network virsh.net_destroy(net_name) virsh.net_undefine(net_name) vmxml_backup.sync()
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroied.
    5.Recover network environment.
    6.Confirm the test result.
    """
    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file",
                              "/usr/share/libvirt/networks/default.xml")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        if net_persistent:
            # persistent: define from the XML file, then start it
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            # BUGFIX: a transient network must be created with
            # 'virsh net-create'.  The previous code used virsh.create(),
            # which runs 'virsh create' and builds a transient *domain*
            # from the XML, so preparing a transient network always failed.
            virsh.net_create(net_cfg_file, ignore_status=False)
    # Align the persistent flag with what the test case asked for
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("!!!make the network persistent")
            make_net_persistent(network_name)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    # Align the active/inactive state with what the test case asked for
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug(
                "!!!destroy the network as we need to test inactive")
            virsh.net_destroy(network_name, ignore_status=False)

    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name
    status = virsh.net_destroy(net_ref, extra, uri=uri, debug=True,
                               unprivileged_user=unprivileged_user,
                               ignore_status=True).exit_status

    # Confirm the network has been destroied.
    if net_persistent:
        # a destroyed persistent network must show as inactive
        if virsh.net_state_dict()[network_name]['active']:
            status = 1
    else:
        # a destroyed transient network must disappear from net-list
        output_all = virsh.net_list("--all").stdout.strip()
        if re.search(network_name, output_all):
            status = 1
            logging.debug("transient network should not exists after destroy")

    # Recover network status to system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_cfg_file, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            make_net_persistent(network_name)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test command: virsh net-dhcp-leases

    1. Create a new network and run virsh command to check dhcp leases info.
    2. Attach an interface before or after start the domain, then check the
       dhcp leases info.
    3. Clean the environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    net_name = params.get("net_name", "default")
    net_option = params.get("net_option", "")
    status_error = "yes" == params.get("status_error", "no")
    prepare_net = "yes" == params.get("prepare_net", "yes")
    hotplug_iface = "yes" == params.get("hotplug_interface", "no")
    filter_by_mac = "yes" == params.get("filter_by_mac", "no")
    invalid_mac = "yes" == params.get("invalid_mac", "no")
    expect_msg = params.get("leases_err_msg")
    # upstream expect msg may change on new libvirt
    new_expect_msg = params.get("new_leases_err_msg")
    # NOTE(review): the values below are eval()ed into python dicts; they
    # come from the test configuration file, not from untrusted input.
    range_lease = eval(params.get("range_lease", "None"))
    host_lease = eval(params.get("host_lease", "None"))
    host = eval(params.get("host", "None"))
    invalid_lease = "yes" == params.get("invalid_lease", "no")
    blank_lease = "yes" == params.get("blank_lease", "no")
    # dhcp lease settings in the network XML need libvirt >= 6.2.0
    if (host_lease or range_lease) and not libvirt_version.version_compare(6, 2, 0):
        test.cancel(
            "Don't support: libvirt support lease setting since 6.2.0!")
    # Generate a random string as the MAC address
    nic_mac = None
    if invalid_mac:
        nic_mac = utils_misc.generate_random_string(17)
    # Command won't fail on old libvirt
    if not libvirt_version.version_compare(1, 3, 1) and invalid_mac:
        logging.debug("Reset case to positive as BZ#1261432")
        status_error = False

    def create_network():
        """
        Create a network
        """
        net_ip_addr = params.get("net_ip_addr", "192.168.200.1")
        net_ip_netmask = params.get("net_ip_netmask", "255.255.255.0")
        net_dhcp_start = params.get("net_dhcp_start", "192.168.200.2")
        net_dhcp_end = params.get("net_dhcp_end", "192.168.200.254")
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'mode': "nat"}
        range = network_xml.RangeXML()
        range.attrs = {'start': net_dhcp_start, "end": net_dhcp_end}
        ipxml = network_xml.IPXML()
        # optional per-range lease expiry setting (libvirt >= 6.2.0)
        if range_lease:
            range.lease_attrs = range_lease
        ipxml.address = net_ip_addr
        ipxml.netmask = net_ip_netmask
        ipxml.dhcp_ranges = range
        # optional static dhcp host entry with its own lease setting
        if host:
            new_host = network_xml.DhcpHostXML()
            new_host.attrs = host
            new_host.lease_attrs = host_lease
            ipxml.hosts = [new_host]
        netxml.set_ip(ipxml)
        netxml.create()

    def get_net_dhcp_leases(output):
        """
        Return the dhcp lease info in a list
        """
        leases = []
        lines = output.splitlines()
        if not lines:
            return leases
        try:
            # a column header can be up to four space-separated words
            # (e.g. "Expiry Time"), so try multi-word groups first
            pat = r"\S+\ ?\S+\ ?\S+\ ?\S+|\S+"
            keys = re.findall(pat, lines[0])
            # lines[1] is the "----" separator row; data starts at lines[2]
            for line in lines[2:]:
                values = re.findall(pat, line)
                leases.append(dict(list(zip(keys, values))))
            return leases
        except Exception:
            test.error("Fail to parse output: %s" % output)

    def check_lease_time(ex_time, duration):
        """
        Compare the expiry time from the virsh cmd output and the setting

        :param ex_time: text, the expiry time get from the net-dhcp-lease output
        :param duration: dict, the configured expiry time
        """
        now_time = datetime.now()
        # convert the lease time from str to the datetime structure
        # lease is in format like: 2021-01-18 02:15:35
        get_ex_time = datetime.strptime(ex_time, '%Y-%m-%d %H:%M:%S')
        # expiry '0' means the lease never expires, so the reported
        # expiry time must not lie in the future
        if duration['expiry'] == '0':
            if get_ex_time > now_time:
                test.fail("The expiry time is not correct!!")
        # NOTE(review): when 'unit' is absent it is defaulted to minutes but
        # the duration comparison below is then skipped entirely — confirm
        # this is intended and not a lost check.
        if 'unit' not in duration:
            duration['unit'] = 'minutes'
        else:
            # convert the configured expiry to seconds
            if duration['unit'] == 'seconds':
                dur_sec = int(duration['expiry'])
            elif duration['unit'] == 'hours':
                dur_sec = int(duration['expiry']) * 3600
            else:
                dur_sec = int(duration['expiry']) * 60
            delta = get_ex_time - now_time
            logging.debug(
                "The get_ex_time is %s, the now_time is %s, "
                "duration is %s", get_ex_time, now_time, duration)
            # allow up to 30 seconds of slack below the configured duration
            if delta > timedelta(seconds=dur_sec):
                test.fail("Get expiry time %s longer than the setting %s!!"
                          % (delta, timedelta(seconds=dur_sec)))
            elif delta < timedelta(seconds=(dur_sec - 30)):
                test.fail("Get expiry time %s shorter than the setting %s"
                          % (delta, timedelta(seconds=dur_sec)))
            else:
                logging.info("Get expected lease info.")
        return None

    def get_ip_by_mac(mac_addr, try_dhclint=False, timeout=120):
        """
        Get interface IP address by given MAC addrss.

        If try_dhclint is True, then try to allocate IP addrss for the
        interface.
        """
        session = vm.wait_for_login(login_nic_index, timeout=timeout,
                                    serial=True)

        def f():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = utils_misc.wait_for(f, 10)
            if ip_addr is None:
                iface_name = utils_net.get_linux_ifname(session, mac_addr)
                if try_dhclint:
                    session.cmd("dhclient %s" % iface_name)
                    ip_addr = utils_misc.wait_for(f, 10)
                else:
                    # No IP for the interface, just print the interface name
                    logging.warn("Find '%s' with MAC address '%s', "
                                 "but which has no IP address", iface_name,
                                 mac_addr)
        finally:
            session.close()
        return ip_addr

    def check_net_lease(net_leases, expected_find=True):
        """
        Check the dhcp lease info.
        """
        if not net_leases:
            if expected_find:
                test.fail("Lease info is empty")
            else:
                logging.debug("No dhcp lease info find as expected")
        else:
            if not expected_find:
                test.fail("Find unexpected dhcp lease info: %s" % net_leases)
        find_mac = False
        for net_lease in net_leases:
            net_mac = net_lease['MAC address']
            # drop the trailing 3 chars — assumes a "/xx" two-digit
            # prefix length suffix in the lease IP column; TODO confirm
            net_ip = net_lease['IP address'][:-3]
            expiry_time = net_lease['Expiry Time']
            if vm_xml.VMXML.get_iface_by_mac(vm_name, net_mac):
                find_mac = True
                logging.debug("Find '%s' in domain XML", net_mac)
            else:
                logging.debug("Not find '%s' in domain XML", net_mac)
                continue
            iface_ip = get_ip_by_mac(net_mac)
            if iface_ip and iface_ip != net_ip:
                test.fail("Address '%s' is not expected" % iface_ip)
            #check if lease time is correct
            if libvirt_version.version_compare(6, 2, 0):
                if host_lease and net_mac == host['mac']:
                    check_lease_time(expiry_time, host_lease)
                elif range_lease:
                    check_lease_time(expiry_time, range_lease)
        if expected_find and not find_mac:
            test.fail("No matched MAC address")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if vm.is_alive():
        vm.destroy(gracefully=False)
    login_nic_index = 0
    new_nic_index = 0
    # Cleanup dirty dnsmaq, firstly get all network,and destroy all networks except
    # default
    net_state = virsh.net_state_dict(only_names=True)
    logging.debug(
        "current networks: %s, destroy and undefine networks "
        "except default!", net_state)
    for net in net_state:
        if net != "default":
            virsh.net_destroy(net)
            virsh.net_undefine(net)
    # kill any leftover dnsmasq process that does not belong to 'default'
    cmd = "ps aux|grep dnsmasq|grep -v grep | grep -v default | awk '{print $2}'"
    pid_list = process.run(cmd, shell=True).stdout_text.strip().splitlines()
    logging.debug(pid_list)
    for pid in pid_list:
        utils_misc.safe_kill(pid, signal.SIGKILL)
    try:
        # Create new network
        if prepare_net:
            nets_old = virsh.net_state_dict()
            if net_name in list(nets_old.keys()):
                virsh.net_destroy(net_name)
                virsh.net_undefine(net_name)
            create_network()
        nets = virsh.net_state_dict()
        if net_name not in list(nets.keys()) and not status_error:
            test.error("Not find network '%s'" % net_name)
        # the guest is down here, so no lease entry is expected yet
        expected_find = False
        result = virsh.net_dhcp_leases(net_name,
                                       mac=nic_mac,
                                       options=net_option,
                                       debug=True,
                                       ignore_status=True)
        utlv.check_exit_status(result, status_error)
        lease = get_net_dhcp_leases(result.stdout.strip())
        check_net_lease(lease, expected_find)
        if not status_error:
            # reuse the statically configured MAC when a dhcp host entry
            # is under test, otherwise generate a fresh one
            if host:
                iface_mac = host['mac']
            else:
                iface_mac = utils_net.generate_mac_address_simple()
            if filter_by_mac:
                nic_mac = iface_mac
            op = "--type network --model virtio --source %s --mac %s" \
                 % (net_name, iface_mac)
            nic_params = {'mac': iface_mac, 'nettype': 'bridge',
                          'ip_version': 'ipv4'}
            login_timeout = 120
            if not hotplug_iface:
                # coldplug: attach persistently, then boot
                op += " --config"
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                vm.start()
                new_nic_index = vm.get_nic_index_by_mac(iface_mac)
                if new_nic_index > 0:
                    login_nic_index = new_nic_index
            else:
                vm.start()
                # wait for VM start before hotplug interface
                vm.wait_for_serial_login()
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                # As VM already started, so the login timeout could be
                # shortened
                login_timeout = 10
            new_interface_ip = get_ip_by_mac(iface_mac, try_dhclint=True,
                                             timeout=login_timeout)
            # only expect a lease entry when the interface really got an IP
            if new_interface_ip:
                expected_find = True
            result = virsh.net_dhcp_leases(net_name,
                                           mac=nic_mac,
                                           debug=False,
                                           ignore_status=True)
            utlv.check_exit_status(result, status_error)
            lease = get_net_dhcp_leases(result.stdout.strip())
            check_net_lease(lease, expected_find)
        else:
            if expect_msg:
                utlv.check_result(result, expect_msg.split(';'))
    except LibvirtXMLError as e:
        # negative lease-setting cases: network creation is expected to fail
        if status_error and invalid_lease:
            if blank_lease and libvirt_version.version_compare(7, 1, 0):
                expect_msg = new_expect_msg
            if expect_msg not in e.details:
                test.fail("Network create fail unexpected: %s" % e.details)
            else:
                logging.debug("Network create fail expected: %s", e.details)
    finally:
        # Delete the new attached interface
        if new_nic_index > 0:
            vm.del_nic(new_nic_index)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        if prepare_net:
            virsh.net_destroy(net_name)
def run(test, params, env):
    """
    Test command: virsh net-dhcp-leases

    1. Create a new network and run virsh command to check dhcp leases info.
    2. Attach an interface before or after start the domain, then check the
       dhcp leases info.
    3. Clean the environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    net_name = params.get("net_name", "default")
    net_option = params.get("net_option", "")
    status_error = "yes" == params.get("status_error", "no")
    prepare_net = "yes" == params.get("prepare_net", "yes")
    hotplug_iface = "yes" == params.get("hotplug_interface", "no")
    filter_by_mac = "yes" == params.get("filter_by_mac", "no")
    invalid_mac = "yes" == params.get("invalid_mac", "no")
    expect_msg = params.get("leases_err_msg")
    # Generate a random string as the MAC address
    nic_mac = None
    if invalid_mac:
        nic_mac = utils_misc.generate_random_string(17)
    # Command won't fail on old libvirt
    if not libvirt_version.version_compare(1, 3, 1) and invalid_mac:
        logging.debug("Reset case to positive as BZ#1261432")
        status_error = False

    def create_network():
        """
        Create a network
        """
        net_ip_addr = params.get("net_ip_addr", "192.168.200.1")
        net_ip_netmask = params.get("net_ip_netmask", "255.255.255.0")
        net_dhcp_start = params.get("net_dhcp_start", "192.168.200.2")
        net_dhcp_end = params.get("net_dhcp_end", "192.168.200.254")
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'mode': "nat"}
        ipxml = network_xml.IPXML()
        ipxml.address = net_ip_addr
        ipxml.netmask = net_ip_netmask
        ipxml.dhcp_ranges = {'start': net_dhcp_start, "end": net_dhcp_end}
        netxml.set_ip(ipxml)
        netxml.create()

    def get_net_dhcp_leases(output):
        """
        Return the dhcp lease info in a list
        """
        leases = []
        lines = output.splitlines()
        if not lines:
            return leases
        try:
            # a column header can be up to four space-separated words
            # (e.g. "IP address"), so try multi-word groups first
            pat = r"\S+\ ?\S+\ ?\S+\ ?\S+|\S+"
            keys = re.findall(pat, lines[0])
            # lines[1] is the "----" separator row; data starts at lines[2]
            for line in lines[2:]:
                values = re.findall(pat, line)
                leases.append(dict(list(zip(keys, values))))
            return leases
        except Exception:
            test.error("Fail to parse output: %s" % output)

    def get_ip_by_mac(mac_addr, try_dhclint=False, timeout=120):
        """
        Get interface IP address by given MAC addrss.

        If try_dhclint is True, then try to allocate IP addrss for the
        interface.
        """
        session = vm.wait_for_login(login_nic_index, timeout=timeout,
                                    serial=True)

        def f():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = utils_misc.wait_for(f, 10)
            if ip_addr is None:
                iface_name = utils_net.get_linux_ifname(session, mac_addr)
                if try_dhclint:
                    session.cmd("dhclient %s" % iface_name)
                    ip_addr = utils_misc.wait_for(f, 10)
                else:
                    # No IP for the interface, just print the interface name
                    logging.warn("Find '%s' with MAC address '%s', "
                                 "but which has no IP address", iface_name,
                                 mac_addr)
        finally:
            session.close()
        return ip_addr

    def check_net_lease(net_leases, expected_find=True):
        """
        Check the dhcp lease info.
        """
        if not net_leases:
            if expected_find:
                test.fail("Lease info is empty")
            else:
                logging.debug("No dhcp lease info find as expected")
        else:
            if not expected_find:
                test.fail("Find unexpected dhcp lease info: %s" % net_leases)
        find_mac = False
        for net_lease in net_leases:
            net_mac = net_lease['MAC address']
            # drop the trailing 3 chars — assumes a "/xx" two-digit
            # prefix length suffix in the lease IP column; TODO confirm
            net_ip = net_lease['IP address'][:-3]
            if vm_xml.VMXML.get_iface_by_mac(vm_name, net_mac):
                find_mac = True
                logging.debug("Find '%s' in domain XML", net_mac)
            else:
                logging.debug("Not find '%s' in domain XML", net_mac)
                continue
            iface_ip = get_ip_by_mac(net_mac)
            if iface_ip and iface_ip != net_ip:
                test.fail("Address '%s' is not expected" % iface_ip)
        if expected_find and not find_mac:
            test.fail("No matched MAC address")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if vm.is_alive():
        vm.destroy(gracefully=False)
    login_nic_index = 0
    new_nic_index = 0
    # Cleanup dirty dnsmaq, firstly get all network,and destroy all networks except
    # default
    net_state = virsh.net_state_dict(only_names=True)
    logging.debug(
        "current networks: %s, destroy and undefine networks "
        "except default!", net_state)
    for net in net_state:
        if net != "default":
            virsh.net_destroy(net)
            virsh.net_undefine(net)
    # kill any leftover dnsmasq process that does not belong to 'default'
    cmd = "ps aux|grep dnsmasq|grep -v grep | grep -v default | awk '{print $2}'"
    pid_list = process.run(cmd, shell=True).stdout_text.strip().splitlines()
    logging.debug(pid_list)
    for pid in pid_list:
        utils_misc.safe_kill(pid, signal.SIGKILL)
    # Create new network
    if prepare_net:
        create_network()
    nets = virsh.net_state_dict()
    if net_name not in list(nets.keys()) and not status_error:
        test.error("Not find network '%s'" % net_name)
    # the guest is down here, so no lease entry is expected yet
    expected_find = False
    try:
        result = virsh.net_dhcp_leases(net_name,
                                       mac=nic_mac,
                                       options=net_option,
                                       debug=True,
                                       ignore_status=True)
        utlv.check_exit_status(result, status_error)
        lease = get_net_dhcp_leases(result.stdout.strip())
        check_net_lease(lease, expected_find)
        if not status_error:
            iface_mac = utils_net.generate_mac_address_simple()
            if filter_by_mac:
                nic_mac = iface_mac
            op = "--type network --model virtio --source %s --mac %s" \
                 % (net_name, iface_mac)
            nic_params = {'mac': iface_mac, 'nettype': 'bridge',
                          'ip_version': 'ipv4'}
            login_timeout = 120
            if not hotplug_iface:
                # coldplug: attach persistently, then boot
                op += " --config"
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                vm.start()
                new_nic_index = vm.get_nic_index_by_mac(iface_mac)
                if new_nic_index > 0:
                    login_nic_index = new_nic_index
            else:
                vm.start()
                # wait for VM start before hotplug interface
                vm.wait_for_serial_login()
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                # As VM already started, so the login timeout could be
                # shortened
                login_timeout = 10
            new_interface_ip = get_ip_by_mac(iface_mac, try_dhclint=True,
                                             timeout=login_timeout)
            # Allocate IP address for the new interface may fail, so only
            # check the result if get new IP address
            if new_interface_ip:
                expected_find = True
            result = virsh.net_dhcp_leases(net_name,
                                           mac=nic_mac,
                                           debug=False,
                                           ignore_status=True)
            utlv.check_exit_status(result, status_error)
            lease = get_net_dhcp_leases(result.stdout.strip())
            check_net_lease(lease, expected_find)
        else:
            if expect_msg:
                utlv.check_result(result, expect_msg.split(';'))
    finally:
        # Delete the new attached interface
        if new_nic_index > 0:
            vm.del_nic(new_nic_index)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        if prepare_net:
            virsh.net_destroy(net_name)
def run_virsh_net_dumpxml(test, params, env):
    """
    Test command: virsh net-dumpxml.

    The command outputs a network definition as an XML dump to stdout.
    1. Read all parameters from the config file.
    2. If the test case wants an inactive network, destroy it first.
    3. Run virsh net-dumpxml and validate the produced XML.
    4. Restore the original network state.
    5. Judge the outcome against status_error.
    """
    status_error = params.get("status_error", "no")
    net_ref = params.get("net_dumpxml_net_ref")
    net_name = params.get("net_dumpxml_network", "default")
    net_status = params.get("net_dumpxml_network_status", "active")
    xml_file_name = params.get("net_dumpxml_xml_file", "default.xml")
    extra = params.get("net_dumpxml_extra", "")
    dump_path = os.path.join(test.tmpdir, xml_file_name)

    # Resolve the reference passed on the virsh command line.
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(net_name).stdout.strip()
    elif net_ref == "name":
        net_ref = net_name

    # Remember the current state so it can be restored afterwards.
    current_state = ("active"
                     if virsh.net_state_dict()[net_name]['active']
                     else "inactive")
    if not virsh.net_state_dict()[net_name]['persistent']:
        raise error.TestError("Network is transient!")

    # The network only has to be brought down when the case wants it
    # inactive and it is currently running.
    need_destroy = net_status == "inactive" and current_state == "active"
    try:
        if need_destroy:
            if virsh.net_destroy(net_name,
                                 ignore_status=True).exit_status != 0:
                raise error.TestError("Network destroied failed!")

        result = virsh.net_dumpxml(net_ref, extra, dump_path,
                                   ignore_status=True)
        status = result.exit_status
        err = result.stderr.strip()
        # Validate the dumped file against the libvirt network schema.
        valid_s = utils.run("virt-xml-validate %s network" % dump_path,
                            ignore_status=True).exit_status

        # Any extra option unknown to net-dumpxml turns the case negative.
        if "--" in extra:
            for option in extra.split("--"):
                option = option.strip()
                if not option:
                    continue
                if not virsh.has_command_help_match("net-dumpxml", option):
                    status_error = "yes"
                    break
    finally:
        # Bring the network back up if it was destroyed above.
        if need_destroy:
            if virsh.net_start(net_name,
                               ignore_status=True).exit_status != 0:
                raise error.TestError("Network started failed!")

    # Judge the result.
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
        if err == "":
            raise error.TestFail("The wrong command has no error outputed!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command!")
        if valid_s != 0:
            raise error.TestFail("Command output is invalid!")
    else:
        raise error.TestError("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test mtu feature from virtual network
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')

    def set_network(size, net='default'):
        """
        Set mtu size to a certain network
        """
        logging.info('Set mtu size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))

    def set_interface(mtu_size='', source_network='default',
                      iface_type='network', iface_model='virtio'):
        """
        Set mtu size to a certain interface
        """
        # openvswitch interfaces are expressed as type 'bridge' in the
        # domain XML, with an extra <virtualport type='openvswitch'/>
        interface_type = 'bridge' if iface_type in ('bridge', 'openvswitch') else iface_type
        iface_dict = {
            'type': interface_type,
            'source': "{'%s': '%s'}" % (interface_type, source_network),
            'model': iface_model
        }
        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})
        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})
        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)

    def get_default_if():
        """
        Get default interface that is using by vm
        """
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        # pick the first physical interface with link up
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run('ethtool %s' % iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on host for test
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        return m_net.xml

    def create_iface(iface_type, **kwargs):
        """
        Create a interface to be attached to vm
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'],
                              'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface

    def check_mtu(mtu_size, qemu=False):
        """
        Check if mtu meets expectation on host
        """
        # collect failures into one message instead of failing fast
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        ifconfig_info = process.run('ifconfig|grep mtu|grep %s' % dev,
                                    shell=True, verbose=True).stdout_text
        if 'mtu %s' % mtu_size in ifconfig_info:
            logging.info('PASS on ifconfig check for vnet.')
        else:
            error += 'Fail on ifconfig check for vnet.'
        if qemu:
            # the mtu should also show up as host_mtu on the qemu cmdline
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True, verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size):
        """
        Check if mtu meets expectations in vm
        """
        session = fn_login()
        check_cmd = 'ifconfig'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        # NOTE(review): if either backup below raises, the finally block
        # references bk_xml/bk_netxml before assignment — consider moving
        # the backups above the try.
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
        if add_pkg:
            add_pkg = add_pkg.split()
            utils_package.package_install(add_pkg)
            if 'openvswitch' in add_pkg:
                br = 'ovsbr0' + utils_misc.generate_random_string(3)
                process.run('systemctl start openvswitch.service',
                            shell=True, verbose=True)
                process.run('ovs-vsctl add-br %s' % br, shell=True,
                            verbose=True)
                process.run('ovs-vsctl show', shell=True, verbose=True)
        if not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(
                        bridge_name, net_type,
                        bridge_name=br
                    )
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)
            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
            # when both net and iface carry an mtu, give the iface half of
            # it so the two values can be told apart
            if mtu_type == 'network' and with_iface:
                mtu_size = str(int(mtu_size)//2)
                iface_mtu = mtu_size
            source_net = bridge_name if net_type in ('bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu, source_network=source_net,
                          iface_type=iface_type, iface_model=model)
            vm.start()
            # bridged guests have no usable network login path, use serial
            vm_login = vm.wait_for_serial_login if net_type in ('bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = True if mtu_type == 'interface' else False

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network', source_net='default',
                                         mtu=mtu_size, model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + '.save')
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            check_mtu_in_vm(vm_login, mtu_size)
            vm_login(timeout=60).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name, 'network %s' % params['mac'],
                                       debug=True)
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail('Failed to detach interface with mtu after save-restore')
        else:
            hotplug = 'yes' == params.get('hotplug', 'False')
            if check == 'net_update':
                result = virsh.net_update(
                    DEFAULT_NET, 'modify', 'mtu',
                    '''"<mtu size='%s'/>"''' % mtu_size,
                    debug=True
                )
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = 'direct-macvtap' + utils_misc.generate_random_string(3)

                # Test mtu in different type of network
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if, mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(
                            bridge_name, 'bridge',
                            mtu=mtu_size, bridge_name=br
                        )
                    if check == 'ovswitch_net':
                        test_net = create_network_xml(
                            bridge_name, 'openvswitch',
                            mtu=mtu_size, bridge_name=br
                        )
                    # choose net-create/net-define by the test case id
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network', source_net=macv_name,
                                             mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct', base_if=base_if,
                                             mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network', source_net='default',
                                     mtu=mtu_size)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])
    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            # NOTE(review): for the macvtap cases the created network is
            # named macv_name, not bridge_name — confirm this cleanup
            # actually removes it.
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)
def run(test, params, env):
    """
    Test command: virsh net-autostart.

    Defines a throw-away persistent bridge network, toggles autostart
    (or --disable) on it, restarts libvirtd to emulate the effect of a
    host reboot, and verifies the resulting active/autostart state.

    :param test: avocado test object (cancel/error/fail reporting).
    :param params: test parameter dict (net_autostart_* keys, see below).
    :param env: test environment object (unused directly here).
    """
    # Gather test parameters
    status_error = "yes" == params.get("status_error", "no")
    net_ref = params.get("net_autostart_net_ref", "netname")
    disable = "yes" == params.get("net_autostart_disable", "no")
    extra = params.get("net_autostart_extra", "")  # extra cmd-line params.
    net_name = params.get("net_autostart_net_name", "autotest")
    net_transient = "yes" == params.get("net_transient", "no")
    readonly = ("yes" == params.get("readonly", "no"))
    sim_reboot = "yes" == params.get("sim_reboot", "no")

    # Prepare environment and record current net_state_dict so the state
    # can be inspected later; the test network itself is removed in finally.
    backup = network_xml.NetworkXML.new_all_networks_dict()
    backup_state = virsh.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Generate our own bridge
    # First check if a bridge of this name already exists
    try:
        _ = backup[net_name]
    except (KeyError, AttributeError):
        pass  # Not found - good
    else:
        test.cancel("Found network bridge '%s' - skipping" % net_name)

    # Define a very bare bones bridge, don't provide UUID - use whatever
    # libvirt ends up generating.  We need to define a persistent network
    # since we'll be looking to restart libvirtd as part of this test.
    #
    # This test cannot use the 'default' bridge (virbr0) since undefining
    # it causes issues for libvirtd restart since it's expected that a
    # default network is defined
    #
    temp_bridge = """
<network>
   <name>%s</name>
   <bridge name="vir%sbr0"/>
</network>
""" % (net_name, net_name)

    try:
        test_xml = network_xml.NetworkXML(network_name=net_name)
        test_xml.xml = temp_bridge
        test_xml.define()
    except xcepts.LibvirtXMLError as detail:
        test.error("Failed to define a test network.\n"
                   "Detail: %s." % detail)

    # To guarantee cleanup will be executed
    try:
        # Run test case
        # Get the updated list and make sure our new bridge exists
        currents = network_xml.NetworkXML.new_all_networks_dict()
        current_state = virsh.net_state_dict()
        logging.debug("Current network(s): %s", current_state)
        try:
            testbr_xml = currents[net_name]
        except (KeyError, AttributeError):
            test.error("Did not find newly defined bridge '%s'" % net_name)

        # Prepare default property for network
        # Transient network can not be set autostart
        # So confirm persistent is true for test
        testbr_xml['persistent'] = True

        # Set network to inactive
        # Since we do not reboot host to check(instead of restarting libvirtd)
        # If default network is active, we cannot check "--disable".
        # Because active network will not be inactive after restarting libvirtd
        # even we set autostart to False. While inactive network will be active
        # after restarting libvirtd if we set autostart to True
        testbr_xml['active'] = False

        # Prepare options and arguments
        if net_ref == "netname":
            net_ref = testbr_xml.name
        elif net_ref == "netuuid":
            net_ref = testbr_xml.uuid

        if disable:
            net_ref += " --disable"

        if net_transient:
            # make the network to transient and active
            ret = virsh.net_start(net_name)
            libvirt.check_exit_status(ret)
            ret = virsh.net_undefine(net_name)
            libvirt.check_exit_status(ret)
            logging.debug("after make it transistent: %s"
                          % virsh.net_state_dict())

        # Run test case
        # Use function in virsh module directly for both normal and error test
        result = virsh.net_autostart(net_ref, extra, readonly=readonly)
        status = result.exit_status
        if status:
            logging.debug("autostart cmd return:\n%s" % result.stderr.strip())
        else:
            logging.debug("set autostart succeed: %s" % virsh.net_state_dict())

        # Check if autostart or disable is successful with libvirtd restart.
        # TODO: Since autostart is designed for host reboot,
        # we'd better check it with host reboot.
        # Since libvirt 5.6.0 a marker file records that autostart already
        # ran; removing it simulates a fresh host boot for the restart below.
        autostart_file = '/var/run/libvirt/network/autostarted'
        check_version = libvirt_version.version_compare(5, 6, 0)
        if check_version and os.path.exists(autostart_file):
            logging.debug("the sim_reboot is %s" % sim_reboot)
            if sim_reboot:
                os.unlink(autostart_file)
        utils_libvirtd.libvirtd_restart()

        # Reopen testbr_xml - the old instance is stale after the restart
        currents = network_xml.NetworkXML.new_all_networks_dict()
        current_state = virsh.net_state_dict()
        logging.debug("After libvirtd reboot, current network(s): %s",
                      current_state)
        testbr_xml = currents[net_name]
        is_active = testbr_xml['active']
        # undefine the persistent&autostart network,
        # if "autostart" should change to 'no"
        if not disable and not net_transient:
            logging.debug("undefine the persistent/autostart network:")
            ret = virsh.net_undefine(net_name)
            libvirt.check_exit_status(ret)
            # after undefine, the network can not be "autostart"
            if net_name in virsh.net_list("").stdout.strip():
                current_state = virsh.net_state_dict()[net_name]
                logging.debug("Current network(s): %s", current_state)
                net_autostart_now = current_state['autostart']
                if not status_error and not disable and net_autostart_now:
                    test.fail("transient network can not be autostart")
    finally:
        # Remove the test network whether it ended up persistent, active,
        # or both; ignore networks we did not create.
        persistent_net = virsh.net_list("--persistent --all").stdout.strip()
        if net_name in persistent_net:
            virsh.net_undefine(net_name)
        active_net = virsh.net_list("").stdout.strip()
        if net_name in active_net:
            virsh.net_destroy(net_name)

    # Check Result
    if status_error:
        if status == 0:
            test.fail("Run successfully with wrong command!")
    else:
        if disable:
            if status or is_active:
                test.fail("Disable autostart failed.")
        else:
            if status:
                test.fail("The virsh cmd return error when enable autostart!")
            # If host reboot(sim_reboot=True), the network should be active
            # If host do not reboot, restart libvirtd will not start inactive
            # autostart network after libvirt 5.6.0
            if sim_reboot:
                if not is_active:
                    test.fail("Set network autostart failed.")
            else:
                if check_version and is_active:
                    test.fail("net turn active with libvirtd restart"
                              " without host reboot!")
def run_virsh_net_define_undefine(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result

    :param test: avocado test object.
    :param params: test parameter dict (net_define_undefine_* keys).
    :param env: test environment object (unused directly here).
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    # NOTE(review): this is a raw string, so any non-empty value (even "no")
    # is truthy below - confirm whether "yes"-comparison was intended.
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        raise error.TestNAError("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)

    if remove_existing:
        for netxml in backup.values():
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    # Map the requested scenario onto concrete command arguments.
    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    try:
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # If defining network succeed, then trying to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network
        undefine_result = virsh.net_undefine(undefine_options, undefine_extra,
                                             **virsh_dargs)
        if trans_ref != "define":
            logging.debug(undefine_result)
        undefine_status = undefine_result.exit_status

    finally:
        # Recover environment: nuke anything left over from the test ...
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in leftovers.values():
            netxml.orbital_nuclear_strike()

        # ... then recover from backup
        for netxml in backup.values():
            # If network is transient, just recreate it
            if ((not backup_state[netxml.name]['persistent'])
                    and backup_state[netxml.name]['active']):
                netxml.create()
                continue
            # autostart = True requires persistent = True first!
            for state in ['persistent', 'autostart', 'active']:
                try:
                    netxml[state] = backup_state[netxml.name][state]
                # BUGFIX: was Python-2-only "except ..., detail:" syntax,
                # which is a SyntaxError on Python 3.
                except xcepts.LibvirtXMLError as detail:
                    fail_flag = 1
                    result_info.append(str(detail))

        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()

        # Done with file, cleanup
        del test_xml
        del testnet_xml
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroied.
    5.Recover network environment.
    6.Confirm the test result.

    :param test: avocado test object.
    :param params: test parameter dict (net_destroy_* keys and ACL params).
    :param env: test environment object (unused directly here).
    """
    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")
    net_persistent = "yes" == params.get("net_persistent", "yes")
    net_cfg_file = params.get("net_cfg_file",
                              "/usr/share/libvirt/networks/default.xml")
    check_libvirtd = "yes" == params.get("check_libvirtd")
    vm_defined = "yes" == params.get("vm_defined")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    output_all = virsh.net_list("--all").stdout.strip()
    # prepare the network status: active, persistent
    if not re.search(network_name, output_all):
        if net_persistent:
            virsh.net_define(net_cfg_file, ignore_status=False)
            virsh.net_start(network_name, ignore_status=False)
        else:
            virsh.create(net_cfg_file, ignore_status=False)
    if net_persistent:
        if not virsh.net_state_dict()[network_name]['persistent']:
            logging.debug("make the network persistent...")
            make_net_persistent(network_name)
    else:
        if virsh.net_state_dict()[network_name]['persistent']:
            virsh.net_undefine(network_name, ignore_status=False)
    if not virsh.net_state_dict()[network_name]['active']:
        if network_status == "active":
            virsh.net_start(network_name, ignore_status=False)
    else:
        if network_status == "inactive":
            logging.debug("destroy network as we need to test inactive"
                          " network...")
            virsh.net_destroy(network_name, ignore_status=False)
    logging.debug("After prepare: %s" % virsh.net_state_dict())

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    if check_libvirtd:
        # In this mode we additionally verify libvirtd does not crash when
        # the network of a running guest is destroyed.
        vm_name = params.get("main_vm")
        if virsh.is_alive(vm_name):
            virsh.destroy(vm_name)
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml
        # make sure there is interface with source network as default
        iface_devices = vmxml.get_devices(device_type="interface")
        has_default_net = False
        for iface in iface_devices:
            source = iface.get_source()
            if 'network' in source.keys() and source['network'] == 'default':
                has_default_net = True
                break
            elif 'bridge' in source.keys() and source['bridge'] == 'virbr0':
                has_default_net = True
                break
        if not has_default_net:
            options = "network default --current"
            virsh.attach_interface(vm_name, options, ignore_status=False)
        try:
            if vm_defined:
                ret = virsh.start(vm_name)
            else:
                logging.debug("undefine the vm, then create the vm...")
                vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                virsh.undefine(vm_name)
                ret = virsh.create(vmxml.xml)
                logging.debug(ret.stdout)
            # check the create or start cmd status
            utils_test.libvirt.check_exit_status(
                ret, expect_error=(network_status != 'active'))
            # status acts as the overall pass/fail accumulator below;
            # start pessimistic and clear it on success.
            status = 1

            if status_error != 'yes':
                cmd = "ps -ef | grep /usr/sbin/libvirtd | grep -v grep"
                # record the libvirt pid then destroy network
                libvirtd_pid = process.run(
                    cmd, shell=True).stdout_text.strip().split()[1]
                ret = virsh.net_destroy(net_ref, extra, uri=uri, debug=True,
                                        unprivileged_user=unprivileged_user,
                                        ignore_status=True)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                # check_libvirtd pid no change
                result = check_libvirtd_restart(libvirtd_pid, cmd)
                if result:
                    test.fail("libvirtd crash after destroy network!")
                    status = 1
                else:
                    logging.debug("libvirtd do not crash after destroy"
                                  " network!")
                    status = 0
                # destroy vm, check libvirtd pid no change
                ret = virsh.destroy(vm_name)
                utils_test.libvirt.check_exit_status(ret, expect_error=False)
                result = check_libvirtd_restart(libvirtd_pid, cmd)
                if result:
                    test.fail("libvirtd crash after destroy vm!")
                    status = 1
                else:
                    logging.debug("libvirtd do not crash after destroy vm!")
                    status = 0
        finally:
            # Restore the original guest definition regardless of outcome.
            if not vm_defined:
                vmxml_backup.define()
            vmxml_backup.sync()
    else:
        readonly = (params.get("net_destroy_readonly", "no") == "yes")
        status = virsh.net_destroy(net_ref, extra, uri=uri,
                                   readonly=readonly, debug=True,
                                   unprivileged_user=unprivileged_user,
                                   ignore_status=True).exit_status

    # Confirm the network has been destroied.
    if net_persistent:
        if virsh.net_state_dict()[network_name]['active']:
            status = 1
    else:
        output_all = virsh.net_list("--all").stdout.strip()
        if re.search(network_name, output_all):
            status = 1
            logging.debug("transient network should not exists after destroy")

    # Recover network status to system default status
    try:
        if network_name not in virsh.net_state_dict():
            virsh.net_define(net_cfg_file, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['active']:
            virsh.net_start(network_name, ignore_status=False)
        if not virsh.net_state_dict()[network_name]['persistent']:
            make_net_persistent(network_name)
        if not virsh.net_state_dict()[network_name]['autostart']:
            virsh.net_autostart(network_name, ignore_status=False)
    except process.CmdError:
        test.error("Recover network status failed!")
    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test command: virsh net-dumpxml.

    This command can output the network information as an XML dump to stdout.
    1.Get all parameters from config file.
    2.If test case's network status is inactive, destroy it.
    3.Perform virsh net-dumpxml operation.
    4.Recover test environment(network status).
    5.Confirm the test result.

    :param test: avocado test object.
    :param params: test parameter dict (net_dumpxml_* keys and ACL params).
    :param env: test environment object (unused directly here).
    """
    status_error = params.get("status_error", "no")
    net_ref = params.get("net_dumpxml_net_ref")
    net_name = params.get("net_dumpxml_network", "default")
    net_status = params.get("net_dumpxml_network_status", "active")
    # NOTE(review): "xml_flie" looks like a typo for "xml_file" (local only,
    # so harmless; renaming would be a separate cleanup).
    xml_flie = params.get("net_dumpxml_xml_file", "default.xml")
    extra = params.get("net_dumpxml_extra", "")
    # NOTE(review): this local may shadow an imported network_xml module -
    # inside this function it is only the dump-file path.
    network_xml = os.path.join(test.tmpdir, xml_flie)

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(net_name).stdout.strip()
    elif net_ref == "name":
        net_ref = net_name

    net_status_current = "active"
    if not virsh.net_state_dict()[net_name]['active']:
        net_status_current = "inactive"
    if not virsh.net_state_dict()[net_name]['persistent']:
        test.error("Network is transient!")
    try:
        # Put the network into the state the case asks for before dumping.
        if net_status == "inactive" and net_status_current == "active":
            status_destroy = virsh.net_destroy(
                net_name, ignore_status=True).exit_status
            if status_destroy != 0:
                test.error("Network destroied failed!")

        virsh_dargs = {'ignore_status': True}
        if params.get('setup_libvirt_polkit') == 'yes':
            virsh_dargs['unprivileged_user'] = unprivileged_user
            virsh_dargs['uri'] = uri
        result = virsh.net_dumpxml(net_ref, extra, network_xml,
                                   **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()
        # Validate the dumped XML against the network schema.
        xml_validate_cmd = "virt-xml-validate %s network" % network_xml
        valid_s = process.run(xml_validate_cmd, ignore_status=True,
                              shell=True).exit_status

        # Check option valid or not.
        if extra.find("--") != -1:
            options = extra.split("--")
            for option in options:
                if option.strip() == "":
                    continue
                if not virsh.has_command_help_match(
                        "net-dumpxml", option.strip()) and\
                        status_error == "no":
                    test.cancel("The current libvirt version"
                                " doesn't support '%s' option"
                                % option.strip())
    finally:
        # Recover network to its pre-test active state.
        if net_status == "inactive" and net_status_current == "active":
            status_start = virsh.net_start(
                net_name, ignore_status=True).exit_status
            if status_start != 0:
                test.error("Network started failed!")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
        if err == "":
            test.fail("The wrong command has no error outputed!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command!")
        if valid_s != 0:
            test.fail("Command output is invalid!")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result

    :param test: avocado test object.
    :param params: test parameter dict (net_define_undefine_* keys,
                   check_states, net_persistent, net_active, ACL params).
    :param env: test environment object (unused directly here).
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    # NOTE(review): raw string, so any non-empty value (even "no") is truthy
    # below - confirm whether a "yes"-comparison was intended.
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)

    if remove_existing:
        for netxml in list(backup.values()):
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    # Map the requested scenario onto concrete command arguments.
    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        # The unprivileged user must be able to read the temporary XML.
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    try:
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # Check network states: a freshly defined network must be
        # persistent, inactive, and not autostarted.
        if check_states and not define_status:
            net_state = virsh_instance.net_state_dict()
            if (net_state[net_name]['active']
                    or net_state[net_name]['autostart']
                    or not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "defined netowrk: %s" % str(net_state))

        # If defining network succeed, then trying to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Check network states for normal test
        if check_states and not status_error:
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active']
                    or net_state[net_name]['autostart']
                    or not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "started netowrk: %s" % str(net_state))
            # Try to set autostart
            virsh.net_autostart(net_name, **virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if not net_state[net_name]['autostart']:
                fail_flag = 1
                result_info.append("Failed to set autostart for network %s"
                                   % net_name)
            # Restart libvirtd and check state
            # Close down persistent virsh session before libvirtd restart
            if hasattr(virsh_instance, 'close_session'):
                virsh_instance.close_session()
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            # Need to redefine virsh_instance after libvirtd restart
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active']
                    or not net_state[net_name]['autostart']):
                fail_flag = 1
                result_info.append("Found wrong network state after restarting"
                                   " libvirtd: %s" % str(net_state))
            logging.debug("undefine network:")
            # prepare the network status
            if not net_persistent:
                virsh.net_undefine(net_name, ignore_status=False)
            if not net_active:
                virsh.net_destroy(net_name, ignore_status=False)
            undefine_status = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs).exit_status
            net_state = virsh_instance.net_state_dict()
            if net_persistent:
                if undefine_status:
                    fail_flag = 1
                    result_info.append("undefine should succeed but failed")
                if net_active:
                    if (not net_state[net_name]['active']
                            or net_state[net_name]['autostart']
                            or net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "undefined netowrk: %s"
                                           % str(net_state))
                else:
                    if net_name in net_state:
                        fail_flag = 1
                        result_info.append(
                            "Transient network should not exists "
                            "after undefine : %s" % str(net_state))
            else:
                if not undefine_status:
                    fail_flag = 1
                    result_info.append(
                        "undefine transient network should fail "
                        "but succeed: %s" % str(net_state))
        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network (the check_states path already did so above)
        if not check_states:
            undefine_result = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs)
            if trans_ref != "define":
                logging.debug(undefine_result)
            undefine_status = undefine_result.exit_status

    finally:
        # Recover environment: nuke anything left over from the test ...
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()

        # ... then recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])

        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()

        # Done with file, cleanup
        del test_xml
        del testnet_xml

    # Check status_error
    # If fail_flag is set, it must be transaction test.
    if fail_flag:
        # BUGFIX: was test.fail("...failed:%s", result_info) - Test.fail()
        # takes a single message, so the extra positional argument raised
        # TypeError instead of reporting the collected failures.
        test.fail("Define network for transaction test "
                  "failed:%s" % result_info)

    # The logic to check result:
    # status_error&only undefine:it is negative undefine test only
    # status_error&(no undefine):it is negative define test only
    # (not status_error)&(only undefine):it is positive transaction test.
    # (not status_error)&(no undefine):it is positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Define an unexpected network, "
                              "and start it successfully.")
                else:
                    test.fail("Define an unexpected network, "
                              "but start it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Define network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but start it failed.")
def network_hook():
    """
    Check network hooks.

    Closure over the enclosing test's scope: relies on ``params``, ``vm``,
    ``vm_name``, ``machine_type``, ``hook_script``, ``hook_log``,
    ``hook_file``, ``virsh_dargs``, ``edit_iface``, ``prepare_hook_file``
    and ``check_hooks`` being defined in the surrounding function.

    Exercises the network hook script through stop/start of the network
    and attach/detach of an interface, asserting the expected hook-log
    entries after each operation.
    """
    # Set interface to use default network
    net_name = params.get("net_name", "default")
    edit_iface(net_name)
    prepare_hook_file(hook_script % (net_name, hook_log))
    try:
        # destroy the network
        ret = virsh.net_destroy(net_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        hook_str = hook_file + " " + net_name + " stopped end -"
        assert check_hooks(hook_str)
        # start network
        ret = virsh.net_start(net_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        hook_str = hook_file + " " + net_name + " start begin -"
        assert check_hooks(hook_str)
        hook_str = hook_file + " " + net_name + " started begin -"
        assert check_hooks(hook_str)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # Remove all controllers, interfaces and addresses in vm dumpxml
        # so the machine type and controllers can be rebuilt from scratch.
        vm_inactive_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vm_inactive_xml.remove_all_device_by_type('controller')
        type_dict = {'address': '/devices/*/address'}
        try:
            for elem in vm_inactive_xml.xmltreefile.findall(
                    type_dict['address']):
                vm_inactive_xml.xmltreefile.remove(elem)
        except (AttributeError, TypeError) as details:
            test.fail("Fail to remove address.")
        vm_inactive_xml.xmltreefile.write()
        # machine string looks like e.g. "pc-i440fx-rhel7"; split to swap
        # the middle component per machine_type below.
        machine_list = vm_inactive_xml.os.machine.split("-")

        # Modify machine type according to the requirements and Add
        # controllers to VM according to machine type
        def generate_controller(controller_dict):
            # Build a <controller> device element from a model/type/index dict.
            controller_xml = Controller("controller")
            controller_xml.model = controller_dict['model']
            controller_xml.type = controller_dict['type']
            controller_xml.index = controller_dict['index']
            return controller_xml

        if machine_type == 'pc':
            vm_inactive_xml.set_os_attrs(
                **{"machine": machine_list[0] + "-i440fx-" + machine_list[2]})
            pc_Dict0 = {'model': 'pci-root', 'type': 'pci', 'index': 0}
            pc_Dict1 = {'model': 'pci-bridge', 'type': 'pci', 'index': 1}
            vm_inactive_xml.add_device(generate_controller(pc_Dict0))
            vm_inactive_xml.add_device(generate_controller(pc_Dict1))
        elif machine_type == 'q35':
            vm_inactive_xml.set_os_attrs(
                **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
            q35_Dict0 = {'model': 'pcie-root', 'type': 'pci', 'index': 0}
            q35_Dict1 = {'model': 'pcie-root-port', 'type': 'pci',
                         'index': 1}
            q35_Dict2 = {'model': 'pcie-to-pci-bridge', 'type': 'pci',
                         'index': 2}
            vm_inactive_xml.add_device(generate_controller(q35_Dict0))
            vm_inactive_xml.add_device(generate_controller(q35_Dict1))
            vm_inactive_xml.add_device(generate_controller(q35_Dict2))
        vm_inactive_xml.sync()
        # Plug a interface and Unplug the interface
        vm.start()
        vm.wait_for_login().close()
        interface_num = len(
            vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                "interface"))
        mac_addr = "52:54:00:9a:53:a9"
        logging.debug(vm_xml.VMXML.new_from_dumpxml(vm_name))

        def is_attached_interface():
            # True once the guest reports one more interface than before.
            return len(
                vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                    "interface")) == interface_num + 1

        ret = virsh.attach_interface(vm_name,
                                     ("network %s --mac %s" %
                                      (net_name, mac_addr)))
        libvirt.check_exit_status(ret)
        if utils_misc.wait_for(is_attached_interface, timeout=20) is not True:
            test.fail("Attaching interface failed.")
        # libvirt >= 6.0.0 renamed the plugged/unplugged hook ops to
        # port-created/port-deleted.
        if libvirt_version.version_compare(6, 0, 0):
            hook_str = hook_file + " " + net_name + " port-created begin -"
        else:
            hook_str = hook_file + " " + net_name + " plugged begin -"
        assert check_hooks(hook_str)

        def is_detached_interface():
            # True once the interface count is back to the original value.
            return len(
                vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                    "interface")) == interface_num

        ret = virsh.detach_interface(vm_name, "network --mac %s" % mac_addr)
        libvirt.check_exit_status(ret)
        utils_misc.wait_for(is_detached_interface, timeout=50)
        # Wait for timeout and if not succeeded, detach again (during
        # testing, detaching interface failed from q35 VM for the first
        # time when using this function)
        if len(
                vm_xml.VMXML.new_from_dumpxml(vm_name).get_devices(
                    "interface")) != interface_num:
            ret = virsh.detach_interface(vm_name,
                                         "network --mac %s" % mac_addr)
            libvirt.check_exit_status(ret)
            if utils_misc.wait_for(is_detached_interface,
                                   timeout=50) is not True:
                test.fail("Detaching interface failed.")
        if libvirt_version.version_compare(6, 0, 0):
            hook_str = hook_file + " " + net_name + " port-deleted begin -"
        else:
            hook_str = hook_file + " " + net_name + " unplugged begin -"
        assert check_hooks(hook_str)
        # remove the log file so the next check sees only fresh entries
        if os.path.exists(hook_log):
            os.remove(hook_log)
        # destroy the domain; this should unplug ports and fire the hook
        vm.destroy()
        if libvirt_version.version_compare(6, 0, 0):
            hook_str = hook_file + " " + net_name + " port-deleted begin -"
        else:
            hook_str = hook_file + " " + net_name + " unplugged begin -"
        assert check_hooks(hook_str)
    except AssertionError:
        utils_misc.log_last_traceback()
        test.fail("Failed to check network hooks")
def run(test, params, env):
    """
    Sriov basic test:

    1.create max vfs;
    2.Check the nodedev info;
    3.Start a guest with vf;
    4.Reboot a guest with vf;
    5.suspend/resume a guest with vf
    """

    def find_pf():
        """
        Return the sysfs path of the first PF whose interface is "up",
        or False when no PF under the driver directory is up.
        """
        pci_address = ""
        for pci in pci_dirs:
            temp_iface_name = os.listdir("%s/net" % pci)[0]
            operstate = utils_net.get_net_if_operstate(temp_iface_name)
            if operstate == "up":
                pf_iface_name = temp_iface_name
                pci_address = pci
                break
        if pci_address == "":
            return False
        else:
            return pci_address

    def create_address_dict(pci_id):
        """
        Use pci_xxxx_xx_xx_x to create address dict.
        """
        device_domain = pci_id.split(':')[0]
        device_domain = "0x%s" % device_domain
        device_bus = pci_id.split(':')[1]
        device_bus = "0x%s" % device_bus
        device_slot = pci_id.split(':')[-1].split('.')[0]
        device_slot = "0x%s" % device_slot
        device_function = pci_id.split('.')[-1]
        device_function = "0x%s" % device_function
        attrs = {'type': 'pci', 'domain': device_domain, 'slot': device_slot,
                 'bus': device_bus, 'function': device_function}
        return attrs

    def addr_to_pci(addr):
        """
        Convert address dict to pci address: xxxxx:xx.x.
        """
        pci_domain = re.findall(r"0x(.+)", addr['domain'])[0]
        pci_bus = re.findall(r"0x(.+)", addr['bus'])[0]
        pci_slot = re.findall(r"0x(.+)", addr['slot'])[0]
        pci_function = re.findall(r"0x(.+)", addr['function'])[0]
        pci_addr = pci_domain + ":" + pci_bus + ":" + pci_slot + "." + pci_function
        return pci_addr

    def create_hostdev_interface(pci_id, managed, model):
        """
        Create hostdev type interface xml.
        """
        attrs = create_address_dict(pci_id)
        new_iface = Interface('hostdev')
        new_iface.managed = managed
        if model != "":
            new_iface.model = model
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.hostdev_address = new_iface.new_iface_address(
            **{"attrs": attrs})
        # Random 64-char alias; libvirt user aliases must carry the "ua-" prefix.
        chars = string.ascii_letters + string.digits + '-_'
        alias_name = 'ua-' + ''.join(
            random.choice(chars) for _ in list(range(64)))
        new_iface.alias = {'name': alias_name}
        return new_iface

    def create_vfs(vf_num):
        """
        Create max vfs.
        """
        net_device = []
        # NOTE(review): this local shadows the outer net_name ("test-net").
        net_name = []
        # cleanup env and create vfs
        cmd = "echo 0 > %s/sriov_numvfs" % pci_address
        if driver == "mlx4_core":
            cmd = "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core"
        process.run(cmd, shell=True)
        # Snapshot the nodedev lists so new VF entries can be diffed out later.
        pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
        net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        pci_list_before = set(pci_list)
        net_list_before = set(net_list)
        cmd = "echo %d > %s/sriov_numvfs" % (vf_num, pci_address)
        if driver == "mlx4_core":
            cmd = "modprobe -v mlx4_core num_vfs=%d port_type_array=2,2 probe_vf=%d" \
                % (vf_num, vf_num)
        test_res = process.run(cmd, shell=True)
        if test_res.exit_status != 0:
            test.fail("Fail to create vfs")

        def _vf_init_completed():
            """
            Return the list of new net nodedevs once the expected count has
            appeared; False while VF initialization is still in progress.
            """
            try:
                net_list_sriov = virsh.nodedev_list(
                    cap='net').stdout.strip().splitlines()
                net_list_sriov = set(net_list_sriov)
                net_diff = list(net_list_sriov.difference(net_list_before))
                net_count = len(net_diff)
                # mlx4 exposes 2 ports per function (PF included), hence
                # 2 * (vf_num + 1) expected entries.
                if ((driver != "mlx4_core" and net_count != vf_num) or
                        (driver == "mlx4_core" and net_count != 2 * (vf_num + 1))):
                    net_diff = []
                    return False
                return net_diff
            except process.CmdError:
                raise test.fail(
                    "Get net list with 'virsh nodedev-list' failed\n")

        net_diff = utils_misc.wait_for(_vf_init_completed, timeout=300)
        pci_list_sriov = virsh.nodedev_list(
            cap='pci').stdout.strip().splitlines()
        pci_list_sriov = set(pci_list_sriov)
        pci_diff = list(pci_list_sriov.difference(pci_list_before))
        if not net_diff:
            test.fail("Get net list with 'virsh nodedev-list' failed\n")
        for net in net_diff:
            # nodedev names look like net_<ifname>_<6 mac bytes>; strip the
            # "net" prefix and the 6 trailing mac components.
            net = net.split('_')
            length = len(net)
            net = '_'.join(net[1:length - 6])
            # NOTE(review): 'net' is already re-joined to a str here, so this
            # slices characters rather than mac fields; 'mac' is never used —
            # looks like dead/buggy code, confirm before relying on it.
            mac = ':'.join(net[length - 6:])
            net_name.append(net)
        for pci_addr in pci_diff:
            # pci_0000_xx_yy_z -> 0000:xx:yy.z
            temp_addr = pci_addr.split("_")
            pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
            vf_net_name = os.listdir("%s/%s/net" % (pci_device_dir, pci_addr))[0]
            net_device.append(vf_net_name)
        logging.debug(sorted(net_name))
        logging.debug(sorted(net_device))
        if driver != "mlx4_core" and sorted(net_name) != sorted(net_device):
            test.fail("The net name get from nodedev-list is wrong\n")

    def get_ip_by_mac(mac_addr, timeout=120):
        """
        Get interface IP address by given MAC address.
        """
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)

        def get_ip():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = ""
            iface_name = utils_net.get_linux_ifname(session, mac_addr)
            if iface_name is None:
                test.fail("no interface with MAC address %s found" % mac_addr)
            # Restart dhclient for the interface; errors are ignored since an
            # address may already be leased.
            session.cmd("pkill -9 dhclient", ignore_all_errors=True)
            session.cmd("dhclient %s " % iface_name, ignore_all_errors=True)
            ip_addr = utils_misc.wait_for(get_ip, 20)
            logging.debug("The ip addr is %s", ip_addr)
        except Exception:
            logging.warning("Find %s with MAC address %s but no ip for it" %
                            (iface_name, mac_addr))
        finally:
            session.close()
        return ip_addr

    def create_nodedev_pci(pci_address):
        """
        Convert xxxx:xx.x to pci_xxxx_xx_xx_x.
        """
        nodedev_addr = pci_address.split(':')[0:2]
        slot_function = pci_address.split(':')[2]
        nodedev_addr.append(slot_function.split('.')[0])
        nodedev_addr.append(slot_function.split('.')[1])
        nodedev_addr.insert(0, "pci")
        nodedev_addr = "_".join(nodedev_addr)
        return nodedev_addr

    def create_network_interface(name):
        """
        Create network type interface xml.
        """
        new_iface = Interface('network')
        new_iface.source = {'network': name}
        new_iface.model = "virtio"
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        return new_iface

    def create_hostdev_network():
        """
        Create hostdev type with vf pool network xml.
        """
        vf_addr_list = []
        netxml = network_xml.NetworkXML()
        if vf_pool_source == "vf_list":
            # Enumerate every VF address explicitly in the pool.
            for vf in vf_list:
                attrs = create_address_dict(vf)
                new_vf = netxml.new_vf_address(**{'attrs': attrs})
                vf_addr_list.append(new_vf)
            netxml.driver = {'name': 'vfio'}
            netxml.forward = {"mode": "hostdev", "managed": managed}
            netxml.vf_list = vf_addr_list
        else:
            # Let libvirt derive the pool from the PF device.
            netxml.pf = {"dev": pf_name}
            netxml.forward = {"mode": "hostdev", "managed": managed}
        netxml.name = net_name
        logging.debug(netxml)
        return netxml

    def create_macvtap_network():
        """
        Create macvtap type network xml.
        """
        forward_interface_list = []
        for vf_name in vf_name_list:
            forward_interface = {'dev': vf_name}
            forward_interface_list.append(forward_interface)
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'}
        netxml.forward_interface = forward_interface_list
        logging.debug(netxml)
        return netxml

    def do_operation():
        """
        Do operation in guest os with vf and check the os behavior after
        operation.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                # NOTE(review): 'ignore_statue' is a typo'd kwarg (should be
                # ignore_status) — presumably swallowed by virsh's **dargs;
                # confirm resume failures are not silently ignored here.
                virsh.resume(vm.name, debug=True, ignore_statue=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            # managedsave of a guest with an assigned VF is expected to fail.
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)
        if operation == "restart_libvirtd":
            detach_interface()
            utils_libvirtd.libvirtd_restart()
            interface = attach_interface()

    def check_info():
        """
        Check the pf or vf info after create vfs.
        """
        if info_type == "pf_info" or info_type == "vf_order":
            nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1])
            xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            if info_type == "pf_info":
                product_info = xml.cap.product_info
                max_count = xml.max_count
                if pci_info.find(product_info) == -1:
                    test.fail(
                        "The product_info show in nodedev-dumpxml is wrong\n")
                if int(max_count) != max_vfs:
                    test.fail(
                        "The maxCount show in nodedev-dumpxml is wrong\n")
            if info_type == "vf_order":
                vf_addr_list = xml.cap.virt_functions
                if len(vf_addr_list) != max_vfs:
                    test.fail(
                        "The num of vf list show in nodedev-dumpxml is wrong\n")
                addr_list = []
                for vf_addr in vf_addr_list:
                    addr = vf_addr.domain + ":" + vf_addr.bus + ":" + \
                        vf_addr.slot + "." + vf_addr.function
                    addr_list.append(addr)
                logging.debug(
                    "The vf addr list show in nodedev-dumpxml is %s\n",
                    addr_list)
                # The dumped VF list must already be sorted.
                if sorted(addr_list) != addr_list:
                    test.fail(
                        "The vf addr list show in nodedev-dumpxml is not sorted correctly\n")
        elif info_type == "vf_info":
            vf_addr = vf_list[0]
            nodedev_pci = create_nodedev_pci(vf_addr)
            vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            vf_bus_slot = ':'.join(vf_addr.split(':')[1:])
            res = process.run("lspci -s %s -vv" % vf_bus_slot)
            vf_pci_info = res.stdout_text
            vf_product_info = vf_xml.cap.product_info
            if vf_pci_info.find(vf_product_info) == -1:
                test.fail(
                    "The product_info show in nodedev-dumpxml is wrong\n")
            # The VF's dumpxml must point back to the PF address.
            pf_addr = vf_xml.cap.virt_functions[0]
            pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0]
            pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0]
            pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0]
            pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0]
            pf_pci = pf_addr_domain + ":" + pf_addr_bus + ":" + \
                pf_addr_slot + "." + pf_addr_function
            if pf_pci != pci_id:
                test.fail(
                    "The pf address show in vf nodedev-dumpxml is wrong\n")

    def create_interface():
        """
        Call different function to create interface according to the type
        """
        new_iface = Interface('network')
        if vf_type == "vf":
            new_iface = create_hostdev_interface(vf_addr, managed, model)
        if vf_type == "vf_pool":
            netxml = create_hostdev_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            if not inactive_pool:
                virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        if vf_type == "macvtap":
            new_iface = Interface('direct')
            new_iface.source = {"dev": vf_name, "mode": "passthrough"}
            new_iface.mac_address = utils_net.generate_mac_address_simple()
            new_iface.model = "virtio"
        if vf_type == "macvtap_network":
            netxml = create_macvtap_network()
            result = virsh.net_define(netxml.xml, ignore_status=True)
            virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        return new_iface

    def detach_interface():
        """
        Detach interface:

        1.Detach interface from xml;
        2.Check the live xml after detach interface;
        3.Check the vf driver after detach interface.
        """
        def _detach_completed():
            # Completed once the MAC no longer shows in domiflist.
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        def check_addr_attrs():
            # True once no hostdev in the live XML carries the VF address.
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device = live_xml.devices
            hostdev_list = device.by_device_tag("hostdev")
            for hostdev in hostdev_list:
                addr = hostdev.source.untyped_address
                hostdev_addr_attrs = {"domain": addr.domain,
                                      "bus": addr.bus,
                                      "slot": addr.slot,
                                      "function": addr.function}
                if hostdev_addr_attrs == vf_addr_attrs:
                    return False
            return True

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if vf_type == "hostdev":
            check_ret = utils_misc.wait_for(check_addr_attrs, timeout=60)
            if not check_ret:
                test.fail("The hostdev device detach failed from xml\n")
        else:
            utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail(
                            "The hostdev interface still in the guest xml after detach\n")
                    break
            # After detach, the VF should be bound back to a host driver.
            driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            logging.debug("The driver after vf detached from guest is %s\n",
                          driver)
            if managed == "no":
                # Unmanaged: still on vfio-pci until we reattach explicitly.
                if driver != "vfio-pci":
                    test.fail(
                        "The vf pci driver is not vfio-pci after detached from guest with managed as no\n")
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result, expect_error=False)
            elif driver != origin_driver:
                test.fail(
                    "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n"
                    % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail(
                            "The macvtap interface still exist in the guest xml after detach\n")
                    break

    def attach_interface():
        """
        Attach interface:

        1.Attach interface from xml;
        2.Check the vf driver after attach interface;
        3.Check the live xml after attach interface;
        """
        if managed == "no":
            # Unmanaged VF must be detached from the host driver first.
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name, new_iface.xml, flagstr=option,
                                     debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        # For option == "--persistent", after VM destroyed and then start, the device should still be there.
        if option == "--persistent":
            virsh.destroy(vm_name)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)
        if vf_type != "hostdev":
            get_ip_by_mac(mac_addr, timeout=60)
        device = live_xml.devices
        if vf_type == "hostdev":
            hostdev_list = device.by_device_tag("hostdev")
            if len(hostdev_list) == 0:
                test.fail("The hostdev device attach failed from xml\n")
            else:
                for hostdev in hostdev_list:
                    if hostdev.type == "pci":
                        break
                interface = hostdev
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail(
                            "The driver of the hostdev interface is not vfio\n")
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
            # NOTE(review): 'match' is only bound inside the loop; if no
            # direct interface matched, this raises NameError rather than
            # test.fail — confirm that is acceptable.
            if match != "yes":
                test.fail(
                    "The dev name or mode of macvtap interface is wrong after attach\n")
        return interface

    def setup_controller(nic_num, controller_index, ctl_models):
        """
        Create controllers bond to numa node in the guest xml

        :param nic_num: number of nic card bond to numa node
        :param controller_index: index num used to create controllers
        :param ctl_models: contoller topo for numa bond
        """
        index = controller_index
        if nic_num == 2:
            ctl_models.append('pcie-switch-upstream-port')
            ctl_models.append('pcie-switch-downstream-port')
            ctl_models.append('pcie-switch-downstream-port')
        # Fill indexes 0..index-1 with pcie-root + pcie-root-ports.
        for i in range(index):
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = i
            if i == 0:
                controller.model = 'pcie-root'
            else:
                controller.model = 'pcie-root-port'
            vmxml.add_device(controller)
        set_address = False
        for model in ctl_models:
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = index
            controller.model = model
            if set_address or model == "pcie-switch-upstream-port":
                # Chain the controller onto the previously created one.
                attrs = {'type': 'pci', 'domain': '0', 'slot': '0',
                         'bus': index - 1, 'function': '0'}
                controller.address = controller.new_controller_address(
                    **{"attrs": attrs})
                logging.debug(controller)
            if controller.model == "pcie-expander-bus":
                controller.node = "0"
                controller.target = {'busNr': '100'}
                set_address = True
            else:
                set_address = False
            logging.debug(controller)
            vmxml.add_device(controller)
            index += 1
        return index - 1

    def add_numa(vmxml):
        """
        Add numa node in the guest xml

        :param vmxml: The instance of VMXML clas
        """
        vcpu = vmxml.vcpu
        max_mem = vmxml.max_mem
        max_mem_unit = vmxml.max_mem_unit
        numa_dict = {}
        numa_dict_list = []
        # Compute the memory size for each numa node
        if vcpu == 1:
            numa_dict['id'] = '0'
            numa_dict['cpus'] = '0'
            numa_dict['memory'] = str(max_mem)
            numa_dict['unit'] = str(max_mem_unit)
            numa_dict_list.append(numa_dict)
        else:
            # Two nodes; split memory evenly and spread vcpus across them.
            for index in range(2):
                numa_dict['id'] = str(index)
                numa_dict['memory'] = str(max_mem // 2)
                numa_dict['unit'] = str(max_mem_unit)
                if vcpu == 2:
                    numa_dict['cpus'] = str(index)
                else:
                    if index == 0:
                        if vcpu == 3:
                            numa_dict['cpus'] = str(index)
                        if vcpu > 3:
                            numa_dict['cpus'] = "%s-%s" % (index,
                                                           vcpu // 2 - 1)
                    else:
                        numa_dict['cpus'] = "%s-%s" % (vcpu // 2,
                                                       str(vcpu - 1))
                numa_dict_list.append(numa_dict)
                numa_dict = {}
        # Add cpu device with numa node setting in domain xml
        vmxml_cpu = vm_xml.VMCPUXML()
        vmxml_cpu.xml = "<cpu><numa/></cpu>"
        vmxml_cpu.numa_cell = numa_dict_list
        vmxml.cpu = vmxml_cpu

    def create_iface_list(bus_id, nic_num, vf_list):
        """
        Create hostdev interface list bond to numa node

        :param bus_id: bus_id in pci address which decides the controller attached to
        :param nic_num: number of nic card bond to numa node
        :param vf_list: sriov vf list
        """
        iface_list = []
        for num in range(nic_num):
            vf_addr = vf_list[num]
            iface = create_hostdev_interface(vf_addr, managed, model)
            bus_id -= num
            attrs = {'type': 'pci', 'domain': '0', 'slot': '0',
                     'bus': bus_id, 'function': '0'}
            iface.address = iface.new_iface_address(**{"attrs": attrs})
            iface_list.append(iface)
        return iface_list

    def check_guestos(iface_list):
        """
        Check whether vf bond to numa node can get ip successfully in guest os

        :param iface_list: hostdev interface list
        """
        for iface in iface_list:
            mac_addr = iface.mac_address
            get_ip_by_mac(mac_addr, timeout=60)

    def check_numa(vf_driver):
        """
        Check whether vf bond to correct numa node in guest os

        :param vf_driver: vf driver
        """
        if vm.serial_console:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        vf_pci = "/sys/bus/pci/drivers/%s" % vf_driver
        vf_dir = session.cmd_output("ls -d %s/00*" %
                                    vf_pci).strip().split('\n')
        for vf in vf_dir:
            numa_node = session.cmd_output(
                'cat %s/numa_node' % vf).strip().split('\n')[-1]
            logging.debug("The vf is attached to numa node %s\n", numa_node)
            if numa_node != "0":
                test.fail("The vf is not attached to numa node 0\n")
        session.close()

    def remove_devices(vmxml, device_type):
        """
        Remove all addresses for all devices who has one.

        :param vm_xml: The VM XML to be modified
        :param device_type: The device type for removing
        :return: True if success, otherwise, False
        """
        if device_type not in ['address', 'usb']:
            return
        type_dict = {'address': '/devices/*/address', 'usb': '/devices/*'}
        try:
            for elem in vmxml.xmltreefile.findall(type_dict[device_type]):
                if device_type == 'usb':
                    if elem.get('bus') == 'usb':
                        vmxml.xmltreefile.remove(elem)
                else:
                    vmxml.xmltreefile.remove(elem)
        except (AttributeError, TypeError) as details:
            test.error("Fail to remove '%s': %s" % (device_type, details))
        vmxml.xmltreefile.write()

    # --- Parse test parameters ---
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    machine_type = params.get("machine_type", "pc")
    operation = params.get("operation")
    driver = params.get("driver", "ixgbe")
    status_error = params.get("status_error", "no") == "yes"
    model = params.get("model", "")
    managed = params.get("managed", "yes")
    attach = params.get("attach", "")
    option = params.get("option", "")
    vf_type = params.get("vf_type", "")
    dev_type = params.get("dev_type", "")
    info_check = params.get("info_check", "no")
    info_type = params.get("info_type", "")
    vf_pool_source = params.get("vf_pool_source", "vf_list")
    loop_times = int(params.get("loop_times", "1"))
    start_vm = "yes" == params.get("start_vm", "yes")
    including_pf = "yes" == params.get("including_pf", "no")
    max_vfs_attached = "yes" == params.get("max_vfs_attached", "no")
    inactive_pool = "yes" == params.get("inactive_pool", "no")
    duplicate_vf = "yes" == params.get("duplicate_vf", "no")
    expected_error = params.get("error_msg", "")
    nic_num = int(params.get("nic_num", "1"))
    nfv = params.get("nfv", "no") == "yes"
    ctl_models = params.get("ctl_models", "").split(' ')
    controller_index = int(params.get("controller_index", "12"))

    # Back up the guest xml and strip its interfaces.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()
    if max_vfs_attached:
        # Ensure a pci-bridge/pcie-root-port exists to host many devices.
        controller_devices = vmxml.get_devices("controller")
        pci_bridge_controllers = []
        for device in controller_devices:
            logging.debug(device)
            if device.type == 'pci' and device.model in ("pci-bridge",
                                                         "pcie-root-port"):
                pci_bridge_controllers.append(device)
        if not pci_bridge_controllers:
            pci_bridge_controller = Controller("controller")
            pci_bridge_controller.type = "pci"
            pci_bridge_controller.index = "1"
            pci_bridge_controller.model = "pci-bridge"
            vmxml.add_device(pci_bridge_controller)
            vmxml.sync()

    if start_vm:
        if not vm.is_dead():
            vm.destroy()
        vm.start()
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        session.close()
    else:
        if not vm.is_dead():
            vm.destroy()

    driver_dir = "/sys/bus/pci/drivers/%s" % driver
    pci_dirs = glob.glob("%s/000*" % driver_dir)
    pci_device_dir = "/sys/bus/pci/devices"
    pci_address = ""
    net_name = "test-net"

    # Prepare interface xml
    try:
        pf_iface_name = ""
        pci_address = utils_misc.wait_for(find_pf, timeout=60)
        if not pci_address:
            test.cancel("no up pf found in the test machine")
        pci_id = pci_address.split("/")[-1]
        pf_name = os.listdir('%s/net' % pci_address)[0]
        bus_slot = ':'.join(pci_address.split(':')[1:])
        pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text
        logging.debug("The pci info of the sriov card is:\n %s", pci_info)
        max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1
        if info_check == 'yes' or max_vfs < 32:
            vf_num = max_vfs
            create_vfs(vf_num)
        else:
            # Large cards: only create just over half of the VFs.
            vf_num = int(max_vfs // 2 + 1)
            create_vfs(vf_num)

        vf_list = []
        vf_name_list = []
        vf_mac_list = []

        for i in range(vf_num):
            vf = os.readlink("%s/virtfn%s" % (pci_address, str(i)))
            vf = os.path.split(vf)[1]
            vf_list.append(vf)
            vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0]
            with open('%s/%s/net/%s/address' % (pci_device_dir, vf,
                                                vf_name), 'r') as f:
                vf_mac = f.readline().strip()
            vf_name_list.append(vf_name)
            vf_mac_list.append(vf_mac)

        if attach == "yes" and not nfv:
            vf_addr = vf_list[0]
            if dev_type:
                mac_addr = vf_mac_list[0]
                new_iface = utils_test.libvirt.create_hostdev_xml(
                    vf_addr, managed=managed, xmlfile=False)
            else:
                new_iface = create_interface()
                mac_addr = new_iface.mac_address
            if inactive_pool:
                result = virsh.attach_device(vm_name, new_iface.xml,
                                             flagstr=option,
                                             ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result, expected_error)
            else:
                nodedev_pci_addr = create_nodedev_pci(vf_addr)
                origin_driver = os.readlink(
                    os.path.join(pci_device_dir, vf_addr,
                                 "driver")).split('/')[-1]
                logging.debug(
                    "The driver of vf before attaching to guest is %s\n",
                    origin_driver)
                count = 0
                while count < loop_times:
                    interface = attach_interface()
                    if vf_type in ["vf", "vf_pool"]:
                        vf_addr_attrs = interface.hostdev_address.attrs
                    if vf_type == "hostdev":
                        addr = interface.source.untyped_address
                        vf_addr_attrs = {"domain": addr.domain,
                                         "bus": addr.bus,
                                         "slot": addr.slot,
                                         "function": addr.function}
                    if operation != "":
                        do_operation()
                    detach_interface()
                    count += 1
            if max_vfs_attached:
                interface_list = []
                # Attach every VF, then detach them all.
                for vf_addr in vf_list:
                    new_iface = create_interface()
                    mac_addr = new_iface.mac_address
                    nodedev_pci_addr = create_nodedev_pci(vf_addr)
                    attach_interface()
                    interface_list.append(new_iface)
                count = 0
                for new_iface in interface_list:
                    vf_addr = vf_list[count]
                    vf_addr_attrs = new_iface.hostdev_address.attrs
                    detach_interface()
                    count += 1
        if info_check == "yes":
            check_info()
        if including_pf:
            # Defining a pool that contains the PF address must fail.
            vf_list = []
            pf_addr = pci_id
            vf_list.append(pf_addr)
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml, ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if duplicate_vf:
            # Duplicated VF addresses must be rejected by define and create.
            vf_list.append(vf_list[0])
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml, ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
            result = virsh.net_create(netxml.xml, ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if nfv:
            vf_driver = os.readlink(
                os.path.join(pci_device_dir, vf_list[0],
                             "driver")).split('/')[-1]
            # Rebuild the guest topology: q35 machine, numa nodes and a
            # controller chain, then plug the VFs onto it.
            vmxml.remove_all_device_by_type('controller')
            remove_devices(vmxml, 'address')
            remove_devices(vmxml, 'usb')
            osxml = vmxml.os
            if "i440fx" in vmxml.os.machine:
                osxml.machine = "q35"
                vmxml.os = osxml
            add_numa(vmxml)
            bus_id = setup_controller(nic_num, controller_index, ctl_models)
            vmxml.sync()
            logging.debug(vmxml)
            iface_list = create_iface_list(bus_id, nic_num, vf_list)
            for iface in iface_list:
                process.run("cat %s" % iface.xml, shell=True).stdout_text
                result = virsh.attach_device(vm_name, iface.xml,
                                             flagstr=option,
                                             ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(live_xml)
            check_guestos(iface_list)
            check_numa(vf_driver)
    finally:
        # Always restore the guest, the driver state and any test network.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if driver == "mlx4_core":
            # Reload mlx4 driver to default setting
            process.run(
                "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core",
                shell=True)
            process.run(
                "modprobe mlx4_core; modprobe mlx4_ib; modprobe mlx4_en",
                shell=True)
        else:
            process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        if vf_type == "vf_pool" or vf_type == "macvtap_network":
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name, ignore_status=True)
def run(test, params, env):
    """
    Test command: virsh net-dumpxml.

    This command can output the network information as an XML dump to stdout.
    1.Get all parameters from config file.
    2.If test case's network status is inactive, destroy it.
    3.Perform virsh net-dumpxml operation.
    4.Recover test environment(network status).
    5.Confirm the test result.
    """
    status_error = params.get("status_error", "no")
    net_ref = params.get("net_dumpxml_net_ref")
    net_name = params.get("net_dumpxml_network", "default")
    net_status = params.get("net_dumpxml_network_status", "active")
    xml_file_name = params.get("net_dumpxml_xml_file", "default.xml")
    extra = params.get("net_dumpxml_extra", "")
    dump_xml_path = os.path.join(test.tmpdir, xml_file_name)

    # Resolve the reference passed on the net-dumpxml command line.
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(net_name).stdout.strip()
    elif net_ref == "name":
        net_ref = net_name

    # Record the current state so it can be restored afterwards; the
    # network must be persistent for the destroy/start round-trip to work.
    net_is_active = virsh.net_state_dict()[net_name]['active']
    net_status_current = "active" if net_is_active else "inactive"
    if not virsh.net_state_dict()[net_name]['persistent']:
        raise error.TestError("Network is transient!")

    # The case wants an inactive network but it is currently active (and
    # vice versa for recovery), so the state has to be flipped around the
    # dumpxml call.
    need_state_flip = (net_status == "inactive"
                       and net_status_current == "active")
    try:
        if need_state_flip:
            destroy_rc = virsh.net_destroy(net_name,
                                           ignore_status=True).exit_status
            if destroy_rc != 0:
                raise error.TestError("Network destroied failed!")

        result = virsh.net_dumpxml(net_ref, extra, dump_xml_path,
                                   ignore_status=True)
        status = result.exit_status
        err = result.stderr.strip()
        # Validate the dumped XML against the network schema.
        valid_s = utils.run("virt-xml-validate %s network" % dump_xml_path,
                            ignore_status=True).exit_status

        # Any option not listed in the command help makes failure expected.
        if "--" in extra:
            for option in extra.split("--"):
                opt = option.strip()
                if not opt:
                    continue
                if not virsh.has_command_help_match("net-dumpxml", opt):
                    status_error = "yes"
                    break
    finally:
        # Recover network state.
        if need_state_flip:
            start_rc = virsh.net_start(net_name,
                                       ignore_status=True).exit_status
            if start_rc != 0:
                raise error.TestError("Network started failed!")

    # Judge the outcome against the (possibly flipped) expectation.
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
        if err == "":
            raise error.TestFail("The wrong command has no error outputed!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command!")
        if valid_s != 0:
            raise error.TestFail("Command output is invalid!")
    else:
        raise error.TestError("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test command: virsh net-dhcp-leases

    1. Create a new network and run virsh command to check dhcp leases info.
    2. Attach an interface before or after start the domain, then check the
       dhcp leases info.
    3. Clean the environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    net_name = params.get("net_name", "default")
    nic_mac = params.get("nic_mac", "")
    net_option = params.get("net_option", "")
    status_error = "yes" == params.get("status_error", "no")
    prepare_net = "yes" == params.get("prepare_net", "yes")
    hotplug_iface = "yes" == params.get("hotplug_interface", "no")
    filter_by_mac = "yes" == params.get("filter_by_mac", "no")
    exist_bug = params.get("exist_bug")

    def create_network():
        """
        Create a transient NAT network with a dhcp range.
        """
        net_ip_addr = params.get("net_ip_addr", "192.168.200.1")
        net_ip_netmask = params.get("net_ip_netmask", "255.255.255.0")
        net_dhcp_start = params.get("net_dhcp_start", "192.168.200.2")
        net_dhcp_end = params.get("net_dhcp_end", "192.168.200.254")
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'mode': "nat"}
        ipxml = network_xml.IPXML()
        ipxml.address = net_ip_addr
        ipxml.netmask = net_ip_netmask
        ipxml.dhcp_ranges = {'start': net_dhcp_start, "end": net_dhcp_end}
        netxml.set_ip(ipxml)
        netxml.create()

    def get_net_dhcp_leases(output):
        """
        Return the dhcp lease info in a list of dicts, keyed by the
        column headers of the net-dhcp-leases table output.
        """
        leases = []
        lines = output.splitlines()
        if not lines:
            return leases
        try:
            # Header cells may contain up to 4 space-separated words
            # (e.g. "IP address"); match greedily first, then fall back
            # to single tokens.
            pat = r"\S+\ ?\S+\ ?\S+\ ?\S+|\S+"
            keys = re.findall(pat, lines[0])
            # lines[1] is the "----" separator row; data starts at lines[2].
            for line in lines[2:]:
                values = re.findall(pat, line)
                leases.append(dict(zip(keys, values)))
            return leases
        # NOTE(review): bare except also swallows KeyboardInterrupt etc. —
        # narrowing it would change behavior, so it is only flagged here.
        except:
            raise error.TestError("Fail to parse output: %s" % output)

    def get_ip_by_mac(mac_addr, try_dhclint=False):
        """
        Get the interface IP address for the given MAC address. If
        try_dhclint is True, try to allocate an IP address for the
        interface via dhclient first.
        """
        session = vm.wait_for_login()

        def f():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = utils_misc.wait_for(f, 10)
            if ip_addr is None:
                iface_name = utils_net.get_linux_ifname(session, mac_addr)
                if try_dhclint:
                    session.cmd("dhclient %s" % iface_name)
                    ip_addr = utils_misc.wait_for(f, 10)
                else:
                    # No IP for the interface, just print the interface name
                    # NOTE(review): logging.warn is a deprecated alias of
                    # logging.warning.
                    logging.warn("Find '%s' with MAC address '%s', "
                                 "but which has no IP address",
                                 iface_name, mac_addr)
        finally:
            session.close()
        return ip_addr

    def check_net_lease(net_leases, expected_find=True):
        """
        Check the dhcp lease info against the domain XML and the guest's
        actual IP addresses; fail when presence/absence does not match
        expected_find.
        """
        if not net_leases:
            if expected_find:
                raise error.TestFail("Lease info is empty")
            else:
                logging.debug("No dhcp lease info find as expected")
        else:
            if not expected_find:
                raise error.TestFail("Find unexpected dhcp lease info: %s"
                                     % net_leases)
            find_mac = False
            for net_lease in net_leases:
                net_mac = net_lease['MAC address']
                # Strip the "/24"-style prefix length from the lease IP.
                net_ip = net_lease['IP address'][:-3]
                if vm_xml.VMXML.get_iface_by_mac(vm_name, net_mac):
                    find_mac = True
                    logging.debug("Find '%s' in domain XML", net_mac)
                else:
                    logging.debug("Not find '%s' in domain XML", net_mac)
                    continue
                iface_ip = get_ip_by_mac(net_mac)
                if iface_ip and iface_ip != net_ip:
                    raise error.TestFail("Address '%s' is not expected"
                                         % iface_ip)
            if expected_find and not find_mac:
                raise error.TestFail("No matched MAC address")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    # Remove all interfaces of the VM
    if vm.is_alive():
        vm.destroy(gracefully=False)
    vm.free_mac_address(0)
    vmxml.remove_all_device_by_type("interface")
    # Create new network
    if prepare_net:
        create_network()
    nets = virsh.net_state_dict()
    if net_name not in nets.keys() and not status_error:
        raise error.TestError("Not find network '%s'" % net_name)
    # No interface attached yet, so the first lease query should be empty.
    expected_find = False
    try:
        result = virsh.net_dhcp_leases(net_name,
                                       mac=nic_mac,
                                       options=net_option,
                                       debug=True,
                                       ignore_status=True)
        utlv.check_exit_status(result, status_error)
        lease = get_net_dhcp_leases(result.stdout.strip())
        check_net_lease(lease, expected_find)
        if not status_error:
            iface_mac = utils_net.generate_mac_address_simple()
            if filter_by_mac:
                nic_mac = iface_mac
            op = "--type network --source %s --mac %s" % (net_name, iface_mac)
            if not hotplug_iface:
                # Cold-plug: attach with --config before starting the guest.
                op += " --config"
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
                vm.start()
            else:
                # Hot-plug: start the guest first, then attach live.
                vm.start()
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
            new_interface_ip = get_ip_by_mac(iface_mac, try_dhclint=True)
            # Allocate IP address for the new interface may fail, so only
            # check the result if get new IP address
            if new_interface_ip:
                expected_find = True
            result = virsh.net_dhcp_leases(net_name,
                                           mac=nic_mac,
                                           debug=False,
                                           ignore_status=True)
            utlv.check_exit_status(result, status_error)
            lease = get_net_dhcp_leases(result.stdout.strip())
            check_net_lease(lease, expected_find)
    finally:
        if exist_bug:
            # NOTE(review): BUG_URL is a module-level constant (not visible
            # in this chunk) — presumably a format string with one %s slot.
            logging.warn("Case may failed as bug: %s", BUG_URL % exist_bug)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        if prepare_net:
            virsh.net_destroy(net_name)
def run(test, params, env):
    """
    Sriov basic test:

    1. create max vfs;
    2. Check the nodedev info;
    3. Start a guest with vf;
    4. Reboot a guest with vf;
    5. suspend/resume a guest with vf

    :param test: test object (provides fail/cancel).
    :param params: test parameters dict.
    :param env: test environment (provides the VM under test).
    """

    def find_pf():
        """
        Find the first PF under the driver dir whose interface is 'up'.

        :return: the PF's sysfs pci path, or False when none is up.
        """
        pci_address = ""
        for pci in pci_dirs:
            temp_iface_name = os.listdir("%s/net" % pci)[0]
            operstate = utils_net.get_net_if_operstate(temp_iface_name)
            if operstate == "up":
                # NOTE(review): this assigns a local only; the outer
                # pf_iface_name is never updated — confirm intent.
                pf_iface_name = temp_iface_name
                pci_address = pci
                break
        if pci_address == "":
            return False
        else:
            return pci_address

    def create_address_dict(pci_id):
        """
        Use pci_xxxx_xx_xx_x style id to create an <address> attrs dict.
        """
        device_domain = pci_id.split(':')[0]
        device_domain = "0x%s" % device_domain
        device_bus = pci_id.split(':')[1]
        device_bus = "0x%s" % device_bus
        device_slot = pci_id.split(':')[-1].split('.')[0]
        device_slot = "0x%s" % device_slot
        device_function = pci_id.split('.')[-1]
        device_function = "0x%s" % device_function
        attrs = {'type': 'pci', 'domain': device_domain, 'slot': device_slot,
                 'bus': device_bus, 'function': device_function}
        return attrs

    def addr_to_pci(addr):
        """
        Convert an address attrs dict back to a pci address: xxxx:xx:xx.x.
        """
        pci_domain = re.findall(r"0x(.+)", addr['domain'])[0]
        pci_bus = re.findall(r"0x(.+)", addr['bus'])[0]
        pci_slot = re.findall(r"0x(.+)", addr['slot'])[0]
        pci_function = re.findall(r"0x(.+)", addr['function'])[0]
        pci_addr = pci_domain + ":" + pci_bus + ":" + pci_slot + "." + pci_function
        return pci_addr

    def create_hostdev_interface(pci_id, managed, model):
        """
        Create a hostdev type interface xml pointing at the given VF.
        """
        attrs = create_address_dict(pci_id)
        new_iface = Interface('hostdev')
        new_iface.managed = managed
        if model != "":
            new_iface.model = model
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.hostdev_address = new_iface.new_iface_address(
            **{"attrs": attrs})
        # 64-char random alias name with the mandatory 'ua-' prefix
        chars = string.ascii_letters + string.digits + '-_'
        alias_name = 'ua-' + ''.join(
            random.choice(chars) for _ in list(range(64)))
        new_iface.alias = {'name': alias_name}
        return new_iface

    def create_vfs(vf_num):
        """
        Create vf_num VFs on the PF and cross-check the nodedev lists.
        """
        net_device = []
        net_name = []
        # Reset to 0 first; a second write to sriov_numvfs would fail otherwise
        test_res = process.run("echo 0 > %s/sriov_numvfs" % pci_address,
                               shell=True)
        pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
        net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        pci_list_before = set(pci_list)
        net_list_before = set(net_list)
        test_res = process.run("echo %d > %s/sriov_numvfs" % (vf_num,
                                                             pci_address),
                               shell=True)
        if test_res.exit_status != 0:
            test.fail("Fail to create vfs")
        pci_list_sriov = virsh.nodedev_list(
            cap='pci').stdout.strip().splitlines()

        def _vf_init_completed():
            """Wait until all vf_num net nodedevs have shown up."""
            try:
                net_list_sriov = virsh.nodedev_list(
                    cap='net').stdout.strip().splitlines()
                net_list_sriov = set(net_list_sriov)
                net_diff = list(net_list_sriov.difference(net_list_before))
                if len(net_diff) != int(vf_num):
                    net_diff = []
                    return False
                return net_diff
            except process.CmdError:
                # test.fail raises by itself; the original 'raise test.fail'
                # was redundant/incorrect
                test.fail("Get net list with 'virsh list' failed\n")

        pci_list_sriov = set(pci_list_sriov)
        pci_diff = list(pci_list_sriov.difference(pci_list_before))
        net_diff = utils_misc.wait_for(_vf_init_completed, timeout=60)
        if not net_diff:
            test.fail("Get net list with 'virsh list' failed\n")
        # nodedev net names look like net_<iface>_<6 mac bytes>; strip the
        # leading 'net' token and the trailing mac parts to get the iface name
        for net in net_diff:
            net = net.split('_')
            length = len(net)
            net = '_'.join(net[1:length - 6])
            net_name.append(net)
        # nodedev pci names look like pci_dddd_bb_ss_f; rebuild dddd:bb:ss.f
        # and read the VF's interface name from sysfs
        for pci_addr in pci_diff:
            temp_addr = pci_addr.split("_")
            pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
            vf_net_name = os.listdir("%s/%s/net" % (pci_device_dir,
                                                    pci_addr))[0]
            net_device.append(vf_net_name)
        logging.debug(sorted(net_name))
        logging.debug(sorted(net_device))
        if sorted(net_name) != sorted(net_device):
            test.fail("The net name get from nodedev-list is wrong\n")

    def get_ip_by_mac(mac_addr, timeout=120):
        """
        Get interface IP address by given MAC address via the serial console.

        :return: the IP address string, or "" / None when none was obtained.
        """
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)

        def get_ip():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = ""
            iface_name = utils_net.get_linux_ifname(session, mac_addr)
            if iface_name is None:
                test.fail("no interface with MAC address %s found" % mac_addr)
            # Restart dhclient on the interface to (re)acquire a lease
            session.cmd("pkill -9 dhclient", ignore_all_errors=True)
            session.cmd("dhclient %s " % iface_name, ignore_all_errors=True)
            ip_addr = utils_misc.wait_for(get_ip, 20)
            logging.debug("The ip addr is %s", ip_addr)
        except Exception:
            # best effort: missing IP is tolerated, callers decide what to do
            logging.warning("Find %s with MAC address %s but no ip for it"
                            % (iface_name, mac_addr))
        finally:
            session.close()
        return ip_addr

    def create_nodedev_pci(pci_address):
        """
        Convert xxxx:xx:xx.x to nodedev name pci_xxxx_xx_xx_x.
        """
        nodedev_addr = pci_address.split(':')[0:2]
        slot_function = pci_address.split(':')[2]
        nodedev_addr.append(slot_function.split('.')[0])
        nodedev_addr.append(slot_function.split('.')[1])
        nodedev_addr.insert(0, "pci")
        nodedev_addr = "_".join(nodedev_addr)
        return nodedev_addr

    def create_network_interface(name):
        """
        Create a network type interface xml using the given network.
        """
        new_iface = Interface('network')
        new_iface.source = {'network': name}
        new_iface.model = "virtio"
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        return new_iface

    def create_hostdev_network():
        """
        Create a hostdev network xml backed by the VF pool (or by the PF dev).
        """
        vf_addr_list = []
        netxml = network_xml.NetworkXML()
        if vf_pool_source == "vf_list":
            for vf in vf_list:
                attrs = create_address_dict(vf)
                new_vf = netxml.new_vf_address(**{'attrs': attrs})
                vf_addr_list.append(new_vf)
            netxml.driver = {'name': 'vfio'}
            netxml.forward = {"mode": "hostdev", "managed": managed}
            netxml.vf_list = vf_addr_list
        else:
            netxml.pf = {"dev": pf_name}
            netxml.forward = {"mode": "hostdev", "managed": managed}
        netxml.name = net_name
        logging.debug(netxml)
        return netxml

    def create_macvtap_network():
        """
        Create a macvtap (passthrough) network xml over all VF interfaces.
        """
        forward_interface_list = []
        for vf_name in vf_name_list:
            forward_interface = {'dev': vf_name}
            forward_interface_list.append(forward_interface)
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'}
        netxml.forward_interface = forward_interface_list
        logging.debug(netxml)
        return netxml

    def do_operation():
        """
        Do operation in guest os with vf and check the os behavior after
        operation (suspend/resume, reboot or managedsave).
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                # BUGFIX: keyword was misspelled 'ignore_statue', so resume
                # failures were never raised
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name,
                                                                   detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                # Reboot needs a fresh serial console to follow the guest
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            # managedsave is expected to fail with a hostdev device attached
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

    def check_info():
        """
        Check the pf or vf info after create vfs.
        """
        if info_type == "pf_info" or info_type == "vf_order":
            nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1])
            xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            if info_type == "pf_info":
                product_info = xml.cap.product_info
                max_count = xml.max_count
                if pci_info.find(product_info) == -1:
                    test.fail(
                        "The product_info show in nodedev-dumpxml is wrong\n")
                if int(max_count) != max_vfs:
                    test.fail(
                        "The maxCount show in nodedev-dumpxml is wrong\n")
            if info_type == "vf_order":
                vf_addr_list = xml.cap.virt_functions
                if len(vf_addr_list) != max_vfs:
                    test.fail(
                        "The num of vf list show in nodedev-dumpxml is wrong\n")
                addr_list = []
                for vf_addr in vf_addr_list:
                    addr = vf_addr.domain + ":" + vf_addr.bus + ":" + \
                        vf_addr.slot + "." + vf_addr.function
                    addr_list.append(addr)
                logging.debug(
                    "The vf addr list show in nodedev-dumpxml is %s\n",
                    addr_list)
                if sorted(addr_list) != addr_list:
                    test.fail(
                        "The vf addr list show in nodedev-dumpxml is not sorted correctly\n")
        elif info_type == "vf_info":
            vf_addr = vf_list[0]
            nodedev_pci = create_nodedev_pci(vf_addr)
            vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            vf_bus_slot = ':'.join(vf_addr.split(':')[1:])
            res = process.run("lspci -s %s -vv" % vf_bus_slot)
            vf_pci_info = res.stdout_text
            vf_product_info = vf_xml.cap.product_info
            if vf_pci_info.find(vf_product_info) == -1:
                test.fail(
                    "The product_info show in nodedev-dumpxml is wrong\n")
            pf_addr = vf_xml.cap.virt_functions[0]
            pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0]
            pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0]
            pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0]
            pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0]
            pf_pci = pf_addr_domain + ":" + pf_addr_bus + ":" + \
                pf_addr_slot + "." + pf_addr_function
            if pf_pci != pci_id:
                test.fail(
                    "The pf address show in vf nodedev-dumpxml is wrong\n")

    def create_interface():
        """
        Call different function to create interface according to the type.
        """
        new_iface = Interface('network')
        if vf_type == "vf":
            new_iface = create_hostdev_interface(vf_addr, managed, model)
        if vf_type == "vf_pool":
            netxml = create_hostdev_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            if not inactive_pool:
                virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        if vf_type == "macvtap":
            new_iface = Interface('direct')
            new_iface.source = {"dev": vf_name, "mode": "passthrough"}
            new_iface.mac_address = utils_net.generate_mac_address_simple()
        if vf_type == "macvtap_network":
            netxml = create_macvtap_network()
            result = virsh.net_define(netxml.xml, ignore_status=True)
            virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        return new_iface

    def detach_interface():
        """
        Detach interface:

        1. Detach interface from xml;
        2. Check the live xml after detach interface;
        3. Check the vf driver after detach interface.
        """
        def _detach_completed():
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail(
                            "The hostdev interface still in the guest xml after detach\n")
                    break
            driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr,
                             "driver")).split('/')[-1]
            logging.debug("The driver after vf detached from guest is %s\n",
                          driver)
            if managed == "no":
                # unmanaged: VF stays bound to vfio-pci; reattach manually
                if driver != "vfio-pci":
                    test.fail(
                        "The vf pci driver is not vfio-pci after detached from guest with managed as no\n")
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            elif driver != origin_driver:
                test.fail(
                    "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n"
                    % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail(
                            "The macvtap interface still exist in the guest xml after detach\n")
                    break

    def attach_interface():
        """
        Attach interface:

        1. Attach interface from xml;
        2. Check the vf driver after attach interface;
        3. Check the live xml after attach interface.

        :return: the matching interface element from the live xml.
        """
        if managed == "no":
            # unmanaged hostdev: detach the VF from the host driver first
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name, file_opt=new_iface.xml,
                                     flagstr=option, debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)
        get_ip_by_mac(mac_addr, timeout=60)
        device = live_xml.devices
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail(
                            "The driver of the hostdev interface is not vfio\n")
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr,
                             "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            # BUGFIX: initialize 'match' so a missing direct interface fails
            # the test instead of raising UnboundLocalError
            match = "no"
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
            if match != "yes":
                test.fail(
                    "The dev name or mode of macvtap interface is wrong after attach\n")
        return interface

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    operation = params.get("operation")
    driver = params.get("driver", "ixgbe")
    status_error = params.get("status_error", "no") == "yes"
    model = params.get("model", "")
    managed = params.get("managed", "yes")
    attach = params.get("attach", "")
    option = params.get("option", "")
    vf_type = params.get("vf_type", "")
    info_check = params.get("info_check", "no")
    info_type = params.get("info_type", "")
    vf_pool_source = params.get("vf_pool_source", "vf_list")
    loop_times = int(params.get("loop_times", "1"))
    start_vm = "yes" == params.get("start_vm", "yes")
    including_pf = "yes" == params.get("including_pf", "no")
    max_vfs_attached = "yes" == params.get("max_vfs_attached", "no")
    inactive_pool = "yes" == params.get("inactive_pool", "no")
    duplicate_vf = "yes" == params.get("duplicate_vf", "no")
    expected_error = params.get("error_msg", "")
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()
    if max_vfs_attached:
        # Attaching many VFs needs a pci-bridge controller for extra slots
        controller_devices = vmxml.get_devices("controller")
        pci_bridge_controllers = []
        for device in controller_devices:
            logging.debug(device)
            if device.type == 'pci' and device.model == "pci-bridge":
                pci_bridge_controllers.append(device)
        if not pci_bridge_controllers:
            pci_bridge_controller = Controller("controller")
            pci_bridge_controller.type = "pci"
            pci_bridge_controller.index = "1"
            pci_bridge_controller.model = "pci-bridge"
            vmxml.add_device(pci_bridge_controller)
            vmxml.sync()
    if start_vm:
        if not vm.is_dead():
            vm.destroy()
        vm.start()
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        session.close()
    else:
        if not vm.is_dead():
            vm.destroy()
    driver_dir = "/sys/bus/pci/drivers/%s" % driver
    pci_dirs = glob.glob("%s/0000*" % driver_dir)
    pci_device_dir = "/sys/bus/pci/devices"
    pci_address = ""
    net_name = "test-net"
    # Prepare interface xml
    try:
        pf_iface_name = ""
        pci_address = utils_misc.wait_for(find_pf, timeout=60)
        if not pci_address:
            test.cancel("no up pf found in the test machine")
        pci_id = pci_address.split("/")[-1]
        pf_name = os.listdir('%s/net' % pci_address)[0]
        bus_slot = ':'.join(pci_address.split(':')[1:])
        pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text
        logging.debug("The pci info of the sriov card is:\n %s", pci_info)
        max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1
        if info_check == 'yes' or max_vfs < 32:
            vf_num = max_vfs
            create_vfs(vf_num)
        else:
            vf_num = max_vfs // 2 + 1
            create_vfs(vf_num)
        vf_list = []
        vf_name_list = []
        # Collect VF pci ids and their host interface names
        for i in range(vf_num):
            vf = os.readlink("%s/virtfn%s" % (pci_address, str(i)))
            vf = os.path.split(vf)[1]
            vf_list.append(vf)
            vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0]
            vf_name_list.append(vf_name)
        if attach == "yes":
            vf_addr = vf_list[0]
            new_iface = create_interface()
            if inactive_pool:
                result = virsh.attach_device(vm_name, file_opt=new_iface.xml,
                                             flagstr=option,
                                             ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result, expected_error)
            else:
                mac_addr = new_iface.mac_address
                nodedev_pci_addr = create_nodedev_pci(vf_addr)
                origin_driver = os.readlink(
                    os.path.join(pci_device_dir, vf_addr,
                                 "driver")).split('/')[-1]
                logging.debug(
                    "The driver of vf before attaching to guest is %s\n",
                    origin_driver)
                count = 0
                while count < loop_times:
                    interface = attach_interface()
                    if vf_type in ["vf", "vf_pool"]:
                        vf_addr_attrs = interface.hostdev_address.attrs
                    if operation != "":
                        do_operation()
                    detach_interface()
                    count += 1
            if max_vfs_attached:
                interface_list = []
                for vf_addr in vf_list:
                    new_iface = create_interface()
                    mac_addr = new_iface.mac_address
                    nodedev_pci_addr = create_nodedev_pci(vf_addr)
                    attach_interface()
                    interface_list.append(new_iface)
                count = 0
                for new_iface in interface_list:
                    vf_addr = vf_list[count]
                    vf_addr_attrs = new_iface.hostdev_address.attrs
                    detach_interface()
                    count += 1
        if info_check == "yes":
            check_info()
        if including_pf:
            # A hostdev network including the PF address must be rejected
            vf_list = []
            pf_addr = pci_id
            vf_list.append(pf_addr)
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml, ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if duplicate_vf:
            # Duplicated VF addresses must be rejected on define and create
            vf_list.append(vf_list[0])
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml, ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
            result = virsh.net_create(netxml.xml, ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        # BUGFIX: only reset sriov_numvfs when a PF was actually found;
        # otherwise pci_address may be None/"" and the echo path is invalid
        if pci_address:
            process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        if vf_type == "vf_pool" or vf_type == "macvtap_network":
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name, ignore_status=True)
        backup_xml.sync()
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    # BUGFIX: compare against "yes" — the raw param string "no" is truthy,
    # so existing networks were removed even when removal was disabled
    remove_existing = "yes" == params.get("net_define_undefine_remove_existing",
                                          "yes")
    status_error = "yes" == params.get("status_error", "no")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        raise error.TestNAError("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)
    if remove_existing:
        for netxml in backup.values():
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"
    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""
    try:
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status
        # If defining network succeed, then trying to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status
        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")
        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)
        # Undefine network
        undefine_result = virsh.net_undefine(undefine_options, undefine_extra,
                                             **virsh_dargs)
        if trans_ref != "define":
            logging.debug(undefine_result)
        undefine_status = undefine_result.exit_status
    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in leftovers.values():
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in backup.values():
            # If network is transient
            if ((not backup_state[netxml.name]['persistent']) and
                    backup_state[netxml.name]['active']):
                netxml.create()
                continue
            # autostart = True requires persistent = True first!
            for state in ['persistent', 'autostart', 'active']:
                try:
                    netxml[state] = backup_state[netxml.name][state]
                # BUGFIX: Python-2-only "except E, detail" syntax was a
                # SyntaxError under Python 3
                except xcepts.LibvirtXMLError as detail:
                    fail_flag = 1
                    result_info.append(str(detail))
        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml

    # 6) Check result: fail_flag/result_info were collected above but never
    # acted on, leaving the docstring's final step unimplemented
    if fail_flag:
        raise error.TestFail("Test failed: %s" % result_info)
def run(test, params, env):
    """
    Test command: virsh net-event

    1. Prepare a new network.
    2. Running virsh net-event with different options, and start/stop
       the network if needed, then check the output of net-event.
    3. Clean the environment.
    """
    # Test parameters: what network to watch and which net-event options to use
    prepare_net = "yes" == params.get("prepare_net", "yes")
    net_addr = params.get("net_addr")
    net_name = params.get("net_name")
    net_event_list = "yes" == params.get("net_event_list", "no")
    net_event_loop = "yes" == params.get("net_event_loop", "no")
    net_event_name = params.get("net_event_name")
    net_event_timeout = params.get("net_event_timeout")
    net_event_amount = int(params.get("net_event_amount", 1))
    status_error = "yes" == params.get("status_error", "no")
    net_event_option = params.get("net_event_option", "")
    virsh_dargs = {'debug': True, 'ignore_status': True}
    net_event_interrupt = False
    libv_net = None
    expected_event_list = []
    # Interactive virsh shell so net-event can run in the background while
    # this test triggers network lifecycle changes
    virsh_session = aexpect.ShellSession(virsh.VIRSH_EXEC, auto_close=True)

    def trigger_net_event(event_amount=1):
        """
        Trigger network start/stop actions in event_number times.

        :param event_amount: total number of lifecycle events to generate;
                             alternates Started/Stopped, ending with an extra
                             Started when the amount is odd.
        :return: list of expected event strings, in order.
        """
        i = event_amount // 2
        event_list = []
        try:
            while i > 0:
                virsh.net_start(net_name, **virsh_dargs)
                event_list.append("Started")
                virsh.net_destroy(net_name, **virsh_dargs)
                event_list.append("Stopped")
                i -= 1
            if event_amount % 2:
                virsh.net_start(net_name, **virsh_dargs)
                event_list.append("Started")
        finally:
            # NOTE(review): 'return' inside 'finally' swallows any in-flight
            # exception from the virsh calls above — confirm this is intended
            # (virsh_dargs uses ignore_status=True, so failures are unlikely
            # to raise here anyway)
            return event_list

    def check_output(output, expected_event_list):
        """
        Check received net-event in output.

        :param output: The virsh shell output, e.g.:
                       virsh # event 'lifecycle' for network virttest_net: Started
                       events received: 1
        :param expected_event_list: A list of expected events
                                    ['Started', 'Stopped', ..]
        """
        event_match_str = "event 'lifecycle' for network %s: %s"
        # Drop the 5-line virsh banner; when not interrupted also drop the
        # trailing "events received" summary and prompt
        if net_event_interrupt:
            output = output.strip().splitlines()[5:]
        else:
            output = output.strip().splitlines()[5:-2]
        output = [o.replace("virsh #", "").strip() for o in output]
        # Both order and content should match
        index = 0
        for event_str in expected_event_list:
            match_str = event_match_str % (net_name, event_str)
            logging.debug("Expected output: %s", match_str)
            logging.debug("Actual output: %s", output[index])
            if not output[index].count(match_str):
                test.fail("Event received not match")
            index += 1

    try:
        if prepare_net:
            # Define a persistent test network, then make sure it is stopped
            # so the test controls every lifecycle transition
            libv_net = utlv.LibvirtNetwork("vnet", address=net_addr,
                                           net_name=net_name, persistent=True)
            # Destroy the network
            if virsh.net_state_dict()[net_name]['active']:
                virsh.net_destroy(net_name)
            logging.info("Defined network %s", net_name)
        if net_event_list:
            net_event_option += " --list"
        if net_event_loop:
            net_event_option += " --loop"
        if not status_error and not net_event_list:
            # Assemble the net-event command
            net_event_cmd = "net-event %s" % net_event_option
            if net_name:
                net_event_cmd += " --network %s" % net_name
            if net_event_name:
                net_event_cmd += " --event %s" % net_event_name
            if net_event_timeout:
                net_event_cmd += " --timeout %s" % net_event_timeout
                if not status_error:
                    net_event_timeout = int(net_event_timeout)
            # Run the command in a new virsh session, then waiting for
            # 'lifecycle' events
            logging.info("Sending '%s' to virsh shell", net_event_cmd)
            virsh_session.sendline(net_event_cmd)
        else:
            # Error cases and --list run synchronously; just check the status
            result = virsh.net_event(network=net_name, event=net_event_name,
                                     event_timeout=net_event_timeout,
                                     options=net_event_option, **virsh_dargs)
            utlv.check_exit_status(result, status_error)
        if not status_error:
            # Verify 'lifecycle' events
            if not net_event_list and net_event_name == 'lifecycle':
                expected_event_list = trigger_net_event(net_event_amount)
                if net_event_timeout:
                    # Make sure net-event will timeout on time
                    time.sleep(net_event_timeout)
                elif net_event_loop:
                    # --loop never exits by itself; interrupt the session
                    virsh_session.send_ctrl("^C")
                    net_event_interrupt = True
                ret_output = virsh_session.get_stripped_output()
                check_output(ret_output, expected_event_list)
    finally:
        virsh_session.close()
        if libv_net:
            libv_net.cleanup()
def run_virsh_net_list(test, params, env):
    """
    Test command: virsh net-list.

    The command returns list of networks.
    1.Get all parameters from configuration.
    2.Get current network's status(State, Autostart).
    3.Do some prepare works for testing.
    4.Perform virsh net-list operation.
    5.Recover network status.
    6.Confirm the result.
    """
    option = params.get("net_list_option", "")
    extra = params.get("net_list_extra", "")
    status_error = params.get("status_error", "no")
    net_name = params.get("net_list_name", "default")
    persistent = params.get("net_list_persistent", "yes")
    net_status = params.get("net_list_error", "active")
    tmp_xml = os.path.join(test.tmpdir, "tmp.xml")
    # Record the network's current state so it can be restored afterwards
    net_current_status = "active"
    autostart_status = "yes"
    if not virsh.net_state_dict()[net_name]["active"]:
        net_current_status = "inactive"
    if not virsh.net_state_dict()[net_name]["autostart"]:
        autostart_status = "no"

    # Create a transient network.
    try:
        if persistent == "no":
            # Dump the definition, drop the persistent network, then
            # re-create it as transient from the dumped xml
            virsh.net_dumpxml(net_name, to_file=tmp_xml, ignore_status=False)
            if net_current_status == "inactive":
                # NOTE(review): destroying an *inactive* network normally
                # fails — this condition looks inverted; confirm against the
                # original test
                virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_undefine(net_name, ignore_status=False)
            virsh.net_create(tmp_xml, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Transient network test failed!")

    # Prepare network's status for testing.
    if net_status == "active":
        try:
            if not virsh.net_state_dict()[net_name]["active"]:
                virsh.net_start(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Active network test failed!")
    else:
        try:
            if virsh.net_state_dict()[net_name]["active"]:
                virsh.net_destroy(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Inactive network test failed!")

    # The actual command under test
    result = virsh.net_list(option, extra, ignore_status=True)
    status = result.exit_status
    output = result.stdout.strip()

    # Recover network
    try:
        if persistent == "no":
            # Replace the transient network with the saved persistent one
            virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_define(tmp_xml, ignore_status=False)
            if net_current_status == "active":
                virsh.net_start(net_name, ignore_status=False)
            if autostart_status == "yes":
                virsh.net_autostart(net_name, ignore_status=False)
        else:
            if net_current_status == "active" and net_status == "inactive":
                virsh.net_start(net_name, ignore_status=False)
            elif net_current_status == "inactive" and net_status == "active":
                virsh.net_destroy(net_name, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Recover network failed!")

    # check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        if option == "--inactive":
            if net_status == "active":
                if re.search(net_name, output):
                    raise error.TestFail("Found an active network with"
                                         " --inactive option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Found no inactive networks with"
                                             " --inactive option")
                else:
                    # If network is transient, after net-destroy it,
                    # it will disappear.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive networks"
                                             " with --inactive option")
        elif option == "":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with no"
                                         " option")
            else:
                if re.search(net_name, output):
                    raise error.TestFail("Found inactive network with"
                                         " no option")
        elif option == "--all":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with"
                                         " --all option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Can't find inactive network with"
                                             " --all option")
                else:
                    # If network is transient, after net-destroy it,
                    # it will disappear.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive network"
                                             " with --all option")
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result

    :param test: test object used to report cancel/fail
    :param params: dict of test parameters from the cfg file
    :param env: test environment object (unused directly here)
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")
    expect_msg = params.get("net_define_undefine_err_msg")

    # define multi ip/dhcp sections in network
    multi_ip = "yes" == params.get("multi_ip", "no")
    netmask = params.get("netmask")
    prefix_v6 = params.get("prefix_v6")
    single_v6_range = "yes" == params.get("single_v6_range", "no")
    # Get 2nd ipv4 dhcp range
    dhcp_ranges_start = params.get("dhcp_ranges_start", None)
    dhcp_ranges_end = params.get("dhcp_ranges_end", None)

    # Get 2 groups of ipv6 ip address and dhcp section
    address_v6_1 = params.get("address_v6_1")
    dhcp_ranges_v6_start_1 = params.get("dhcp_ranges_v6_start_1", None)
    dhcp_ranges_v6_end_1 = params.get("dhcp_ranges_v6_end_1", None)

    address_v6_2 = params.get("address_v6_2")
    dhcp_ranges_v6_start_2 = params.get("dhcp_ranges_v6_start_2", None)
    dhcp_ranges_v6_end_2 = params.get("dhcp_ranges_v6_end_2", None)

    # Edit net xml forward/ip part then define/start to check invalid setting
    edit_xml = "yes" == params.get("edit_xml", "no")
    address_v4 = params.get("address_v4")
    nat_port_start = params.get("nat_port_start")
    nat_port_end = params.get("nat_port_end")
    test_port = "yes" == params.get("test_port", "no")
    loop = int(params.get("loop", 1))

    # Get params about creating a bridge
    bridge = params.get('bridge', None)
    create_bridge = "yes" == params.get('create_bridge', 'no')
    ovs_bridge = "yes" == params.get('ovs_bridge', 'no')
    iface_name = utils_net.get_net_if(state="UP")[0]

    # Get params about creating a network
    create_netxml = "yes" == params.get("create_netxml", "no")
    domain = params.get('domain', None)
    forward = params.get("forward", None)
    net_dns_txt = params.get("net_dns_txt", None)
    net_bandwidth_inbound = params.get("net_bandwidth_inbound", None)
    net_bandwidth_outbound = params.get("net_bandwidth_outbound", None)
    mac = params.get("mac")

    # Edit the created network xml to get the xml to be tested
    del_mac = "yes" == params.get('del_mac', 'no')
    del_ip = "yes" == params.get('del_ip', 'no')
    add_dev = "yes" == params.get('add_dev', 'no')
    virtualport = 'yes' == params.get("virtualport", "no")
    virtualport_type = params.get("virtualport_type")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    virsh_uri = params.get("virsh_uri")
    if virsh_uri and not utils_split_daemons.is_modular_daemon():
        virsh_uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)
    logging.debug("Get network xml as testnet_xml: %s" % testnet_xml)

    # NOTE(review): remove_existing is the raw "yes"/"no" string, so this
    # branch is taken for both values unless the param is empty — confirm
    # whether a "yes" == comparison was intended.
    if remove_existing:
        for netxml in list(backup.values()):
            # orbital_nuclear_strike: avocado-vt helper that force-removes
            # the network (destroy + undefine).
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    # Map the options_ref param onto concrete define/undefine arguments.
    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"
    define_extra = undefine_extra = extra_args
    # Extra args only apply to the command under test.
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        # Re-point virsh at the unprivileged polkit connection.
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        # Unprivileged user must be able to read the XML file.
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    if params.get('net_define_undefine_readonly', 'no') == 'yes':
        virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True,
                       'readonly': True}
    try:
        if edit_xml:
            # Replace the network's <ip> element with the configured one.
            ipxml_v4 = network_xml.IPXML()
            ipxml_v4.address = address_v4
            ipxml_v4.netmask = netmask
            range_4 = network_xml.RangeXML()
            range_4.attrs = {"start": dhcp_ranges_start,
                             "end": dhcp_ranges_end}
            ipxml_v4.dhcp_ranges = range_4
            testnet_xml.del_ip()
            testnet_xml.set_ip(ipxml_v4)
            if test_port:
                nat_port = {"start": nat_port_start, "end": nat_port_end}
                testnet_xml.nat_port = nat_port
            testnet_xml.debug_xml()
        if multi_ip:
            # Enabling IPv6 forwarding with RA routes without accept_ra set to 2
            # is likely to cause routes loss
            sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra'
            original_accept_ra = process.run(sysctl_cmd + ' -n').stdout_text
            if original_accept_ra != '2':
                process.system(sysctl_cmd + '=2')
            # add another ipv4 address and dhcp range
            set_ip_section(testnet_xml, address_v4, ipv6=False,
                           netmask=netmask,
                           dhcp_ranges_start=dhcp_ranges_start,
                           dhcp_ranges_end=dhcp_ranges_end)
            # add ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_1, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_1,
                           dhcp_ranges_end=dhcp_ranges_v6_end_1)
            # 2nd ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_2, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_2,
                           dhcp_ranges_end=dhcp_ranges_v6_end_2)
        if create_netxml:
            # Build a customized network XML via the libvirt_network helper.
            net_dict = {'del_nat_attrs': True, 'del_ip': del_ip,
                        'dns_txt': net_dns_txt, 'domain': domain,
                        'bridge': bridge, 'forward': forward,
                        'interface_dev': iface_name,
                        'virtualport': virtualport,
                        'virtualport_type': virtualport_type,
                        'mac': mac,
                        'net_bandwidth_inbound': net_bandwidth_inbound,
                        'net_bandwidth_outbound': net_bandwidth_outbound}
            logging.debug("net_dict is %s" % net_dict)
            testnet_xml = libvirt_network.modify_network_xml(net_dict,
                                                             testnet_xml)
        testnet_xml.debug_xml()
        if create_bridge:
            if ovs_bridge:
                utils_net.create_ovs_bridge(bridge, ignore_status=False)
            else:
                utils_net.create_linux_bridge_tmux(bridge, iface_name,
                                                   ignore_status=False)
        # Run test case
        while loop:
            try:
                define_result = virsh.net_define(define_options, define_extra,
                                                 **virsh_dargs)
                logging.debug(define_result)
                define_status = define_result.exit_status

                # Check network states after define
                if check_states and not define_status:
                    net_state = virsh_instance.net_state_dict()
                    if (net_state[net_name]['active'] or
                            net_state[net_name]['autostart'] or
                            not net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "defined network: %s" %
                                           str(net_state))

                if define_status == 1 and status_error and expect_msg:
                    logging.debug("check result is %s, expect_msg is %s" % (
                        define_result, expect_msg))
                    libvirt.check_result(define_result, expect_msg.split(';'))

                # If defining network succeed, then trying to start it.
                if define_status == 0:
                    start_result = virsh.net_start(net_name, extra="",
                                                   **virsh_dargs)
                    logging.debug(start_result)
                    start_status = start_result.exit_status

                if trans_ref == "trans":
                    if define_status:
                        fail_flag = 1
                        result_info.append(
                            "Define network with right command failed.")
                    else:
                        if start_status:
                            fail_flag = 1
                            result_info.append(
                                "Found wrong network states for "
                                "defined network: %s" % str(net_state))

                # Check network states after start
                if check_states and not status_error:
                    net_state = virsh_instance.net_state_dict()
                    if (not net_state[net_name]['active'] or
                            net_state[net_name]['autostart'] or
                            not net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "started network: %s" %
                                           str(net_state))
                    # Try to set autostart
                    virsh.net_autostart(net_name, **virsh_dargs)
                    net_state = virsh_instance.net_state_dict()
                    if not net_state[net_name]['autostart']:
                        fail_flag = 1
                        result_info.append(
                            "Failed to set autostart for network %s" %
                            net_name)
                    # Restart libvirtd and check state
                    # Close down persistent virsh session before libvirtd restart
                    if hasattr(virsh_instance, 'close_session'):
                        virsh_instance.close_session()
                    libvirtd = utils_libvirtd.Libvirtd()
                    libvirtd.restart()
                    # Need to redefine virsh_instance after libvirtd restart
                    virsh_instance = virsh.VirshPersistent(**virsh_dargs)
                    net_state = virsh_instance.net_state_dict()
                    if (not net_state[net_name]['active'] or
                            not net_state[net_name]['autostart']):
                        fail_flag = 1
                        result_info.append(
                            "Found wrong network state after restarting"
                            " libvirtd: %s" % str(net_state))
                    logging.debug("undefine network:")
                    # prepare the network status
                    if not net_persistent:
                        virsh.net_undefine(net_name, ignore_status=False)
                    if not net_active:
                        virsh.net_destroy(net_name, ignore_status=False)
                    undefine_status = virsh.net_undefine(
                        undefine_options, undefine_extra,
                        **virsh_dargs).exit_status
                    net_state = virsh_instance.net_state_dict()
                    if net_persistent:
                        # Undefining a persistent network must succeed; an
                        # active one becomes transient, an inactive one is
                        # removed entirely.
                        if undefine_status:
                            fail_flag = 1
                            result_info.append(
                                "undefine should succeed but failed")
                        if net_active:
                            if (not net_state[net_name]['active'] or
                                    net_state[net_name]['autostart'] or
                                    net_state[net_name]['persistent']):
                                fail_flag = 1
                                result_info.append(
                                    "Found wrong network states for "
                                    "undefined network: %s" % str(net_state))
                        else:
                            if net_name in net_state:
                                fail_flag = 1
                                result_info.append(
                                    "Transient network should not exists "
                                    "after undefine : %s" % str(net_state))
                    else:
                        # Undefining a transient network must fail.
                        if not undefine_status:
                            fail_flag = 1
                            result_info.append(
                                "undefine transient network should fail "
                                "but succeed: %s" % str(net_state))
                # Stop network for undefine test anyway
                destroy_result = virsh.net_destroy(net_name, extra="",
                                                   **virsh_dargs)
                logging.debug(destroy_result)

                # Undefine network
                if not check_states:
                    undefine_result = virsh.net_undefine(undefine_options,
                                                         undefine_extra,
                                                         **virsh_dargs)
                    if trans_ref != "define":
                        logging.debug(undefine_result)
                    undefine_status = undefine_result.exit_status
            except Exception:
                logging.debug(
                    "The define and undefine operation in loop %s failed. ",
                    loop)
            finally:
                loop = loop - 1

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])
        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml
        if create_bridge:
            if ovs_bridge:
                utils_net.delete_ovs_bridge(bridge, ignore_status=False)
            else:
                utils_net.delete_linux_bridge_tmux(bridge, iface_name,
                                                   ignore_status=False)

    # Check status_error
    # If fail_flag is set, it must be transaction test.
    if fail_flag:
        test.fail("Define network for transaction test "
                  "failed:%s" % result_info)

    # The logic to check result:
    # status_error&only undefine:it is negative undefine test only
    # status_error&(no undefine):it is negative define test only
    # (not status_error)&(only undefine):it is positive transaction test.
    # (not status_error)&(no undefine):it is positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Define an unexpected network, "
                              "and start it successfully.")
                else:
                    test.fail("Define an unexpected network, "
                              "but start it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Define network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but start it failed.")
def run(test, params, env):
    """
    Test command: virsh net-dumpxml.

    This command can output the network information as an XML dump to stdout.
    1.Get all parameters from config file.
    2.If test case's network status is inactive, destroy it.
    3.Perform virsh net-dumpxml operation.
    4.Recover test environment(network status).
    5.Confirm the test result.

    :param test: test object used to report cancel/error/fail
    :param params: dict of test parameters from the cfg file
    :param env: test environment object (unused directly here)
    """
    status_error = params.get("status_error", "no")
    net_ref = params.get("net_dumpxml_net_ref")
    net_name = params.get("net_dumpxml_network", "default")
    net_status = params.get("net_dumpxml_network_status", "active")
    # Fixed misspelled local name 'xml_flie' -> 'xml_file'.
    xml_file = params.get("net_dumpxml_xml_file", "default.xml")
    extra = params.get("net_dumpxml_extra", "")
    # Destination path of the dumped XML; renamed from 'network_xml' so the
    # local does not shadow the 'network_xml' module name used elsewhere in
    # this file.
    dump_xml_path = os.path.join(data_dir.get_tmp_dir(), xml_file)

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Run test case
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(net_name).stdout.strip()
    elif net_ref == "name":
        net_ref = net_name

    # Record current network state so it can be restored afterwards.
    net_status_current = "active"
    if not virsh.net_state_dict()[net_name]['active']:
        net_status_current = "inactive"
    if not virsh.net_state_dict()[net_name]['persistent']:
        test.error("Network is transient!")
    try:
        # Put the network into the state the test case requires.
        if net_status == "inactive" and net_status_current == "active":
            status_destroy = virsh.net_destroy(net_name,
                                               ignore_status=True).exit_status
            if status_destroy != 0:
                test.error("Network destroied failed!")

        virsh_dargs = {'ignore_status': True}
        if params.get('setup_libvirt_polkit') == 'yes':
            virsh_dargs['unprivileged_user'] = unprivileged_user
            virsh_dargs['uri'] = uri
        result = virsh.net_dumpxml(net_ref, extra, dump_xml_path,
                                   **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()
        # Validate the dumped XML against the network schema.
        xml_validate_cmd = "virt-xml-validate %s network" % dump_xml_path
        valid_s = process.run(xml_validate_cmd, ignore_status=True,
                              shell=True).exit_status

        # Check option valid or not.
        if extra.find("--") != -1:
            options = extra.split("--")
            for option in options:
                if option.strip() == "":
                    continue
                if not virsh.has_command_help_match("net-dumpxml",
                                                    option.strip()) and\
                   status_error == "no":
                    test.cancel("The current libvirt version"
                                " doesn't support '%s' option" %
                                option.strip())
    finally:
        # Recover network
        if net_status == "inactive" and net_status_current == "active":
            status_start = virsh.net_start(net_name,
                                           ignore_status=True).exit_status
            if status_start != 0:
                test.error("Network started failed!")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            test.fail("Run successfully with wrong command!")
        if err == "":
            test.fail("The wrong command has no error outputed!")
    elif status_error == "no":
        if status != 0:
            test.fail("Run failed with right command!")
        if valid_s != 0:
            test.fail("Command output is invalid!")
    else:
        test.error("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroied.
    5.Recover network environment.
    6.Confirm the test result.

    :param test: unused here; legacy autotest 'error' API reports results
    :param params: dict of test parameters from the cfg file
    :param env: test environment object (unused directly here)
    """
    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")

    # Confirm the network exists.
    output_all = virsh.net_list("--all").stdout.strip()
    if not re.search(network_name, output_all):
        raise error.TestNAError("Make sure the network exists!!")

    # Run test case
    # Resolve the reference passed to net-destroy: UUID or name.
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    # Get status of network and prepare network status.
    network_current_status = "active"
    try:
        if not virsh.net_state_dict()[network_name]['active']:
            network_current_status = "inactive"
            # Bring the network up only when the test wants it active.
            if network_status == "active":
                virsh.net_start(network_name)
        else:
            # Network is currently active; stop it if the test wants it down.
            if network_status == "inactive":
                virsh.net_destroy(network_name)
    except error.CmdError:
        raise error.TestError("Prepare network status failed!")

    status = virsh.net_destroy(net_ref, extra,
                               ignore_status=True).exit_status

    # Confirm the network has been destroied.
    # Treat a still-active network as a command failure even if the exit
    # status was 0.
    if virsh.net_state_dict()[network_name]['active']:
        status = 1

    # Recover network status
    try:
        if (network_current_status == "active" and
                not virsh.net_state_dict()[network_name]['active']):
            virsh.net_start(network_name)
        if (network_current_status == "inactive" and
                virsh.net_state_dict()[network_name]['active']):
            virsh.net_destroy(network_name)
    except error.CmdError:
        raise error.TestError("Recover network status failed!")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
    else:
        raise error.TestError("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test bridge support from network

    1) create a linux bridge and connect a physical interface to it
    2) define nwfilter with "vdsm-no-mac-spoofing"
    3) redefine the vm with the new create bridge and filter
    4) check if guest can get public ip after vm start
    5) check if guest and host can ping each other
    6) check if guest and host can ping outside
    7) start another vm connected to the same bridge
    8) check if the 2 guests can ping each other

    :param test: test object used to report cancel/error/fail
    :param params: dict of test parameters from the cfg file
    :param env: test environment object providing the VMs
    """

    def create_bridge(br_name, iface_name):
        """
        Create a linux bridge by virsh cmd:
        1. Stop NetworkManager and Start network service
        2. virsh iface-bridge <iface> <name> [--no-stp]

        :param br_name: bridge name
        :param iface_name: physical interface name
        :return: bridge created or raise exception
        """
        # Make sure the bridge not exist
        if libvirt.check_iface(br_name, "exists", "--all"):
            test.cancel("The bridge %s already exist" % br_name)
        # Create bridge
        # tmux keeps the command sequence alive even if the controlling
        # connection drops while the interface is being re-addressed.
        utils_package.package_install('tmux')
        cmd = 'tmux -c "ip link add name {0} type bridge; ip link set {1} up;' \
              ' ip link set {1} master {0}; ip link set {0} up;' \
              ' pkill dhclient; sleep 6; dhclient {0}; ifconfig {1} 0"'.format(br_name, iface_name)
        process.run(cmd, shell=True, verbose=True)

    def create_bridge_network(br_name, net_name):
        """
        Define and start the bridge type network

        :param br_name: name of the host bridge the network maps to
        :param net_name: name of the libvirt network to create
        """
        # check if network with the same name already exists
        output_all = virsh.net_list("--all").stdout.strip()
        if re.search(net_name, output_all):
            test.cancel("Network with the same name already exists!")
        test_xml = network_xml.NetworkXML(network_name="%s" % net_name)
        test_xml.forward = {"mode": "bridge"}
        test_xml.bridge = {"name": br_name}
        test_xml.create()

    def define_nwfilter(filter_name):
        """
        Define nwfilter vdsm-no-mac-spoofing with content like:
        <filter name='vdsm-no-mac-spoofing' chain='root'>
            <filterref filter='no-mac-spoofing'/>
            <filterref filter='no-arp-mac-spoofing'/>
        </filter>

        :param filter_name: the name of nwfilter
        :return: filter created or raise exception
        """
        filter_uuid = params.get("filter_uuid",
                                 "11111111-b071-6127-b4ec-111111111111")
        filter_params = {"filter_name": "vdsm-no-mac-spoofing",
                         "filter_chain": "root",
                         "filter_uuid": filter_uuid,
                         "filterref_name_1": "no-mac-spoofing",
                         "filterref_name_2": "no-arp-mac-spoofing"}
        filter_xml = libvirt.create_nwfilter_xml(filter_params).xml
        # Run command
        result = virsh.nwfilter_define(filter_xml, ignore_status=True,
                                       debug=True)
        if result.exit_status:
            test.fail("Failed to define nwfilter with %s" % filter_xml)

    def ping(src_ip, dest_ip, ping_count, timeout, session=None):
        """
        Wrap of ping

        :param src_ip: source address
        :param dest_ip: destination address
        :param ping_count: count of icmp packet
        :param timeout: timeout for the ping command
        :param session: local execution or session to execute the ping command
        :return: ping succeed or raise exception
        """
        status, output = utils_net.ping(dest=dest_ip, count=ping_count,
                                        interface=src_ip, timeout=timeout,
                                        session=session, force_ipv4=True)
        if status:
            test.fail("Fail to ping %s from %s" % (dest_ip, src_ip))

    def check_net_functions(guest_ip, ping_count, ping_timeout, guest_session,
                            host_ip, remote_url, endpoint_ip):
        """Verify host<->guest<->endpoint connectivity via a ping matrix."""
        # make sure host network works well
        # host ping remote url
        ping(host_ip, remote_url, ping_count, ping_timeout)
        # host ping guest
        ping(host_ip, guest_ip, ping_count, ping_timeout)
        # guest ping host
        ping(guest_ip, host_ip, ping_count, ping_timeout,
             session=guest_session)
        # guest ping remote url
        ping(guest_ip, remote_url, ping_count, ping_timeout,
             session=guest_session)
        # guest ping endpoint
        ping(guest_ip, endpoint_ip, ping_count, ping_timeout,
             session=guest_session)

    # Get test params
    bridge_name = params.get("bridge_name", "test_br0")
    filter_name = params.get("filter_name", "vdsm-no-mac-spoofing")
    ping_count = params.get("ping_count", "5")
    ping_timeout = float(params.get("ping_timeout", "10"))
    iface_name = utils_net.get_net_if(state="UP")[0]
    bridge_script = NETWORK_SCRIPT + bridge_name
    iface_script = NETWORK_SCRIPT + iface_name
    iface_script_bk = os.path.join(data_dir.get_tmp_dir(),
                                   "iface-%s.bk" % iface_name)
    attach_interface = "yes" == params.get("attach_interface", "no")
    iface_model = params.get("iface_model", "virtio")
    # NOTE(review): eval() on a cfg-supplied string; cfg files are trusted
    # test input here, but ast.literal_eval would be safer.
    iface_source = eval(params.get("iface_source", "{'bridge':'test_br0'}"))
    iface_type = params.get("iface_type", None)
    iface_target = params.get("iface_target", "br_target")
    iface_alias = params.get("iface_alias", None)
    hotplug = "yes" == params.get("hotplug", "no")
    iface_driver = params.get("iface_driver", None)
    start_vm2 = "yes" == params.get("start_vm2", "no")
    create_network = "yes" == params.get("create_network", "no")
    update_device = "yes" == params.get("update_with_diff_type", "no")

    vms = params.get("vms").split()
    if len(vms) <= 1:
        test.cancel("Need two VMs to test")
    else:
        vm1_name = vms[0]
        vm2_name = vms[1]

    vm1 = env.get_vm(vm1_name)
    vm2 = env.get_vm(vm2_name)

    # Back up the interface script
    process.run("cp %s %s" % (iface_script, iface_script_bk),
                shell=True, verbose=True)
    # Back up vm xml
    vm1_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm1_name)
    vm2_xml_bak = vm_xml.VMXML.new_from_dumpxml(vm2_name)

    # Stop NetworkManager service
    NM_service = service.Factory.create_service("NetworkManager")
    # Remember the original state so it can be restored in the finally block.
    NM_status = NM_service.status()
    if not NM_status:
        NM_service.start()
    mac = utils_net.generate_mac_address_simple()

    try:
        create_bridge(bridge_name, iface_name)
        define_nwfilter(filter_name)
        if hotplug:
            err_msgs = ("No more available PCI slots",
                        "No more available PCI addresses")
            # delete the original interface on the vm before hot-plug
            if vm1.is_alive():
                vm1.destroy()
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm1_name)
            iface_xml = vmxml.get_devices('interface')[0]
            logging.debug("Delete the original interface")
            vmxml.del_device(iface_xml)
            vmxml.sync()
            vm1.start()
            # do hot-plug
            if attach_interface:
                logging.info("Try to hot-plug interface")
                options = ("%s %s --model %s --mac %s" %
                           (iface_type, iface_source['bridge'],
                            iface_model, mac))
                ret = virsh.attach_interface(vm1_name, options,
                                             ignore_status=True)
            else:
                logging.info("Try to hot-plug device")
                if create_network:
                    create_bridge_network(bridge_name,
                                          iface_source["network"])
                target = str({'dev': iface_target})
                iface_alias = str({'name': iface_alias})
                vm_iface_source = str(iface_source)
                iface_params = {"type": iface_type,
                                "source": vm_iface_source,
                                "filter": filter_name, "mac": mac,
                                'alias': iface_alias, 'target': target,
                                'model': iface_model,
                                'driver': iface_driver}
                attach_xml = interface.Interface(iface_params['type'])
                attach_xml.xml = libvirt.modify_vm_iface(vm1_name, 'get_xml',
                                                         iface_params)
                ret = virsh.attach_device(vm1_name, attach_xml.xml,
                                          ignore_status=True, debug=True)
            if ret.exit_status:
                if any([msg in ret.stderr for msg in err_msgs]):
                    test.error("No more pci slots, can't attach more devices")
                else:
                    test.fail("Failed to attach-interface: %s" %
                              ret.stderr.strip())
            else:
                logging.debug("Hot-plug interface or device pass")
                if update_device:
                    # As the interface type will change to actual type "bridge"
                    # in live xml, we need to ensure the update with original
                    # "network" type will not fail.
                    # Try to delete the nwfilter with original type in
                    # iface_params
                    update_xml = interface.Interface(iface_type)
                    iface_params_update = {"del_filter": "yes",
                                           "type": "network",
                                           "source": vm_iface_source}
                    update_xml.xml = libvirt.modify_vm_iface(
                        vm1_name, 'get_xml', iface_params_update)
                    ret = virsh.update_device(vm1_name, update_xml.xml,
                                              ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret)
        else:
            # Cold-plug: rewrite vm1's interface in the persistent XML, then
            # restart the guest so it takes effect.
            vm_iface_source = str(iface_source)
            vm1_iface_params = {"type": "bridge", "source": vm_iface_source,
                                "filter": filter_name, "mac": mac,
                                'driver': iface_driver,
                                "iface_model": iface_model}
            libvirt.modify_vm_iface(vm1_name, "update_iface",
                                    vm1_iface_params)
            if vm1.is_alive():
                vm1.destroy()
            vm1.start()
        # apply ip address as it may not be initialized
        session1 = session2 = None
        session1 = vm1.wait_for_serial_login()
        utils_net.restart_guest_network(session1)
        output = session1.cmd_output("ifconfig || ip a")
        logging.debug("guest1 ip info %s" % output)

        # Check guest's network function
        host_ip = utils_net.get_ip_address_by_interface(bridge_name)
        remote_url = params.get("remote_ip", "www.google.com")

        try:
            vm1_ip = utils_net.get_guest_ip_addr(session1, mac)
        except Exception as errs:
            test.fail("vm1 can't get IP with the new create bridge: %s" %
                      errs)
        if hotplug:
            # reboot vm1 then check network function to ensure the interface
            # still there and works fine
            logging.info("reboot the vm")
            virsh.reboot(vm1)
            if session1 is None:
                session1 = vm1.wait_for_serial_login()
            ping(vm1_ip, remote_url, ping_count, ping_timeout,
                 session=session1)
            # restart libvirtd service then check the interface still works
            # fine
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            vm1.cleanup_serial_console()
            vm1.create_serial_console()
            session1 = vm1.wait_for_serial_login()
            ping(vm1_ip, remote_url, ping_count, ping_timeout,
                 session=session1)
            logging.info("after reboot and restart libvirtd, the network "
                         "works fine")
            if iface_driver:
                # Verify driver queue settings are visible in the guest via
                # ethtool.
                try:
                    driver_dict = eval(iface_driver)
                    if session1 is None:
                        session1 = vm1.wait_for_serial_login()
                    guest_iface_info = session1.cmd_output("ip l").strip()
                    guest_iface_name = re.findall(
                        r"^\d+: (\S+?)[@:].*state UP.*$", guest_iface_info,
                        re.MULTILINE)[0]
                    comb_size = driver_dict.get('queues')
                    rx_size = driver_dict.get('rx_queue_size')
                    session1.cmd_status("ethtool -L %s combined %s" %
                                        (guest_iface_name, comb_size))
                    ret, outp = session1.cmd_status_output(
                        "ethtool -l %s" % guest_iface_name)
                    logging.debug("ethtool cmd output:%s" % outp)
                    if not ret:
                        pre_comb = re.search(
                            "Pre-set maximums:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        cur_comb = re.search(
                            "Current hardware settings:[\s\S]*?Combined:.*?(\d+)",
                            outp).group(1)
                        if int(pre_comb) != int(comb_size) or int(
                                cur_comb) != int(comb_size):
                            test.fail(
                                "Fail to check the combined size: setting: %s,"
                                "Pre-set: %s, Current-set: %s"
                                % (comb_size, pre_comb, cur_comb))
                        else:
                            logging.info(
                                "Getting correct Pre-set and Current set value"
                            )
                    else:
                        test.error("ethtool list fail: %s" % outp)
                    # as tx_queue size is only supported for vhost-user
                    # interface, only check rx_queue size
                    ret1, outp1 = session1.cmd_status_output(
                        "ethtool -g %s" % guest_iface_name)
                    logging.debug("guest queue size setting is %s" % outp1)
                    if not ret1:
                        pre_set = re.search(
                            r"Pre-set maximums:\s*RX:\s*(\d+)",
                            outp1).group(1)
                        cur_set = re.search(
                            r"Current hardware settings:\s*RX:\s*(\d+)",
                            outp1).group(1)
                        if int(pre_set) != int(rx_size) or int(
                                cur_set) != int(rx_size):
                            test.fail("Fail to check the rx_queue_size!")
                except Exception as errs:
                    test.fail("fail to get driver info")
            # hot-unplug interface/device
            if attach_interface:
                ret = virsh.detach_interface(vm1_name, "bridge",
                                             ignore_status=True)
            else:
                ret = virsh.detach_device(vm1_name, attach_xml.xml,
                                          ignore_status=True, debug=True)
            if ret.exit_status:
                test.fail("Hot-unplug interface/device fail")
            else:
                logging.info("hot-unplug interface/device succeed")
        else:
            if start_vm2:
                # Start vm2 connect to the same bridge
                mac2 = utils_net.generate_mac_address_simple()
                vm2_iface_params = {"type": "bridge",
                                    "source": vm_iface_source,
                                    "filter": filter_name,
                                    "mac": mac2}
                libvirt.modify_vm_iface(vm2_name, "update_iface",
                                        vm2_iface_params)
                if vm2.is_alive():
                    vm2.destroy()
                vm2.start()

                # Check if vm1 and vm2 can ping each other
                try:
                    utils_net.update_mac_ip_address(vm2, timeout=120)
                    vm2_ip = vm2.get_address()
                except Exception as errs:
                    test.fail(
                        "vm2 can't get IP with the new create bridge: %s" %
                        errs)
                session2 = vm2.wait_for_login()
                # make sure guest has got ip address
                utils_net.restart_guest_network(session2)
                output2 = session2.cmd_output("ifconfig || ip a")
                logging.debug("guest ip info %s" % output2)
                # check 2 guests' network functions
                check_net_functions(vm1_ip, ping_count, ping_timeout,
                                    session1, host_ip, remote_url, vm2_ip)
                check_net_functions(vm2_ip, ping_count, ping_timeout,
                                    session2, host_ip, remote_url, vm1_ip)
    finally:
        logging.debug("Start to restore")
        vm1_xml_bak.sync()
        vm2_xml_bak.sync()
        virsh.nwfilter_undefine(filter_name, ignore_status=True)
        if libvirt.check_iface(bridge_name, "exists", "--all"):
            virsh.iface_unbridge(bridge_name, timeout=60, debug=True)
        if os.path.exists(iface_script_bk):
            process.run("mv %s %s" % (iface_script_bk, iface_script),
                        shell=True, verbose=True)
        if os.path.exists(bridge_script):
            process.run("rm -rf %s" % bridge_script, shell=True,
                        verbose=True)
        # Tear the bridge down and hand the address back to the physical
        # interface.
        cmd = 'tmux -c "ip link set {1} nomaster; ip link delete {0};' \
              'pkill dhclient; sleep 6; dhclient {1}"'.format(bridge_name,
                                                              iface_name)
        process.run(cmd, shell=True, verbose=True)
        # reload network configuration
        NM_service.restart()
        # recover NetworkManager
        if NM_status is True:
            NM_service.start()
        if 'network' in iface_source and iface_source[
                "network"] in virsh.net_state_dict():
            virsh.net_destroy(iface_source["network"], ignore_status=False)
def run(test, params, env):
    """
    Test command: virsh net-dhcp-leases

    1. Create a new network and run virsh command to check dhcp leases info.
    2. Attach an interface before or after start the domain, then check the
       dhcp leases info.
    3. Clean the environment.

    :param test: test object (provides fail/error/cancel).
    :param params: test parameter dictionary.
    :param env: test environment object (provides the VM under test).
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    net_name = params.get("net_name", "default")
    net_option = params.get("net_option", "")
    status_error = "yes" == params.get("status_error", "no")
    prepare_net = "yes" == params.get("prepare_net", "yes")
    hotplug_iface = "yes" == params.get("hotplug_interface", "no")
    filter_by_mac = "yes" == params.get("filter_by_mac", "no")
    invalid_mac = "yes" == params.get("invalid_mac", "no")
    expect_msg = params.get("leases_err_msg")
    # Generate a random string as the MAC address (17 chars, i.e. the length
    # of a valid MAC string but with invalid content)
    nic_mac = None
    if invalid_mac:
        nic_mac = utils_misc.generate_random_string(17)
    # Command won't fail on old libvirt (< 1.3.1), so flip the case to
    # positive there; see BZ#1261432.
    if not libvirt_version.version_compare(1, 3, 1) and invalid_mac:
        logging.debug("Reset case to positive as BZ#1261432")
        status_error = False

    def create_network():
        """
        Create a NAT network with a DHCP range, built from test params.
        """
        net_ip_addr = params.get("net_ip_addr", "192.168.200.1")
        net_ip_netmask = params.get("net_ip_netmask", "255.255.255.0")
        net_dhcp_start = params.get("net_dhcp_start", "192.168.200.2")
        net_dhcp_end = params.get("net_dhcp_end", "192.168.200.254")
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'mode': "nat"}
        ipxml = network_xml.IPXML()
        ipxml.address = net_ip_addr
        ipxml.netmask = net_ip_netmask
        ipxml.dhcp_ranges = {'start': net_dhcp_start, "end": net_dhcp_end}
        netxml.set_ip(ipxml)
        netxml.create()

    def get_net_dhcp_leases(output):
        """
        Parse `virsh net-dhcp-leases` table output into a list of dicts.

        :param output: raw stdout of the virsh command.
        :return: list of {column-header: value} dicts, one per lease row.
        """
        leases = []
        lines = output.splitlines()
        if not lines:
            return leases
        try:
            # Pattern groups up to four space-separated words as one field so
            # multi-word column headers (e.g. "Expiry Time") stay together.
            pat = r"\S+\ ?\S+\ ?\S+\ ?\S+|\S+"
            keys = re.findall(pat, lines[0])
            # lines[1] is the "-----" separator row, so data starts at [2:]
            for line in lines[2:]:
                values = re.findall(pat, line)
                leases.append(dict(list(zip(keys, values))))
            return leases
        except Exception:
            test.error("Fail to parse output: %s" % output)

    def get_ip_by_mac(mac_addr, try_dhclint=False, timeout=120):
        """
        Get a guest interface's IP address by its MAC address.

        If try_dhclint is True, run dhclient in the guest to try to
        allocate an IP address for the interface first.

        :param mac_addr: MAC address to look up inside the guest.
        :param try_dhclint: whether to run dhclient when no IP is found.
        :param timeout: serial-login timeout in seconds.
        :return: the IP address string, or None if none was found.
        """
        session = vm.wait_for_login(login_nic_index, timeout=timeout,
                                    serial=True)

        def f():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = utils_misc.wait_for(f, 10)
            if ip_addr is None:
                iface_name = utils_net.get_linux_ifname(session, mac_addr)
                if try_dhclint:
                    session.cmd("dhclient %s" % iface_name)
                    ip_addr = utils_misc.wait_for(f, 10)
                else:
                    # No IP for the interface, just print the interface name
                    logging.warn("Find '%s' with MAC address '%s', "
                                 "but which has no IP address",
                                 iface_name, mac_addr)
        finally:
            session.close()
        return ip_addr

    def check_net_lease(net_leases, expected_find=True):
        """
        Check the dhcp lease info against the domain XML and guest IPs.

        :param net_leases: parsed lease dicts from get_net_dhcp_leases().
        :param expected_find: whether a lease matching the VM is expected.
        """
        if not net_leases:
            if expected_find:
                test.fail("Lease info is empty")
            else:
                logging.debug("No dhcp lease info find as expected")
        else:
            if not expected_find:
                test.fail("Find unexpected dhcp lease info: %s" % net_leases)
            find_mac = False
            for net_lease in net_leases:
                net_mac = net_lease['MAC address']
                # Strip the trailing prefix length from e.g. "192.168.200.2/24"
                # NOTE(review): assumes a two-digit prefix ("/NN") — confirm.
                net_ip = net_lease['IP address'][:-3]
                if vm_xml.VMXML.get_iface_by_mac(vm_name, net_mac):
                    find_mac = True
                    logging.debug("Find '%s' in domain XML", net_mac)
                else:
                    logging.debug("Not find '%s' in domain XML", net_mac)
                    continue
                iface_ip = get_ip_by_mac(net_mac)
                if iface_ip and iface_ip != net_ip:
                    test.fail("Address '%s' is not expected" % iface_ip)
            if expected_find and not find_mac:
                test.fail("No matched MAC address")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if vm.is_alive():
        vm.destroy(gracefully=False)
    login_nic_index = 0
    new_nic_index = 0
    # Cleanup dirty dnsmaq, firstly get all network,and destroy all networks except
    # default
    net_state = virsh.net_state_dict(only_names=True)
    logging.debug("current networks: %s, destroy and undefine networks "
                  "except default!", net_state)
    for net in net_state:
        if net != "default":
            virsh.net_destroy(net)
            virsh.net_undefine(net)
    # Kill any leftover dnsmasq processes not belonging to the default network
    cmd = "ps aux|grep dnsmasq|grep -v grep | grep -v default | awk '{print $2}'"
    pid_list = results_stdout_52lts(process.run(cmd, shell=True)).strip().splitlines()
    logging.debug(pid_list)
    for pid in pid_list:
        utils_misc.safe_kill(pid, signal.SIGKILL)
    # Create new network
    if prepare_net:
        create_network()
    nets = virsh.net_state_dict()
    if net_name not in list(nets.keys()) and not status_error:
        test.error("Not find network '%s'" % net_name)
    expected_find = False
    try:
        result = virsh.net_dhcp_leases(net_name, mac=nic_mac,
                                       options=net_option, debug=True,
                                       ignore_status=True)
        utlv.check_exit_status(result, status_error)
        lease = get_net_dhcp_leases(result.stdout.strip())
        check_net_lease(lease, expected_find)
        if not status_error:
            iface_mac = utils_net.generate_mac_address_simple()
            if filter_by_mac:
                nic_mac = iface_mac
            op = "--type network --model virtio --source %s --mac %s" \
                 % (net_name, iface_mac)
            nic_params = {'mac': iface_mac, 'nettype': 'bridge',
                          'ip_version': 'ipv4'}
            login_timeout = 120
            if not hotplug_iface:
                # Cold-plug: attach with --config, then boot the guest
                op += " --config"
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                vm.start()
                new_nic_index = vm.get_nic_index_by_mac(iface_mac)
                if new_nic_index > 0:
                    login_nic_index = new_nic_index
            else:
                # Hot-plug: boot first, then attach to the running guest
                vm.start()
                # wait for VM start before hotplug interface
                vm.wait_for_serial_login()
                virsh.attach_interface(vm_name, option=op, debug=True,
                                       ignore_status=False)
                vm.add_nic(**nic_params)
                # As VM already started, so the login timeout could be shortened
                login_timeout = 10
            new_interface_ip = get_ip_by_mac(iface_mac, try_dhclint=True,
                                             timeout=login_timeout)
            # Allocate IP address for the new interface may fail, so only
            # check the result if get new IP address
            if new_interface_ip:
                expected_find = True
            result = virsh.net_dhcp_leases(net_name, mac=nic_mac,
                                           debug=False, ignore_status=True)
            utlv.check_exit_status(result, status_error)
            lease = get_net_dhcp_leases(result.stdout.strip())
            check_net_lease(lease, expected_find)
        else:
            if expect_msg:
                utlv.check_result(result, expect_msg.split(';'))
    finally:
        # Delete the new attached interface
        if new_nic_index > 0:
            vm.del_nic(new_nic_index)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        if prepare_net:
            virsh.net_destroy(net_name)
def run(test, params, env):
    """
    Test command: virsh net-destroy.

    The command can forcefully stop a given network.
    1.Make sure the network exists.
    2.Prepare network status.
    3.Perform virsh net-destroy operation.
    4.Check if the network has been destroied.
    5.Recover network environment.
    6.Confirm the test result.
    """
    net_ref = params.get("net_destroy_net_ref")
    extra = params.get("net_destroy_extra", "")
    network_name = params.get("net_destroy_network", "default")
    network_status = params.get("net_destroy_status", "active")
    status_error = params.get("status_error", "no")

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user and unprivileged_user.count('EXAMPLE'):
        unprivileged_user = '******'

    # Confirm the network exists.
    output_all = virsh.net_list("--all").stdout.strip()
    if not re.search(network_name, output_all):
        raise error.TestNAError("Make sure the network exists!!")

    # Resolve how the network is referenced on the command line.
    if net_ref == "uuid":
        net_ref = virsh.net_uuid(network_name).stdout.strip()
    elif net_ref == "name":
        net_ref = network_name

    # Remember whether the network was running before the test so it can be
    # restored later, then move it into the state the test case wants.
    network_current_status = "active"
    try:
        if not virsh.net_state_dict()[network_name]['active']:
            network_current_status = "inactive"
            if network_status == "active":
                virsh.net_start(network_name)
        elif network_status == "inactive":
            virsh.net_destroy(network_name)
    except error.CmdError:
        raise error.TestError("Prepare network status failed!")

    destroy_result = virsh.net_destroy(net_ref, extra, uri=uri, debug=True,
                                       unprivileged_user=unprivileged_user,
                                       ignore_status=True)
    status = destroy_result.exit_status

    # Confirm the network has been destroied: a still-active network means
    # the destroy did not really take effect, so count it as a failure.
    if virsh.net_state_dict()[network_name]['active']:
        status = 1

    # Recover network status
    try:
        still_active = virsh.net_state_dict()[network_name]['active']
        if network_current_status == "active" and not still_active:
            virsh.net_start(network_name)
        if network_current_status == "inactive" and still_active:
            virsh.net_destroy(network_name)
    except error.CmdError:
        raise error.TestError("Recover network status failed!")

    # Check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
    else:
        raise error.TestError("The status_error must be 'yes' or 'no'!")
def run(test, params, env):
    """
    Test command: virsh net-list.

    The command returns list of networks.
    1.Get all parameters from configuration.
    2.Get current network's status(State, Autostart).
    3.Do some prepare works for testing.
    4.Perform virsh net-list operation.
    5.Recover network status.
    6.Confirm the result.
    """
    option = params.get("net_list_option", "")
    extra = params.get("net_list_extra", "")
    status_error = params.get("status_error", "no")
    net_name = params.get("net_list_name", "default")
    persistent = params.get("net_list_persistent", "yes")
    # NOTE(review): desired network state is read from the "net_list_error"
    # key — the name looks historical; verify against the cfg files.
    net_status = params.get("net_list_error", "active")
    tmp_xml = os.path.join(test.tmpdir, "tmp.xml")
    # Record the network's current state/autostart so it can be restored
    net_current_status = "active"
    autostart_status = "yes"
    if not virsh.net_state_dict()[net_name]['active']:
        net_current_status = "inactive"
    if not virsh.net_state_dict()[net_name]['autostart']:
        autostart_status = "no"

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Create a transient network: dump the XML, drop the persistent
    # definition, then net-create from the saved XML.
    try:
        if persistent == "no":
            virsh.net_dumpxml(net_name, to_file=tmp_xml, ignore_status=False)
            if net_current_status == "inactive":
                # NOTE(review): net-destroy on an *inactive* network normally
                # fails; confirm whether "active" was intended here.
                virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_undefine(net_name, ignore_status=False)
            virsh.net_create(tmp_xml, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Transient network test failed!")

    # Prepare network's status for testing.
    if net_status == "active":
        try:
            if not virsh.net_state_dict()[net_name]['active']:
                virsh.net_start(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Active network test failed!")
    else:
        try:
            if virsh.net_state_dict()[net_name]['active']:
                virsh.net_destroy(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Inactive network test failed!")

    virsh_dargs = {'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri
    result = virsh.net_list(option, extra, **virsh_dargs)
    status = result.exit_status
    output = result.stdout.strip()

    # Recover network: re-define a transient network from the saved XML and
    # restore the original active/autostart state.
    try:
        if persistent == "no":
            virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_define(tmp_xml, ignore_status=False)
            if net_current_status == "active":
                virsh.net_start(net_name, ignore_status=False)
            if autostart_status == "yes":
                virsh.net_autostart(net_name, ignore_status=False)
        else:
            if net_current_status == "active" and net_status == "inactive":
                virsh.net_start(net_name, ignore_status=False)
            elif net_current_status == "inactive" and net_status == "active":
                virsh.net_destroy(net_name, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Recover network failed!")

    # check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        if option == "--inactive":
            if net_status == "active":
                if re.search(net_name, output):
                    raise error.TestFail("Found an active network with"
                                         " --inactive option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Found no inactive networks with"
                                             " --inactive option")
                else:
                    # If network is transient, after net-destroy it,
                    # it will disapear.
                    if re.search(net_name, output):
                        raise error.TestFail(
                            "Found transient inactive networks"
                            " with --inactive option")
        elif option == "":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with no"
                                         " option")
            else:
                if re.search(net_name, output):
                    raise error.TestFail("Found inactive network with"
                                         " no option")
        elif option == "--all":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with"
                                         " --all option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Can't find inactive network with"
                                             " --all option")
                else:
                    # If network is transient, after net-destroy it,
                    # it will disapear.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive network"
                                             " with --all option")
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result

    :param test: test object (provides fail/error/NA semantics via `error`).
    :param params: test parameter dictionary.
    :param env: test environment object.
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")
    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        raise error.TestNAError("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)

    if remove_existing:
        for netxml in backup.values():
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        # Make the XML readable by the unprivileged user
        cmd = "chmod 666 %s" % testnet_xml.xml
        utils.system(cmd)

    try:
        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # A freshly defined network must be inactive, not autostarted,
        # and persistent.
        if check_states and not define_status:
            net_state = virsh_instance.net_state_dict()
            if (net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "defined netowrk: %s" % str(net_state))

        # If defining network succeed, then trying to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Check network states for normal test
        if check_states and not status_error:
            # After start: active, not yet autostarted, still persistent.
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "started netowrk: %s" % str(net_state))
            # Try to set autostart
            virsh.net_autostart(net_name, **virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if not net_state[net_name]['autostart']:
                fail_flag = 1
                result_info.append("Failed to set autostart for network %s"
                                   % net_name)
            # Restart libvirtd and check state
            # Close down persistent virsh session before libvirtd restart
            if hasattr(virsh_instance, 'close_session'):
                virsh_instance.close_session()
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            # Need to redefine virsh_instance after libvirtd restart
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    not net_state[net_name]['autostart']):
                fail_flag = 1
                result_info.append("Found wrong network state after restarting"
                                   " libvirtd: %s" % str(net_state))
            # Undefine an active network and check state: undefining a
            # running network leaves it active but transient.
            undefine_status = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs).exit_status
            if not undefine_status:
                net_state = virsh_instance.net_state_dict()
                if (not net_state[net_name]['active'] or
                        net_state[net_name]['autostart'] or
                        net_state[net_name]['persistent']):
                    fail_flag = 1
                    result_info.append("Found wrong network states for "
                                       "undefined netowrk: %s"
                                       % str(net_state))

        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network
        if not check_states:
            undefine_result = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs)
            if trans_ref != "define":
                logging.debug(undefine_result)
            undefine_status = undefine_result.exit_status

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in leftovers.values():
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in backup.values():
            netxml.sync(backup_state[netxml.name])
        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml

    # Check status_error
    # If fail_flag is set, it must be transaction test.
    if fail_flag:
        # BUGFIX: the message must be %-formatted here; passing result_info
        # as a second positional argument (logging-style lazy formatting)
        # produced an unformatted message.  Matches the modernized sibling
        # implementation of this test.
        raise error.TestFail("Define network for transaction test "
                             "failed:%s" % result_info)

    # The logic to check result:
    # status_error&only undefine:it is negative undefine test only
    # status_error&(no undefine):it is negative define test only
    # (not status_error)&(only undefine):it is positive transaction test.
    # (not status_error)&(no undefine):it is positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                raise error.TestFail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    raise error.TestFail("Define an unexpected network, "
                                         "and start it successfully.")
                else:
                    raise error.TestFail("Define an unexpected network, "
                                         "but start it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                raise error.TestFail("Define network for transaction "
                                     "successfully, but undefine failed.")
        else:
            if define_status != 0:
                raise error.TestFail("Run failed with right command")
            else:
                if start_status != 0:
                    raise error.TestFail("Network is defined as expected, "
                                         "but start it failed.")
def run(test, params, env):
    """
    Test command: virsh net-define/net-undefine.

    1) Collect parameters&environment info before test
    2) Prepare options for command
    3) Execute command for test
    4) Check state of defined network
    5) Recover environment
    6) Check result
    """
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    net_name = params.get("net_define_undefine_net_name", "default")
    net_uuid = params.get("net_define_undefine_net_uuid", "")
    options_ref = params.get("net_define_undefine_options_ref", "default")
    trans_ref = params.get("net_define_undefine_trans_ref", "trans")
    extra_args = params.get("net_define_undefine_extra", "")
    remove_existing = params.get("net_define_undefine_remove_existing", "yes")
    status_error = "yes" == params.get("status_error", "no")
    check_states = "yes" == params.get("check_states", "no")
    net_persistent = "yes" == params.get("net_persistent")
    net_active = "yes" == params.get("net_active")
    expect_msg = params.get("net_define_undefine_err_msg")

    # define multi ip/dhcp sections in network
    multi_ip = "yes" == params.get("multi_ip", "no")
    netmask = params.get("netmask")
    prefix_v6 = params.get("prefix_v6")
    # NOTE(review): single_v6_range is read but not referenced in this
    # function body — confirm whether it is still needed.
    single_v6_range = "yes" == params.get("single_v6_range", "no")
    # Get 2nd ipv4 dhcp range
    dhcp_ranges_start = params.get("dhcp_ranges_start", None)
    dhcp_ranges_end = params.get("dhcp_ranges_end", None)

    # Get 2 groups of ipv6 ip address and dhcp section
    address_v6_1 = params.get("address_v6_1")
    dhcp_ranges_v6_start_1 = params.get("dhcp_ranges_v6_start_1", None)
    dhcp_ranges_v6_end_1 = params.get("dhcp_ranges_v6_end_1", None)

    address_v6_2 = params.get("address_v6_2")
    dhcp_ranges_v6_start_2 = params.get("dhcp_ranges_v6_start_2", None)
    dhcp_ranges_v6_end_2 = params.get("dhcp_ranges_v6_end_2", None)

    # Edit net xml forward/ip part then define/start to check invalid setting
    edit_xml = "yes" == params.get("edit_xml", "no")
    address_v4 = params.get("address_v4")
    nat_port_start = params.get("nat_port_start")
    nat_port_end = params.get("nat_port_end")
    test_port = "yes" == params.get("test_port", "no")

    virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    virsh_uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Prepare environment and record current net_state_dict
    backup = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)
    backup_state = virsh_instance.net_state_dict()
    logging.debug("Backed up network(s): %s", backup_state)

    # Make some XML to use for testing, for now we just copy 'default'
    test_xml = xml_utils.TempXMLFile()  # temporary file
    try:
        # LibvirtXMLBase.__str__ returns XML content
        test_xml.write(str(backup['default']))
        test_xml.flush()
    except (KeyError, AttributeError):
        test.cancel("Test requires default network to exist")

    testnet_xml = get_network_xml_instance(virsh_dargs, test_xml, net_name,
                                           net_uuid, bridge=None)
    if remove_existing:
        for netxml in list(backup.values()):
            netxml.orbital_nuclear_strike()

    # Test both define and undefine, So collect info
    # both of them for result check.
    # When something wrong with network, set it to 1
    fail_flag = 0
    result_info = []

    if options_ref == "correct_arg":
        define_options = testnet_xml.xml
        undefine_options = net_name
    elif options_ref == "no_option":
        define_options = ""
        undefine_options = ""
    elif options_ref == "not_exist_option":
        define_options = "/not/exist/file"
        undefine_options = "NOT_EXIST_NETWORK"

    define_extra = undefine_extra = extra_args
    if trans_ref != "define":
        define_extra = ""

    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs = {'uri': virsh_uri,
                       'unprivileged_user': unprivileged_user,
                       'debug': False, 'ignore_status': True}
        # Make the XML readable by the unprivileged user
        cmd = "chmod 666 %s" % testnet_xml.xml
        process.run(cmd, shell=True)

    if params.get('net_define_undefine_readonly', 'no') == 'yes':
        virsh_dargs = {'uri': uri, 'debug': False, 'ignore_status': True,
                       'readonly': True}
    try:
        if edit_xml:
            # Replace the copied ip section with the (possibly invalid)
            # address/netmask/dhcp range from params
            ipxml_v4 = network_xml.IPXML()
            ipxml_v4.address = address_v4
            ipxml_v4.netmask = netmask
            ipxml_v4.dhcp_ranges = {"start": dhcp_ranges_start,
                                    "end": dhcp_ranges_end}
            testnet_xml.del_ip()
            testnet_xml.set_ip(ipxml_v4)
        if test_port:
            nat_port = {"start": nat_port_start, "end": nat_port_end}
            testnet_xml.nat_port = nat_port
        testnet_xml.debug_xml()
        if multi_ip:
            # Enabling IPv6 forwarding with RA routes without accept_ra set to 2
            # is likely to cause routes loss
            sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra'
            original_accept_ra = to_text(
                process.system_output(sysctl_cmd + ' -n'))
            if original_accept_ra != '2':
                process.system(sysctl_cmd + '=2')
            # add another ipv4 address and dhcp range
            set_ip_section(testnet_xml, address_v4, ipv6=False,
                           netmask=netmask,
                           dhcp_ranges_start=dhcp_ranges_start,
                           dhcp_ranges_end=dhcp_ranges_end)
            # add ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_1, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_1,
                           dhcp_ranges_end=dhcp_ranges_v6_end_1)
            # 2nd ipv6 address and dhcp range
            set_ip_section(testnet_xml, address_v6_2, ipv6=True,
                           prefix_v6=prefix_v6,
                           dhcp_ranges_start=dhcp_ranges_v6_start_2,
                           dhcp_ranges_end=dhcp_ranges_v6_end_2)
        testnet_xml.debug_xml()

        # Run test case
        define_result = virsh.net_define(define_options, define_extra,
                                         **virsh_dargs)
        logging.debug(define_result)
        define_status = define_result.exit_status

        # Check network states: a freshly defined network must be inactive,
        # not autostarted, and persistent.
        if check_states and not define_status:
            net_state = virsh_instance.net_state_dict()
            if (net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "defined netowrk: %s" % str(net_state))

        if define_status == 1 and status_error and expect_msg:
            libvirt.check_result(define_result, expect_msg.split(';'))

        # If defining network succeed, then trying to start it.
        if define_status == 0:
            start_result = virsh.net_start(net_name, extra="", **virsh_dargs)
            logging.debug(start_result)
            start_status = start_result.exit_status

        if trans_ref == "trans":
            if define_status:
                fail_flag = 1
                result_info.append("Define network with right command failed.")
            else:
                if start_status:
                    fail_flag = 1
                    result_info.append("Network is defined as expected, "
                                       "but failed to start it.")

        # Check network states for normal test
        if check_states and not status_error:
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    net_state[net_name]['autostart'] or
                    not net_state[net_name]['persistent']):
                fail_flag = 1
                result_info.append("Found wrong network states for "
                                   "started netowrk: %s" % str(net_state))
            # Try to set autostart
            virsh.net_autostart(net_name, **virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if not net_state[net_name]['autostart']:
                fail_flag = 1
                result_info.append("Failed to set autostart for network %s"
                                   % net_name)
            # Restart libvirtd and check state
            # Close down persistent virsh session before libvirtd restart
            if hasattr(virsh_instance, 'close_session'):
                virsh_instance.close_session()
            libvirtd = utils_libvirtd.Libvirtd()
            libvirtd.restart()
            # Need to redefine virsh_instance after libvirtd restart
            virsh_instance = virsh.VirshPersistent(**virsh_dargs)
            net_state = virsh_instance.net_state_dict()
            if (not net_state[net_name]['active'] or
                    not net_state[net_name]['autostart']):
                fail_flag = 1
                result_info.append("Found wrong network state after restarting"
                                   " libvirtd: %s" % str(net_state))
            logging.debug("undefine network:")
            # prepare the network status
            if not net_persistent:
                virsh.net_undefine(net_name, ignore_status=False)
            if not net_active:
                virsh.net_destroy(net_name, ignore_status=False)
            undefine_status = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs).exit_status
            net_state = virsh_instance.net_state_dict()
            if net_persistent:
                if undefine_status:
                    fail_flag = 1
                    result_info.append("undefine should succeed but failed")
                if net_active:
                    # Undefining a running network leaves it active but
                    # transient
                    if (not net_state[net_name]['active'] or
                            net_state[net_name]['autostart'] or
                            net_state[net_name]['persistent']):
                        fail_flag = 1
                        result_info.append("Found wrong network states for "
                                           "undefined netowrk: %s"
                                           % str(net_state))
                else:
                    if net_name in net_state:
                        fail_flag = 1
                        result_info.append("Transient network should not exists "
                                           "after undefine : %s"
                                           % str(net_state))
            else:
                if not undefine_status:
                    fail_flag = 1
                    result_info.append("undefine transient network should fail "
                                       "but succeed: %s" % str(net_state))
        # Stop network for undefine test anyway
        destroy_result = virsh.net_destroy(net_name, extra="", **virsh_dargs)
        logging.debug(destroy_result)

        # Undefine network
        if not check_states:
            undefine_result = virsh.net_undefine(undefine_options,
                                                 undefine_extra,
                                                 **virsh_dargs)
            if trans_ref != "define":
                logging.debug(undefine_result)
            undefine_status = undefine_result.exit_status

    finally:
        # Recover environment
        leftovers = network_xml.NetworkXML.new_all_networks_dict(
            virsh_instance)
        for netxml in list(leftovers.values()):
            netxml.orbital_nuclear_strike()
        # Recover from backup
        for netxml in list(backup.values()):
            netxml.sync(backup_state[netxml.name])
        # Close down persistent virsh session (including for all netxml copies)
        if hasattr(virsh_instance, 'close_session'):
            virsh_instance.close_session()
        # Done with file, cleanup
        del test_xml
        del testnet_xml

    # Check status_error
    # If fail_flag is set, it must be transaction test.
    if fail_flag:
        test.fail("Define network for transaction test "
                  "failed:%s" % result_info)

    # The logic to check result:
    # status_error&only undefine:it is negative undefine test only
    # status_error&(no undefine):it is negative define test only
    # (not status_error)&(only undefine):it is positive transaction test.
    # (not status_error)&(no undefine):it is positive define test only
    if status_error:
        if trans_ref == "undefine":
            if undefine_status == 0:
                test.fail("Run successfully with wrong command.")
        else:
            if define_status == 0:
                if start_status == 0:
                    test.fail("Define an unexpected network, "
                              "and start it successfully.")
                else:
                    test.fail("Define an unexpected network, "
                              "but start it failed.")
    else:
        if trans_ref == "undefine":
            if undefine_status:
                test.fail("Define network for transaction "
                          "successfully, but undefine failed.")
        else:
            if define_status != 0:
                test.fail("Run failed with right command")
            else:
                if start_status != 0:
                    test.fail("Network is defined as expected, "
                              "but start it failed.")
def run(test, params, env):
    """
    Test interface xml options.

    1.Prepare test environment, destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.

    :param test: avocado test object (used for fail/error/cancel)
    :param params: test parameter dictionary
    :param env: test environment object holding the VMs
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def prepare_pxe_boot():
        """
        Prepare tftp server and pxe boot files
        """
        pkg_list = ["syslinux", "tftp-server",
                    "tftp", "ipxe-roms-qemu", "wget"]
        # Try to install required packages
        if not utils_package.package_install(pkg_list):
            test.error("Failed to install required packages")
        boot_initrd = params.get("boot_initrd", "EXAMPLE_INITRD")
        boot_vmlinuz = params.get("boot_vmlinuz", "EXAMPLE_VMLINUZ")
        if boot_initrd.count("EXAMPLE") or boot_vmlinuz.count("EXAMPLE"):
            test.cancel("Please provide initrd/vmlinuz URL")
        # Download pxe boot images
        process.system("wget %s -O %s/initrd.img" % (boot_initrd, tftp_root))
        process.system("wget %s -O %s/vmlinuz" % (boot_vmlinuz, tftp_root))
        process.system("cp -f /usr/share/syslinux/pxelinux.0 {0};"
                       " mkdir -m 777 -p {0}/pxelinux.cfg".format(tftp_root),
                       shell=True)
        pxe_file = "%s/pxelinux.cfg/default" % tftp_root
        # NOTE(review): internal layout of this boot menu was reconstructed
        # from a whitespace-mangled source; confirm against upstream.
        boot_txt = """
DISPLAY boot.txt
DEFAULT rhel
LABEL rhel
        kernel vmlinuz
        append initrd=initrd.img
PROMPT 1
TIMEOUT 3"""
        with open(pxe_file, 'w') as p_file:
            p_file.write(boot_txt)

    def modify_iface_xml():
        """
        Modify interface xml options
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if pxe_boot:
            # Config boot console for pxe boot
            osxml = vm_xml.VMOSXML()
            osxml.type = vmxml.os.type
            osxml.arch = vmxml.os.arch
            osxml.machine = vmxml.os.machine
            osxml.loader = "/usr/share/seabios/bios.bin"
            osxml.bios_useserial = "yes"
            osxml.bios_reboot_timeout = "-1"
            osxml.boots = ['network']
            del vmxml.os
            vmxml.os = osxml

        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        iface_bandwidth = {}
        iface_inbound = ast.literal_eval(iface_bandwidth_inbound)
        iface_outbound = ast.literal_eval(iface_bandwidth_outbound)
        if iface_inbound:
            iface_bandwidth["inbound"] = iface_inbound
        if iface_outbound:
            iface_bandwidth["outbound"] = iface_outbound
        if iface_bandwidth:
            bandwidth = iface.new_bandwidth(**iface_bandwidth)
            iface.bandwidth = bandwidth

        iface_type = params.get("iface_type", "network")
        iface.type_name = iface_type
        source = ast.literal_eval(iface_source)
        if not source:
            source = {"network": "default"}
        net_ifs = utils_net.get_net_if(state="UP")
        # Check source device is valid or not,
        # if it's not in host interface list, try to set
        # source device to first active interface of host
        if (iface.type_name == "direct" and
                'dev' in source and
                source['dev'] not in net_ifs):
            logging.warning("Source device %s is not a interface"
                            " of host, reset to %s",
                            source['dev'], net_ifs[0])
            source['dev'] = net_ifs[0]
        del iface.source
        iface.source = source
        iface_model = params.get("iface_model", "virtio")
        iface.model = iface_model
        logging.debug("New interface xml file: %s", iface)
        vmxml.devices = xml_devices
        vmxml.xmltreefile.write()
        vmxml.sync()

    def run_dnsmasq_default_test(key, value=None, exists=True, name="default"):
        """
        Test dnsmasq configuration.

        :param key: key in conf file to check
        :param value: value in conf file to check
        :param exists: check the key:value exist or not
        :param name: The name of conf file
        """
        conf_file = "/var/lib/libvirt/dnsmasq/%s.conf" % name
        if not os.path.exists(conf_file):
            test.cancel("Can't find %s.conf file" % name)

        configs = ""
        with open(conf_file, 'r') as f:
            configs = f.read()
        logging.debug("configs in file %s: %s", conf_file, configs)
        if value:
            config = "%s=%s" % (key, value)
        else:
            config = key

        if not configs.count(config):
            if exists:
                test.fail("Can't find %s=%s in configuration file"
                          % (key, value))
        else:
            if not exists:
                test.fail("Found %s=%s in configuration file"
                          % (key, value))

    def run_dnsmasq_addnhosts_test(hostip, hostnames):
        """
        Test host ip and names configuration
        """
        conf_file = "/var/lib/libvirt/dnsmasq/default.addnhosts"
        hosts_re = ".*".join(hostnames)
        configs = ""
        with open(conf_file, 'r') as f:
            configs = f.read()
        logging.debug("configs in file %s: %s", conf_file, configs)
        if not re.search(r"%s.*%s" % (hostip, hosts_re), configs, re.M):
            test.fail("Can't find '%s' in configuration file" % hostip)

    def run_dnsmasq_host_test(iface_mac, guest_ip, guest_name):
        """
        Test host name and ip configuration for dnsmasq
        """
        conf_file = "/var/lib/libvirt/dnsmasq/default.hostsfile"
        config = "%s,%s,%s" % (iface_mac, guest_ip, guest_name)
        configs = ""
        with open(conf_file, 'r') as f:
            configs = f.read()
        logging.debug("configs in file %s: %s", conf_file, configs)
        if not configs.count(config):
            test.fail("Can't find host configuration in file %s" % conf_file)

    def check_class_rules(ifname, rule_id, bandwidth):
        """
        Check bandwidth settings via 'tc class' output
        """
        cmd = "tc class show dev %s" % ifname
        class_output = to_text(process.system_output(cmd))
        logging.debug("Bandwidth class output: %s", class_output)
        class_pattern = (r"class htb %s.*rate (\d+)(K?M?)bit ceil"
                         r" (\d+)(K?M?)bit burst (\d+)(K?M?)b.*" % rule_id)
        se = re.search(class_pattern, class_output, re.M)
        if not se:
            test.fail("Can't find outbound setting for htb %s" % rule_id)
        logging.debug("bandwidth from tc output:%s" % str(se.groups()))
        rate = None
        # libvirt bandwidth values are in KiB/s while tc reports bits, so
        # multiply the setting by 8 before comparing
        if "floor" in bandwidth:
            rate = int(bandwidth["floor"]) * 8
        elif "average" in bandwidth:
            rate = int(bandwidth["average"]) * 8
        if rate:
            if se.group(2) == 'M':
                rate_check = int(se.group(1)) * 1000
            else:
                rate_check = int(se.group(1))
            assert rate_check == rate
        if "peak" in bandwidth:
            if se.group(4) == 'M':
                ceil_check = int(se.group(3)) * 1000
            else:
                ceil_check = int(se.group(3))
            assert ceil_check == int(bandwidth["peak"]) * 8
        if "burst" in bandwidth:
            if se.group(6) == 'M':
                tc_burst = int(se.group(5)) * 1024
            else:
                tc_burst = int(se.group(5))
            assert tc_burst == int(bandwidth["burst"])

    def check_filter_rules(ifname, bandwidth):
        """
        Check bandwidth settings via 'tc filter' output
        """
        cmd = "tc -d filter show dev %s parent ffff:" % ifname
        filter_output = to_text(process.system_output(cmd))
        logging.debug("Bandwidth filter output: %s", filter_output)
        if not filter_output.count("filter protocol all pref"):
            test.fail("Can't find 'protocol all' settings in filter rules")
        filter_pattern = r".*police.*rate (\d+)(K?M?)bit burst (\d+)(K?M?)b.*"
        se = re.search(r"%s" % filter_pattern, filter_output, re.M)
        if not se:
            test.fail("Can't find any filter policy")
        logging.debug("bandwidth from tc output:%s" % str(se.groups()))
        logging.debug("bandwidth from setting:%s" % str(bandwidth))
        if "average" in bandwidth:
            if se.group(2) == 'M':
                tc_average = int(se.group(1)) * 1000
            else:
                tc_average = int(se.group(1))
            assert tc_average == int(bandwidth["average"]) * 8
        if "burst" in bandwidth:
            if se.group(4) == 'M':
                tc_burst = int(se.group(3)) * 1024
            else:
                tc_burst = int(se.group(3))
            assert tc_burst == int(bandwidth["burst"])

    def check_host_routes():
        """
        Check network routes on host
        """
        for rt in routes:
            try:
                route = ast.literal_eval(rt)
                addr = "%s/%s" % (route["address"], route["prefix"])
                cmd = "ip route list %s" % addr
                if "family" in route and route["family"] == "ipv6":
                    cmd = "ip -6 route list %s" % addr
                output = to_text(process.system_output(cmd))
                match_obj = re.search(r"via (\S+).*metric (\d+)", output)
                if match_obj:
                    via_addr = match_obj.group(1)
                    metric = match_obj.group(2)
                    logging.debug("via address %s for %s, metric is %s"
                                  % (via_addr, addr, metric))
                    assert via_addr == route["gateway"]
                    if "metric" in route:
                        assert metric == route["metric"]
            except KeyError:
                pass

    def run_bandwidth_test(check_net=False, check_iface=False):
        """
        Test bandwidth option for network or interface by tc command.
        """
        iface_inbound = ast.literal_eval(iface_bandwidth_inbound)
        iface_outbound = ast.literal_eval(iface_bandwidth_outbound)
        net_inbound = ast.literal_eval(net_bandwidth_inbound)
        net_outbound = ast.literal_eval(net_bandwidth_outbound)
        net_bridge_name = ast.literal_eval(net_bridge)["name"]
        iface_name = libvirt.get_ifname_host(vm_name, iface_mac)
        try:
            if check_net and net_inbound:
                # Check qdisc rules
                cmd = "tc -d qdisc show dev %s" % net_bridge_name
                qdisc_output = to_text(process.system_output(cmd))
                logging.debug("Bandwidth qdisc output: %s", qdisc_output)
                if not qdisc_output.count("qdisc ingress ffff:"):
                    test.fail("Can't find ingress setting")
                check_class_rules(net_bridge_name, "1:1",
                                  {"average": net_inbound["average"],
                                   "peak": net_inbound["peak"]})
                check_class_rules(net_bridge_name, "1:2", net_inbound)

            # Check filter rules on bridge interface
            if check_net and net_outbound:
                check_filter_rules(net_bridge_name, net_outbound)

            # Check class rules on interface inbound settings
            if check_iface and iface_inbound:
                check_class_rules(iface_name, "1:1",
                                  {'average': iface_inbound['average'],
                                   'peak': iface_inbound['peak'],
                                   'burst': iface_inbound['burst']})
                if "floor" in iface_inbound:
                    if not libvirt_version.version_compare(1, 0, 1):
                        test.cancel("Not supported Qos options 'floor'")
                    check_class_rules(net_bridge_name, "1:3",
                                      {'floor': iface_inbound["floor"]})

            # Check filter rules on interface outbound settings
            if check_iface and iface_outbound:
                check_filter_rules(iface_name, iface_outbound)
        except AssertionError:
            stacktrace.log_exc_info(sys.exc_info())
            test.fail("Failed to check network bandwidth")

    def check_name_ip(session):
        """
        Check dns resolving on guest
        """
        # Check if bind-utils is installed
        if not utils_package.package_install(['bind-utils'], session):
            test.error("Failed to install bind-utils on guest")
        # Run host command to check if hostname can be resolved
        if not guest_ipv4 and not guest_ipv6:
            test.fail("No ip address found from parameters")
        guest_ip = guest_ipv4 if guest_ipv4 else guest_ipv6
        cmd = "host %s | grep %s" % (guest_name, guest_ip)
        if session.cmd_status(cmd):
            test.fail("Can't resolve name %s on guest" % guest_name)

    def check_ipt_rules(check_ipv4=True, check_ipv6=False):
        """
        Check iptables for network/interface
        """
        br_name = ast.literal_eval(net_bridge)["name"]
        net_forward = ast.literal_eval(params.get("net_forward", "{}"))
        net_ipv4 = params.get("net_ipv4")
        net_ipv6 = params.get("net_ipv6")
        ipt_rules = ("INPUT -i %s -p udp -m udp --dport 53 -j ACCEPT" % br_name,
                     "INPUT -i %s -p tcp -m tcp --dport 53 -j ACCEPT" % br_name,
                     "INPUT -i %s -p udp -m udp --dport 67 -j ACCEPT" % br_name,
                     "INPUT -i %s -p tcp -m tcp --dport 67 -j ACCEPT" % br_name,
                     "FORWARD -i {0} -o {0} -j ACCEPT".format(br_name),
                     "FORWARD -o %s -j REJECT --reject-with icmp" % br_name,
                     "FORWARD -i %s -j REJECT --reject-with icmp" % br_name,
                     "OUTPUT -o %s -p udp -m udp --dport 68 -j ACCEPT" % br_name)
        net_dev_in = ""
        net_dev_out = ""
        if "dev" in net_forward:
            net_dev_in = " -i %s" % net_forward["dev"]
            net_dev_out = " -o %s" % net_forward["dev"]
        if check_ipv4:
            ipv4_rules = list(ipt_rules)
            ctr_rule = ""
            nat_rules = []
            if "mode" in net_forward and net_forward["mode"] == "nat":
                nat_port = ast.literal_eval(params.get("nat_port"))
                p_start = nat_port["start"]
                p_end = nat_port["end"]
                ctr_rule = " -m .* RELATED,ESTABLISHED"
                nat_rules = [("POSTROUTING -s {0} ! -d {0} -p tcp -j MASQUERADE"
                              " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)),
                             ("POSTROUTING -s {0} ! -d {0} -p udp -j MASQUERADE"
                              " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)),
                             ("POSTROUTING -s {0} ! -d {0} -p udp"
                              " -j MASQUERADE".format(net_ipv4))]
            if nat_rules:
                ipv4_rules.extend(nat_rules)
            if (net_ipv4 and "mode" in net_forward and
                    net_forward["mode"] in ["nat", "route"]):
                rules = [("FORWARD -d %s%s -o %s%s -j ACCEPT"
                          % (net_ipv4, net_dev_in, br_name, ctr_rule)),
                         ("FORWARD -s %s -i %s%s -j ACCEPT"
                          % (net_ipv4, br_name, net_dev_out))]
                ipv4_rules.extend(rules)

            output = to_text(process.system_output('iptables-save'))
            logging.debug("iptables: %s", output)
            if "mode" in net_forward and net_forward["mode"] == "open":
                if re.search(r"%s|%s" % (net_ipv4, br_name), output, re.M):
                    test.fail("Find iptable rule for open mode")
                utils_libvirtd.libvirtd_restart()
                output_again = to_text(process.system_output('iptables-save'))
                if re.search(r"%s|%s" % (net_ipv4, br_name), output_again, re.M):
                    test.fail("Find iptable rule for open mode after restart "
                              "libvirtd")
                else:
                    logging.info("Can't find iptable rule for open mode as expected")
            else:
                for ipt in ipv4_rules:
                    if not re.search(r"%s" % ipt, output, re.M):
                        test.fail("Can't find iptable rule:\n%s" % ipt)
            return ipv4_rules
        if check_ipv6:
            ipv6_rules = list(ipt_rules)
            if (net_ipv6 and "mode" in net_forward and
                    net_forward["mode"] in ["nat", "route"]):
                rules = [("FORWARD -d %s%s -o %s -j ACCEPT"
                          % (net_ipv6, net_dev_in, br_name)),
                         ("FORWARD -s %s -i %s%s -j ACCEPT"
                          % (net_ipv6, br_name, net_dev_out))]
                ipv6_rules.extend(rules)
            output = to_text(process.system_output("ip6tables-save"))
            logging.debug("iptables: %s", output)
            for ipt in ipv6_rules:
                if not output.count(ipt):
                    test.fail("Can't find iptable rule:\n%s" % ipt)
            return ipv6_rules

    def run_ip_test(session, ip_ver):
        """
        Check iptables on host and ipv6 address on guest
        """
        if ip_ver == "ipv6":
            # Clean up iptables rules for guest to get ipv6 address
            session.cmd_status("ip6tables -F")

        # It may take some time to get the ip address
        def get_ip_func():
            return utils_net.get_guest_ip_addr(session, iface_mac,
                                               ip_version=ip_ver)
        utils_misc.wait_for(get_ip_func, 5)
        if not get_ip_func():
            utils_net.restart_guest_network(session, iface_mac,
                                            ip_version=ip_ver)
            utils_misc.wait_for(get_ip_func, 5)
        vm_ip = get_ip_func()
        logging.debug("Guest has ip: %s", vm_ip)
        if not vm_ip:
            test.fail("Can't find ip address on guest")
        ip_gateway = net_ip_address
        if ip_ver == "ipv6":
            ip_gateway = net_ipv6_address
            # Cleanup ip6tables on host for ping6 test
            process.system("ip6tables -F")
        if ip_gateway and not routes:
            ping_s, _ = ping(dest=ip_gateway, count=5,
                             timeout=10, session=session)
            if ping_s:
                test.fail("Failed to ping gateway address: %s" % ip_gateway)

    def run_guest_libvirt(session):
        """
        Check guest libvirt network
        """
        # Try to install required packages
        if not utils_package.package_install(['libvirt'], session):
            test.error("Failed to install libvirt package on guest")
        # Try to load tun module first
        session.cmd("lsmod | grep tun || modprobe  tun")
        # Check network state on guest
        cmd = ("service libvirtd restart; virsh net-info default"
               " | grep 'Active:.*yes'")
        if session.cmd_status(cmd):
            test.fail("'default' network isn't in active state")
        # Try to destroy&start default network on guest
        for opt in ['net-destroy', 'net-start']:
            cmd = "virsh %s default" % opt
            status, output = session.cmd_status_output(cmd)
            logging.debug("Run %s on guest exit %s, output %s"
                          % (cmd, status, output))
            if status:
                test.fail(output)
        if not utils_package.package_remove("libvirt*", session):
            test.error("Failed to remove libvirt packages on guest")

    start_error = "yes" == params.get("start_error", "no")
    define_error = "yes" == params.get("define_error", "no")
    restart_error = "yes" == params.get("restart_error", "no")

    # network specific attributes.
    net_name = params.get("net_name", "default")
    net_bridge = params.get("net_bridge", "{'name':'virbr0'}")
    net_domain = params.get("net_domain")
    net_ip_address = params.get("net_ip_address")
    net_ipv6_address = params.get("net_ipv6_address")
    net_dns_forward = params.get("net_dns_forward")
    net_dns_txt = params.get("net_dns_txt")
    net_dns_srv = params.get("net_dns_srv")
    net_dns_hostip = params.get("net_dns_hostip")
    net_dns_hostnames = params.get("net_dns_hostnames", "").split()
    dhcp_start_ipv4 = params.get("dhcp_start_ipv4")
    dhcp_end_ipv4 = params.get("dhcp_end_ipv4")
    dhcp_start_ipv6 = params.get("dhcp_start_ipv6")
    dhcp_end_ipv6 = params.get("dhcp_end_ipv6")
    guest_name = params.get("guest_name")
    guest_ipv4 = params.get("guest_ipv4")
    guest_ipv6 = params.get("guest_ipv6")
    tftp_root = params.get("tftp_root")
    pxe_boot = "yes" == params.get("pxe_boot", "no")
    routes = params.get("routes", "").split()
    net_bandwidth_inbound = params.get("net_bandwidth_inbound", "{}")
    net_bandwidth_outbound = params.get("net_bandwidth_outbound", "{}")
    iface_bandwidth_inbound = params.get("iface_bandwidth_inbound", "{}")
    iface_bandwidth_outbound = params.get("iface_bandwidth_outbound", "{}")
    iface_num = params.get("iface_num", "1")
    iface_source = params.get("iface_source", "{}")
    multiple_guests = params.get("multiple_guests")
    create_network = "yes" == params.get("create_network", "no")
    attach_iface = "yes" == params.get("attach_iface", "no")
    # NOTE(review): original line compared against a redacted "******"
    # placeholder, which made serial_login always False; restored the
    # "yes" comparison used by every other boolean parameter here.
    serial_login = "yes" == params.get("serial_login", "no")
    change_iface_option = "yes" == params.get("change_iface_option", "no")
    test_bridge = "yes" == params.get("test_bridge", "no")
    test_dnsmasq = "yes" == params.get("test_dnsmasq", "no")
    test_dhcp_range = "yes" == params.get("test_dhcp_range", "no")
    test_dns_host = "yes" == params.get("test_dns_host", "no")
    test_qos_bandwidth = "yes" == params.get("test_qos_bandwidth", "no")
    test_pg_bandwidth = "yes" == params.get("test_portgroup_bandwidth", "no")
    test_qos_remove = "yes" == params.get("test_qos_remove", "no")
    test_ipv4_address = "yes" == params.get("test_ipv4_address", "no")
    test_ipv6_address = "yes" == params.get("test_ipv6_address", "no")
    test_guest_libvirt = "yes" == params.get("test_guest_libvirt", "no")
    net_no_bridge = "yes" == params.get("no_bridge", "no")
    net_no_mac = "yes" == params.get("no_mac", "no")
    net_no_ip = "yes" == params.get("no_ip", "no")
    net_with_dev = "yes" == params.get("with_dev", "no")
    username = params.get("username")
    password = params.get("password")
    ipt_rules = []

    # Destroy VM first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    netxml_backup = NetworkXML.new_from_net_dumpxml("default")
    iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    params["guest_mac"] = iface_mac
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vms_list = []
    if "floor" in ast.literal_eval(iface_bandwidth_inbound):
        if not libvirt_version.version_compare(1, 0, 1):
            test.cancel("Not supported Qos options 'floor'")

    # Enabling IPv6 forwarding with RA routes without accept_ra set to 2
    # is likely to cause routes loss
    sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra'
    original_accept_ra = to_text(process.system_output(sysctl_cmd + ' -n'))
    if test_ipv6_address and original_accept_ra != '2':
        process.system(sysctl_cmd + '=2')

    # Build the xml and run test.
    try:
        if test_dnsmasq:
            # Check the settings before modifying network xml
            if net_dns_forward == "no":
                run_dnsmasq_default_test("domain-needed", exists=False)
                run_dnsmasq_default_test("local", "//", exists=False)
            if net_domain:
                run_dnsmasq_default_test("domain", net_domain, exists=False)
                run_dnsmasq_default_test("expand-hosts", exists=False)

        # Prepare pxe boot directory
        if pxe_boot:
            prepare_pxe_boot()
        # Edit the network xml or create a new one.
        if create_network:
            net_ifs = utils_net.get_net_if(state="UP")
            # Check forward device is valid or not,
            # if it's not in host interface list, try to set
            # forward device to first active interface of host
            forward = ast.literal_eval(params.get("net_forward", "{}"))
            if ('mode' in forward and forward['mode'] in
                    ['passthrough', 'private', 'bridge', 'macvtap'] and
                    'dev' in forward and
                    forward['dev'] not in net_ifs):
                logging.warning("Forward device %s is not a interface"
                                " of host, reset to %s",
                                forward['dev'], net_ifs[0])
                forward['dev'] = net_ifs[0]
                params["net_forward"] = str(forward)
            forward_iface = params.get("forward_iface")
            if forward_iface:
                interface = [x for x in forward_iface.split()]
                # The guest will use first interface of the list,
                # check if it's valid or not, if it's not in host
                # interface list, try to set forward interface to
                # first active interface of host.
                if interface[0] not in net_ifs:
                    logging.warning("Forward interface %s is not a "
                                    " interface of host, reset to %s",
                                    interface[0], net_ifs[0])
                    interface[0] = net_ifs[0]
                    params["forward_iface"] = " ".join(interface)

            netxml = libvirt.create_net_xml(net_name, params)
            if "mode" in forward and forward["mode"] == "open":
                netxml.mac = utils_net.generate_mac_address_simple()
                try:
                    if net_no_bridge:
                        netxml.del_bridge()
                    if net_no_ip:
                        # called twice to drop both the ipv4 and ipv6 elements
                        netxml.del_ip()
                        netxml.del_ip()
                    if net_no_mac:
                        netxml.del_mac()
                except xcepts.LibvirtXMLNotFoundError:
                    pass
                if net_with_dev:
                    net_forward = netxml.forward
                    net_forward.update({"dev": net_ifs[0]})
                    netxml.forward = net_forward
            logging.info("netxml before define is %s", netxml)
            try:
                netxml.sync()
            except xcepts.LibvirtXMLError as details:
                logging.info(str(details))
                if define_error:
                    return
                else:
                    test.fail("Failed to define network")

            # Check open mode network xml
            if "mode" in forward and forward["mode"] == "open":
                netxml_new = NetworkXML.new_from_net_dumpxml(net_name)
                logging.info("netxml after define is %s", netxml_new)
                try:
                    if net_no_bridge:
                        net_bridge = str(netxml_new.bridge)
                    if net_no_mac:
                        netxml_new.mac
                except xcepts.LibvirtXMLNotFoundError as details:
                    test.fail("Failed to check %s xml: %s"
                              % (net_name, details))
                logging.info("mac/bridge still exist even if removed before "
                             "define")

        # Edit the interface xml.
        if change_iface_option:
            modify_iface_xml()
        # Attach interface if needed
        if attach_iface:
            iface_type = params.get("iface_type", "network")
            iface_model = params.get("iface_model", "virtio")
            for i in range(int(iface_num)):
                logging.info("Try to attach interface loop %s" % i)
                options = ("%s %s --model %s --config"
                           % (iface_type, net_name, iface_model))
                ret = virsh.attach_interface(vm_name, options,
                                             ignore_status=True)
                if ret.exit_status:
                    logging.error("Command output %s" %
                                  ret.stdout.strip())
                    test.fail("Failed to attach-interface")

        if multiple_guests:
            # Clone more vms for testing
            for i in range(int(multiple_guests)):
                guest_name = "%s_%s" % (vm_name, i)
                timeout = params.get("clone_timeout", 360)
                utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                                True, timeout=timeout)
                vms_list.append(vm.clone(guest_name))

        if test_bridge:
            bridge = ast.literal_eval(net_bridge)
            br_if = utils_net.Interface(bridge['name'])
            if not br_if.is_up():
                test.fail("Bridge interface isn't up")
        if test_dnsmasq:
            # Check dnsmasq process
            dnsmasq_cmd = to_text(process.system_output(
                "ps -aux|grep dnsmasq", shell=True))
            logging.debug(dnsmasq_cmd)
            if not re.search("dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/%s.conf"
                             % net_name, dnsmasq_cmd):
                test.fail("Can not find dnsmasq process or the process is "
                          "not correct")

            # Check the settings in dnsmasq config file
            if net_dns_forward == "no":
                run_dnsmasq_default_test("domain-needed")
                run_dnsmasq_default_test("local", "//")
            if net_domain:
                run_dnsmasq_default_test("domain", net_domain)
                run_dnsmasq_default_test("expand-hosts")
            if net_bridge:
                bridge = ast.literal_eval(net_bridge)
                run_dnsmasq_default_test("interface", bridge['name'],
                                         name=net_name)
                if 'stp' in bridge and bridge['stp'] == 'on':
                    if 'delay' in bridge and bridge['delay'] != '0':
                        br_delay = float(bridge['delay'])
                        cmd = ("brctl showstp %s | grep 'bridge forward delay'"
                               % bridge['name'])
                        out = to_text(process.system_output(
                            cmd, shell=True, ignore_status=False))
                        logging.debug("brctl showstp output: %s", out)
                        pattern = (r"\s*forward delay\s+(\d+.\d+)\s+bridge"
                                   r" forward delay\s+(\d+.\d+)")
                        match_obj = re.search(pattern, out, re.M)
                        if not match_obj or len(match_obj.groups()) != 2:
                            test.fail("Can't see forward delay messages from "
                                      "command")
                        elif (float(match_obj.groups()[0]) != br_delay or
                                float(match_obj.groups()[1]) != br_delay):
                            test.fail("Forward delay setting can't take effect")
            if dhcp_start_ipv4 and dhcp_end_ipv4:
                run_dnsmasq_default_test("dhcp-range", "%s,%s"
                                         % (dhcp_start_ipv4, dhcp_end_ipv4),
                                         name=net_name)
            if dhcp_start_ipv6 and dhcp_end_ipv6:
                run_dnsmasq_default_test("dhcp-range", "%s,%s,64"
                                         % (dhcp_start_ipv6, dhcp_end_ipv6),
                                         name=net_name)
            if guest_name and guest_ipv4:
                run_dnsmasq_host_test(iface_mac, guest_ipv4, guest_name)

            # check the left part in dnsmasq conf
            run_dnsmasq_default_test("strict-order", name=net_name)
            run_dnsmasq_default_test("pid-file",
                                     "/var/run/libvirt/network/%s.pid"
                                     % net_name, name=net_name)
            run_dnsmasq_default_test("except-interface", "lo", name=net_name)
            run_dnsmasq_default_test("bind-dynamic", name=net_name)
            run_dnsmasq_default_test("dhcp-no-override", name=net_name)
            if dhcp_start_ipv6 and dhcp_start_ipv4:
                run_dnsmasq_default_test("dhcp-lease-max", "493",
                                         name=net_name)
            else:
                range_num = int(params.get("dhcp_range", "252"))
                run_dnsmasq_default_test("dhcp-lease-max", str(range_num+1),
                                         name=net_name)
            run_dnsmasq_default_test("dhcp-hostsfile",
                                     "/var/lib/libvirt/dnsmasq/%s.hostsfile"
                                     % net_name, name=net_name)
            run_dnsmasq_default_test("addn-hosts",
                                     "/var/lib/libvirt/dnsmasq/%s.addnhosts"
                                     % net_name, name=net_name)
            if dhcp_start_ipv6:
                run_dnsmasq_default_test("enable-ra", name=net_name)

        if test_dns_host:
            if net_dns_txt:
                dns_txt = ast.literal_eval(net_dns_txt)
                run_dnsmasq_default_test("txt-record", "%s,%s"
                                         % (dns_txt["name"],
                                            dns_txt["value"]))
            if net_dns_srv:
                dns_srv = ast.literal_eval(net_dns_srv)
                run_dnsmasq_default_test("srv-host", "_%s._%s.%s,%s,%s,%s,%s"
                                         % (dns_srv["service"],
                                            dns_srv["protocol"],
                                            dns_srv["domain"],
                                            dns_srv["target"],
                                            dns_srv["port"],
                                            dns_srv["priority"],
                                            dns_srv["weight"]))
            if net_dns_hostip and net_dns_hostnames:
                run_dnsmasq_addnhosts_test(net_dns_hostip, net_dns_hostnames)

        # Run bandwidth test for network
        if test_qos_bandwidth:
            run_bandwidth_test(check_net=True)
        # Check routes if needed
        if routes:
            check_host_routes()

        try:
            # Start the VM.
            vm.start()
            if start_error:
                test.fail("VM started unexpectedly")
            if pxe_boot:
                # Just check network boot messages here
                vm.serial_console.read_until_output_matches(
                    ["Loading vmlinuz", "Loading initrd.img"],
                    utils_misc.strip_console_codes)
                output = vm.serial_console.get_stripped_output()
                logging.debug("Boot messages: %s", output)
            else:
                if serial_login:
                    session = vm.wait_for_serial_login(username=username,
                                                       password=password)
                else:
                    session = vm.wait_for_login()

                if test_dhcp_range:
                    dhcp_range = int(params.get("dhcp_range", "252"))
                    utils_net.restart_guest_network(session, iface_mac)
                    vm_ip = utils_net.get_guest_ip_addr(session, iface_mac)
                    logging.debug("Guest has ip: %s", vm_ip)
                    if not vm_ip and dhcp_range:
                        test.fail("Guest has invalid ip address")
                    elif vm_ip and not dhcp_range:
                        test.fail("Guest has ip address: %s" % vm_ip)
                    dhcp_range = dhcp_range - 1
                    for vms in vms_list:
                        # Start other VMs.
                        vms.start()
                        sess = vms.wait_for_serial_login()
                        vms_mac = vms.get_virsh_mac_address()
                        # restart guest network to get ip addr
                        utils_net.restart_guest_network(sess, vms_mac)
                        vms_ip = utils_net.get_guest_ip_addr(sess, vms_mac)
                        if not vms_ip and dhcp_range:
                            test.fail("Guest has invalid ip address")
                        elif vms_ip and not dhcp_range:
                            # Get IP address on guest should return Null
                            # if it exceeds the dhcp range
                            test.fail("Guest has ip address: %s" % vms_ip)
                        dhcp_range = dhcp_range - 1
                        if vms_ip:
                            ping_s, _ = ping(dest=vm_ip, count=5,
                                             timeout=10, session=sess)
                            if ping_s:
                                test.fail("Failed to ping, src: %s, "
                                          "dst: %s" % (vms_ip, vm_ip))
                        sess.close()

                # Check dnsmasq settings if take affect in guest
                if guest_ipv4:
                    check_name_ip(session)

                # Run bandwidth test for interface
                if test_qos_bandwidth:
                    run_bandwidth_test(check_iface=True)
                # Run bandwidth test for portgroup
                if test_pg_bandwidth:
                    pg_bandwidth_inbound = params.get(
                        "portgroup_bandwidth_inbound", "").split()
                    pg_bandwidth_outbound = params.get(
                        "portgroup_bandwidth_outbound", "").split()
                    pg_name = params.get("portgroup_name", "").split()
                    pg_default = params.get("portgroup_default", "").split()
                    iface_inbound = ast.literal_eval(iface_bandwidth_inbound)
                    iface_outbound = ast.literal_eval(iface_bandwidth_outbound)
                    iface_name = libvirt.get_ifname_host(vm_name, iface_mac)
                    if_source = ast.literal_eval(iface_source)
                    if "portgroup" in if_source:
                        pg = if_source["portgroup"]
                    else:
                        pg = "default"
                    for (name, df, bw_ib, bw_ob) in zip(pg_name, pg_default,
                                                        pg_bandwidth_inbound,
                                                        pg_bandwidth_outbound):
                        if pg == name:
                            inbound = ast.literal_eval(bw_ib)
                            outbound = ast.literal_eval(bw_ob)
                        elif pg == "default" and df == "yes":
                            inbound = ast.literal_eval(bw_ib)
                            outbound = ast.literal_eval(bw_ob)
                        else:
                            continue
                        # Interface bandwidth settings will
                        # overwriting portgroup settings
                        if iface_inbound:
                            inbound = iface_inbound
                        if iface_outbound:
                            outbound = iface_outbound
                        check_class_rules(iface_name, "1:1", inbound)
                        check_filter_rules(iface_name, outbound)
                if test_qos_remove:
                    # Remove the bandwidth settings in network xml
                    logging.debug("Removing network bandwidth settings...")
                    netxml_backup.sync()
                    vm.destroy(gracefully=False)
                    # Should fail to start vm
                    vm.start()
                    if restart_error:
                        test.fail("VM started unexpectedly")
                # NOTE(review): 'forward' is only assigned in the
                # create_network branch above; these checks assume
                # create_network=yes for ipv4/ipv6 address tests — confirm.
                if test_ipv6_address:
                    ipt_rules = check_ipt_rules(check_ipv6=True)
                    if not ("mode" in forward and forward["mode"] == "open"):
                        run_ip_test(session, "ipv6")
                if test_ipv4_address:
                    ipt_rules = check_ipt_rules(check_ipv4=True)
                    if not ("mode" in forward and forward["mode"] == "open"):
                        run_ip_test(session, "ipv4")
                if test_guest_libvirt:
                    run_guest_libvirt(session)

                session.close()
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not (start_error or restart_error):
                test.fail('VM failed to start:\n%s' % details)

        # Destroy created network and check iptable rules
        if net_name != "default":
            virsh.net_destroy(net_name)
        if ipt_rules:
            output_des = to_text(process.system_output('iptables-save'))
            for ipt in ipt_rules:
                if re.search(r"%s" % ipt, output_des, re.M):
                    test.fail("Find iptable rule %s after net destroyed" % ipt)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        for vms in vms_list:
            virsh.remove_domain(vms.name, "--remove-all-storage")
        logging.info("Restoring network...")
        if net_name == "default":
            netxml_backup.sync()
        else:
            # Destroy and undefine new created network
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name)
        vmxml_backup.sync()

        if test_ipv6_address and original_accept_ra != '2':
            process.system(sysctl_cmd + "=%s" % original_accept_ra)
def run(test, params, env):
    """
    Test command: virsh net-start.

    Exercises starting the 'default' libvirt network (by name or uuid),
    including negative cases (polkit ACL / read-only connections), a
    route-conflict scenario between a custom network "def" and 'default',
    and restarting the network around firewalld restarts.

    :param test: test object (provides cancel/fail)
    :param params: test parameters dictionary
    :param env: test environment object
    """
    # Gather test parameters
    uri = libvirt_vm.normalize_connect_uri(params.get("connect_uri",
                                                      "default"))
    status_error = "yes" == params.get("status_error", "no")
    inactive_default = "yes" == params.get("net_start_inactive_default",
                                           "yes")
    net_ref = params.get("net_start_net_ref", "netname")  # default is tested
    extra = params.get("net_start_options_extra", "")  # extra cmd-line params.
    route_test = "yes" == params.get("route_test", "no")
    firewalld_operate = params.get("firewalld_operate", None)

    # make easy to maintain
    virsh_dargs = {'uri': uri, 'debug': True, 'ignore_status': True}
    virsh_instance = virsh.VirshPersistent(**virsh_dargs)

    # libvirt acl polkit related params
    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")
    virsh_uri = params.get("virsh_uri")
    if virsh_uri and not utils_split_daemons.is_modular_daemon():
        virsh_uri = "qemu:///system"
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    # Get all network instance
    origin_nets = network_xml.NetworkXML.new_all_networks_dict(virsh_instance)

    # Prepare default network for following test.
    try:
        default_netxml = origin_nets['default']
    except KeyError:
        virsh_instance.close_session()
        test.cancel("Test requires default network to exist")
    try:
        # To confirm default network is active
        if not default_netxml.active:
            default_netxml.active = True

        # inactive default according test's need
        if inactive_default:
            logging.info("Stopped default network")
            default_netxml.active = False

        # State before run command
        origin_state = virsh_instance.net_state_dict()
        logging.debug("Origin network(s) state: %s", origin_state)

        if route_test:
            # define a network "def" with route address is "192.168.122.x"
            # 1. start def first then start default
            current_state = virsh_instance.net_state_dict()
            if 'def' in current_state:
                virsh.net_destroy("def", ignore_status=True)
                virsh.net_undefine("def", ignore_status=True)
            expect_fail = "yes" == params.get("expect_start_fail", "no")
            # "def" routes into 192.168.122.0/24 which clashes with the
            # stock 'default' network, so only one of the two can start.
            test_xml = network_xml.NetworkXML(network_name="def")
            test_xml.forward = {'mode': 'nat'}
            test_xml.routes = [{
                'address': '192.168.122.0',
                'prefix': '24',
                'gateway': '192.168.100.1'
            }]
            ipxml = IPXML(address='192.168.100.1', netmask='255.255.255.0')
            range_4 = network_xml.RangeXML()
            range_4.attrs = {
                'start': '192.168.100.2',
                'end': '192.168.100.254'
            }
            ipxml.dhcp_ranges = range_4
            test_xml.ip = ipxml
            test_xml.define()
            virsh.net_start("def")
            # start default, should fail
            result = virsh.net_start("default")
            logging.debug(result)
            libvirt.check_exit_status(result, expect_error=expect_fail)

            # 2. start default then start def
            virsh.net_destroy("def")
            virsh.net_start("default")
            current_state11 = virsh_instance.net_state_dict()
            logging.debug("before start 2nd network(s) state: %s",
                          current_state11)
            # start def, should fail
            result = virsh.net_start("def")
            logging.debug(result)
            libvirt.check_exit_status(result, expect_error=expect_fail)
            current_state12 = virsh_instance.net_state_dict()
            logging.debug("after start 2nd network(s) state: %s",
                          current_state12)
            # clear the env
            virsh.net_undefine("def")
        else:
            # Resolve the net-start argument to the requested reference kind.
            if net_ref == "netname":
                net_ref = default_netxml.name
            elif net_ref == "netuuid":
                net_ref = default_netxml.uuid
            if params.get('setup_libvirt_polkit') == 'yes':
                # Negative ACL case: run as unprivileged user over virsh_uri.
                virsh_dargs = {
                    'uri': virsh_uri,
                    'unprivileged_user': unprivileged_user,
                    'debug': False,
                    'ignore_status': True
                }
            if params.get('net_start_readonly', 'no') == 'yes':
                # Negative case: read-only connection cannot start networks.
                virsh_dargs = {
                    'uri': uri,
                    'debug': True,
                    'readonly': True,
                    'ignore_status': True
                }

        # Run test case
        if 'unprivileged_user' in virsh_dargs and status_error:
            # NOTE(review): in this branch only the command is executed; the
            # status_error verification below is skipped — presumably the
            # polkit denial is implied by configuration. TODO confirm.
            test_virsh = virsh.VirshPersistent(
                unprivileged_user=virsh_dargs['unprivileged_user'])
            virsh_dargs.pop('unprivileged_user')
            result = test_virsh.net_start(net_ref, extra, **virsh_dargs)
            test_virsh.close_session()
        elif not route_test:
            result = virsh.net_start(net_ref, extra, **virsh_dargs)
            logging.debug(result)
            status = result.exit_status

            # Get current net_stat_dict
            current_state = virsh_instance.net_state_dict()
            logging.debug("Current network(s) state: %s", current_state)
            if 'default' not in current_state:
                test.fail('Network "default" cannot be found')

            if firewalld_operate:
                # current network is active, ensure firewalld is active
                # if not, restart firewalld, then restart libvirtd
                firewalld_service = service.Factory.create_service("firewalld")
                libvirtd_obj = utils_libvirtd.Libvirtd()
                if not firewalld_service.status():
                    firewalld_service.start()
                    libvirtd_obj.restart()
                    virsh_instance = virsh.VirshPersistent(**virsh_dargs)
                if firewalld_operate == "restart":
                    # after firewalld restart, destroy and start the network
                    firewalld_service.restart()
                    time.sleep(5)
                    res1 = virsh.net_destroy(net_ref, extra, **virsh_dargs)
                    # need to add wait time. As libvirt doesn't know that firewalld has restarted until it gets the
                    # dbus message, but that message won't arrive until some time after all of libvirt's chains/rules
                    # have already been removed by the firewalld restart. refer to bug 1942805
                    time.sleep(5)
                    res2 = virsh.net_start(net_ref, extra, **virsh_dargs)
                elif firewalld_operate == "stop_start":
                    # start network which has been destroyed before firewalld restart
                    res1 = virsh.net_destroy(net_ref, extra, **virsh_dargs)
                    firewalld_service.stop()
                    firewalld_service.start()
                    time.sleep(5)
                    res2 = virsh.net_start(net_ref, extra, **virsh_dargs)
                logging.debug(
                    "firewalld_operate is %s, result for start network after firewalld restart: %s",
                    firewalld_operate, res2)
                # Both destroy and start must succeed (exit status 0).
                status1 = res1.exit_status | res2.exit_status
                if status1:
                    test.fail(
                        "Start or destroy network after firewalld restart fail!")

            # Check status_error
            if status_error:
                if not status:
                    test.fail("Run successfully with wrong command!")
            else:
                if status:
                    test.fail("Run failed with right command")
                # Get current net_stat_dict
                current_state = virsh_instance.net_state_dict()
                logging.debug("Current network(s) state: %s", current_state)
                is_default_active = current_state['default']['active']
                if not is_default_active:
                    test.fail(
                        "Execute cmd successfully but default is inactive actually."
                    )
    finally:
        # Cleanup: drop the persistent session, remove the "def" network if
        # it survived, and make sure 'default' is running again.
        # NOTE(review): net_state_dict() is called on virsh_instance AFTER
        # close_session() — presumably VirshPersistent re-spawns its session
        # on demand; verify, otherwise query state before closing.
        virsh_instance.close_session()
        current_state = virsh_instance.net_state_dict()
        if "def" in current_state:
            virsh.net_destroy("def", ignore_status=True)
            virsh.net_undefine("def", ignore_status=True)
        virsh.net_start('default', debug=True, ignore_status=True)
run_ip_test(session, "ipv6") if test_ipv4_address: check_ipt_rules(check_ipv4=True) run_ip_test(session, "ipv4") if test_guest_libvirt: run_guest_libvirt(session) session.close() except virt_vm.VMStartError, details: logging.info(str(details)) if start_error or restart_error: pass else: raise error.TestFail('VM Failed to start for some reason!') finally: # Recover VM. if vm.is_alive(): vm.destroy(gracefully=False) for vms in vms_list: virsh.remove_domain(vms.name, "--remove-all-storage") logging.info("Restoring network...") if net_name == "default": netxml_backup.sync() else: # Destroy and undefine new created network virsh.net_destroy(net_name) virsh.net_undefine(net_name) vmxml_backup.sync()
def run(test, params, env):
    """
    Test mtu feature from virtual network

    Covers setting MTU on a network or on a vm interface for various
    network types (default 'network', linux bridge, openvswitch, macvtap),
    checking the MTU on the host (vnet device / qemu command line) and
    inside the guest, and MTU persistence across save/restore,
    managedsave and hotplug.

    :param test: test object (provides fail/error/cancel)
    :param params: test parameters dictionary
    :param env: test environment object
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')          # 'network' or 'interface'
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')

    def set_network(size, net='default'):
        """
        Set mtu size to a certain network
        """
        logging.info('Set mtu size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))

    def set_interface(mtu_size='', source_network='default',
                      iface_type='network', iface_model='virtio'):
        """
        Set mtu size to a certain interface
        """
        # bridge and openvswitch interfaces share the 'bridge' type name
        interface_type = 'bridge' if iface_type in (
            'bridge', 'openvswitch') else iface_type
        iface_dict = {
            'type': interface_type,
            'source': "{'%s': '%s'}" % (interface_type, source_network),
            'model': iface_model
        }
        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})
        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})
        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)

    def get_default_if():
        """
        Get default interface that is using by vm

        Returns the first host interface reporting 'Link detected: yes',
        or None implicitly if no such interface is found.
        """
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run(
                    'ethtool %s' % iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on host for test

        :return: (connection name, bridge name) created via nmcli
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined

        :param name: network name
        :param network_type: 'bridge', 'openvswitch' or 'macvtap'
        :param base_if: host interface for macvtap forward
        :param kwargs: optional 'bridge_name' and 'mtu'
        :return: path of the generated network xml file
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        return m_net.xml

    def create_iface(iface_type, **kwargs):
        """
        Create a interface to be attached to vm

        :param iface_type: interface type, e.g. 'network' or 'direct'
        :param kwargs: optional 'base_if', 'source_net', 'mtu', 'model_net'
        :return: Interface object
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'],
                              'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface

    def check_mtu(mtu_size, qemu=False):
        """
        Check if mtu meets expectation on host

        :param mtu_size: expected mtu size
        :param qemu: also check host_mtu on the qemu command line
        """
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        ifconfig_info = process.run('ifconfig|grep mtu|grep %s' % dev,
                                    shell=True, verbose=True).stdout_text
        if 'mtu %s' % mtu_size in ifconfig_info:
            logging.info('PASS on ifconfig check for vnet.')
        else:
            error += 'Fail on ifconfig check for vnet.'
        if qemu:
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True, verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size):
        """
        Check if mtu meets expectations in vm

        :param fn_login: login function returning a guest session
        :param mtu_size: expected mtu size
        """
        session = fn_login()
        check_cmd = 'ifconfig'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        # Back up vm and default-network xml for recovery in finally.
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)

        if add_pkg:
            add_pkg = add_pkg.split()
            # Skip installing openvswitch if ovs-vsctl already present.
            if 'openvswitch' in add_pkg and shutil.which('ovs-vsctl'):
                new_pkg = add_pkg.copy()
                new_pkg.remove('openvswitch')
                utils_package.package_install(new_pkg)
            if 'openvswitch' in add_pkg:
                br = 'ovsbr0' + utils_misc.generate_random_string(3)
                process.run('systemctl start openvswitch.service',
                            shell=True, verbose=True)
                process.run('ovs-vsctl add-br %s' % br,
                            shell=True, verbose=True)
                process.run('ovs-vsctl show', shell=True, verbose=True)

        if not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(bridge_name, net_type,
                                                  bridge_name=br)
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)

            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
            if mtu_type == 'network' and with_iface:
                # Interface mtu overrides network mtu; use half the size so
                # the two values are distinguishable in the checks.
                mtu_size = str(int(mtu_size) // 2)
                iface_mtu = mtu_size

            source_net = bridge_name if net_type in (
                'bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu, source_network=source_net,
                          iface_type=iface_type, iface_model=model)
            vm.start()
            # Bridged networks may not give the guest an ip reachable from
            # the host, so fall back to serial login there.
            vm_login = vm.wait_for_serial_login if net_type in (
                'bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = True if mtu_type == 'interface' else False

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network', source_net='default',
                                         mtu=mtu_size, model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + '.save')
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            if mtu_type == 'interface' or with_iface:
                check_mtu_in_vm(vm_login, mtu_size)
                vm_login(timeout=60).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name, 'network %s' % params['mac'],
                                       debug=True)
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail(
                        'Failed to detach interface with mtu after save-restore'
                    )
        else:
            # Negative / special scenarios checked via command exit status.
            hotplug = 'yes' == params.get('hotplug', 'False')
            if check == 'net_update':
                result = virsh.net_update(DEFAULT_NET, 'modify', 'mtu',
                                          '''"<mtu size='%s'/>"''' % mtu_size,
                                          debug=True)
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = 'direct-macvtap' + utils_misc.generate_random_string(
                    3)

                # Test mtu in different type of network
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if, mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(bridge_name, 'bridge',
                                                      mtu=mtu_size,
                                                      bridge_name=br)
                    if check == 'ovswitch_net':
                        # NOTE(review): relies on 'br' created in the add_pkg
                        # openvswitch branch above — confirm test configs
                        # always set add_pkg for this scenario.
                        test_net = create_network_xml(bridge_name,
                                                      'openvswitch',
                                                      mtu=mtu_size,
                                                      bridge_name=br)
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network', source_net=macv_name,
                                             mtu=mtu_size, model_net=model)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct', base_if=base_if,
                                             mtu=mtu_size, model_net=model)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network', source_net='default',
                                     mtu=mtu_size, model_net=model)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])
    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            # NOTE(review): always destroys 'bridge_name', but macvtap test
            # networks are created under 'macv_name' — confirm cleanup
            # actually removes those networks.
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)
def run(test, params, env):
    """
    Test openvswitch support for network.

    1.Prepare test environment,destroy or suspend a VM.
    2.Edit xml and start the domain.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.

    :param test: test object
    :param params: test parameters dictionary
    :param env: test environment object
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    def modify_iface_xml():
        """
        Modify interface xml options

        Rewrites the first interface of the vm: type, source, model and
        virtualport type, all taken from test params, then syncs the xml.
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        xml_devices = vmxml.devices
        iface_index = xml_devices.index(
            xml_devices.by_device_tag("interface")[0])
        iface = xml_devices[iface_index]
        iface_type = params.get("iface_type")
        if iface_type:
            iface.type_name = iface_type
        # NOTE: eval() on a test parameter; params come from trusted test
        # configuration, but ast.literal_eval would be safer.
        source = eval(iface_source)
        if source:
            del iface.source
            iface.source = source
        iface_model = params.get("iface_model", "virtio")
        iface.model = iface_model
        iface_virtualport = params.get("iface_virtualport")
        if iface_virtualport:
            iface.virtualport_type = iface_virtualport
        logging.debug("New interface xml file: %s", iface)
        vmxml.devices = xml_devices
        vmxml.xmltreefile.write()
        vmxml.sync()

    def check_ovs_port(ifname, brname):
        """
        Check OVS port that created by libvirt

        :param ifname: host-side interface (port) name of the vm
        :param brname: expected OVS bridge the port belongs to
        """
        # NOTE: param key "porgroup_name" looks like a typo of
        # "portgroup_name" — kept as-is to match existing test configs.
        pg_name = params.get("porgroup_name", "").split()
        pg_vlan = params.get("portgroup_vlan", "").split()
        if_source = eval(iface_source)
        port_vlan = {}
        # Fix: dict.has_key() was removed in Python 3; use the 'in' operator.
        if "portgroup" in if_source:
            pg = if_source["portgroup"]
            for (name, vlan) in zip(pg_name, pg_vlan):
                if pg == name:
                    port_vlan = eval(vlan)
        # Check bridge name by port name
        _, bridge = utils_net.find_current_bridge(ifname)
        assert bridge == brname
        # Get port info from ovs-vsctl output
        cmd = "ovs-vsctl list port %s" % ifname
        output = utils.run(cmd).stdout
        logging.debug("ovs port output: %s", output)
        for line in output.splitlines():
            if line.count("tag"):
                tag_info = line.rsplit(':')
                if ("id" in port_vlan and tag_info[0] == "tag"):
                    assert port_vlan["id"] == tag_info[1]
            elif line.count("vlan_mode"):
                mode_info = line.rsplit(':')
                if ("nativeMode" in port_vlan
                        and mode_info[0] == "vlan_mode"):
                    assert (port_vlan["nativeMode"] == "native-%s"
                            % mode_info[1])

    start_error = "yes" == params.get("start_error", "no")

    # network specific attributes.
    net_name = params.get("net_name", "default")
    net_bridge = params.get("net_bridge", "{'name':'virbr0'}")
    iface_source = params.get("iface_source", "{}")
    create_network = "yes" == params.get("create_network", "no")
    change_iface_option = "yes" == params.get("change_iface_option", "no")
    test_ovs_port = "yes" == params.get("test_ovs_port", "no")

    # Destroy the guest first
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    netxml_backup = NetworkXML.new_from_net_dumpxml("default")
    iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name)
    params["guest_mac"] = iface_mac
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bridge_name = eval(net_bridge)['name']

    # Build the xml and run test.
    try:
        # Edit the network xml or create a new one.
        if create_network:
            # Try to add ovs bridge first
            if not utils_net.ovs_br_exists(bridge_name):
                utils_net.add_ovs_bridge(bridge_name)
            netxml = libvirt.create_net_xml(net_name, params)
            netxml.sync()
        # Edit the interface xml.
        if change_iface_option:
            # Try to add bridge if needed
            source = eval(iface_source)
            if source:
                # Fix: dict.has_key() removed in Python 3.
                if "bridge" in source:
                    if not utils_net.ovs_br_exists(source["bridge"]):
                        utils_net.add_ovs_bridge(source["bridge"])
            modify_iface_xml()
        try:
            # Start the VM.
            vm.start()
            if start_error:
                raise error.TestFail("VM started unexpectedly")
            iface_name = libvirt.get_ifname_host(vm_name, iface_mac)
            if test_ovs_port:
                check_ovs_port(iface_name, bridge_name)
        # Fix: 'except Exc, name' is Python-2-only syntax (SyntaxError on
        # Python 3); use the 'as' form.
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            # An expected start failure is a pass; anything else fails.
            if not start_error:
                raise error.TestFail('VM Failed to start for some reason!')
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring network...")
        if net_name == "default":
            netxml_backup.sync()
        else:
            # Destroy and undefine new created network
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name)
        # Try to recovery ovs bridge
        if utils_net.ovs_br_exists(bridge_name):
            utils_net.del_ovs_bridge(bridge_name)
        vmxml_backup.sync()
def run(test, params, env): """ Test interafce xml options. 1.Prepare test environment,destroy or suspend a VM. 2.Edit xml and start the domain. 3.Perform test operation. 4.Recover test environment. 5.Confirm the test result. """ vm_name = params.get("main_vm") vm = env.get_vm(vm_name) def prepare_pxe_boot(): """ Prepare tftp server and pxe boot files """ pkg_list = ["syslinux", "tftp-server", "tftp", "ipxe-roms-qemu", "wget"] # Try to install required packages if not utils_package.package_install(pkg_list): test.error("Failed ot install required packages") boot_initrd = params.get("boot_initrd", "EXAMPLE_INITRD") boot_vmlinuz = params.get("boot_vmlinuz", "EXAMPLE_VMLINUZ") if boot_initrd.count("EXAMPLE") or boot_vmlinuz.count("EXAMPLE"): test.cancel("Please provide initrd/vmlinuz URL") # Download pxe boot images process.system("wget %s -O %s/initrd.img" % (boot_initrd, tftp_root)) process.system("wget %s -O %s/vmlinuz" % (boot_vmlinuz, tftp_root)) process.system("cp -f /usr/share/syslinux/pxelinux.0 {0};" " mkdir -m 777 -p {0}/pxelinux.cfg".format(tftp_root), shell=True) pxe_file = "%s/pxelinux.cfg/default" % tftp_root boot_txt = """ DISPLAY boot.txt DEFAULT rhel LABEL rhel kernel vmlinuz append initrd=initrd.img PROMPT 1 TIMEOUT 3""" with open(pxe_file, 'w') as p_file: p_file.write(boot_txt) def modify_iface_xml(): """ Modify interface xml options """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) if pxe_boot: # Config boot console for pxe boot osxml = vm_xml.VMOSXML() osxml.type = vmxml.os.type osxml.arch = vmxml.os.arch osxml.machine = vmxml.os.machine osxml.loader = "/usr/share/seabios/bios.bin" osxml.bios_useserial = "yes" osxml.bios_reboot_timeout = "-1" osxml.boots = ['network'] del vmxml.os vmxml.os = osxml xml_devices = vmxml.devices iface_index = xml_devices.index( xml_devices.by_device_tag("interface")[0]) iface = xml_devices[iface_index] iface_bandwidth = {} iface_inbound = ast.literal_eval(iface_bandwidth_inbound) iface_outbound = 
ast.literal_eval(iface_bandwidth_outbound) if iface_inbound: iface_bandwidth["inbound"] = iface_inbound if iface_outbound: iface_bandwidth["outbound"] = iface_outbound if iface_bandwidth: bandwidth = iface.new_bandwidth(**iface_bandwidth) iface.bandwidth = bandwidth iface_type = params.get("iface_type", "network") iface.type_name = iface_type source = ast.literal_eval(iface_source) if not source: source = {"network": "default"} net_ifs = utils_net.get_net_if(state="UP") # Check source device is valid or not, # if it's not in host interface list, try to set # source device to first active interface of host if (iface.type_name == "direct" and 'dev' in source and source['dev'] not in net_ifs): logging.warn("Source device %s is not a interface" " of host, reset to %s", source['dev'], net_ifs[0]) source['dev'] = net_ifs[0] del iface.source iface.source = source if iface_model: iface.model = iface_model if iface_rom: iface.rom = eval(iface_rom) if iface_boot: vmxml.remove_all_boots() iface.boot = iface_boot logging.debug("New interface xml file: %s", iface) vmxml.devices = xml_devices vmxml.xmltreefile.write() vmxml.sync() def run_dnsmasq_default_test(key, value=None, exists=True, name="default"): """ Test dnsmasq configuration. 
:param key: key in conf file to check :param value: value in conf file to check :param exists: check the key:value exist or not :param name: The name of conf file """ conf_file = "/var/lib/libvirt/dnsmasq/%s.conf" % name if not os.path.exists(conf_file): test.cancel("Can't find %s.conf file" % name) configs = "" with open(conf_file, 'r') as f: configs = f.read() logging.debug("configs in file %s: %s", conf_file, configs) if value: config = "%s=%s" % (key, value) else: config = key if not configs.count(config): if exists: test.fail("Can't find %s=%s in configuration file" % (key, value)) else: if not exists: test.fail("Found %s=%s in configuration file" % (key, value)) def run_dnsmasq_addnhosts_test(hostip, hostnames): """ Test host ip and names configuration """ conf_file = "/var/lib/libvirt/dnsmasq/default.addnhosts" hosts_re = ".*".join(hostnames) configs = "" with open(conf_file, 'r') as f: configs = f.read() logging.debug("configs in file %s: %s", conf_file, configs) if not re.search(r"%s.*%s" % (hostip, hosts_re), configs, re.M): test.fail("Can't find '%s' in configuration file" % hostip) def run_dnsmasq_host_test(iface_mac, guest_ip, guest_name): """ Test host name and ip configuration for dnsmasq """ conf_file = "/var/lib/libvirt/dnsmasq/default.hostsfile" config = "%s,%s,%s" % (iface_mac, guest_ip, guest_name) configs = "" with open(conf_file, 'r') as f: configs = f.read() logging.debug("configs in file %s: %s", conf_file, configs) if not configs.count(config): test.fail("Can't find host configuration in file %s" % conf_file) def check_class_rules(ifname, rule_id, bandwidth): """ Check bandwidth settings via 'tc class' output """ cmd = "tc class show dev %s" % ifname class_output = to_text(process.system_output(cmd)) logging.debug("Bandwidth class output: %s", class_output) class_pattern = (r"class htb %s.*rate (\d+)(K?M?)bit ceil" " (\d+)(K?M?)bit burst (\d+)(K?M?)b.*" % rule_id) se = re.search(class_pattern, class_output, re.M) if not se: test.fail("Can't 
find outbound setting for htb %s" % rule_id) logging.debug("bandwidth from tc output:%s" % str(se.groups())) rate = None if "floor" in bandwidth: rate = int(bandwidth["floor"]) * 8 elif "average" in bandwidth: rate = int(bandwidth["average"]) * 8 if rate: if se.group(2) == 'M': rate_check = int(se.group(1)) * 1000 else: rate_check = int(se.group(1)) assert rate_check == rate if "peak" in bandwidth: if se.group(4) == 'M': ceil_check = int(se.group(3)) * 1000 else: ceil_check = int(se.group(3)) assert ceil_check == int(bandwidth["peak"]) * 8 if "burst" in bandwidth: if se.group(6) == 'M': tc_burst = int(se.group(5)) * 1024 else: tc_burst = int(se.group(5)) assert tc_burst == int(bandwidth["burst"]) def check_filter_rules(ifname, bandwidth): """ Check bandwidth settings via 'tc filter' output """ cmd = "tc -d filter show dev %s parent ffff:" % ifname filter_output = to_text(process.system_output(cmd)) logging.debug("Bandwidth filter output: %s", filter_output) if not filter_output.count("filter protocol all pref"): test.fail("Can't find 'protocol all' settings in filter rules") filter_pattern = ".*police.*rate (\d+)(K?M?)bit burst (\d+)(K?M?)b.*" se = re.search(r"%s" % filter_pattern, filter_output, re.M) if not se: test.fail("Can't find any filter policy") logging.debug("bandwidth from tc output:%s" % str(se.groups())) logging.debug("bandwidth from setting:%s" % str(bandwidth)) if "average" in bandwidth: if se.group(2) == 'M': tc_average = int(se.group(1)) * 1000 else: tc_average = int(se.group(1)) assert tc_average == int(bandwidth["average"]) * 8 if "burst" in bandwidth: if se.group(4) == 'M': tc_burst = int(se.group(3)) * 1024 else: tc_burst = int(se.group(3)) assert tc_burst == int(bandwidth["burst"]) def check_host_routes(): """ Check network routes on host """ for rt in routes: try: route = ast.literal_eval(rt) addr = "%s/%s" % (route["address"], route["prefix"]) cmd = "ip route list %s" % addr if "family" in route and route["family"] == "ipv6": cmd = "ip -6 
route list %s" % addr output = to_text(process.system_output(cmd)) match_obj = re.search(r"via (\S+).*metric (\d+)", output) if match_obj: via_addr = match_obj.group(1) metric = match_obj.group(2) logging.debug("via address %s for %s, matric is %s" % (via_addr, addr, metric)) assert via_addr == route["gateway"] if "metric" in route: assert metric == route["metric"] except KeyError: pass def run_bandwidth_test(check_net=False, check_iface=False): """ Test bandwidth option for network or interface by tc command. """ iface_inbound = ast.literal_eval(iface_bandwidth_inbound) iface_outbound = ast.literal_eval(iface_bandwidth_outbound) net_inbound = ast.literal_eval(net_bandwidth_inbound) net_outbound = ast.literal_eval(net_bandwidth_outbound) net_bridge_name = ast.literal_eval(net_bridge)["name"] iface_name = libvirt.get_ifname_host(vm_name, iface_mac) try: if check_net and net_inbound: # Check qdisc rules cmd = "tc -d qdisc show dev %s" % net_bridge_name qdisc_output = to_text(process.system_output(cmd)) logging.debug("Bandwidth qdisc output: %s", qdisc_output) if not qdisc_output.count("qdisc ingress ffff:"): test.fail("Can't find ingress setting") check_class_rules(net_bridge_name, "1:1", {"average": net_inbound["average"], "peak": net_inbound["peak"]}) check_class_rules(net_bridge_name, "1:2", net_inbound) # Check filter rules on bridge interface if check_net and net_outbound: check_filter_rules(net_bridge_name, net_outbound) # Check class rules on interface inbound settings if check_iface and iface_inbound: check_class_rules(iface_name, "1:1", {'average': iface_inbound['average'], 'peak': iface_inbound['peak'], 'burst': iface_inbound['burst']}) if "floor" in iface_inbound: if not libvirt_version.version_compare(1, 0, 1): test.cancel("Not supported Qos options 'floor'") check_class_rules(net_bridge_name, "1:3", {'floor': iface_inbound["floor"]}) # Check filter rules on interface outbound settings if check_iface and iface_outbound: check_filter_rules(iface_name, 
iface_outbound) except AssertionError: stacktrace.log_exc_info(sys.exc_info()) test.fail("Failed to check network bandwidth") def check_name_ip(session): """ Check dns resolving on guest """ # Check if bind-utils is installed if "ubuntu" in vm.get_distro().lower(): pkg = "bind9" else: pkg = "bind-utils" if not utils_package.package_install(pkg, session): test.error("Failed to install bind-utils on guest") # Run host command to check if hostname can be resolved if not guest_ipv4 and not guest_ipv6: test.fail("No ip address found from parameters") guest_ip = guest_ipv4 if guest_ipv4 else guest_ipv6 cmd = "host %s | grep %s" % (guest_name, guest_ip) if session.cmd_status(cmd): test.fail("Can't resolve name %s on guest" % guest_name) def check_ipt_rules(check_ipv4=True, check_ipv6=False): """ Check iptables for network/interface """ br_name = ast.literal_eval(net_bridge)["name"] net_forward = ast.literal_eval(params.get("net_forward", "{}")) net_ipv4 = params.get("net_ipv4") net_ipv6 = params.get("net_ipv6") net_dev_in = "" net_dev_out = "" if "dev" in net_forward: net_dev_in = " -i %s" % net_forward["dev"] net_dev_out = " -o %s" % net_forward["dev"] ipt_rules = ( "INPUT -i %s -p udp -m udp --dport 53 -j ACCEPT" % br_name, "INPUT -i %s -p tcp -m tcp --dport 53 -j ACCEPT" % br_name, "FORWARD -i {0} -o {0} -j ACCEPT".format(br_name), "FORWARD -o %s -j REJECT --reject-with icmp" % br_name, "FORWARD -i %s -j REJECT --reject-with icmp" % br_name) if check_ipv4: ipv4_rules = list(ipt_rules) ipv4_rules.extend( ["INPUT -i %s -p udp -m udp --dport 67 -j ACCEPT" % br_name, "INPUT -i %s -p tcp -m tcp --dport 67 -j ACCEPT" % br_name, "OUTPUT -o %s -p udp -m udp --dport 68 -j ACCEPT" % br_name, "POSTROUTING -o %s -p udp -m udp --dport 68 " "-j CHECKSUM --checksum-fill" % br_name]) ctr_rule = "" nat_rules = [] if "mode" in net_forward and net_forward["mode"] == "nat": nat_port = ast.literal_eval(params.get("nat_port")) p_start = nat_port["start"] p_end = nat_port["end"] ctr_rule = " 
-m .* RELATED,ESTABLISHED" nat_rules = [("POSTROUTING -s {0} ! -d {0} -p tcp -j MASQUERADE" " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)), ("POSTROUTING -s {0} ! -d {0} -p udp -j MASQUERADE" " --to-ports {1}-{2}".format(net_ipv4, p_start, p_end)), ("POSTROUTING -s {0} ! -d {0}" " -j MASQUERADE".format(net_ipv4))] if nat_rules: ipv4_rules.extend(nat_rules) if (net_ipv4 and "mode" in net_forward and net_forward["mode"] in ["nat", "route"]): rules = [("FORWARD -d %s%s -o %s%s -j ACCEPT" % (net_ipv4, net_dev_in, br_name, ctr_rule)), ("FORWARD -s %s -i %s%s -j ACCEPT" % (net_ipv4, br_name, net_dev_out))] ipv4_rules.extend(rules) output = to_text(process.system_output('iptables-save')) logging.debug("iptables: %s", output) if "mode" in net_forward and net_forward["mode"] == "open": if re.search(r"%s|%s" % (net_ipv4, br_name), output, re.M): test.fail("Find iptable rule for open mode") utils_libvirtd.libvirtd_restart() output_again = to_text(process.system_output('iptables-save')) if re.search(r"%s|%s" % (net_ipv4, br_name), output_again, re.M): test.fail("Find iptable rule for open mode after restart " "libvirtd") else: logging.info("Can't find iptable rule for open mode as expected") else: for ipt in ipv4_rules: if not re.search(r"%s" % ipt, output, re.M): test.fail("Can't find iptable rule:\n%s" % ipt) return ipv4_rules if check_ipv6: ipv6_rules = list(ipt_rules) ipt6_rules.extend([ ("INPUT -i %s -p udp -m udp --dport 547 -j ACCEPT" % br_name)]) if (net_ipv6 and "mode" in net_forward and net_forward["mode"] in ["nat", "route"]): rules = [("FORWARD -d %s%s -o %s -j ACCEPT" % (net_ipv6, net_dev_in, br_name)), ("FORWARD -s %s -i %s%s -j ACCEPT" % (net_ipv6, br_name, net_dev_out))] ipv6_rules.extend(rules) output = to_text(process.system_output("ip6tables-save")) logging.debug("ip6tables: %s", output) if "mode" in net_forward and net_forward["mode"] == "open": if re.search(r"%s|%s" % (net_ipv6, br_name), output, re.M): test.fail("Find ip6table rule for open 
mode") utils_libvirtd.libvirtd_restart() output_again = to_text(process.system_output('ip6tables-save')) if re.search(r"%s|%s" % (net_ipv6, br_name), output_again, re.M): test.fail("Find ip6table rule for open mode after restart " "libvirtd") else: for ipt in ipv6_rules: if not re.search(r"%s" % ipt, output, re.M): test.fail("Can't find ip6table rule:\n%s" % ipt) return ipv6_rules def run_ip_test(session, ip_ver): """ Check iptables on host and ipv6 address on guest """ if ip_ver == "ipv6": # Clean up iptables rules for guest to get ipv6 address session.cmd_status("ip6tables -F") # It may take some time to get the ip address def get_ip_func(): return utils_net.get_guest_ip_addr(session, iface_mac, ip_version=ip_ver) utils_misc.wait_for(get_ip_func, 5) if not get_ip_func(): utils_net.restart_guest_network(session, iface_mac, ip_version=ip_ver) utils_misc.wait_for(get_ip_func, 5) vm_ip = get_ip_func() logging.debug("Guest has ip: %s", vm_ip) if not vm_ip: test.fail("Can't find ip address on guest") ip_gateway = net_ip_address if ip_ver == "ipv6": ip_gateway = net_ipv6_address # Cleanup ip6talbes on host for ping6 test process.system("ip6tables -F") if ip_gateway and not routes: ping_s, _ = ping(dest=ip_gateway, count=5, timeout=10, session=session) if ping_s: test.fail("Failed to ping gateway address: %s" % ip_gateway) def run_guest_libvirt(session): """ Check guest libvirt network """ # Try to install required packages if "ubuntu" in vm.get_distro().lower(): pkg = "libvirt-bin" else: pkg = "libvirt" if not utils_package.package_install(pkg, session): test.error("Failed to install libvirt package on guest") # Try to load tun module first session.cmd("lsmod | grep tun || modprobe tun") # Check network state on guest cmd = ("service libvirtd restart; virsh net-info default" " | grep 'Active:.*yes'") if session.cmd_status(cmd): test.fail("'default' network isn't in active state") # Try to destroy&start default network on guest for opt in ['net-destroy', 'net-start']: 
cmd = "virsh %s default" % opt status, output = session.cmd_status_output(cmd) logging.debug("Run %s on guest exit %s, output %s" % (cmd, status, output)) if status: test.fail(output) if not utils_package.package_remove("libvirt*", session): test.error("Failed to remove libvirt packages on guest") start_error = "yes" == params.get("start_error", "no") define_error = "yes" == params.get("define_error", "no") restart_error = "yes" == params.get("restart_error", "no") # network specific attributes. net_name = params.get("net_name", "default") net_bridge = params.get("net_bridge", "{'name':'virbr0'}") net_domain = params.get("net_domain") net_ip_address = params.get("net_ip_address") net_ipv6_address = params.get("net_ipv6_address") net_dns_forward = params.get("net_dns_forward") net_dns_txt = params.get("net_dns_txt") net_dns_srv = params.get("net_dns_srv") net_dns_hostip = params.get("net_dns_hostip") net_dns_hostnames = params.get("net_dns_hostnames", "").split() dhcp_start_ipv4 = params.get("dhcp_start_ipv4") dhcp_end_ipv4 = params.get("dhcp_end_ipv4") dhcp_start_ipv6 = params.get("dhcp_start_ipv6") dhcp_end_ipv6 = params.get("dhcp_end_ipv6") guest_name = params.get("guest_name") guest_ipv4 = params.get("guest_ipv4") guest_ipv6 = params.get("guest_ipv6") tftp_root = params.get("tftp_root") pxe_boot = "yes" == params.get("pxe_boot", "no") routes = params.get("routes", "").split() net_bandwidth_inbound = params.get("net_bandwidth_inbound", "{}") net_bandwidth_outbound = params.get("net_bandwidth_outbound", "{}") iface_bandwidth_inbound = params.get("iface_bandwidth_inbound", "{}") iface_bandwidth_outbound = params.get("iface_bandwidth_outbound", "{}") iface_num = params.get("iface_num", "1") iface_source = params.get("iface_source", "{}") iface_rom = params.get("iface_rom") iface_boot = params.get("iface_boot") iface_model = params.get("iface_model") multiple_guests = params.get("multiple_guests") create_network = "yes" == params.get("create_network", "no") 
attach_iface = "yes" == params.get("attach_iface", "no") serial_login = "******" == params.get("serial_login", "no") change_iface_option = "yes" == params.get("change_iface_option", "no") test_bridge = "yes" == params.get("test_bridge", "no") test_dnsmasq = "yes" == params.get("test_dnsmasq", "no") test_dhcp_range = "yes" == params.get("test_dhcp_range", "no") test_dns_host = "yes" == params.get("test_dns_host", "no") test_qos_bandwidth = "yes" == params.get("test_qos_bandwidth", "no") test_pg_bandwidth = "yes" == params.get("test_portgroup_bandwidth", "no") test_qos_remove = "yes" == params.get("test_qos_remove", "no") test_ipv4_address = "yes" == params.get("test_ipv4_address", "no") test_ipv6_address = "yes" == params.get("test_ipv6_address", "no") test_guest_libvirt = "yes" == params.get("test_guest_libvirt", "no") net_no_bridge = "yes" == params.get("no_bridge", "no") net_no_mac = "yes" == params.get("no_mac", "no") net_no_ip = "yes" == params.get("no_ip", "no") net_with_dev = "yes" == params.get("with_dev", "no") username = params.get("username") password = params.get("password") forward = ast.literal_eval(params.get("net_forward", "{}")) boot_failure = "yes" == params.get("boot_failure", "no") ipt_rules = [] ipt6_rules = [] # Destroy VM first if vm.is_alive(): vm.destroy(gracefully=False) # Back up xml file. 
netxml_backup = NetworkXML.new_from_net_dumpxml("default") iface_mac = vm_xml.VMXML.get_first_mac_by_name(vm_name) params["guest_mac"] = iface_mac vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) vms_list = [] if "floor" in ast.literal_eval(iface_bandwidth_inbound): if not libvirt_version.version_compare(1, 0, 1): test.cancel("Not supported Qos options 'floor'") # Enabling IPv6 forwarding with RA routes without accept_ra set to 2 # is likely to cause routes loss sysctl_cmd = 'sysctl net.ipv6.conf.all.accept_ra' original_accept_ra = to_text(process.system_output(sysctl_cmd + ' -n')) if test_ipv6_address and original_accept_ra != '2': process.system(sysctl_cmd + '=2') # Build the xml and run test. try: if test_dnsmasq: # Check the settings before modifying network xml if net_dns_forward == "no": run_dnsmasq_default_test("domain-needed", exists=False) run_dnsmasq_default_test("local", "//", exists=False) if net_domain: run_dnsmasq_default_test("domain", net_domain, exists=False) run_dnsmasq_default_test("expand-hosts", exists=False) # Prepare pxe boot directory if pxe_boot: prepare_pxe_boot() # Edit the network xml or create a new one. if create_network: net_ifs = utils_net.get_net_if(state="UP") # Check forward device is valid or not, # if it's not in host interface list, try to set # forward device to first active interface of host if ('mode' in forward and forward['mode'] in ['passthrough', 'private', 'bridge', 'macvtap'] and 'dev' in forward and forward['dev'] not in net_ifs): logging.warn("Forward device %s is not a interface" " of host, reset to %s", forward['dev'], net_ifs[0]) forward['dev'] = net_ifs[0] params["net_forward"] = str(forward) forward_iface = params.get("forward_iface") if forward_iface: interface = [x for x in forward_iface.split()] # The guest will use first interface of the list, # check if it's valid or not, if it's not in host # interface list, try to set forward interface to # first active interface of host. 
if interface[0] not in net_ifs: logging.warn("Forward interface %s is not a " " interface of host, reset to %s", interface[0], net_ifs[0]) interface[0] = net_ifs[0] params["forward_iface"] = " ".join(interface) netxml = libvirt.create_net_xml(net_name, params) if "mode" in forward and forward["mode"] == "open": netxml.mac = utils_net.generate_mac_address_simple() try: if net_no_bridge: netxml.del_bridge() if net_no_ip: netxml.del_ip() netxml.del_ip() if net_no_mac: netxml.del_mac() except xcepts.LibvirtXMLNotFoundError: pass if net_with_dev: net_forward = netxml.forward net_forward.update({"dev": net_ifs[0]}) netxml.forward = net_forward logging.info("netxml before define is %s", netxml) try: netxml.sync() except xcepts.LibvirtXMLError as details: logging.info(str(details)) if define_error: return else: test.fail("Failed to define network") # Check open mode network xml if "mode" in forward and forward["mode"] == "open": netxml_new = NetworkXML.new_from_net_dumpxml(net_name) logging.info("netxml after define is %s", netxml_new) try: if net_no_bridge: net_bridge = str(netxml_new.bridge) if net_no_mac: netxml_new.mac except xcepts.LibvirtXMLNotFoundError as details: test.fail("Failed to check %s xml: %s" % (net_name, details)) logging.info("mac/bridge still exist even if removed before define") # Edit the interface xml. 
if change_iface_option: try: modify_iface_xml() except xcepts.LibvirtXMLError as details: logging.info(str(details)) if define_error: if not str(details).count("Failed to define"): test.fail("VM sync failed msg not expected") return else: test.fail("Failed to sync VM") # Attach interface if needed if attach_iface: iface_type = params.get("iface_type", "network") for i in range(int(iface_num)): logging.info("Try to attach interface loop %s" % i) options = ("%s %s --model %s --config" % (iface_type, net_name, iface_model)) ret = virsh.attach_interface(vm_name, options, ignore_status=True) if ret.exit_status: logging.error("Command output %s" % ret.stdout.strip()) test.fail("Failed to attach-interface") if multiple_guests: # Clone more vms for testing for i in range(int(multiple_guests)): guest_name = "%s_%s" % (vm_name, i) timeout = params.get("clone_timeout", 360) utils_libguestfs.virt_clone_cmd(vm_name, guest_name, True, timeout=timeout) vms_list.append(vm.clone(guest_name)) if test_bridge: bridge = ast.literal_eval(net_bridge) br_if = utils_net.Interface(bridge['name']) if not br_if.is_up(): test.fail("Bridge interface isn't up") if test_dnsmasq: # Check dnsmasq process dnsmasq_cmd = to_text(process.system_output("ps -aux|grep dnsmasq", shell=True)) logging.debug(dnsmasq_cmd) if not re.search("dnsmasq --conf-file=/var/lib/libvirt/dnsmasq/%s.conf" % net_name, dnsmasq_cmd): test.fail("Can not find dnsmasq process or the process is not correct") # Check the settings in dnsmasq config file if net_dns_forward == "no": run_dnsmasq_default_test("domain-needed") run_dnsmasq_default_test("local", "//") if net_domain: run_dnsmasq_default_test("domain", net_domain) run_dnsmasq_default_test("expand-hosts") if net_bridge: bridge = ast.literal_eval(net_bridge) run_dnsmasq_default_test("interface", bridge['name'], name=net_name) if 'stp' in bridge and bridge['stp'] == 'on': if 'delay' in bridge and bridge['delay'] != '0': # network xml forward delay value in seconds, while on # 
host, check by ip command, the value is in second*100 br_delay = int(bridge['delay'])*100 logging.debug("Expect forward_delay is %s ms" % br_delay) cmd = ("ip -d link sh %s | grep 'bridge forward_delay'" % bridge['name']) out = to_text(process.system_output( cmd, shell=True, ignore_status=False)) logging.debug("bridge statistics output: %s", out) pattern = (r"\s*bridge forward_delay\s+(\d+)") match_obj = re.search(pattern, out, re.M) if not match_obj: test.fail("Can't see forward delay messages from command") elif int(match_obj.group(1)) != br_delay: test.fail("Foward delay setting can't take effect") else: logging.debug("Foward delay set successfully!") if dhcp_start_ipv4 and dhcp_end_ipv4: run_dnsmasq_default_test("dhcp-range", "%s,%s" % (dhcp_start_ipv4, dhcp_end_ipv4), name=net_name) if dhcp_start_ipv6 and dhcp_end_ipv6: run_dnsmasq_default_test("dhcp-range", "%s,%s,64" % (dhcp_start_ipv6, dhcp_end_ipv6), name=net_name) if guest_name and guest_ipv4: run_dnsmasq_host_test(iface_mac, guest_ipv4, guest_name) # check the left part in dnsmasq conf run_dnsmasq_default_test("strict-order", name=net_name) run_dnsmasq_default_test("pid-file", "/var/run/libvirt/network/%s.pid" % net_name, name=net_name) run_dnsmasq_default_test("except-interface", "lo", name=net_name) run_dnsmasq_default_test("bind-dynamic", name=net_name) run_dnsmasq_default_test("dhcp-no-override", name=net_name) if dhcp_start_ipv6 and dhcp_start_ipv4: run_dnsmasq_default_test("dhcp-lease-max", "493", name=net_name) else: range_num = int(params.get("dhcp_range", "252")) run_dnsmasq_default_test("dhcp-lease-max", str(range_num+1), name=net_name) run_dnsmasq_default_test("dhcp-hostsfile", "/var/lib/libvirt/dnsmasq/%s.hostsfile" % net_name, name=net_name) run_dnsmasq_default_test("addn-hosts", "/var/lib/libvirt/dnsmasq/%s.addnhosts" % net_name, name=net_name) if dhcp_start_ipv6: run_dnsmasq_default_test("enable-ra", name=net_name) if test_dns_host: if net_dns_txt: dns_txt = ast.literal_eval(net_dns_txt) 
run_dnsmasq_default_test("txt-record", "%s,%s" % (dns_txt["name"], dns_txt["value"])) if net_dns_srv: dns_srv = ast.literal_eval(net_dns_srv) run_dnsmasq_default_test("srv-host", "_%s._%s.%s,%s,%s,%s,%s" % (dns_srv["service"], dns_srv["protocol"], dns_srv["domain"], dns_srv["target"], dns_srv["port"], dns_srv["priority"], dns_srv["weight"])) if net_dns_hostip and net_dns_hostnames: run_dnsmasq_addnhosts_test(net_dns_hostip, net_dns_hostnames) # Run bandwidth test for network if test_qos_bandwidth: run_bandwidth_test(check_net=True) # Check routes if needed if routes: check_host_routes() try: # Start the VM. vm.start() if start_error: test.fail("VM started unexpectedly") if pxe_boot: # Just check network boot messages here try: vm.serial_console.read_until_output_matches( ["Loading vmlinuz", "Loading initrd.img"], utils_misc.strip_console_codes) output = vm.serial_console.get_stripped_output() logging.debug("Boot messages: %s", output) except ExpectTimeoutError as details: if boot_failure: logging.info("Fail to boot from pxe as expected") else: test.fail("Fail to boot from pxe") else: if serial_login: session = vm.wait_for_serial_login(username=username, password=password) else: session = vm.wait_for_login() if test_dhcp_range: dhcp_range = int(params.get("dhcp_range", "252")) utils_net.restart_guest_network(session, iface_mac) vm_ip = utils_net.get_guest_ip_addr(session, iface_mac) logging.debug("Guest has ip: %s", vm_ip) if not vm_ip and dhcp_range: test.fail("Guest has invalid ip address") elif vm_ip and not dhcp_range: test.fail("Guest has ip address: %s" % vm_ip) dhcp_range = dhcp_range - 1 for vms in vms_list: # Start other VMs. 
                        vms.start()
                        sess = vms.wait_for_serial_login()
                        vms_mac = vms.get_virsh_mac_address()
                        # restart guest network to get ip addr
                        utils_net.restart_guest_network(sess, vms_mac)
                        vms_ip = utils_net.get_guest_ip_addr(sess, vms_mac)
                        if not vms_ip and dhcp_range:
                            test.fail("Guest has invalid ip address")
                        elif vms_ip and not dhcp_range:
                            # Get IP address on guest should return Null
                            # if it exceeds the dhcp range
                            test.fail("Guest has ip address: %s" % vms_ip)
                        dhcp_range = dhcp_range - 1
                        if vms_ip:
                            # Guests on the same network must reach each other
                            ping_s, _ = ping(dest=vm_ip, count=5,
                                             timeout=10, session=sess)
                            if ping_s:
                                test.fail("Failed to ping, src: %s, "
                                          "dst: %s" % (vms_ip, vm_ip))
                        sess.close()

                # Check dnsmasq settings if take affect in guest
                if guest_ipv4:
                    check_name_ip(session)

                # Run bandwidth test for interface
                if test_qos_bandwidth:
                    run_bandwidth_test(check_iface=True)
                # Run bandwidth test for portgroup
                if test_pg_bandwidth:
                    pg_bandwidth_inbound = params.get(
                        "portgroup_bandwidth_inbound", "").split()
                    pg_bandwidth_outbound = params.get(
                        "portgroup_bandwidth_outbound", "").split()
                    pg_name = params.get("portgroup_name", "").split()
                    pg_default = params.get("portgroup_default", "").split()
                    iface_inbound = ast.literal_eval(iface_bandwidth_inbound)
                    iface_outbound = ast.literal_eval(iface_bandwidth_outbound)
                    iface_name = libvirt.get_ifname_host(vm_name, iface_mac)
                    if_source = ast.literal_eval(iface_source)
                    if "portgroup" in if_source:
                        pg = if_source["portgroup"]
                    else:
                        pg = "default"
                    # Pick the bandwidth of the portgroup the interface
                    # uses (or the portgroup marked default).
                    for (name, df, bw_ib, bw_ob) in zip(pg_name, pg_default,
                                                        pg_bandwidth_inbound,
                                                        pg_bandwidth_outbound):
                        if pg == name:
                            inbound = ast.literal_eval(bw_ib)
                            outbound = ast.literal_eval(bw_ob)
                        elif pg == "default" and df == "yes":
                            inbound = ast.literal_eval(bw_ib)
                            outbound = ast.literal_eval(bw_ob)
                        else:
                            continue
                        # Interface bandwidth settings will
                        # overwriting portgroup settings
                        if iface_inbound:
                            inbound = iface_inbound
                        if iface_outbound:
                            outbound = iface_outbound
                        check_class_rules(iface_name, "1:1", inbound)
                        check_filter_rules(iface_name, outbound)
                if test_qos_remove:
                    # Remove the bandwidth settings in network xml
                    logging.debug("Removing network bandwidth settings...")
                    netxml_backup.sync()
                    vm.destroy(gracefully=False)
                    # Should fail to start vm (VMStartError caught below)
                    vm.start()
                    if restart_error:
                        test.fail("VM started unexpectedly")
                if test_ipv6_address:
                    ipt6_rules = check_ipt_rules(check_ipv4=False, check_ipv6=True)
                    if not ("mode" in forward and forward["mode"] == "open"):
                        run_ip_test(session, "ipv6")
                if test_ipv4_address:
                    ipt_rules = check_ipt_rules(check_ipv4=True)
                    if not ("mode" in forward and forward["mode"] == "open"):
                        run_ip_test(session, "ipv4")
                if test_guest_libvirt:
                    run_guest_libvirt(session)

                session.close()
        except virt_vm.VMStartError as details:
            logging.info(str(details))
            if not (start_error or restart_error):
                test.fail('VM failed to start:\n%s' % details)

        # Destroy created network and check iptable rules: the rules
        # recorded by check_ipt_rules() must be gone after net-destroy.
        if net_name != "default":
            virsh.net_destroy(net_name)
        if ipt_rules:
            output_des = to_text(process.system_output('iptables-save'))
            for ipt in ipt_rules:
                if re.search(r"%s" % ipt, output_des, re.M):
                    test.fail("Find iptable rule %s after net destroyed" % ipt)
        if ipt6_rules:
            output_des = to_text(process.system_output('ip6tables-save'))
            for ipt in ipt6_rules:
                if re.search(r"%s" % ipt, output_des, re.M):
                    test.fail("Find ip6table rule %s after net destroyed" % ipt)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        for vms in vms_list:
            virsh.remove_domain(vms.name, "--remove-all-storage")
        logging.info("Restoring network...")
        if net_name == "default":
            netxml_backup.sync()
        else:
            # Destroy and undefine new created network
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name)
        vmxml_backup.sync()

        # Restore the host's original accept_ra sysctl value
        if test_ipv6_address and original_accept_ra != '2':
            process.system(sysctl_cmd + "=%s" % original_accept_ra)