Example #1
def do_low_level_test(virsh_dargs, test_xml, options_ref, extra):

    # Process command-line argument/option parameters
    if options_ref == "file_arg":
        alt_file = "--file %s" % test_xml.name  # returns filename
    elif options_ref == "no_file":
        alt_file = "''"  # empty string will be passed
    elif options_ref == "extra_file":
        alt_file = "%s %s" % (test_xml.name, test_xml.name)
    elif options_ref == "no_exist_file":
        alt_file = "foobar" + test_xml.name
    else:
        alt_file = test_xml.name

    logging.debug("Calling virsh net-create with alt_file '%s' and "
                  "extra '%s'", alt_file, extra)

    try:
        # ignore_status==False
        virsh.net_create(alt_file, extra, **virsh_dargs)
        return True
    except error.CmdError as cmd_excpt:
        # CmdError catches failing virsh commands
        logging.debug("Exception thrown: %s", cmd_excpt)
        return False
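
A minimal driver for this helper might look like the sketch below; the params keys, the file object, and the pass/fail comparison are assumptions for illustration, not part of the original test.

# hypothetical caller sketch for do_low_level_test()
test_xml = open(params.get("network_xml_file"))        # any object with a .name attribute
options_ref = params.get("net_create_options_ref", "")
extra = params.get("net_create_extra", "")
virsh_dargs = {'debug': True, 'ignore_status': False}
expect_error = params.get("status_error", "no") == "yes"
passed = do_low_level_test(virsh_dargs, test_xml, options_ref, extra)
if passed == expect_error:
    raise error.TestFail("virsh net-create result did not match expectation")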
Example #2
    def restore(self, name):
        """
        Restore a network from a saved state dict.

        :param name: Target network state dict to be restored.
        :raise Exception: when restore fails.
        """
        net = name
        name = net['name']
        nets = self.current_state
        if name in nets:
            self.remove(nets[name])

        netfile = tempfile.NamedTemporaryFile(mode='w', delete=False)
        fname = netfile.name
        netfile.writelines(net['inactive xml'])
        netfile.close()

        try:
            if net['persistent'] == 'yes':
                res = virsh.net_define(fname)
                if res.exit_status:
                    raise Exception(str(res))
                if net['active'] == 'yes':
                    res = virsh.net_start(name)
                    if res.exit_status:
                        res = virsh.net_start(name)
                        if res.exit_status:
                            raise Exception(str(res))
            else:
                res = virsh.net_create(fname)
                if res.exit_status:
                    raise Exception(str(res))
        finally:
            os.remove(fname)

        if net['autostart'] == 'yes':
            res = virsh.net_autostart(name)
            if res.exit_status:
                raise Exception(str(res))
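
The restore() method above expects a network state dict shaped like the entries kept in self.current_state. A hypothetical example (field names come from the code above, values are invented):

net_state = {
    'name': 'default',
    'persistent': 'yes',
    'active': 'yes',
    'autostart': 'no',
    # output of "virsh net-dumpxml --inactive <name>"
    'inactive xml': "<network>...</network>",
}
backup.restore(net_state)   # "backup" is a hypothetical instance of the class above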
Example #3
def run_virsh_net_list(test, params, env):
    """
    Test command: virsh net-list.

    The command returns a list of networks.
    1.Get all parameters from configuration.
    2.Get the current network's status (State, Autostart).
    3.Do some preparation work for testing.
    4.Perform virsh net-list operation.
    5.Recover network status.
    6.Confirm the result.
    """
    option = params.get("net_list_option", "")
    extra = params.get("net_list_extra", "")
    status_error = params.get("status_error", "no")
    net_name = params.get("net_list_name", "default")
    persistent = params.get("net_list_persistent", "yes")
    net_status = params.get("net_list_error", "active")
    tmp_xml = os.path.join(test.tmpdir, "tmp.xml")
    net_current_status = "active"
    autostart_status = "yes"
    if not virsh.net_state_dict()[net_name]["active"]:
        net_current_status = "inactive"
    if not virsh.net_state_dict()[net_name]["autostart"]:
        autostart_status = "no"

    # Create a transient network.
    try:
        if persistent == "no":
            virsh.net_dumpxml(net_name, to_file=tmp_xml, ignore_status=False)
            if net_current_status == "inactive":
                virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_undefine(net_name, ignore_status=False)
            virsh.net_create(tmp_xml, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Transient network test failed!")

    # Prepare network's status for testing.
    if net_status == "active":
        try:
            if not virsh.net_state_dict()[net_name]["active"]:
                virsh.net_start(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Active network test failed!")
    else:
        try:
            if virsh.net_state_dict()[net_name]["active"]:
                virsh.net_destroy(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Inactive network test failed!")

    result = virsh.net_list(option, extra, ignore_status=True)
    status = result.exit_status
    output = result.stdout.strip()

    # Recover network
    try:
        if persistent == "no":
            virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_define(tmp_xml, ignore_status=False)
            if net_current_status == "active":
                virsh.net_start(net_name, ignore_status=False)
            if autostart_status == "yes":
                virsh.net_autostart(net_name, ignore_status=False)
        else:
            if net_current_status == "active" and net_status == "inactive":
                virsh.net_start(net_name, ignore_status=False)
            elif net_current_status == "inactive" and net_status == "active":
                virsh.net_destroy(net_name, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Recover network failed!")

    # check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        if option == "--inactive":
            if net_status == "active":
                if re.search(net_name, output):
                    raise error.TestFail("Found an active network with"
                                         " --inactive option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Found no inactive networks with"
                                             " --inactive option")
                else:
                    # If the network is transient, it disappears after
                    # net-destroy.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive networks"
                                             " with --inactive option")
        elif option == "":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with no"
                                         " option")
            else:
                if re.search(net_name, output):
                    raise error.TestFail("Found inactive network with"
                                         " no option")
        elif option == "--all":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with"
                                         " --all option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Can't find inactive network with"
                                             " --all option")
                else:
                    # If the network is transient, it disappears after
                    # net-destroy.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive network"
                                             " with --all option")
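
A hypothetical Cartesian-style parameter set that drives the transient/--inactive branch of this test might look like the following; the keys match the params.get() calls above, while the values are only illustrative:

params = {
    "net_list_option": "--inactive",
    "net_list_extra": "",
    "status_error": "no",
    "net_list_name": "default",
    "net_list_persistent": "no",   # force the transient-network path
    "net_list_error": "inactive",  # target network state before net-list runs
}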
Example #4
def run(test, params, env):
    """
    Sriov basic test:

    1.Create max vfs;
    2.Check the nodedev info;
    3.Start a guest with vf;
    4.Reboot a guest with vf;
    5.Suspend/resume a guest with vf.
    """
    def find_pf():
        pci_address = ""
        for pci in pci_dirs:
            temp_iface_name = os.listdir("%s/net" % pci)[0]
            operstate = utils_net.get_net_if_operstate(temp_iface_name)
            if operstate == "up":
                pf_iface_name = temp_iface_name
                pci_address = pci
                break
        if pci_address == "":
            return False
        else:
            return pci_address

    def create_address_dict(pci_id):
        """
            Convert a PCI id (xxxx:xx:xx.x) to an address attribute dict.
        """
        device_domain = pci_id.split(':')[0]
        device_domain = "0x%s" % device_domain
        device_bus = pci_id.split(':')[1]
        device_bus = "0x%s" % device_bus
        device_slot = pci_id.split(':')[-1].split('.')[0]
        device_slot = "0x%s" % device_slot
        device_function = pci_id.split('.')[-1]
        device_function = "0x%s" % device_function
        attrs = {
            'type': 'pci',
            'domain': device_domain,
            'slot': device_slot,
            'bus': device_bus,
            'function': device_function
        }
        return attrs

    def addr_to_pci(addr):
        """
            Convert an address dict to a PCI address: xxxx:xx:xx.x.
        """
        pci_domain = re.findall(r"0x(.+)", addr['domain'])[0]
        pci_bus = re.findall(r"0x(.+)", addr['bus'])[0]
        pci_slot = re.findall(r"0x(.+)", addr['slot'])[0]
        pci_function = re.findall(r"0x(.+)", addr['function'])[0]
        pci_addr = pci_domain + ":" + pci_bus + ":" + pci_slot + "." + pci_function
        return pci_addr

    def create_hostdev_interface(pci_id, managed, model):
        """
            Create hostdev type interface xml.
        """
        attrs = create_address_dict(pci_id)
        new_iface = Interface('hostdev')
        new_iface.managed = managed
        if model != "":
            new_iface.model = model
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.hostdev_address = new_iface.new_iface_address(
            **{"attrs": attrs})
        chars = string.ascii_letters + string.digits + '-_'
        alias_name = 'ua-' + ''.join(
            random.choice(chars) for _ in list(range(64)))
        new_iface.alias = {'name': alias_name}
        return new_iface

    def create_vfs(vf_num):
        """
            Create max vfs.
        """
        net_device = []
        net_name = []
        # cleanup env and create vfs
        cmd = "echo 0 > %s/sriov_numvfs" % pci_address
        if driver == "mlx4_core":
            cmd = "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core"
        process.run(cmd, shell=True)
        pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
        net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        pci_list_before = set(pci_list)
        net_list_before = set(net_list)
        cmd = "echo %d > %s/sriov_numvfs" % (vf_num, pci_address)
        if driver == "mlx4_core":
            cmd = "modprobe -v mlx4_core num_vfs=%d port_type_array=2,2 probe_vf=%d" \
                    % (vf_num, vf_num)
        test_res = process.run(cmd, shell=True)
        if test_res.exit_status != 0:
            test.fail("Fail to create vfs")

        def _vf_init_completed():
            try:
                net_list_sriov = virsh.nodedev_list(
                    cap='net').stdout.strip().splitlines()
                net_list_sriov = set(net_list_sriov)
                net_diff = list(net_list_sriov.difference(net_list_before))
                net_count = len(net_diff)
                if ((driver != "mlx4_core" and net_count != vf_num) or
                    (driver == "mlx4_core" and net_count != 2 * (vf_num + 1))):
                    net_diff = []
                    return False
                return net_diff
            except process.CmdError:
                test.fail(
                    "Get net list with 'virsh nodedev-list' failed\n")

        net_diff = utils_misc.wait_for(_vf_init_completed, timeout=300)
        pci_list_sriov = virsh.nodedev_list(
            cap='pci').stdout.strip().splitlines()
        pci_list_sriov = set(pci_list_sriov)
        pci_diff = list(pci_list_sriov.difference(pci_list_before))
        if not net_diff:
            test.fail("Get net list with 'virsh nodedev-list' failed\n")
        for net in net_diff:
            parts = net.split('_')
            length = len(parts)
            # nodedev names look like "net_<ifname>_<mac bytes joined by _>",
            # so rebuild the interface name and MAC from the split parts
            net = '_'.join(parts[1:length - 6])
            mac = ':'.join(parts[length - 6:])
            net_name.append(net)
        for pci_addr in pci_diff:
            temp_addr = pci_addr.split("_")
            pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
            vf_net_name = os.listdir("%s/%s/net" %
                                     (pci_device_dir, pci_addr))[0]
            net_device.append(vf_net_name)
        logging.debug(sorted(net_name))
        logging.debug(sorted(net_device))
        if driver != "mlx4_core" and sorted(net_name) != sorted(net_device):
            test.fail("The net name get from nodedev-list is wrong\n")

    def get_ip_by_mac(mac_addr, timeout=120):
        """
        Get interface IP address by given MAC address.
        """
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)

        def get_ip():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = ""
            iface_name = utils_net.get_linux_ifname(session, mac_addr)
            if iface_name is None:
                test.fail("no interface with MAC address %s found" % mac_addr)
            session.cmd("pkill -9 dhclient", ignore_all_errors=True)
            session.cmd("dhclient %s " % iface_name, ignore_all_errors=True)
            ip_addr = utils_misc.wait_for(get_ip, 20)
            logging.debug("The ip addr is %s", ip_addr)
        except Exception:
            logging.warning("Find %s with MAC address %s but no ip for it" %
                            (iface_name, mac_addr))
        finally:
            session.close()
        return ip_addr

    def create_nodedev_pci(pci_address):
        """
            Convert xxxx:xx:xx.x to pci_xxxx_xx_xx_x.
        """
        nodedev_addr = pci_address.split(':')[0:2]
        slot_function = pci_address.split(':')[2]
        nodedev_addr.append(slot_function.split('.')[0])
        nodedev_addr.append(slot_function.split('.')[1])
        nodedev_addr.insert(0, "pci")
        nodedev_addr = "_".join(nodedev_addr)
        return nodedev_addr

    def create_network_interface(name):
        """
            Create network type interface xml.
        """
        new_iface = Interface('network')
        new_iface.source = {'network': name}
        new_iface.model = "virtio"
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        return new_iface

    def create_hostdev_network():
        """
            Create hostdev type with vf pool network xml.
        """
        vf_addr_list = []
        netxml = network_xml.NetworkXML()
        if vf_pool_source == "vf_list":
            for vf in vf_list:
                attrs = create_address_dict(vf)
                new_vf = netxml.new_vf_address(**{'attrs': attrs})
                vf_addr_list.append(new_vf)
            netxml.driver = {'name': 'vfio'}
            netxml.forward = {"mode": "hostdev", "managed": managed}
            netxml.vf_list = vf_addr_list
        else:
            netxml.pf = {"dev": pf_name}
            netxml.forward = {"mode": "hostdev", "managed": managed}
        netxml.name = net_name
        logging.debug(netxml)
        return netxml

    def create_macvtap_network():
        """
        Create macvtap type network xml.
        """
        forward_interface_list = []
        for vf_name in vf_name_list:
            forward_interface = {'dev': vf_name}
            forward_interface_list.append(forward_interface)
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'}
        netxml.forward_interface = forward_interface_list
        logging.debug(netxml)
        return netxml

    def do_operation():
        """
            Do operation in guest os with vf and check the os behavior after operation.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name,
                                                                    detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                    vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

        if operation == "restart_libvirtd":
            detach_interface()
            utils_libvirtd.libvirtd_restart()
            interface = attach_interface()

    def check_info():
        """
            Check the pf or vf info after creating vfs.
        """
        if info_type == "pf_info" or info_type == "vf_order":
            nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1])
            xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            if info_type == "pf_info":
                product_info = xml.cap.product_info
                max_count = xml.max_count
                if pci_info.find(product_info) == -1:
                    test.fail(
                        "The product_info show in nodedev-dumpxml is wrong\n")
                if int(max_count) != max_vfs:
                    test.fail(
                        "The maxCount show in nodedev-dumpxml is wrong\n")
            if info_type == "vf_order":
                vf_addr_list = xml.cap.virt_functions
                if len(vf_addr_list) != max_vfs:
                    test.fail(
                        "The num of vf list show in nodedev-dumpxml is wrong\n"
                    )
                addr_list = []
                for vf_addr in vf_addr_list:
                    addr = vf_addr.domain + ":" + vf_addr.bus + ":" + vf_addr.slot + "." + vf_addr.function
                    addr_list.append(addr)
                logging.debug(
                    "The vf addr list show in nodedev-dumpxml is %s\n",
                    addr_list)
                if sorted(addr_list) != addr_list:
                    test.fail(
                        "The vf addr list show in nodedev-dumpxml is not sorted correctly\n"
                    )
        elif info_type == "vf_info":
            vf_addr = vf_list[0]
            nodedev_pci = create_nodedev_pci(vf_addr)
            vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            vf_bus_slot = ':'.join(vf_addr.split(':')[1:])
            res = process.run("lspci -s %s -vv" % vf_bus_slot)
            vf_pci_info = res.stdout_text
            vf_product_info = vf_xml.cap.product_info
            if vf_pci_info.find(vf_product_info) == -1:
                test.fail(
                    "The product_info show in nodedev-dumpxml is wrong\n")
            pf_addr = vf_xml.cap.virt_functions[0]
            pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0]
            pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0]
            pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0]
            pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0]
            pf_pci = pf_addr_domain + ":" + pf_addr_bus + ":" + pf_addr_slot + "." + pf_addr_function
            if pf_pci != pci_id:
                test.fail(
                    "The pf address show in vf nodedev-dumpxml is wrong\n")

    def create_interface():
        """
            Call the matching helper to create the interface xml according to vf_type.
        """
        new_iface = Interface('network')
        if vf_type == "vf":
            new_iface = create_hostdev_interface(vf_addr, managed, model)
        if vf_type == "vf_pool":
            netxml = create_hostdev_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            if not inactive_pool:
                virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        if vf_type == "macvtap":
            new_iface = Interface('direct')
            new_iface.source = {"dev": vf_name, "mode": "passthrough"}
            new_iface.mac_address = utils_net.generate_mac_address_simple()
            new_iface.model = "virtio"
        if vf_type == "macvtap_network":
            netxml = create_macvtap_network()
            result = virsh.net_define(netxml.xml, ignore_status=True)
            virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        return new_iface

    def detach_interface():
        """
            Detach interface:

            1.Detach interface from xml;
            2.Check the live xml after detach interface;
            3.Check the vf driver after detach interface.
        """
        def _detach_completed():
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        def check_addr_attrs():
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device = live_xml.devices
            hostdev_list = device.by_device_tag("hostdev")
            for hostdev in hostdev_list:
                addr = hostdev.source.untyped_address
                hostdev_addr_attrs = {
                    "domain": addr.domain,
                    "bus": addr.bus,
                    "slot": addr.slot,
                    "function": addr.function
                }
                if hostdev_addr_attrs == vf_addr_attrs:
                    return False
            return True

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if vf_type == "hostdev":
            check_ret = utils_misc.wait_for(check_addr_attrs, timeout=60)
            if not check_ret:
                test.fail("The hostdev device detach failed from xml\n")
        else:
            utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)

        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail(
                            "The hostdev interface still in the guest xml after detach\n"
                        )
                    break
            driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            logging.debug("The driver after vf detached from guest is %s\n",
                          driver)
            if managed == "no":
                if driver != "vfio-pci":
                    test.fail(
                        "The vf pci driver is not vfio-pci after detached from guest with managed as no\n"
                    )
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            elif driver != origin_driver:
                test.fail(
                    "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n"
                    % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail(
                            "The macvtap interface still exist in the guest xml after detach\n"
                        )
                    break

    def attach_interface():
        """
            Attach interface:

            1.Attach interface from xml;
            2.Check the vf driver after attach interface;
            3.Check the live xml after attach interface;
        """
        if managed == "no":
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name,
                                     new_iface.xml,
                                     flagstr=option,
                                     debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        # For option == "--persistent", after VM destroyed and then start, the device should still be there.
        if option == "--persistent":
            virsh.destroy(vm_name)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)

        if vf_type != "hostdev":
            get_ip_by_mac(mac_addr, timeout=60)

        device = live_xml.devices

        if vf_type == "hostdev":
            hostdev_list = device.by_device_tag("hostdev")
            if len(hostdev_list) == 0:
                test.fail("The hostdev device attach failed from xml\n")
            else:
                for hostdev in hostdev_list:
                    if hostdev.type == "pci":
                        break
                interface = hostdev

        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail(
                            "The driver of the hostdev interface is not vfio\n"
                        )
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            match = "no"
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
            if match != "yes":
                test.fail(
                    "The dev name or mode of macvtap interface is wrong after attach\n"
                )
        return interface

    def setup_controller(nic_num, controller_index, ctl_models):
        """
        Create controllers bound to the numa node in the guest xml

        :param nic_num: number of nic cards bound to the numa node
        :param controller_index: index num used to create controllers
        :param ctl_models: controller topology for the numa binding
        """
        index = controller_index
        if nic_num == 2:
            ctl_models.append('pcie-switch-upstream-port')
            ctl_models.append('pcie-switch-downstream-port')
            ctl_models.append('pcie-switch-downstream-port')
        for i in range(index):
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = i
            if i == 0:
                controller.model = 'pcie-root'
            else:
                controller.model = 'pcie-root-port'
            vmxml.add_device(controller)
        set_address = False
        for model in ctl_models:
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = index
            controller.model = model
            if set_address or model == "pcie-switch-upstream-port":
                attrs = {
                    'type': 'pci',
                    'domain': '0',
                    'slot': '0',
                    'bus': index - 1,
                    'function': '0'
                }
                controller.address = controller.new_controller_address(
                    **{"attrs": attrs})
                logging.debug(controller)
            if controller.model == "pcie-expander-bus":
                controller.node = "0"
                controller.target = {'busNr': '100'}
                set_address = True
            else:
                set_address = False
            logging.debug(controller)
            vmxml.add_device(controller)
            index += 1
        return index - 1

    def add_numa(vmxml):
        """
        Add numa node in the guest xml

        :param vmxml: The instance of VMXML class
        """
        vcpu = vmxml.vcpu
        max_mem = vmxml.max_mem
        max_mem_unit = vmxml.max_mem_unit
        numa_dict = {}
        numa_dict_list = []
        # Compute the memory size for each numa node
        if vcpu == 1:
            numa_dict['id'] = '0'
            numa_dict['cpus'] = '0'
            numa_dict['memory'] = str(max_mem)
            numa_dict['unit'] = str(max_mem_unit)
            numa_dict_list.append(numa_dict)
        else:
            for index in range(2):
                numa_dict['id'] = str(index)
                numa_dict['memory'] = str(max_mem // 2)
                numa_dict['unit'] = str(max_mem_unit)
                if vcpu == 2:
                    numa_dict['cpus'] = str(index)
                else:
                    if index == 0:
                        if vcpu == 3:
                            numa_dict['cpus'] = str(index)
                        if vcpu > 3:
                            numa_dict['cpus'] = "%s-%s" % (index,
                                                           vcpu // 2 - 1)
                    else:
                        numa_dict['cpus'] = "%s-%s" % (vcpu // 2,
                                                       str(vcpu - 1))
                numa_dict_list.append(numa_dict)
                numa_dict = {}
        # Add cpu device with numa node setting in domain xml
        vmxml_cpu = vm_xml.VMCPUXML()
        vmxml_cpu.xml = "<cpu><numa/></cpu>"
        vmxml_cpu.numa_cell = numa_dict_list
        vmxml.cpu = vmxml_cpu

    def create_iface_list(bus_id, nic_num, vf_list):
        """
            Create a hostdev interface list bound to the numa node

            :param bus_id: bus_id in the pci address, which decides which controller the interface is attached to
            :param nic_num: number of nic cards bound to the numa node
            :param vf_list: sriov vf list
        """
        iface_list = []
        for num in range(nic_num):
            vf_addr = vf_list[num]
            iface = create_hostdev_interface(vf_addr, managed, model)
            bus_id -= num
            attrs = {
                'type': 'pci',
                'domain': '0',
                'slot': '0',
                'bus': bus_id,
                'function': '0'
            }
            iface.address = iface.new_iface_address(**{"attrs": attrs})
            iface_list.append(iface)
        return iface_list

    def check_guestos(iface_list):
        """
            Check whether a vf bound to the numa node can get an ip successfully in the guest os

            :param iface_list: hostdev interface list
        """
        for iface in iface_list:
            mac_addr = iface.mac_address
            get_ip_by_mac(mac_addr, timeout=60)

    def check_numa(vf_driver):
        """
        Check whether the vf is bound to the correct numa node in the guest os

        :param vf_driver: vf driver
        """
        if vm.serial_console:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        vf_pci = "/sys/bus/pci/drivers/%s" % vf_driver
        vf_dir = session.cmd_output("ls -d %s/00*" %
                                    vf_pci).strip().split('\n')
        for vf in vf_dir:
            numa_node = session.cmd_output('cat %s/numa_node' %
                                           vf).strip().split('\n')[-1]
            logging.debug("The vf is attached to numa node %s\n", numa_node)
            if numa_node != "0":
                test.fail("The vf is not attached to numa node 0\n")
        session.close()

    def remove_devices(vmxml, device_type):
        """
        Remove all addresses for all devices that have one.

        :param vmxml: The VM XML to be modified
        :param device_type: The device type for removing

        :return: True if success, otherwise, False
        """
        if device_type not in ['address', 'usb']:
            return
        type_dict = {'address': '/devices/*/address', 'usb': '/devices/*'}
        try:
            for elem in vmxml.xmltreefile.findall(type_dict[device_type]):
                if device_type == 'usb':
                    if elem.get('bus') == 'usb':
                        vmxml.xmltreefile.remove(elem)
                else:
                    vmxml.xmltreefile.remove(elem)
        except (AttributeError, TypeError) as details:
            test.error("Fail to remove '%s': %s" % (device_type, details))
        vmxml.xmltreefile.write()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    machine_type = params.get("machine_type", "pc")
    operation = params.get("operation")
    driver = params.get("driver", "ixgbe")
    status_error = params.get("status_error", "no") == "yes"
    model = params.get("model", "")
    managed = params.get("managed", "yes")
    attach = params.get("attach", "")
    option = params.get("option", "")
    vf_type = params.get("vf_type", "")
    dev_type = params.get("dev_type", "")
    info_check = params.get("info_check", "no")
    info_type = params.get("info_type", "")
    vf_pool_source = params.get("vf_pool_source", "vf_list")
    loop_times = int(params.get("loop_times", "1"))
    start_vm = "yes" == params.get("start_vm", "yes")
    including_pf = "yes" == params.get("including_pf", "no")
    max_vfs_attached = "yes" == params.get("max_vfs_attached", "no")
    inactive_pool = "yes" == params.get("inactive_pool", "no")
    duplicate_vf = "yes" == params.get("duplicate_vf", "no")
    expected_error = params.get("error_msg", "")
    nic_num = int(params.get("nic_num", "1"))
    nfv = params.get("nfv", "no") == "yes"
    ctl_models = params.get("ctl_models", "").split(' ')
    controller_index = int(params.get("controller_index", "12"))

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()
    if max_vfs_attached:
        controller_devices = vmxml.get_devices("controller")
        pci_bridge_controllers = []
        for device in controller_devices:
            logging.debug(device)
            if device.type == 'pci' and device.model in ("pci-bridge",
                                                         "pcie-root-port"):
                pci_bridge_controllers.append(device)
        if not pci_bridge_controllers:
            pci_bridge_controller = Controller("controller")
            pci_bridge_controller.type = "pci"
            pci_bridge_controller.index = "1"
            pci_bridge_controller.model = "pci-bridge"
            vmxml.add_device(pci_bridge_controller)
            vmxml.sync()

    if start_vm:
        if not vm.is_dead():
            vm.destroy()
        vm.start()
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        session.close()
    else:
        if not vm.is_dead():
            vm.destroy()
    driver_dir = "/sys/bus/pci/drivers/%s" % driver
    pci_dirs = glob.glob("%s/000*" % driver_dir)
    pci_device_dir = "/sys/bus/pci/devices"
    pci_address = ""
    net_name = "test-net"

    # Prepare interface xml
    try:
        pf_iface_name = ""
        pci_address = utils_misc.wait_for(find_pf, timeout=60)
        if not pci_address:
            test.cancel("no up pf found in the test machine")
        pci_id = pci_address.split("/")[-1]
        pf_name = os.listdir('%s/net' % pci_address)[0]
        bus_slot = ':'.join(pci_address.split(':')[1:])
        pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text
        logging.debug("The pci info of the sriov card is:\n %s", pci_info)
        max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1
        if info_check == 'yes' or max_vfs < 32:
            vf_num = max_vfs
            create_vfs(vf_num)
        else:
            vf_num = int(max_vfs // 2 + 1)
            create_vfs(vf_num)

        vf_list = []
        vf_name_list = []
        vf_mac_list = []

        for i in range(vf_num):
            vf = os.readlink("%s/virtfn%s" % (pci_address, str(i)))
            vf = os.path.split(vf)[1]
            vf_list.append(vf)
            vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0]
            with open('%s/%s/net/%s/address' % (pci_device_dir, vf, vf_name),
                      'r') as f:
                vf_mac = f.readline().strip()
            vf_name_list.append(vf_name)
            vf_mac_list.append(vf_mac)

        if attach == "yes" and not nfv:
            vf_addr = vf_list[0]
            if dev_type:
                mac_addr = vf_mac_list[0]
                new_iface = utils_test.libvirt.create_hostdev_xml(
                    vf_addr, managed=managed, xmlfile=False)
            else:
                new_iface = create_interface()
                mac_addr = new_iface.mac_address
            if inactive_pool:
                result = virsh.attach_device(vm_name,
                                             new_iface.xml,
                                             flagstr=option,
                                             ignore_status=True,
                                             debug=True)
                utils_test.libvirt.check_exit_status(result, expected_error)
            else:
                nodedev_pci_addr = create_nodedev_pci(vf_addr)
                origin_driver = os.readlink(
                    os.path.join(pci_device_dir, vf_addr,
                                 "driver")).split('/')[-1]
                logging.debug(
                    "The driver of vf before attaching to guest is %s\n",
                    origin_driver)
                count = 0
                while count < loop_times:
                    interface = attach_interface()
                    if vf_type in ["vf", "vf_pool"]:
                        vf_addr_attrs = interface.hostdev_address.attrs
                    if vf_type == "hostdev":
                        addr = interface.source.untyped_address
                        vf_addr_attrs = {
                            "domain": addr.domain,
                            "bus": addr.bus,
                            "slot": addr.slot,
                            "function": addr.function
                        }
                    if operation != "":
                        do_operation()
                    detach_interface()
                    count += 1
                if max_vfs_attached:
                    interface_list = []
                    for vf_addr in vf_list:
                        new_iface = create_interface()
                        mac_addr = new_iface.mac_address
                        nodedev_pci_addr = create_nodedev_pci(vf_addr)
                        attach_interface()
                        interface_list.append(new_iface)
                    count = 0
                    for new_iface in interface_list:
                        vf_addr = vf_list[count]
                        vf_addr_attrs = new_iface.hostdev_address.attrs
                        detach_interface()
                        count += 1
        if info_check == "yes":
            check_info()
        if including_pf:
            vf_list = []
            pf_addr = pci_id
            vf_list.append(pf_addr)
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if duplicate_vf:
            vf_list.append(vf_list[0])
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
            result = virsh.net_create(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if nfv:
            vf_driver = os.readlink(
                os.path.join(pci_device_dir, vf_list[0],
                             "driver")).split('/')[-1]
            vmxml.remove_all_device_by_type('controller')
            remove_devices(vmxml, 'address')
            remove_devices(vmxml, 'usb')
            osxml = vmxml.os
            if "i440fx" in vmxml.os.machine:
                osxml.machine = "q35"
                vmxml.os = osxml
            add_numa(vmxml)
            bus_id = setup_controller(nic_num, controller_index, ctl_models)
            vmxml.sync()
            logging.debug(vmxml)
            iface_list = create_iface_list(bus_id, nic_num, vf_list)
            for iface in iface_list:
                process.run("cat %s" % iface.xml, shell=True).stdout_text
                result = virsh.attach_device(vm_name,
                                             iface.xml,
                                             flagstr=option,
                                             ignore_status=True,
                                             debug=True)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(live_xml)
            check_guestos(iface_list)
            check_numa(vf_driver)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if driver == "mlx4_core":
            # Reload mlx4 driver to default setting
            process.run(
                "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core",
                shell=True)
            process.run(
                "modprobe mlx4_core; modprobe mlx4_ib;  modprobe mlx4_en",
                shell=True)
        else:
            process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        if vf_type == "vf_pool" or vf_type == "macvtap_network":
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name, ignore_status=True)
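
For context, a hypothetical parameter set selecting the macvtap attach/detach path of this test could look like the sketch below; the keys mirror the params.get() calls in run(), while the values are assumptions for the example.

params = {
    "main_vm": "avocado-vt-vm1",
    "driver": "ixgbe",
    "attach": "yes",
    "vf_type": "macvtap",
    "managed": "yes",
    "operation": "reboot",
    "start_vm": "yes",
}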
Example #5
def run(test, params, env):
    """
    Test command: virsh net-list.

    The command returns a list of networks.
    1.Get all parameters from configuration.
    2.Get the current network's status (State, Autostart).
    3.Do some preparation work for testing.
    4.Perform virsh net-list operation.
    5.Recover network status.
    6.Confirm the result.
    """
    option = params.get("net_list_option", "")
    extra = params.get("net_list_extra", "")
    status_error = params.get("status_error", "no")
    net_name = params.get("net_list_name", "default")
    persistent = params.get("net_list_persistent", "yes")
    net_status = params.get("net_list_error", "active")
    tmp_xml = os.path.join(test.tmpdir, "tmp.xml")
    net_current_status = "active"
    autostart_status = "yes"
    if not virsh.net_state_dict()[net_name]['active']:
        net_current_status = "inactive"
    if not virsh.net_state_dict()[net_name]['autostart']:
        autostart_status = "no"

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Create a transient network.
    try:
        if persistent == "no":
            virsh.net_dumpxml(net_name, to_file=tmp_xml, ignore_status=False)
            if net_current_status == "inactive":
                virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_undefine(net_name, ignore_status=False)
            virsh.net_create(tmp_xml, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Transient network test failed!")

    # Prepare network's status for testing.
    if net_status == "active":
        try:
            if not virsh.net_state_dict()[net_name]['active']:
                virsh.net_start(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Active network test failed!")
    else:
        try:
            if virsh.net_state_dict()[net_name]['active']:
                virsh.net_destroy(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Inactive network test failed!")

    virsh_dargs = {'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri
    result = virsh.net_list(option, extra, **virsh_dargs)
    status = result.exit_status
    output = result.stdout.strip()

    # Recover network
    try:
        if persistent == "no":
            virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_define(tmp_xml, ignore_status=False)
            if net_current_status == "active":
                virsh.net_start(net_name, ignore_status=False)
            if autostart_status == "yes":
                virsh.net_autostart(net_name, ignore_status=False)
        else:
            if net_current_status == "active" and net_status == "inactive":
                virsh.net_start(net_name, ignore_status=False)
            elif net_current_status == "inactive" and net_status == "active":
                virsh.net_destroy(net_name, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Recover network failed!")

    # check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        if option == "--inactive":
            if net_status == "active":
                if re.search(net_name, output):
                    raise error.TestFail("Found an active network with"
                                         " --inactive option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Found no inactive networks with"
                                             " --inactive option")
                else:
                    # If the network is transient, it disappears after
                    # net-destroy.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive networks"
                                             " with --inactive option")
        elif option == "":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with no"
                                         " option")
            else:
                if re.search(net_name, output):
                    raise error.TestFail("Found inactive network with"
                                         " no option")
        elif option == "--all":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with"
                                         " --all option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Can't find inactive network with"
                                             " --all option")
                else:
                    # If the network is transient, it disappears after
                    # net-destroy.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive network"
                                             " with --all option")
Example #6
def run(test, params, env):
    """
    Sriov basic test:

    1.Create max vfs;
    2.Check the nodedev info;
    3.Start a guest with vf;
    4.Reboot a guest with vf;
    5.Suspend/resume a guest with vf.
    """
    def find_pf():
        pci_address = ""
        for pci in pci_dirs:
            temp_iface_name = os.listdir("%s/net" % pci)[0]
            operstate = utils_net.get_net_if_operstate(temp_iface_name)
            if operstate == "up":
                pf_iface_name = temp_iface_name
                pci_address = pci
                break
        if pci_address == "":
            return False
        else:
            return pci_address

    def create_address_dict(pci_id):
        """
            Convert a PCI id (xxxx:xx:xx.x) to an address attribute dict.
        """
        device_domain = pci_id.split(':')[0]
        device_domain = "0x%s" % device_domain
        device_bus = pci_id.split(':')[1]
        device_bus = "0x%s" % device_bus
        device_slot = pci_id.split(':')[-1].split('.')[0]
        device_slot = "0x%s" % device_slot
        device_function = pci_id.split('.')[-1]
        device_function = "0x%s" % device_function
        attrs = {'type': 'pci', 'domain': device_domain, 'slot': device_slot,
                 'bus': device_bus, 'function': device_function}
        return attrs

    def addr_to_pci(addr):
        """
            Convert an address dict to a PCI address: xxxx:xx:xx.x.
        """
        pci_domain = re.findall(r"0x(.+)", addr['domain'])[0]
        pci_bus = re.findall(r"0x(.+)", addr['bus'])[0]
        pci_slot = re.findall(r"0x(.+)", addr['slot'])[0]
        pci_function = re.findall(r"0x(.+)", addr['function'])[0]
        pci_addr = pci_domain + ":" + pci_bus + ":" + pci_slot + "." + pci_function
        return pci_addr

    def create_hostdev_interface(pci_id, managed, model):
        """
            Create hostdev type interface xml.
        """
        attrs = create_address_dict(pci_id)
        new_iface = Interface('hostdev')
        new_iface.managed = managed
        if model != "":
            new_iface.model = model
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.hostdev_address = new_iface.new_iface_address(**{"attrs": attrs})
        chars = string.ascii_letters + string.digits + '-_'
        alias_name = 'ua-' + ''.join(random.choice(chars) for _ in list(range(64)))
        new_iface.alias = {'name': alias_name}
        return new_iface

    def create_vfs(vf_num):
        """
            Create max vfs.
        """
        net_device = []
        net_name = []
        test_res = process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
        net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        pci_list_before = set(pci_list)
        net_list_before = set(net_list)
        test_res = process.run("echo %d > %s/sriov_numvfs" % (vf_num, pci_address), shell=True)
        if test_res.exit_status != 0:
            test.fail("Fail to create vfs")
        pci_list_sriov = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()

        def _vf_init_completed():
            try:
                net_list_sriov = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
                net_list_sriov = set(net_list_sriov)
                net_diff = list(net_list_sriov.difference(net_list_before))
                if len(net_diff) != int(vf_num):
                    net_diff = []
                    return False
                return net_diff
            except process.CmdError:
                test.fail("Get net list with 'virsh nodedev-list' failed\n")

        pci_list_sriov = set(pci_list_sriov)
        pci_diff = list(pci_list_sriov.difference(pci_list_before))
        net_diff = utils_misc.wait_for(_vf_init_completed, timeout=60)
        if not net_diff:
            test.fail("Get net list with 'virsh nodedev-list' failed\n")
        for net in net_diff:
            net = net.split('_')
            length = len(net)
            net = '_'.join(net[1:length-6])
            net_name.append(net)
        for pci_addr in pci_diff:
            temp_addr = pci_addr.split("_")
            pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
            vf_net_name = os.listdir("%s/%s/net" % (pci_device_dir, pci_addr))[0]
            net_device.append(vf_net_name)
        logging.debug(sorted(net_name))
        logging.debug(sorted(net_device))
        if sorted(net_name) != sorted(net_device):
            test.fail("The net name get from nodedev-list is wrong\n")

    def get_ip_by_mac(mac_addr, timeout=120):
        """
        Get interface IP address by given MAC address.
        """
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)

        def get_ip():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = ""
            iface_name = utils_net.get_linux_ifname(session, mac_addr)
            if iface_name is None:
                test.fail("no interface with MAC address %s found" % mac_addr)
            session.cmd("pkill -9 dhclient", ignore_all_errors=True)
            session.cmd("dhclient %s " % iface_name, ignore_all_errors=True)
            ip_addr = utils_misc.wait_for(get_ip, 20)
            logging.debug("The ip addr is %s", ip_addr)
        except Exception:
            logging.warning("Find %s with MAC address %s but no ip for it" % (iface_name, mac_addr))
        finally:
            session.close()
        return ip_addr

    def create_nodedev_pci(pci_address):
        """
            Convert xxxx:xx:xx.x to pci_xxxx_xx_xx_x.
        """
        nodedev_addr = pci_address.split(':')[0:2]
        slot_function = pci_address.split(':')[2]
        nodedev_addr.append(slot_function.split('.')[0])
        nodedev_addr.append(slot_function.split('.')[1])
        nodedev_addr.insert(0, "pci")
        nodedev_addr = "_".join(nodedev_addr)
        return nodedev_addr

    def create_network_interface(name):
        """
            Create network type interface xml.
        """
        new_iface = Interface('network')
        new_iface.source = {'network': name}
        new_iface.model = "virtio"
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        return new_iface

    def create_hostdev_network():
        """
            Create hostdev type with vf pool network xml.
        """
        vf_addr_list = []
        netxml = network_xml.NetworkXML()
        if vf_pool_source == "vf_list":
            for vf in vf_list:
                attrs = create_address_dict(vf)
                new_vf = netxml.new_vf_address(**{'attrs': attrs})
                vf_addr_list.append(new_vf)
            netxml.driver = {'name': 'vfio'}
            netxml.forward = {"mode": "hostdev", "managed": managed}
            netxml.vf_list = vf_addr_list
        else:
            netxml.pf = {"dev": pf_name}
            netxml.forward = {"mode": "hostdev", "managed": managed}
        netxml.name = net_name
        logging.debug(netxml)
        return netxml

    def create_macvtap_network():
        """
            Create macvtap type network xml.
        """
        forward_interface_list = []
        for vf_name in vf_name_list:
            forward_interface = {'dev': vf_name}
            forward_interface_list.append(forward_interface)
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'}
        netxml.forward_interface = forward_interface_list
        logging.debug(netxml)
        return netxml

    def do_operation():
        """
            Perform the operation on the guest with a vf attached and check the guest behavior afterwards.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                    vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

    def check_info():
        """
            Check the pf or vf info after create vfs.
        """
        if info_type == "pf_info" or info_type == "vf_order":
            nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1])
            xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            if info_type == "pf_info":
                product_info = xml.cap.product_info
                max_count = xml.max_count
                if pci_info.find(product_info) == -1:
                    test.fail("The product_info show in nodedev-dumpxml is wrong\n")
                if int(max_count) != max_vfs:
                    test.fail("The maxCount show in nodedev-dumpxml is wrong\n")
            if info_type == "vf_order":
                vf_addr_list = xml.cap.virt_functions
                if len(vf_addr_list) != max_vfs:
                    test.fail("The num of vf list show in nodedev-dumpxml is wrong\n")
                addr_list = []
                for vf_addr in vf_addr_list:
                    addr = vf_addr.domain+":"+vf_addr.bus+":"+vf_addr.slot+"."+vf_addr.function
                    addr_list.append(addr)
                logging.debug("The vf addr list show in nodedev-dumpxml is %s\n", addr_list)
                if sorted(addr_list) != addr_list:
                    test.fail("The vf addr list show in nodedev-dumpxml is not sorted correctly\n")
        elif info_type == "vf_info":
            vf_addr = vf_list[0]
            nodedev_pci = create_nodedev_pci(vf_addr)
            vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            vf_bus_slot = ':'.join(vf_addr.split(':')[1:])
            res = process.run("lspci -s %s -vv" % vf_bus_slot)
            vf_pci_info = res.stdout_text
            vf_product_info = vf_xml.cap.product_info
            if vf_pci_info.find(vf_product_info) == -1:
                test.fail("The product_info show in nodedev-dumpxml is wrong\n")
            pf_addr = vf_xml.cap.virt_functions[0]
            pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0]
            pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0]
            pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0]
            pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0]
            pf_pci = pf_addr_domain+":"+pf_addr_bus+":"+pf_addr_slot+"."+pf_addr_function
            if pf_pci != pci_id:
                test.fail("The pf address show in vf nodedev-dumpxml is wrong\n")

    def create_interface():
        """
            Call the appropriate function to create the interface according to vf_type
        """
        new_iface = Interface('network')
        if vf_type == "vf":
            new_iface = create_hostdev_interface(vf_addr, managed, model)
        if vf_type == "vf_pool":
            netxml = create_hostdev_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            if not inactive_pool:
                virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        if vf_type == "macvtap":
            new_iface = Interface('direct')
            new_iface.source = {"dev": vf_name, "mode": "passthrough"}
            new_iface.mac_address = utils_net.generate_mac_address_simple()
        if vf_type == "macvtap_network":
            netxml = create_macvtap_network()
            result = virsh.net_define(netxml.xml, ignore_status=True)
            virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        return new_iface

    def detach_interface():
        """
            Detach interface:

            1.Detach interface from xml;
            2.Check the live xml after detach interface;
            3.Check the vf driver after detach interface.
        """
        def _detach_completed():
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail("The hostdev interface still in the guest xml after detach\n")
                    break
            driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            logging.debug("The driver after vf detached from guest is %s\n", driver)
            if managed == "no":
                if driver != "vfio-pci":
                    test.fail("The vf pci driver is not vfio-pci after detached from guest with managed as no\n")
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result, expect_error=False)
            elif driver != origin_driver:
                test.fail("The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n" % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail("The macvtap interface still exist in the guest xml after detach\n")
                    break

    def attach_interface():
        """
            Attach interface:

            1.Attach interface from xml;
            2.Check the vf driver after attach interface;
            3.Check the live xml after attach interface;
        """
        if managed == "no":
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name, file_opt=new_iface.xml, flagstr=option, debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        # For option == "--persistent", after VM destroyed and then start, the device should still be there.
        if option == "--persistent":
            virsh.destroy(vm_name)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)
        get_ip_by_mac(mac_addr, timeout=60)
        device = live_xml.devices
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail("The driver of the hostdev interface is not vfio\n")
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
                if match != "yes":
                    test.fail("The dev name or mode of macvtap interface is wrong after attach\n")
        return interface

    def setup_controller(nic_num, controller_index, ctl_models):
        """
        Create controllers bound to a numa node in the guest xml

        :param nic_num: number of nic cards bound to the numa node
        :param controller_index: index number used to create controllers
        :param ctl_models: controller topology for the numa binding
        """
        index = controller_index
        if nic_num == 2:
            ctl_models.append('pcie-switch-upstream-port')
            ctl_models.append('pcie-switch-downstream-port')
            ctl_models.append('pcie-switch-downstream-port')
        for i in range(index):
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = i
            if i == 0:
                controller.model = 'pcie-root'
            else:
                controller.model = 'pcie-root-port'
            vmxml.add_device(controller)
        set_address = False
        for model in ctl_models:
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = index
            controller.model = model
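            # Assumption based on the flag logic below: an explicit address is
            # set for a pcie-switch-upstream-port and for the controller that
            # immediately follows a pcie-expander-bus, attaching it to the bus
            # created by the previous controller (bus = index - 1).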
            if set_address or model == "pcie-switch-upstream-port":
                attrs = {'type': 'pci', 'domain': '0', 'slot': '0',
                         'bus': index - 1, 'function': '0'}
                controller.address = controller.new_controller_address(**{"attrs": attrs})
                logging.debug(controller)
            if controller.model == "pcie-expander-bus":
                controller.node = "0"
                controller.target = {'busNr': '100'}
                set_address = True
            else:
                set_address = False
            logging.debug(controller)
            vmxml.add_device(controller)
            index += 1
        return index - 1

    def add_numa(vmxml):
        """
        Add numa node in the guest xml

        :param vmxml: The instance of the VMXML class
        """
        vcpu = vmxml.vcpu
        max_mem = vmxml.max_mem
        max_mem_unit = vmxml.max_mem_unit
        numa_dict = {}
        numa_dict_list = []
        # Compute the memory size for each numa node
        if vcpu == 1:
            numa_dict['id'] = '0'
            numa_dict['cpus'] = '0'
            numa_dict['memory'] = str(max_mem)
            numa_dict['unit'] = str(max_mem_unit)
            numa_dict_list.append(numa_dict)
        else:
            for index in range(2):
                numa_dict['id'] = str(index)
                numa_dict['memory'] = str(max_mem // 2)
                numa_dict['unit'] = str(max_mem_unit)
                if vcpu == 2:
                    numa_dict['cpus'] = str(index)
                else:
                    if index == 0:
                        if vcpu == 3:
                            numa_dict['cpus'] = str(index)
                        if vcpu > 3:
                            numa_dict['cpus'] = "%s-%s" % (index,
                                                           vcpu // 2 - 1)
                    else:
                        numa_dict['cpus'] = "%s-%s" % (vcpu // 2,
                                                       str(vcpu - 1))
                numa_dict_list.append(numa_dict)
                numa_dict = {}
        # Add cpu device with numa node setting in domain xml
        vmxml_cpu = vm_xml.VMCPUXML()
        vmxml_cpu.xml = "<cpu><numa/></cpu>"
        vmxml_cpu.numa_cell = numa_dict_list
        vmxml.cpu = vmxml_cpu

    def create_iface_list(bus_id, nic_num, vf_list):
        """
            Create a list of hostdev interfaces bound to a numa node

            :param bus_id: bus id of the pci address, deciding which controller the interface attaches to
            :param nic_num: number of nic cards bound to the numa node
            :param vf_list: sriov vf list
        """
        iface_list = []
        for num in range(nic_num):
            vf_addr = vf_list[num]
            iface = create_hostdev_interface(vf_addr, managed, model)
            bus_id -= num
            attrs = {'type': 'pci', 'domain': '0', 'slot': '0',
                     'bus': bus_id, 'function': '0'}
            iface.address = iface.new_iface_address(**{"attrs": attrs})
            iface_list.append(iface)
        return iface_list

    def check_guestos(iface_list):
        """
            Check whether the vf bound to the numa node can get an ip successfully in the guest os

            :param iface_list: hostdev interface list
        """
        for iface in iface_list:
            mac_addr = iface.mac_address
            get_ip_by_mac(mac_addr, timeout=60)

    def check_numa(vf_driver):
        """
        Check whether the vf is bound to the correct numa node in the guest os

        :param vf_driver: vf driver
        """
        if vm.serial_console:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        vf_pci = "/sys/bus/pci/drivers/%s" % vf_driver
        vf_dir = session.cmd_output("ls -d %s/00*" % vf_pci).strip().split('\n')
        for vf in vf_dir:
            numa_node = session.cmd_output('cat %s/numa_node' % vf).strip().split('\n')[-1]
            logging.debug("The vf is attached to numa node %s\n", numa_node)
            if numa_node != "0":
                test.fail("The vf is not attached to numa node 0\n")
        session.close()

    def remove_devices(vmxml, device_type):
        """
        Remove address elements or usb devices from the VM XML, depending on
        the given device type.

        :param vmxml: The VM XML to be modified
        :param device_type: The device type to remove ('address' or 'usb')
        """
        if device_type not in ['address', 'usb']:
            return
        type_dict = {'address': '/devices/*/address',
                     'usb': '/devices/*'}
        try:
            for elem in vmxml.xmltreefile.findall(type_dict[device_type]):
                if device_type == 'usb':
                    if elem.get('bus') == 'usb':
                        vmxml.xmltreefile.remove(elem)
                else:
                    vmxml.xmltreefile.remove(elem)
        except (AttributeError, TypeError) as details:
            test.error("Fail to remove '%s': %s" % (device_type, details))
        vmxml.xmltreefile.write()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    machine_type = params.get("machine_type", "pc")
    operation = params.get("operation")
    driver = params.get("driver", "ixgbe")
    status_error = params.get("status_error", "no") == "yes"
    model = params.get("model", "")
    managed = params.get("managed", "yes")
    attach = params.get("attach", "")
    option = params.get("option", "")
    vf_type = params.get("vf_type", "")
    info_check = params.get("info_check", "no")
    info_type = params.get("info_type", "")
    vf_pool_source = params.get("vf_pool_source", "vf_list")
    loop_times = int(params.get("loop_times", "1"))
    start_vm = "yes" == params.get("start_vm", "yes")
    including_pf = "yes" == params.get("including_pf", "no")
    max_vfs_attached = "yes" == params.get("max_vfs_attached", "no")
    inactive_pool = "yes" == params.get("inactive_pool", "no")
    duplicate_vf = "yes" == params.get("duplicate_vf", "no")
    expected_error = params.get("error_msg", "")
    nic_num = int(params.get("nic_num", "1"))
    nfv = params.get("nfv", "no") == "yes"
    ctl_models = params.get("ctl_models", "").split(' ')
    controller_index = int(params.get("controller_index", "12"))

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()
    if max_vfs_attached:
        controller_devices = vmxml.get_devices("controller")
        pci_bridge_controllers = []
        for device in controller_devices:
            logging.debug(device)
            if device.type == 'pci' and device.model == "pci-bridge":
                pci_bridge_controllers.append(device)
        if not pci_bridge_controllers:
            pci_bridge_controller = Controller("controller")
            pci_bridge_controller.type = "pci"
            pci_bridge_controller.index = "1"
            pci_bridge_controller.model = "pci-bridge"
            vmxml.add_device(pci_bridge_controller)
            vmxml.sync()

    if start_vm:
        if not vm.is_dead():
            vm.destroy()
        vm.start()
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        session.close()
    else:
        if not vm.is_dead():
            vm.destroy()

    driver_dir = "/sys/bus/pci/drivers/%s" % driver
    pci_dirs = glob.glob("%s/0000*" % driver_dir)
    pci_device_dir = "/sys/bus/pci/devices"
    pci_address = ""
    net_name = "test-net"

    # Prepare interface xml
    try:
        pf_iface_name = ""
        pci_address = utils_misc.wait_for(find_pf, timeout=60)
        if not pci_address:
            test.cancel("no up pf found in the test machine")
        pci_id = pci_address.split("/")[-1]
        pf_name = os.listdir('%s/net' % pci_address)[0]
        bus_slot = ':'.join(pci_address.split(':')[1:])
        pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text
        logging.debug("The pci info of the sriov card is:\n %s", pci_info)
        max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1
        if info_check == 'yes' or max_vfs < 32:
            vf_num = max_vfs
            create_vfs(vf_num)
        else:
            vf_num = max_vfs // 2 + 1
            create_vfs(vf_num)

        vf_list = []
        vf_name_list = []
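        # Each VF shows up as a virtfn<N> symlink under the PF's sysfs
        # directory; resolve it to get the VF's PCI address, then read the
        # VF's net directory to get its interface name.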

        for i in range(vf_num):
            vf = os.readlink("%s/virtfn%s" % (pci_address, str(i)))
            vf = os.path.split(vf)[1]
            vf_list.append(vf)
            vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0]
            vf_name_list.append(vf_name)

        if attach == "yes" and not nfv:
            vf_addr = vf_list[0]
            new_iface = create_interface()
            if inactive_pool:
                result = virsh.attach_device(vm_name, file_opt=new_iface.xml, flagstr=option,
                                             ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result, expected_error)
            else:
                mac_addr = new_iface.mac_address
                nodedev_pci_addr = create_nodedev_pci(vf_addr)
                origin_driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
                logging.debug("The driver of vf before attaching to guest is %s\n", origin_driver)
                count = 0
                while count < loop_times:
                    interface = attach_interface()
                    if vf_type in ["vf", "vf_pool"]:
                        vf_addr_attrs = interface.hostdev_address.attrs
                    if operation != "":
                        do_operation()
                    detach_interface()
                    count += 1
                if max_vfs_attached:
                    interface_list = []
                    for vf_addr in vf_list:
                        new_iface = create_interface()
                        mac_addr = new_iface.mac_address
                        nodedev_pci_addr = create_nodedev_pci(vf_addr)
                        attach_interface()
                        interface_list.append(new_iface)
                    count = 0
                    for new_iface in interface_list:
                        vf_addr = vf_list[count]
                        vf_addr_attrs = new_iface.hostdev_address.attrs
                        detach_interface()
                        count += 1
        if info_check == "yes":
            check_info()
        if including_pf:
            vf_list = []
            pf_addr = pci_id
            vf_list.append(pf_addr)
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if duplicate_vf:
            vf_list.append(vf_list[0])
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
            result = virsh.net_create(netxml.xml, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if nfv:
            for os_machine_type in (machine_type, vmxml.os.machine):
                if 'q35' not in os_machine_type:
                    test.cancel("nfv only runs with the q35 machine type")
            vf_driver = os.readlink(os.path.join(pci_device_dir, vf_list[0], "driver")).split('/')[-1]
            vmxml.remove_all_device_by_type('controller')
            remove_devices(vmxml, 'address')
            remove_devices(vmxml, 'usb')
            add_numa(vmxml)
            bus_id = setup_controller(nic_num, controller_index, ctl_models)
            vmxml.sync()
            logging.debug(vmxml)
            iface_list = create_iface_list(bus_id, nic_num, vf_list)
            for iface in iface_list:
                process.run("cat %s" % iface.xml, shell=True).stdout_text
                result = virsh.attach_device(vm_name, file_opt=iface.xml, flagstr=option,
                                             ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result, expect_error=False)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(live_xml)
            check_guestos(iface_list)
            check_numa(vf_driver)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        if vf_type == "vf_pool" or vf_type == "macvtap_network":
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name, ignore_status=True)
        backup_xml.sync()
Exemplo n.º 7
0
def run(test, params, env):
    """
    Test mtu feature from virtual network
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')

    def set_network(size, net='default'):
        """
        Set mtu size to a certain network
        """
        logging.info('Set mtu size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))

    def set_interface(mtu_size='',
                      source_network='default',
                      iface_type='network',
                      iface_model='virtio'):
        """
        Set mtu size to a certain interface
        """
        interface_type = 'bridge' if iface_type in (
            'bridge', 'openvswitch') else iface_type
        iface_dict = {
            'type': interface_type,
            'source': "{'%s': '%s'}" % (interface_type, source_network),
            'model': iface_model
        }

        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})

        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})

        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)

    def get_default_if():
        """
        Get the default host interface used by the vm (the first one with link detected)
        """
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run('ethtool %s' %
                                                   iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on host for test
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        return m_net.xml

    def create_iface(iface_type, **kwargs):
        """
        Create an interface to be attached to the vm
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'], 'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface

    def check_mtu(mtu_size, qemu=False):
        """
        Check if mtu meets expectation on host
        """
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        ifconfig_info = process.run('ifconfig|grep mtu|grep %s' % dev,
                                    shell=True,
                                    verbose=True).stdout_text
        if 'mtu %s' % mtu_size in ifconfig_info:
            logging.info('PASS on ifconfig check for vnet.')
        else:
            error += 'Fail on ifconfig check for vnet.'
        if qemu:
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True,
                                        verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size):
        """
        Check if mtu meets expectations in vm
        """
        session = fn_login()
        check_cmd = 'ifconfig'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
        if add_pkg:
            add_pkg = add_pkg.split()
            new_pkg = add_pkg.copy()
            # Skip installing openvswitch if ovs-vsctl is already available
            if 'openvswitch' in add_pkg and shutil.which('ovs-vsctl'):
                new_pkg.remove('openvswitch')
            utils_package.package_install(new_pkg)
        if 'openvswitch' in add_pkg:
            br = 'ovsbr0' + utils_misc.generate_random_string(3)
            process.run('systemctl start openvswitch.service',
                        shell=True,
                        verbose=True)
            process.run('ovs-vsctl add-br %s' % br, shell=True, verbose=True)
            process.run('ovs-vsctl show', shell=True, verbose=True)

        if not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(bridge_name,
                                                  net_type,
                                                  bridge_name=br)
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)

            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
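            # Assumption: when both the network and the interface carry an MTU,
            # halve the size for the interface so the two values differ and the
            # interface setting can be checked independently.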
            if mtu_type == 'network' and with_iface:
                mtu_size = str(int(mtu_size) // 2)
                iface_mtu = mtu_size

            source_net = bridge_name if net_type in (
                'bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu,
                          source_network=source_net,
                          iface_type=iface_type,
                          iface_model=model)
            vm.start()
            vm_login = vm.wait_for_serial_login if net_type in (
                'bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = mtu_type == 'interface'

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network',
                                         source_net='default',
                                         mtu=mtu_size,
                                         model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(),
                                         vm_name + '.save')
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            if mtu_type == 'interface' or with_iface:
                check_mtu_in_vm(vm_login, mtu_size)
                vm_login(timeout=60).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name,
                                       'network %s' % params['mac'],
                                       debug=True)
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail(
                        'Failed to detach interface with mtu after save-restore'
                    )

        else:
            hotplug = 'yes' == params.get('hotplug', 'False')
            if check == 'net_update':
                result = virsh.net_update(DEFAULT_NET,
                                          'modify',
                                          'mtu',
                                          '''"<mtu size='%s'/>"''' % mtu_size,
                                          debug=True)
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = 'direct-macvtap' + utils_misc.generate_random_string(
                    3)

                # Test mtu in different type of network
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name,
                                                      'macvtap',
                                                      base_if,
                                                      mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(bridge_name,
                                                      'bridge',
                                                      mtu=mtu_size,
                                                      bridge_name=br)
                    if check == 'ovswitch_net':
                        test_net = create_network_xml(bridge_name,
                                                      'openvswitch',
                                                      mtu=mtu_size,
                                                      bridge_name=br)
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network',
                                             source_net=macv_name,
                                             mtu=mtu_size,
                                             model_net=model)
                        if hotplug:
                            result = virsh.attach_device(vm_name,
                                                         iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct',
                                             base_if=base_if,
                                             mtu=mtu_size,
                                             model_net=model)
                        if hotplug:
                            result = virsh.attach_device(vm_name,
                                                         iface.xml,
                                                         debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network',
                                     source_net='default',
                                     mtu=mtu_size,
                                     model_net=model)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])

    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)
Exemplo n.º 8
0
def run(test, params, env):
    """
    Sriov basic test:

    1.create max vfs;
    2.Check the nodedev info;
    3.Start a guest with vf;
    4.Reboot a guest with vf;
    5.suspend/resume a guest with vf
    """
    def find_pf():
        pci_address = ""
        for pci in pci_dirs:
            temp_iface_name = os.listdir("%s/net" % pci)[0]
            operstate = utils_net.get_net_if_operstate(temp_iface_name)
            if operstate == "up":
                pf_iface_name = temp_iface_name
                pci_address = pci
                break
        if pci_address == "":
            return False
        else:
            return pci_address

    def create_address_dict(pci_id):
        """
            Create an address attrs dict from a PCI address like xxxx:xx:xx.x.
        """
        device_domain = pci_id.split(':')[0]
        device_domain = "0x%s" % device_domain
        device_bus = pci_id.split(':')[1]
        device_bus = "0x%s" % device_bus
        device_slot = pci_id.split(':')[-1].split('.')[0]
        device_slot = "0x%s" % device_slot
        device_function = pci_id.split('.')[-1]
        device_function = "0x%s" % device_function
        attrs = {
            'type': 'pci',
            'domain': device_domain,
            'slot': device_slot,
            'bus': device_bus,
            'function': device_function
        }
        return attrs

    def addr_to_pci(addr):
        """
            Convert an address dict to a pci address: xxxx:xx:xx.x.
        """
        pci_domain = re.findall(r"0x(.+)", addr['domain'])[0]
        pci_bus = re.findall(r"0x(.+)", addr['bus'])[0]
        pci_slot = re.findall(r"0x(.+)", addr['slot'])[0]
        pci_function = re.findall(r"0x(.+)", addr['function'])[0]
        pci_addr = pci_domain + ":" + pci_bus + ":" + pci_slot + "." + pci_function
        return pci_addr

    def create_hostdev_interface(pci_id, managed, model):
        """
            Create hostdev type interface xml.
        """
        attrs = create_address_dict(pci_id)
        new_iface = Interface('hostdev')
        new_iface.managed = managed
        if model != "":
            new_iface.model = model
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.hostdev_address = new_iface.new_iface_address(
            **{"attrs": attrs})
        chars = string.ascii_letters + string.digits + '-_'
        alias_name = 'ua-' + ''.join(
            random.choice(chars) for _ in list(range(64)))
        new_iface.alias = {'name': alias_name}
        return new_iface

    def create_vfs(vf_num):
        """
            Create max vfs.
        """
        net_device = []
        net_name = []
        test_res = process.run("echo 0 > %s/sriov_numvfs" % pci_address,
                               shell=True)
        pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
        net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        pci_list_before = set(pci_list)
        net_list_before = set(net_list)
        test_res = process.run("echo %d > %s/sriov_numvfs" %
                               (vf_num, pci_address),
                               shell=True)
        if test_res.exit_status != 0:
            test.fail("Fail to create vfs")
        pci_list_sriov = virsh.nodedev_list(
            cap='pci').stdout.strip().splitlines()

        def _vf_init_completed():
            try:
                net_list_sriov = virsh.nodedev_list(
                    cap='net').stdout.strip().splitlines()
                net_list_sriov = set(net_list_sriov)
                net_diff = list(net_list_sriov.difference(net_list_before))
                if len(net_diff) != int(vf_num):
                    net_diff = []
                    return False
                return net_diff
            except process.CmdError:
                test.fail("Getting the net device list with 'virsh nodedev-list' failed\n")

        pci_list_sriov = set(pci_list_sriov)
        pci_diff = list(pci_list_sriov.difference(pci_list_before))
        net_diff = utils_misc.wait_for(_vf_init_completed, timeout=60)
        if not net_diff:
            test.fail("Get net list with 'virsh list' failed\n")
        for net in net_diff:
            net = net.split('_')
            length = len(net)
            net = '_'.join(net[1:length - 6])
            net_name.append(net)
        for pci_addr in pci_diff:
            temp_addr = pci_addr.split("_")
            pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
            vf_net_name = os.listdir("%s/%s/net" %
                                     (pci_device_dir, pci_addr))[0]
            net_device.append(vf_net_name)
        logging.debug(sorted(net_name))
        logging.debug(sorted(net_device))
        if sorted(net_name) != sorted(net_device):
            test.fail("The net name get from nodedev-list is wrong\n")

    def get_ip_by_mac(mac_addr, timeout=120):
        """
        Get interface IP address by given MAC address.
        """
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)

        def get_ip():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = ""
            iface_name = utils_net.get_linux_ifname(session, mac_addr)
            if iface_name is None:
                test.fail("no interface with MAC address %s found" % mac_addr)
            session.cmd("pkill -9 dhclient", ignore_all_errors=True)
            session.cmd("dhclient %s " % iface_name, ignore_all_errors=True)
            ip_addr = utils_misc.wait_for(get_ip, 20)
            logging.debug("The ip addr is %s", ip_addr)
        except Exception:
            logging.warning("Find %s with MAC address %s but no ip for it" %
                            (iface_name, mac_addr))
        finally:
            session.close()
        return ip_addr

    def create_nodedev_pci(pci_address):
        """
            Convert a PCI address like xxxx:xx:xx.x to the nodedev name pci_xxxx_xx_xx_x.
        """
        nodedev_addr = pci_address.split(':')[0:2]
        slot_function = pci_address.split(':')[2]
        nodedev_addr.append(slot_function.split('.')[0])
        nodedev_addr.append(slot_function.split('.')[1])
        nodedev_addr.insert(0, "pci")
        nodedev_addr = "_".join(nodedev_addr)
        return nodedev_addr

    def create_network_interface(name):
        """
            Create network type interface xml.
        """
        new_iface = Interface('network')
        new_iface.source = {'network': name}
        new_iface.model = "virtio"
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        return new_iface

    def create_hostdev_network():
        """
            Create a hostdev type network xml with a vf pool.
        """
        vf_addr_list = []
        netxml = network_xml.NetworkXML()
        if vf_pool_source == "vf_list":
            for vf in vf_list:
                attrs = create_address_dict(vf)
                new_vf = netxml.new_vf_address(**{'attrs': attrs})
                vf_addr_list.append(new_vf)
            netxml.driver = {'name': 'vfio'}
            netxml.forward = {"mode": "hostdev", "managed": managed}
            netxml.vf_list = vf_addr_list
        else:
            netxml.pf = {"dev": pf_name}
            netxml.forward = {"mode": "hostdev", "managed": managed}
        netxml.name = net_name
        logging.debug(netxml)
        return netxml

    def create_macvtap_network():
        """
            Create macvtap type network xml.
        """
        forward_interface_list = []
        for vf_name in vf_name_list:
            forward_interface = {'dev': vf_name}
            forward_interface_list.append(forward_interface)
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'}
        netxml.forward_interface = forward_interface_list
        logging.debug(netxml)
        return netxml

    def do_operation():
        """
            Perform the operation on the guest with a vf attached and check the guest behavior afterwards.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name,
                                                                    detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                    vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

    def check_info():
        """
            Check the pf or vf info after creating vfs.
        """
        if info_type == "pf_info" or info_type == "vf_order":
            nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1])
            xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            if info_type == "pf_info":
                product_info = xml.cap.product_info
                max_count = xml.max_count
                if pci_info.find(product_info) == -1:
                    test.fail(
                        "The product_info show in nodedev-dumpxml is wrong\n")
                if int(max_count) != max_vfs:
                    test.fail(
                        "The maxCount show in nodedev-dumpxml is wrong\n")
            if info_type == "vf_order":
                vf_addr_list = xml.cap.virt_functions
                if len(vf_addr_list) != max_vfs:
                    test.fail(
                        "The num of vf list show in nodedev-dumpxml is wrong\n"
                    )
                addr_list = []
                for vf_addr in vf_addr_list:
                    addr = vf_addr.domain + ":" + vf_addr.bus + ":" + vf_addr.slot + "." + vf_addr.function
                    addr_list.append(addr)
                logging.debug(
                    "The vf addr list show in nodedev-dumpxml is %s\n",
                    addr_list)
                if sorted(addr_list) != addr_list:
                    test.fail(
                        "The vf addr list show in nodedev-dumpxml is not sorted correctly\n"
                    )
        elif info_type == "vf_info":
            vf_addr = vf_list[0]
            nodedev_pci = create_nodedev_pci(vf_addr)
            vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            vf_bus_slot = ':'.join(vf_addr.split(':')[1:])
            res = process.run("lspci -s %s -vv" % vf_bus_slot)
            vf_pci_info = res.stdout_text
            vf_product_info = vf_xml.cap.product_info
            if vf_pci_info.find(vf_product_info) == -1:
                test.fail(
                    "The product_info show in nodedev-dumpxml is wrong\n")
            pf_addr = vf_xml.cap.virt_functions[0]
            pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0]
            pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0]
            pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0]
            pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0]
            pf_pci = pf_addr_domain + ":" + pf_addr_bus + ":" + pf_addr_slot + "." + pf_addr_function
            if pf_pci != pci_id:
                test.fail(
                    "The pf address show in vf nodedev-dumpxml is wrong\n")

    def create_interface():
        """
            Call the appropriate function to create the interface according to vf_type
        """
        new_iface = Interface('network')
        if vf_type == "vf":
            new_iface = create_hostdev_interface(vf_addr, managed, model)
        if vf_type == "vf_pool":
            netxml = create_hostdev_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            if not inactive_pool:
                virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        if vf_type == "macvtap":
            new_iface = Interface('direct')
            new_iface.source = {"dev": vf_name, "mode": "passthrough"}
            new_iface.mac_address = utils_net.generate_mac_address_simple()
        if vf_type == "macvtap_network":
            netxml = create_macvtap_network()
            result = virsh.net_define(netxml.xml, ignore_status=True)
            virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        return new_iface

    def detach_interface():
        """
            Detach interface:

            1.Detach interface from xml;
            2.Check the live xml after detach interface;
            3.Check the vf driver after detach interface.
        """
        def _detach_completed():
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail(
                            "The hostdev interface still in the guest xml after detach\n"
                        )
                    break
            driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            logging.debug("The driver after vf detached from guest is %s\n",
                          driver)
            if managed == "no":
                if driver != "vfio-pci":
                    test.fail(
                        "The vf pci driver is not vfio-pci after detached from guest with managed as no\n"
                    )
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            elif driver != origin_driver:
                test.fail(
                    "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n"
                    % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail(
                            "The macvtap interface still exist in the guest xml after detach\n"
                        )
                    break

    def attach_interface():
        """
            Attach interface:

            1.Attach interface from xml;
            2.Check the vf driver after attach interface;
            3.Check the live xml after attach interface;
        """
        if managed == "no":
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name,
                                     file_opt=new_iface.xml,
                                     flagstr=option,
                                     debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)
        get_ip_by_mac(mac_addr, timeout=60)
        device = live_xml.devices
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail(
                            "The driver of the hostdev interface is not vfio\n"
                        )
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
                if match != "yes":
                    test.fail(
                        "The dev name or mode of macvtap interface is wrong after attach\n"
                    )
        return interface

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    operation = params.get("operation")
    driver = params.get("driver", "ixgbe")
    status_error = params.get("status_error", "no") == "yes"
    model = params.get("model", "")
    managed = params.get("managed", "yes")
    attach = params.get("attach", "")
    option = params.get("option", "")
    vf_type = params.get("vf_type", "")
    info_check = params.get("info_check", "no")
    info_type = params.get("info_type", "")
    vf_pool_source = params.get("vf_pool_source", "vf_list")
    loop_times = int(params.get("loop_times", "1"))
    start_vm = "yes" == params.get("start_vm", "yes")
    including_pf = "yes" == params.get("including_pf", "no")
    max_vfs_attached = "yes" == params.get("max_vfs_attached", "no")
    inactive_pool = "yes" == params.get("inactive_pool", "no")
    duplicate_vf = "yes" == params.get("duplicate_vf", "no")
    expected_error = params.get("error_msg", "")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()
    if max_vfs_attached:
        controller_devices = vmxml.get_devices("controller")
        pci_bridge_controllers = []
        for device in controller_devices:
            logging.debug(device)
            if device.type == 'pci' and device.model == "pci-bridge":
                pci_bridge_controllers.append(device)
        if not pci_bridge_controllers:
            pci_bridge_controller = Controller("controller")
            pci_bridge_controller.type = "pci"
            pci_bridge_controller.index = "1"
            pci_bridge_controller.model = "pci-bridge"
            vmxml.add_device(pci_bridge_controller)
            vmxml.sync()

    if start_vm:
        if not vm.is_dead():
            vm.destroy()
        vm.start()
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        session.close()
    else:
        if not vm.is_dead():
            vm.destroy()

    driver_dir = "/sys/bus/pci/drivers/%s" % driver
    pci_dirs = glob.glob("%s/0000*" % driver_dir)
    pci_device_dir = "/sys/bus/pci/devices"
    pci_address = ""
    net_name = "test-net"

    # Prepare interface xml
    try:
        pf_iface_name = ""
        pci_address = utils_misc.wait_for(find_pf, timeout=60)
        if not pci_address:
            test.cancel("no up pf found in the test machine")
        pci_id = pci_address.split("/")[-1]
        pf_name = os.listdir('%s/net' % pci_address)[0]
        bus_slot = ':'.join(pci_address.split(':')[1:])
        pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text
        logging.debug("The pci info of the sriov card is:\n %s", pci_info)
        max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1
        if info_check == 'yes' or max_vfs < 32:
            vf_num = max_vfs
            create_vfs(vf_num)
        else:
            vf_num = max_vfs // 2 + 1
            create_vfs(vf_num)

        vf_list = []
        vf_name_list = []

        for i in range(vf_num):
            vf = os.readlink("%s/virtfn%s" % (pci_address, str(i)))
            vf = os.path.split(vf)[1]
            vf_list.append(vf)
            vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0]
            vf_name_list.append(vf_name)

        if attach == "yes":
            vf_addr = vf_list[0]
            new_iface = create_interface()
            if inactive_pool:
                result = virsh.attach_device(vm_name,
                                             file_opt=new_iface.xml,
                                             flagstr=option,
                                             ignore_status=True,
                                             debug=True)
                utils_test.libvirt.check_exit_status(result, expected_error)
            else:
                mac_addr = new_iface.mac_address
                nodedev_pci_addr = create_nodedev_pci(vf_addr)
                origin_driver = os.readlink(
                    os.path.join(pci_device_dir, vf_addr,
                                 "driver")).split('/')[-1]
                logging.debug(
                    "The driver of vf before attaching to guest is %s\n",
                    origin_driver)
                count = 0
                while count < loop_times:
                    interface = attach_interface()
                    if vf_type in ["vf", "vf_pool"]:
                        vf_addr_attrs = interface.hostdev_address.attrs
                    if operation != "":
                        do_operation()
                    detach_interface()
                    count += 1
                if max_vfs_attached:
                    interface_list = []
                    for vf_addr in vf_list:
                        new_iface = create_interface()
                        mac_addr = new_iface.mac_address
                        nodedev_pci_addr = create_nodedev_pci(vf_addr)
                        attach_interface()
                        interface_list.append(new_iface)
                    count = 0
                    for new_iface in interface_list:
                        vf_addr = vf_list[count]
                        vf_addr_attrs = new_iface.hostdev_address.attrs
                        detach_interface()
                        count += 1
        if info_check == "yes":
            check_info()
        if including_pf:
            vf_list = []
            pf_addr = pci_id
            vf_list.append(pf_addr)
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if duplicate_vf:
            vf_list.append(vf_list[0])
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
            result = virsh.net_create(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        if vf_type == "vf_pool" or vf_type == "macvtap_network":
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name, ignore_status=True)
        backup_xml.sync()
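
# A minimal standalone sketch of the SR-IOV sysfs layout the example above
# relies on: VFs are (re)created by writing a count to the PF's sriov_numvfs
# attribute (write 0 first if VFs already exist), and each VF then appears as
# a virtfn<N> symlink pointing at the VF's own PCI device directory.  The PF
# address in the usage comment is hypothetical.
import os


def list_vf_addresses(pf_sysfs_dir):
    """Return the PCI addresses of the VFs currently created for a PF."""
    entries = [e for e in os.listdir(pf_sysfs_dir) if e.startswith("virtfn")]
    addrs = []
    for entry in sorted(entries, key=lambda e: int(e[len("virtfn"):])):
        # virtfn<N> is a symlink such as ../0000:3b:10.0, so the basename of
        # its target is the VF's PCI address.
        target = os.readlink(os.path.join(pf_sysfs_dir, entry))
        addrs.append(os.path.basename(target))
    return addrs

# Usage (hypothetical PF address):
# list_vf_addresses("/sys/bus/pci/devices/0000:3b:00.0")
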
Exemplo n.º 9
0
def run(test, params, env):
    """
    Test the mtu feature of virtual networks
    """
    vm_name = params.get('main_vm')
    vm = env.get_vm(vm_name)
    mtu_type = params.get('mtu_type')
    mtu_size = params.get('mtu_size', '')
    net = params.get('net', DEFAULT_NET)
    net_type = params.get('net_type', '')
    with_iface = 'yes' == params.get('with_iface', 'no')
    with_net = 'yes' == params.get('with_net', 'no')
    status_error = 'yes' == params.get('status_error', 'no')
    check = params.get('check', '')
    error_msg = params.get('error_msg', '')
    bridge_name = 'br_mtu' + utils_misc.generate_random_string(3)
    add_pkg = params.get('add_pkg', '')
    model = params.get('model', 'virtio')

    def set_network(size, net='default'):
        """
        Set the mtu size for a given network
        """
        logging.info('Set mtu size of network "%s" to %s', net, size)
        default_xml = NetworkXML.new_from_net_dumpxml(net)
        default_xml.mtu = size
        default_xml.sync()
        logging.debug(virsh.net_dumpxml(net))

    def set_interface(mtu_size='', source_network='default',
                      iface_type='network', iface_model='virtio'):
        """
        Set the mtu size for a given interface
        """
        interface_type = 'bridge' if iface_type in ('bridge', 'openvswitch') else iface_type
        iface_dict = {
            'type': interface_type,
            'source': "{'%s': '%s'}" % (interface_type, source_network),
            'model': iface_model
        }

        if iface_type == 'openvswitch':
            iface_dict.update({'virtualport_type': 'openvswitch'})

        if mtu_size:
            iface_dict.update({'mtu': "{'size': %s}" % mtu_size})

        libvirt.modify_vm_iface(vm_name, 'update_iface', iface_dict)
        logging.debug(virsh.dumpxml(vm_name).stdout)

    def get_default_if():
        """
        Get the default interface that is used by the vm
        """
        ifaces = utils_net.get_sorted_net_if()
        logging.debug('Interfaces on host: %s', ifaces)
        for iface in ifaces[0]:
            if 'Link detected: yes' in process.run('ethtool %s' % iface).stdout_text:
                logging.debug('Found host interface "%s"', iface)
                return iface

    def create_bridge():
        """
        Create a bridge on host for test
        """
        cmd_create_br = 'nmcli con add type bridge con-name %s ifname %s'
        con_name = 'con_' + utils_misc.generate_random_string(3)
        bridge_name = 'br_' + utils_misc.generate_random_string(3)
        process.run(cmd_create_br % (con_name, bridge_name), verbose=True)
        return con_name, bridge_name

    def create_network_xml(name, network_type, base_if='', **kwargs):
        """
        Create a network xml to be defined
        """
        m_net = NetworkXML(name)
        m_net.forward = {'mode': 'bridge'}
        if network_type in ('bridge', 'openvswitch'):
            m_net.bridge = {'name': kwargs['bridge_name']}
        elif network_type == 'macvtap':
            if base_if:
                m_net.forward_interface = [{'dev': base_if}]
        if network_type == 'openvswitch':
            m_net.virtualport_type = 'openvswitch'
        if 'mtu' in kwargs:
            m_net.mtu = kwargs['mtu']
        logging.debug(m_net)
        return m_net.xml

    def create_iface(iface_type, **kwargs):
        """
        Create an interface to be attached to the vm
        """
        m_iface = Interface(iface_type)
        m_iface.mac_address = utils_net.generate_mac_address_simple()
        if 'base_if' in kwargs:
            m_iface.source = {'dev': kwargs['base_if'],
                              'mode': 'vepa'}
        if 'source_net' in kwargs:
            m_iface.source = {'network': kwargs['source_net']}
        if 'mtu' in kwargs:
            m_iface.mtu = {'size': kwargs['mtu']}
        if 'model_net' in kwargs:
            m_iface.model = kwargs['model_net']
        logging.debug(m_iface.get_xml())
        logging.debug(m_iface)
        return m_iface

    def check_mtu(mtu_size, qemu=False):
        """
        Check if the mtu meets expectations on the host
        """
        error = ''
        live_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface_xml = live_vmxml.get_devices('interface')[0]
        logging.debug(iface_xml.target)
        dev = iface_xml.target['dev']
        ifconfig_info = process.run('ifconfig|grep mtu|grep %s' % dev,
                                    shell=True, verbose=True).stdout_text
        if 'mtu %s' % mtu_size in ifconfig_info:
            logging.info('PASS on ifconfig check for vnet.')
        else:
            error += 'Fail on ifconfig check for vnet.'
        if qemu:
            qemu_mtu_info = process.run('ps aux|grep qemu-kvm',
                                        shell=True, verbose=True).stdout_text
            if 'host_mtu=%s' % mtu_size in qemu_mtu_info:
                logging.info('PASS on qemu cmd line check.')
            else:
                error += 'Fail on qemu cmd line check.'
        if error:
            test.fail(error)

    def check_mtu_in_vm(fn_login, mtu_size):
        """
        Check if the mtu meets expectations in the vm
        """
        session = fn_login()
        check_cmd = 'ifconfig'
        output = session.cmd(check_cmd)
        session.close()
        logging.debug(output)
        if 'mtu %s' % mtu_size not in output:
            test.fail('MTU check inside vm failed.')
        else:
            logging.debug("MTU check inside vm passed.")

    try:
        bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        bk_netxml = NetworkXML.new_from_net_dumpxml(DEFAULT_NET)
        if add_pkg:
            add_pkg = add_pkg.split()
            utils_package.package_install(add_pkg)
        if 'openvswitch' in add_pkg:
            br = 'ovsbr0' + utils_misc.generate_random_string(3)
            process.run('systemctl start openvswitch.service', shell=True, verbose=True)
            process.run('ovs-vsctl add-br %s' % br, shell=True, verbose=True)
            process.run('ovs-vsctl show', shell=True, verbose=True)

        if not check or check in ['save', 'managedsave', 'hotplug_save']:
            # Create bridge or network and set mtu
            iface_type = 'network'
            if net_type in ('bridge', 'openvswitch'):
                if net_type == 'bridge':
                    params['con_name'], br = create_bridge()
                if mtu_type == 'network':
                    test_net = create_network_xml(
                        bridge_name, net_type,
                        bridge_name=br
                    )
                    virsh.net_create(test_net, debug=True)
                    virsh.net_dumpxml(bridge_name, debug=True)
                if mtu_type == 'interface':
                    iface_type = net_type
                    bridge_name = br
            elif net_type == 'network':
                if mtu_type == 'network':
                    set_network(mtu_size)

            iface_mtu = 0
            if mtu_type == 'interface':
                iface_mtu = mtu_size
            if mtu_type == 'network' and with_iface:
                mtu_size = str(int(mtu_size)//2)
                iface_mtu = mtu_size

            source_net = bridge_name if net_type in ('bridge', 'openvswitch') else 'default'

            # set mtu in vm interface
            set_interface(iface_mtu, source_network=source_net, iface_type=iface_type, iface_model=model)
            vm.start()
            vm_login = vm.wait_for_serial_login if net_type in ('bridge', 'openvswitch') else vm.wait_for_login
            vm_login().close()
            check_qemu = (mtu_type == 'interface')

            # Test mtu after save vm
            if check in ('save', 'hotplug_save'):
                if check == 'hotplug_save':
                    iface = create_iface('network', source_net='default',
                                         mtu=mtu_size, model_net=model)
                    params['mac'] = iface.mac_address
                    virsh.attach_device(vm_name, iface.xml, debug=True)
                    virsh.dumpxml(vm_name, debug=True)
                    dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                    if params['mac'] not in str(dom_xml):
                        test.fail('Failed to attach interface with mtu')
                save_path = os.path.join(data_dir.get_tmp_dir(), vm_name + '.save')
                virsh.save(vm_name, save_path, debug=True)
                virsh.restore(save_path, debug=True)
            if check == 'managedsave':
                virsh.managedsave(vm_name, debug=True)
                virsh.start(vm_name, debug=True)

            # Check in both host and vm
            check_mtu(mtu_size, check_qemu)
            check_mtu_in_vm(vm_login, mtu_size)
            vm_login(timeout=60).close()

            if check == 'hotplug_save':
                virsh.detach_interface(vm_name, 'network %s' % params['mac'], debug=True)
                time.sleep(5)
                dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if params['mac'] in str(dom_xml):
                    test.fail('Failed to detach interface with mtu after save-restore')

        else:
            hotplug = 'yes' == params.get('hotplug', 'no')
            if check == 'net_update':
                result = virsh.net_update(
                    DEFAULT_NET, 'modify', 'mtu',
                    '''"<mtu size='%s'/>"''' % mtu_size,
                    debug=True
                )
            if check in ('macvtap', 'bridge_net', 'ovswitch_net'):
                base_if = get_default_if()
                macv_name = 'direct-macvtap' + utils_misc.generate_random_string(3)

                # Test mtu in different type of network
                if mtu_type == 'network':
                    if check == 'macvtap':
                        test_net = create_network_xml(macv_name, 'macvtap',
                                                      base_if, mtu=mtu_size)
                    if check == 'bridge_net':
                        params['con_name'], br = create_bridge()
                        test_net = create_network_xml(
                            bridge_name, 'bridge', mtu=mtu_size,
                            bridge_name=br
                        )
                    if check == 'ovswitch_net':
                        test_net = create_network_xml(
                            bridge_name, 'openvswitch', mtu=mtu_size,
                            bridge_name=br
                        )
                    if 'net_create' in params['id']:
                        result = virsh.net_create(test_net, debug=True)
                    if 'net_define' in params['id']:
                        result = virsh.net_define(test_net, debug=True)

                # Test mtu with or without a binding network
                elif mtu_type == 'interface':
                    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
                    if with_net:
                        test_net = create_network_xml(macv_name, 'macvtap', base_if)
                        virsh.net_create(test_net, debug=True)
                        iface = create_iface('network', source_net=macv_name, mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml, debug=True)
                        else:
                            vmxml.add_device(iface)
                            vmxml.sync()
                            result = virsh.start(vm_name)
                    else:
                        iface = create_iface('direct', base_if=base_if, mtu=mtu_size)
                        if hotplug:
                            result = virsh.attach_device(vm_name, iface.xml, debug=True)
                        else:
                            vmxml.add_device(iface)
                            result = virsh.define(vmxml.xml, debug=True)
            if check == 'invalid_val':
                iface = create_iface('network', source_net='default', mtu=mtu_size)
                result = virsh.attach_device(vm_name, iface.xml, debug=True)

            # Check result
            libvirt.check_exit_status(result, status_error)
            libvirt.check_result(result, [error_msg])

    finally:
        bk_xml.sync()
        bk_netxml.sync()
        if 'test_net' in locals():
            virsh.net_destroy(bridge_name, debug=True)
        if params.get('con_name'):
            process.run('nmcli con del %s' % params['con_name'], verbose=True)
        if add_pkg:
            process.run("ovs-vsctl del-br %s" % br, verbose=True)
            utils_package.package_remove(add_pkg)
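
# A minimal standalone sketch of the two host-side MTU checks performed by
# check_mtu() above, without shelling out to ifconfig/ps: the MTU of a tap
# device (e.g. the vnet name taken from the live domain XML) can be read from
# sysfs, and the interface <mtu> shows up as a host_mtu= property on the
# qemu-kvm command line, which is what the example greps for.  The device
# name "vnet0" and size 9000 below are only assumptions for illustration.
import glob


def host_mtu_of(dev):
    """Return the MTU of a host network device, e.g. a vnet tap."""
    with open("/sys/class/net/%s/mtu" % dev) as attr:
        return int(attr.read().strip())


def qemu_cmdline_has_mtu(mtu_size):
    """Return True if a running qemu process was started with host_mtu=<size>."""
    token = ("host_mtu=%s" % mtu_size).encode()
    for path in glob.glob("/proc/[0-9]*/cmdline"):
        try:
            with open(path, "rb") as proc:
                args = proc.read().replace(b"\x00", b" ")
        except OSError:
            continue
        if b"qemu" in args and token in args:
            return True
    return False

# Usage sketch:
# assert host_mtu_of("vnet0") == 9000
# assert qemu_cmdline_has_mtu(9000)
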
Exemplo n.º 10
0
def run(test, params, env):
    """
    Test command: virsh net-list.

    The command returns list of networks.
    1.Get all parameters from configuration.
    2.Get the current network's status (State, Autostart).
    3.Do some preparation work for testing.
    4.Perform virsh net-list operation.
    5.Recover network status.
    6.Confirm the result.
    """
    option = params.get("net_list_option", "")
    extra = params.get("net_list_extra", "")
    status_error = params.get("status_error", "no")
    net_name = params.get("net_list_name", "default")
    persistent = params.get("net_list_persistent", "yes")
    net_status = params.get("net_list_error", "active")
    tmp_xml = os.path.join(test.tmpdir, "tmp.xml")
    net_current_status = "active"
    autostart_status = "yes"
    if not virsh.net_state_dict()[net_name]['active']:
        net_current_status = "inactive"
    if not virsh.net_state_dict()[net_name]['autostart']:
        autostart_status = "no"

    # acl polkit params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Create a transient network.
    try:
        if persistent == "no":
            virsh.net_dumpxml(net_name, to_file=tmp_xml, ignore_status=False)
            if net_current_status == "inactive":
                virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_undefine(net_name, ignore_status=False)
            virsh.net_create(tmp_xml, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Transient network test failed!")

    # Prepare network's status for testing.
    if net_status == "active":
        try:
            if not virsh.net_state_dict()[net_name]['active']:
                virsh.net_start(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Active network test failed!")
    else:
        try:
            if virsh.net_state_dict()[net_name]['active']:
                virsh.net_destroy(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Inactive network test failed!")

    virsh_dargs = {'ignore_status': True}
    if params.get('setup_libvirt_polkit') == 'yes':
        virsh_dargs['unprivileged_user'] = unprivileged_user
        virsh_dargs['uri'] = uri
    result = virsh.net_list(option, extra, **virsh_dargs)
    status = result.exit_status
    output = result.stdout.strip()

    # Recover network
    try:
        if persistent == "no":
            virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_define(tmp_xml, ignore_status=False)
            if net_current_status == "active":
                virsh.net_start(net_name, ignore_status=False)
            if autostart_status == "yes":
                virsh.net_autostart(net_name, ignore_status=False)
        else:
            if net_current_status == "active" and net_status == "inactive":
                virsh.net_start(net_name, ignore_status=False)
            elif net_current_status == "inactive" and net_status == "active":
                virsh.net_destroy(net_name, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Recover network failed!")

    # check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        if option == "--inactive":
            if net_status == "active":
                if re.search(net_name, output):
                    raise error.TestFail("Found an active network with"
                                         " --inactive option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Found no inactive networks with"
                                             " --inactive option")
                else:
                    # If the network is transient, it will disappear
                    # after net-destroy.
                    if re.search(net_name, output):
                        raise error.TestFail(
                            "Found transient inactive networks"
                            " with --inactive option")
        elif option == "":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with no"
                                         " option")
            else:
                if re.search(net_name, output):
                    raise error.TestFail("Found inactive network with"
                                         " no option")
        elif option == "--all":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with"
                                         " --all option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Can't find inactive network with"
                                             " --all option")
                else:
                    # If the network is transient, it will disappear
                    # after net-destroy.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive network"
                                             " with --all option")
Exemplo n.º 11
0
def run_virsh_net_list(test, params, env):
    """
    Test command: virsh net-list.

    The command returns list of networks.
    1.Get all parameters from configuration.
    2.Get the current network's status (State, Autostart).
    3.Do some preparation work for testing.
    4.Perform virsh net-list operation.
    5.Recover network status.
    6.Confirm the result.
    """
    option = params.get("net_list_option", "")
    extra = params.get("net_list_extra", "")
    status_error = params.get("status_error", "no")
    net_name = params.get("net_list_name", "default")
    persistent = params.get("net_list_persistent", "yes")
    net_status = params.get("net_list_error", "active")
    tmp_xml = os.path.join(test.tmpdir, "tmp.xml")
    net_current_status = "active"
    autostart_status = "yes"
    if not virsh.net_state_dict()[net_name]['active']:
        net_current_status = "inactive"
    if not virsh.net_state_dict()[net_name]['autostart']:
        autostart_status = "no"

    # Create a transient network.
    try:
        if persistent == "no":
            virsh.net_dumpxml(net_name, to_file=tmp_xml, ignore_status=False)
            if net_current_status == "inactive":
                virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_undefine(net_name, ignore_status=False)
            virsh.net_create(tmp_xml, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Transient network test failed!")

    # Prepare network's status for testing.
    if net_status == "active":
        try:
            if not virsh.net_state_dict()[net_name]['active']:
                virsh.net_start(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Active network test failed!")
    else:
        try:
            if virsh.net_state_dict()[net_name]['active']:
                virsh.net_destroy(net_name, ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Inactive network test failed!")

    result = virsh.net_list(option, extra, ignore_status=True)
    status = result.exit_status
    output = result.stdout.strip()

    # Recover network
    try:
        if persistent == "no":
            virsh.net_destroy(net_name, ignore_status=False)
            virsh.net_define(tmp_xml, ignore_status=False)
            if net_current_status == "active":
                virsh.net_start(net_name, ignore_status=False)
            if autostart_status == "yes":
                virsh.net_autostart(net_name, ignore_status=False)
        else:
            if net_current_status == "active" and net_status == "inactive":
                virsh.net_start(net_name, ignore_status=False)
            elif net_current_status == "inactive" and net_status == "active":
                virsh.net_destroy(net_name, ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Recover network failed!")

    # check result
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    elif status_error == "no":
        if status != 0:
            raise error.TestFail("Run failed with right command")
        if option == "--inactive":
            if net_status == "active":
                if re.search(net_name, output):
                    raise error.TestFail("Found an active network with"
                                         " --inactive option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Found no inactive networks with"
                                             " --inactive option")
                else:
                    # If the network is transient, it will disappear
                    # after net-destroy.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive networks"
                                             " with --inactive option")
        elif option == "":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with no"
                                         " option")
            else:
                if re.search(net_name, output):
                    raise error.TestFail("Found inactive network with"
                                         " no option")
        elif option == "--all":
            if net_status == "active":
                if not re.search(net_name, output):
                    raise error.TestFail("Can't find active network with"
                                         " --all option")
            else:
                if persistent == "yes":
                    if not re.search(net_name, output):
                        raise error.TestFail("Can't find inactive network with"
                                             " --all option")
                else:
                    # If the network is transient, it will disappear
                    # after net-destroy.
                    if re.search(net_name, output):
                        raise error.TestFail("Found transient inactive network"
                                             " with --all option")