Example #1
    def __init__(self, pf_pci, vf_no=4):
        self.pf_pci = pf_pci
        self.vf_no = vf_no
        self.pf_pci_path = utils_misc.get_pci_path(self.pf_pci)
        utils_sriov.set_vf(self.pf_pci_path, 0)

        self.pf_iface = utils_sriov.get_pf_info_by_pci(
            self.pf_pci).get('iface')
        if not self.pf_iface:
            raise exceptions.TestCancel("No available PF found.")
        self.br_name = self.pf_iface + '_br'

        self.ovs = factory(openvswitch.OpenVSwitchSystem)()
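
The class name is omitted from this excerpt, so a usage sketch has to assume one; here the class is called SriovOvsSetup and the PF PCI address is an assumption about the host, not part of the original:

# Hypothetical usage of the constructor above; "SriovOvsSetup" and the
# PF address "0000:3b:00.0" are assumptions, not shown in the excerpt.
setup = SriovOvsSetup("0000:3b:00.0", vf_no=4)
# __init__ resets sriov_numvfs to 0, resolves the PF's interface name,
# and derives the OVS bridge name from it:
print(setup.pf_iface, setup.br_name)  # e.g. "ens1f0 ens1f0_br"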
Example #2
    def test_vf():
        """
        Detach/Reattach a vf when it is used in guest

        1) Detach/reattach the device
        2) Add the device to VM
        3) Start the VM
        4) Check driver of the device
        5) Detach/reattach the device again
        """
        logging.info("Initialize the vfs.")
        sriov_base.setup_vf(pf_pci, params)
        vf_pci = utils_sriov.get_vf_pci_id(pf_pci)
        pf_name = utils_sriov.get_pf_info_by_pci(pf_pci).get('iface')
        vf_mac = utils_sriov.get_vf_mac(pf_name, is_admin=False)
        logging.debug("VF's mac: %s.", vf_mac)

        logging.info("Check the vf's driver, it should not be vfio-pci.")
        dev_name = utils_sriov.get_device_name(vf_pci)
        check_driver_from_xml(dev_name)

        logging.info("Detach and reattach the device and check vf's mac.")
        nodedev_test(dev_name, no_reset=True)
        utils_misc.wait_for(
            lambda: libvirt_vfio.check_vfio_pci(vf_pci, True, True), 10, 5)
        compare_vf_mac(pf_name, vf_mac)

        logging.info("Cold-plug the device into the VM.")
        add_hostdev_iface(vm, vf_pci)
        vm.start()
        check_hostdev_iface(vm.name)

        logging.info("Check the device info. It should be vfio-pci.")
        check_driver_from_xml(dev_name, status_error=True)
        nodedev_test(dev_name, True)

        logging.info("Destroy the vm, and check the vf's mac is recovered.")
        vm.destroy(gracefully=False)
        compare_vf_mac(pf_name, vf_mac)
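
check_driver_from_xml() is defined outside this excerpt (judging by its name, it presumably inspects virsh nodedev-dumpxml output); a generic sysfs-based equivalent of the same driver check, shown only as a sketch:

import os

def vf_driver_is_vfio(vf_pci):
    # The kernel exposes the bound driver as a symlink under sysfs;
    # vf_pci is a PCI address such as "0000:3b:02.0" (assumed format).
    driver_link = "/sys/bus/pci/devices/%s/driver" % vf_pci
    return os.path.basename(os.readlink(driver_link)) == "vfio-pci"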
Example #3
def run(test, params, env):
    """
    Test interfaces attached from network
    """
    def test_at_dt():
        """
        Test attach-detach interfaces
        """
        if not pf_status:
            logging.info("Set pf state to down.")
            pf_iface_obj = utils_net.Interface(pf_name)
            pf_iface_obj.down()

        logging.info("Define network - %s.", params.get("net_name"))
        create_network(params)

        logging.debug("Remove VM's interface devices.")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vm.start()
        vm_session = vm.wait_for_serial_login(timeout=240)

        logging.info("Hotplug an interface to VM.")
        iface_dict = {
            "model": "virtio",
            "source": {
                'network': params.get("net_name")
            }
        }
        iface = create_iface(iface_dict)
        res = virsh.attach_device(vm_name, iface.xml, debug=True)
        libvirt.check_exit_status(res, status_error)
        if not pf_status:
            logging.info("Set pf state to up then check again.")
            pf_iface_obj.up()
            virsh.attach_device(vm_name,
                                iface.xml,
                                debug=True,
                                ignore_status=False)
        libvirt_vmxml.check_guest_xml(vm.name, params["net_name"])
        sriov_base.check_vm_network_accessed(vm_session)

    def test_connection():
        """
        Test network connections

        1. Create a network
        2. Attach the interfaces and check network connections
        3. Check the network connections after detaching ifaces, restarting
            libvirtd and destroying the VM
        """
        vf_no = int(params.get("vf_no", "4"))
        net_name = params.get("net_name")
        iface_type = params.get("iface_type")

        logging.info("Define network - %s.", net_name)
        create_network(params)
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        libvirt_pcicontr.reset_pci_num(vm_name)
        vm.start()
        vm.wait_for_serial_login(timeout=240)

        logging.info("Attach 4 interfaces to the guest.")
        opts = ' '.join(
            ["network", net_name,
             params.get('attach_extra_opts', "")])
        for i in range(vf_no):
            virsh.attach_interface(vm_name,
                                   option=opts,
                                   debug=True,
                                   ignore_status=False)
            libvirt_network.check_network_connection(net_name, i + 1)

        logging.info("Try to attach one more interface.")
        res = virsh.attach_interface(vm_name, option=opts, debug=True)
        libvirt.check_exit_status(res, True)

        logging.info("Detach an interface.")
        vm_ifaces = [
            iface for iface in vm_xml.VMXML.new_from_dumpxml(
                vm_name).devices.by_device_tag("interface")
        ]
        mac_addr = vm_ifaces[0].get_mac_address()
        opts = ' '.join([iface_type, "--mac %s" % mac_addr])
        virsh.detach_interface(vm_name,
                               option=opts,
                               debug=True,
                               wait_for_event=True,
                               ignore_status=False)
        libvirt_network.check_network_connection(net_name, vf_no - 1)

        logging.info(
            "Restart libvirtd service and check the network connection.")
        utils_libvirtd.Libvirtd().restart()
        libvirt_network.check_network_connection(net_name, vf_no - 1)
        logging.info("Destroy the VM and check the network connection.")
        vm.destroy(gracefully=False)
        libvirt_network.check_network_connection(net_name, 0)

    test_case = params.get("test_case", "")
    run_test = eval("test_%s" % test_case)
    status_error = "yes" == params.get("status_error", "no")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    pf_pci = utils_sriov.get_pf_pci()
    if not pf_pci:
        test.cancel("NO available pf found.")
    sriov_base.setup_vf(pf_pci, params)

    vf_pci = utils_sriov.get_vf_pci_id(pf_pci)
    params['vf_iface'] = utils_sriov.get_iface_name(vf_pci)
    pf_status = "active" == params.get("pf_status", "active")
    pf_name = utils_sriov.get_pf_info_by_pci(pf_pci).get('iface')
    params['pf_name'] = pf_name
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        run_test()

    finally:
        logging.info("Recover test enviroment.")
        if not pf_status:
            pf_iface_obj = utils_net.Interface(pf_name)
            pf_iface_obj.up()
        sriov_base.recover_vf(pf_pci, params)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        orig_config_xml.sync()
        libvirt_network.create_or_del_network(
            {"net_name": params.get("net_name")}, True)
Example #4
def run(test, params, env):
    """
    Sriov basic test:

    1.create max vfs;
    2.Check the nodedev info;
    3.Start a guest with vf;
    4.Reboot a guest with vf;
    5.suspend/resume a guest with vf
    """
    def create_address_dict(pci_id):
        """
            Use a PCI address in xxxx:xx:xx.x format to create an address dict.
        """
        device_domain = pci_id.split(':')[0]
        device_domain = "0x%s" % device_domain
        device_bus = pci_id.split(':')[1]
        device_bus = "0x%s" % device_bus
        device_slot = pci_id.split(':')[-1].split('.')[0]
        device_slot = "0x%s" % device_slot
        device_function = pci_id.split('.')[-1]
        device_function = "0x%s" % device_function
        attrs = {
            'type': 'pci',
            'domain': device_domain,
            'slot': device_slot,
            'bus': device_bus,
            'function': device_function
        }
        return attrs

    def addr_to_pci(addr):
        """
            Convert an address dict to a PCI address: xxxx:xx:xx.x.
        """
        pci_domain = re.findall(r"0x(.+)", addr['domain'])[0]
        pci_bus = re.findall(r"0x(.+)", addr['bus'])[0]
        pci_slot = re.findall(r"0x(.+)", addr['slot'])[0]
        pci_function = re.findall(r"0x(.+)", addr['function'])[0]
        pci_addr = pci_domain + ":" + pci_bus + ":" + pci_slot + "." + pci_function
        return pci_addr
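
    # Illustration (assumed sample address): for pci_id "0000:3b:02.1",
    # create_address_dict() yields {'type': 'pci', 'domain': '0x0000',
    # 'bus': '0x3b', 'slot': '0x02', 'function': '0x1'}, and
    # addr_to_pci() maps such a dict back to "0000:3b:02.1".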

    def create_hostdev_interface(pci_id, managed, model):
        """
            Create hostdev type interface xml.
        """
        attrs = create_address_dict(pci_id)
        new_iface = Interface('hostdev')
        new_iface.managed = managed
        if model != "":
            new_iface.model = model
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.hostdev_address = new_iface.new_iface_address(
            **{"attrs": attrs})
        chars = string.ascii_letters + string.digits + '-_'
        alias_name = 'ua-' + ''.join(
            random.choice(chars) for _ in list(range(64)))
        new_iface.alias = {'name': alias_name}
        if vlan_id:
            new_iface.vlan = new_iface.new_vlan(**vlan_id)
        return new_iface

    def create_vfs(vf_num):
        """
            Create vf_num VFs and verify they appear in nodedev-list.
        """
        net_device = []
        net_name = []
        # cleanup env and create vfs
        cmd = "echo 0 > %s/sriov_numvfs" % pci_address
        if driver == "mlx4_core":
            cmd = "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core"
        process.run(cmd, shell=True)
        pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
        net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        pci_list_before = set(pci_list)
        net_list_before = set(net_list)
        cmd = "echo %d > %s/sriov_numvfs" % (vf_num, pci_address)
        if driver == "mlx4_core":
            cmd = "modprobe -v mlx4_core num_vfs=%d port_type_array=2,2 probe_vf=%d" \
                    % (vf_num, vf_num)
        test_res = process.run(cmd, shell=True)
        if test_res.exit_status != 0:
            test.fail("Fail to create vfs")

        def _vf_init_completed():
            try:
                net_list_sriov = virsh.nodedev_list(
                    cap='net').stdout.strip().splitlines()
                net_list_sriov = set(net_list_sriov)
                net_diff = list(net_list_sriov.difference(net_list_before))
                net_count = len(net_diff)
                if ((driver != "mlx4_core" and net_count != vf_num) or
                    (driver == "mlx4_core" and net_count != 2 * (vf_num + 1))):
                    net_diff = []
                    return False
                return net_diff
            except process.CmdError:
                test.fail(
                    "Get net list with 'virsh nodedev-list' failed\n")

        net_diff = utils_misc.wait_for(_vf_init_completed, timeout=300)
        pci_list_sriov = virsh.nodedev_list(
            cap='pci').stdout.strip().splitlines()
        pci_list_sriov = set(pci_list_sriov)
        pci_diff = list(pci_list_sriov.difference(pci_list_before))
        if not net_diff:
            test.fail("Get net list with 'virsh nodedev-list' failed\n")
        for net in net_diff:
            # Node-device net names look like net_<iface>_<6 MAC octets>;
            # derive both the iface name and the MAC from the split fields.
            parts = net.split('_')
            length = len(parts)
            net = '_'.join(parts[1:length - 6])
            mac = ':'.join(parts[length - 6:])
            net_name.append(net)
        for pci_addr in pci_diff:
            temp_addr = pci_addr.split("_")
            pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
            vf_net_name = os.listdir("%s/%s/net" %
                                     (pci_device_dir, pci_addr))[0]
            net_device.append(vf_net_name)
        logging.debug(sorted(net_name))
        logging.debug(sorted(net_device))
        if driver != "mlx4_core" and sorted(net_name) != sorted(net_device):
            test.fail("The net name get from nodedev-list is wrong\n")

    def get_ip_by_mac(mac_addr, timeout=120):
        """
        Get interface IP address by given MAC address.
        """
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)

        def get_ip():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = ""
            iface_name = utils_net.get_linux_ifname(session, mac_addr)
            if iface_name is None:
                test.fail("no interface with MAC address %s found" % mac_addr)
            session.cmd("pkill -9 dhclient", ignore_all_errors=True)
            session.cmd("dhclient %s " % iface_name, ignore_all_errors=True)
            ip_addr = utils_misc.wait_for(get_ip, 20)
            logging.debug("The ip addr is %s", ip_addr)
        except Exception:
            logging.warning("Find %s with MAC address %s but no ip for it" %
                            (iface_name, mac_addr))
        finally:
            session.close()
        return ip_addr

    def create_nodedev_pci(pci_address):
        """
            Convert xxxx:xx:xx.x to pci_xxxx_xx_xx_x.
        """
        nodedev_addr = pci_address.split(':')[0:2]
        slot_function = pci_address.split(':')[2]
        nodedev_addr.append(slot_function.split('.')[0])
        nodedev_addr.append(slot_function.split('.')[1])
        nodedev_addr.insert(0, "pci")
        nodedev_addr = "_".join(nodedev_addr)
        return nodedev_addr
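
    # Illustration (assumed sample address): create_nodedev_pci("0000:3b:02.1")
    # returns "pci_0000_3b_02_1", the node-device name format that virsh
    # nodedev-list / nodedev-dumpxml use.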

    def create_network_interface(name):
        """
            Create network type interface xml.
        """
        new_iface = Interface('network')
        new_iface.source = {'network': name}
        new_iface.model = "virtio"
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        if vlan_id:
            new_iface.vlan = new_iface.new_vlan(**vlan_id)
        return new_iface

    def create_hostdev_network():
        """
            Create a hostdev-type network xml backed by a VF pool.
        """
        vf_addr_list = []
        netxml = network_xml.NetworkXML()
        if vf_pool_source == "vf_list":
            for vf in vf_list:
                attrs = create_address_dict(vf)
                new_vf = netxml.new_vf_address(**{'attrs': attrs})
                vf_addr_list.append(new_vf)
            netxml.driver = {'name': 'vfio'}
            netxml.forward = {"mode": "hostdev", "managed": managed}
            netxml.vf_list = vf_addr_list
        else:
            netxml.pf = {"dev": pf_name}
            netxml.forward = {"mode": "hostdev", "managed": managed}
        netxml.name = net_name
        logging.debug(netxml)
        return netxml

    def create_macvtap_network():
        """
        Create macvtap type network xml.
        """
        forward_interface_list = []
        for vf_name in vf_name_list:
            forward_interface = {'dev': vf_name}
            forward_interface_list.append(forward_interface)
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'}
        netxml.forward_interface = forward_interface_list
        logging.debug(netxml)
        return netxml

    def do_operation():
        """
            Perform an operation on the guest with a VF attached and check the OS behavior afterwards.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name,
                                                                    detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                    vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

        if operation == "restart_libvirtd":
            detach_interface()
            utils_libvirtd.libvirtd_restart()
            interface = attach_interface()

    def check_info():
        """
            Check the PF or VF info after creating VFs.
        """
        if info_type == "pf_info" or info_type == "vf_order":
            nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1])
            xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            if info_type == "pf_info":
                product_info = xml.cap.product_info
                max_count = xml.max_count
                if pci_info.find(product_info) == -1:
                    test.fail(
                        "The product_info show in nodedev-dumpxml is wrong\n")
                if int(max_count) != max_vfs:
                    test.fail(
                        "The maxCount show in nodedev-dumpxml is wrong\n")
            if info_type == "vf_order":
                vf_addr_list = xml.cap.virt_functions
                if len(vf_addr_list) != max_vfs:
                    test.fail(
                        "The num of vf list show in nodedev-dumpxml is wrong\n"
                    )
                addr_list = []
                for vf_addr in vf_addr_list:
                    addr = vf_addr.domain + ":" + vf_addr.bus + ":" + vf_addr.slot + "." + vf_addr.function
                    addr_list.append(addr)
                logging.debug(
                    "The vf addr list show in nodedev-dumpxml is %s\n",
                    addr_list)
                if sorted(addr_list) != addr_list:
                    test.fail(
                        "The vf addr list show in nodedev-dumpxml is not sorted correctly\n"
                    )
        elif info_type == "vf_info":
            vf_addr = vf_list[0]
            nodedev_pci = create_nodedev_pci(vf_addr)
            vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            vf_bus_slot = ':'.join(vf_addr.split(':')[1:])
            res = process.run("lspci -s %s -vv" % vf_bus_slot)
            vf_pci_info = res.stdout_text
            vf_product_info = vf_xml.cap.product_info
            if vf_pci_info.find(vf_product_info) == -1:
                test.fail(
                    "The product_info show in nodedev-dumpxml is wrong\n")
            pf_addr = vf_xml.cap.virt_functions[0]
            pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0]
            pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0]
            pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0]
            pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0]
            pf_pci = pf_addr_domain + ":" + pf_addr_bus + ":" + pf_addr_slot + "." + pf_addr_function
            if pf_pci != pci_id:
                test.fail(
                    "The pf address show in vf nodedev-dumpxml is wrong\n")

    def create_interface():
        """
            Call a different function to create the interface according to the type
        """
        new_iface = Interface('network')
        if vf_type == "vf":
            new_iface = create_hostdev_interface(vf_addr, managed, model)
        if vf_type == "vf_pool":
            netxml = create_hostdev_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            if not inactive_pool:
                virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        if vf_type == "macvtap":
            new_iface = Interface('direct')
            new_iface.source = {"dev": vf_name, "mode": "passthrough"}
            new_iface.mac_address = utils_net.generate_mac_address_simple()
            new_iface.model = "virtio"
            if vlan_id:
                new_iface.vlan = new_iface.new_vlan(**vlan_id)
        if vf_type == "macvtap_network":
            netxml = create_macvtap_network()
            result = virsh.net_define(netxml.xml, ignore_status=True)
            virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        return new_iface

    def detach_interface():
        """
            Detach interface:

            1.Detach interface from xml;
            2.Check the live xml after detach interface;
            3.Check the vf driver after detach interface.
        """
        def _detach_completed():
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        def check_addr_attrs():
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device = live_xml.devices
            hostdev_list = device.by_device_tag("hostdev")
            for hostdev in hostdev_list:
                addr = hostdev.source.untyped_address
                hostdev_addr_attrs = {
                    "domain": addr.domain,
                    "bus": addr.bus,
                    "slot": addr.slot,
                    "function": addr.function
                }
                if hostdev_addr_attrs == vf_addr_attrs:
                    return False
            return True

        result = utils_misc.wait_for(
            lambda: virsh.detach_device(vm_name, new_iface.xml), 30, first=10)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if vf_type == "hostdev":
            check_ret = utils_misc.wait_for(check_addr_attrs, timeout=60)
            if not check_ret:
                test.fail("The hostdev device detach failed from xml\n")
        else:
            utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)

        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail(
                            "The hostdev interface still in the guest xml after detach\n"
                        )
                    break
            driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            logging.debug("The driver after vf detached from guest is %s\n",
                          driver)
            if managed == "no":
                if driver != "vfio-pci":
                    test.fail(
                        "The vf pci driver is not vfio-pci after detached from guest with managed as no\n"
                    )
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            elif driver != origin_driver:
                test.fail(
                    "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n"
                    % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail(
                            "The macvtap interface still exist in the guest xml after detach\n"
                        )
                    break

    def attach_interface():
        """
            Attach interface:

            1.Attach interface from xml;
            2.Check the vf driver after attach interface;
            3.Check the live xml after attach interface;
        """
        if managed == "no":
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name,
                                     new_iface.xml,
                                     flagstr=option,
                                     debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        # For option == "--persistent", after VM destroyed and then start, the device should still be there.
        if option == "--persistent":
            virsh.destroy(vm_name)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)

        if vf_type != "hostdev":
            get_ip_by_mac(mac_addr, timeout=60)

        device = live_xml.devices

        if vf_type == "hostdev":
            hostdev_list = device.by_device_tag("hostdev")
            if len(hostdev_list) == 0:
                test.fail("The hostdev device attach failed from xml\n")
            else:
                for hostdev in hostdev_list:
                    if hostdev.type == "pci":
                        break
                interface = hostdev

        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail(
                            "The driver of the hostdev interface is not vfio\n"
                        )
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(
                os.path.join(pci_device_dir, pci_addr,
                             "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
                if match != "yes":
                    test.fail(
                        "The dev name or mode of macvtap interface is wrong after attach\n"
                    )
        return interface

    def setup_controller(nic_num, controller_index, ctl_models):
        """
        Create controllers bound to a NUMA node in the guest xml

        :param nic_num: number of NIC cards bound to the NUMA node
        :param controller_index: index num used to create controllers
        :param ctl_models: controller topology for the NUMA binding
        """
        index = controller_index
        if nic_num == 2:
            ctl_models.append('pcie-switch-upstream-port')
            ctl_models.append('pcie-switch-downstream-port')
            ctl_models.append('pcie-switch-downstream-port')
        for i in range(index):
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = i
            if i == 0:
                controller.model = 'pcie-root'
            else:
                controller.model = 'pcie-root-port'
            vmxml.add_device(controller)
        set_address = False
        for model in ctl_models:
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = index
            controller.model = model
            if set_address or model == "pcie-switch-upstream-port":
                attrs = {
                    'type': 'pci',
                    'domain': '0',
                    'slot': '0',
                    'bus': index - 1,
                    'function': '0'
                }
                controller.address = controller.new_controller_address(
                    **{"attrs": attrs})
                logging.debug(controller)
            if controller.model == "pcie-expander-bus":
                controller.node = "0"
                controller.target = {'busNr': '100'}
                set_address = True
            else:
                set_address = False
            logging.debug(controller)
            vmxml.add_device(controller)
            index += 1
        return index - 1

    def add_numa(vmxml):
        """
        Add numa node in the guest xml

        :param vmxml: The instance of the VMXML class
        """
        vcpu = vmxml.vcpu
        max_mem = vmxml.max_mem
        max_mem_unit = vmxml.max_mem_unit
        numa_dict = {}
        numa_dict_list = []
        # Compute the memory size for each numa node
        if vcpu == 1:
            numa_dict['id'] = '0'
            numa_dict['cpus'] = '0'
            numa_dict['memory'] = str(max_mem)
            numa_dict['unit'] = str(max_mem_unit)
            numa_dict_list.append(numa_dict)
        else:
            for index in range(2):
                numa_dict['id'] = str(index)
                numa_dict['memory'] = str(max_mem // 2)
                numa_dict['unit'] = str(max_mem_unit)
                if vcpu == 2:
                    numa_dict['cpus'] = str(index)
                else:
                    if index == 0:
                        if vcpu == 3:
                            numa_dict['cpus'] = str(index)
                        if vcpu > 3:
                            numa_dict['cpus'] = "%s-%s" % (index,
                                                           vcpu // 2 - 1)
                    else:
                        numa_dict['cpus'] = "%s-%s" % (vcpu // 2,
                                                       str(vcpu - 1))
                numa_dict_list.append(numa_dict)
                numa_dict = {}
        # Add cpu device with numa node setting in domain xml
        vmxml_cpu = vm_xml.VMCPUXML()
        vmxml_cpu.xml = "<cpu mode='host-model'><numa/></cpu>"
        vmxml_cpu.numa_cell = vmxml_cpu.dicts_to_cells(numa_dict_list)
        vmxml.cpu = vmxml_cpu
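
    # Worked example (assumed values): with vcpu=4 and max_mem=2097152 KiB,
    # add_numa() produces two cells: id=0 cpus=0-1 memory=1048576 and
    # id=1 cpus=2-3 memory=1048576, i.e. memory is split evenly and the
    # vCPUs are divided between the two NUMA nodes.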

    def create_iface_list(bus_id, nic_num, vf_list):
        """
            Create a list of hostdev interfaces bound to the NUMA node

            :param bus_id: bus_id in the PCI address, which decides the controller to attach to
            :param nic_num: number of NIC cards bound to the NUMA node
            :param vf_list: SR-IOV VF list
        """
        iface_list = []
        for num in range(nic_num):
            vf_addr = vf_list[num]
            iface = create_hostdev_interface(vf_addr, managed, model)
            bus_id -= num
            attrs = {
                'type': 'pci',
                'domain': '0',
                'slot': '0',
                'bus': bus_id,
                'function': '0'
            }
            iface.address = iface.new_iface_address(**{"attrs": attrs})
            iface_list.append(iface)
        return iface_list

    def check_guestos(iface_list):
        """
            Check whether a VF bound to a NUMA node can get an IP successfully in the guest OS

            :param iface_list: hostdev interface list
        """
        for iface in iface_list:
            mac_addr = iface.mac_address
            get_ip_by_mac(mac_addr, timeout=60)

    def check_numa(vf_driver):
        """
        Check whether the VF is bound to the correct NUMA node in the guest OS

        :param vf_driver: vf driver
        """
        if vm.serial_console:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        vf_pci = "/sys/bus/pci/drivers/%s" % vf_driver
        vf_dir = session.cmd_output("ls -d %s/00*" %
                                    vf_pci).strip().split('\n')
        for vf in vf_dir:
            numa_node = session.cmd_output('cat %s/numa_node' %
                                           vf).strip().split('\n')[-1]
            logging.debug("The vf is attached to numa node %s\n", numa_node)
            if numa_node != "0":
                test.fail("The vf is not attached to numa node 0\n")
        session.close()

    def remove_devices(vmxml, device_type):
        """
        Remove the address of every device that has one, or all USB devices.

        :param vmxml: The VM XML to be modified
        :param device_type: The type of item to remove ('address' or 'usb')
        """
        if device_type not in ['address', 'usb']:
            return
        type_dict = {'address': '/devices/*/address', 'usb': '/devices/*'}
        try:
            for elem in vmxml.xmltreefile.findall(type_dict[device_type]):
                if device_type == 'usb':
                    if elem.get('bus') == 'usb':
                        vmxml.xmltreefile.remove(elem)
                else:
                    vmxml.xmltreefile.remove(elem)
        except (AttributeError, TypeError) as details:
            test.error("Fail to remove '%s': %s" % (device_type, details))
        vmxml.xmltreefile.write()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    machine_type = params.get("machine_type", "pc")
    operation = params.get("operation")
    status_error = params.get("status_error", "no") == "yes"
    model = params.get("model", "")
    managed = params.get("managed", "yes")
    attach = params.get("attach", "")
    option = params.get("option", "")
    vf_type = params.get("vf_type", "")
    dev_type = params.get("dev_type", "")
    info_check = params.get("info_check", "no")
    info_type = params.get("info_type", "")
    vf_pool_source = params.get("vf_pool_source", "vf_list")
    loop_times = int(params.get("loop_times", "1"))
    start_vm = "yes" == params.get("start_vm", "yes")
    including_pf = "yes" == params.get("including_pf", "no")
    max_vfs_attached = "yes" == params.get("max_vfs_attached", "no")
    inactive_pool = "yes" == params.get("inactive_pool", "no")
    duplicate_vf = "yes" == params.get("duplicate_vf", "no")
    expected_error = params.get("error_msg", "")
    nic_num = int(params.get("nic_num", "1"))
    nfv = params.get("nfv", "no") == "yes"
    ctl_models = params.get("ctl_models", "").split(' ')
    controller_index = int(params.get("controller_index", "12"))
    vlan_id = eval(params.get("vlan_id", "None"))
    trunk = params.get("trunk", "no") == "yes"

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()
    if max_vfs_attached:
        controller_devices = vmxml.get_devices("controller")
        pci_bridge_controllers = []
        for device in controller_devices:
            logging.debug(device)
            if device.type == 'pci' and device.model == "pci-bridge":
                pci_bridge_controllers.append(device)
        if not pci_bridge_controllers and machine_type != 'q35':
            pci_bridge_controller = Controller("controller")
            pci_bridge_controller.type = "pci"
            pci_bridge_controller.index = "1"
            pci_bridge_controller.model = "pci-bridge"
            vmxml.add_device(pci_bridge_controller)
            vmxml.sync()

    if start_vm:
        if not vm.is_dead():
            vm.destroy()
        vm.start()
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        session.close()
    else:
        if not vm.is_dead():
            vm.destroy()

    pci_device_dir = "/sys/bus/pci/devices"
    pci_address = ""
    driver = ""
    net_name = "test-net"

    # Prepare interface xml
    try:
        pci_id = utils_sriov.get_pf_pci()
        if not pci_id:
            test.cancel("NO available pf found.")
        pci_info = utils_sriov.get_pf_info_by_pci(pci_id)
        pf_name = pci_info.get('iface')
        driver = pci_info.get('driver')
        pci_address = os.path.join("/sys/bus/pci/drivers", driver, pci_id)
        bus_slot = ':'.join(pci_id.split(':')[1:])
        if not utils_package.package_install('pciutils'):
            test.error('Failed to install "pciutils" which provides '
                       'command "lspci"')
        pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text
        logging.debug("The pci info of the sriov card is:\n %s", pci_info)
        max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1
        if info_check == 'yes' or max_vfs < 32:
            vf_num = max_vfs
            create_vfs(vf_num)
        else:
            vf_num = int(max_vfs // 2 + 1)
            create_vfs(vf_num)

        vf_list = []
        vf_name_list = []
        vf_mac_list = []

        for i in range(vf_num):
            vf = os.readlink("%s/virtfn%s" % (pci_address, str(i)))
            vf = os.path.split(vf)[1]
            vf_list.append(vf)
            vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0]
            with open('%s/%s/net/%s/address' % (pci_device_dir, vf, vf_name),
                      'r') as f:
                vf_mac = f.readline().strip()
            vf_name_list.append(vf_name)
            vf_mac_list.append(vf_mac)

        if attach == "yes" and not nfv:
            vf_addr = vf_list[0]
            if dev_type:
                mac_addr = vf_mac_list[0]
                new_iface = utils_test.libvirt.create_hostdev_xml(
                    vf_addr, managed=managed)
            else:
                new_iface = create_interface()
                mac_addr = new_iface.mac_address
            if inactive_pool or trunk:
                result = virsh.attach_device(vm_name,
                                             file_opt=new_iface.xml,
                                             flagstr=option,
                                             ignore_status=True,
                                             debug=True)
                utils_test.libvirt.check_result(result, expected_error)
            else:
                nodedev_pci_addr = create_nodedev_pci(vf_addr)
                origin_driver = os.readlink(
                    os.path.join(pci_device_dir, vf_addr,
                                 "driver")).split('/')[-1]
                logging.debug(
                    "The driver of vf before attaching to guest is %s\n",
                    origin_driver)
                count = 0
                while count < loop_times:
                    interface = attach_interface()
                    if vf_type in ["vf", "vf_pool"]:
                        vf_addr_attrs = interface.hostdev_address.attrs
                    if vf_type == "hostdev":
                        addr = interface.source.untyped_address
                        vf_addr_attrs = {
                            "domain": addr.domain,
                            "bus": addr.bus,
                            "slot": addr.slot,
                            "function": addr.function
                        }
                    if operation != "":
                        do_operation()
                    detach_interface()
                    count += 1
                if max_vfs_attached:
                    interface_list = []
                    # Get max index of all pcie-root-port
                    pcie_ctls = vmxml.get_controllers('pci', 'pcie-root-port')
                    pcie_indexes = [
                        int(port.get('index')) for port in pcie_ctls
                    ]

                    def _add_pcie_root_port(index):
                        """
                        Add pcie root port with given index
                        :param index: index of port that is going to be added
                        :return:
                        """
                        pcie_root_port = Controller("controller")
                        pcie_root_port.type = "pci"
                        pcie_root_port.index = index
                        pcie_root_port.model = "pcie-root-port"
                        vmxml.add_device(pcie_root_port)
                        vmxml.sync()

                    for i in range(len(vf_list)):
                        _add_pcie_root_port(max(pcie_indexes) + 1 + i)
                    vm.start()
                    for vf_addr in vf_list:
                        new_iface = create_interface()
                        mac_addr = new_iface.mac_address
                        nodedev_pci_addr = create_nodedev_pci(vf_addr)
                        attach_interface()
                        interface_list.append(new_iface)
                    count = 0
                    for new_iface in interface_list:
                        vf_addr = vf_list[count]
                        vf_addr_attrs = new_iface.hostdev_address.attrs
                        detach_interface()
                        count += 1
        if info_check == "yes":
            check_info()
        if including_pf:
            vf_list = []
            pf_addr = pci_id
            vf_list.append(pf_addr)
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_result(result, expected_error)
        if duplicate_vf:
            vf_list.append(vf_list[0])
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_result(result, expected_error)
            result = virsh.net_create(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_result(result, expected_error)
        if nfv:
            vf_driver = os.readlink(
                os.path.join(pci_device_dir, vf_list[0],
                             "driver")).split('/')[-1]
            vmxml.remove_all_device_by_type('controller')
            remove_devices(vmxml, 'address')
            remove_devices(vmxml, 'usb')
            osxml = vmxml.os
            if "i440fx" in vmxml.os.machine:
                osxml.machine = "q35"
                vmxml.os = osxml
            add_numa(vmxml)
            bus_id = setup_controller(nic_num, controller_index, ctl_models)
            vmxml.sync()
            logging.debug(vmxml)
            iface_list = create_iface_list(bus_id, nic_num, vf_list)
            for iface in iface_list:
                txt = process.run("cat %s" % iface.xml, shell=True).stdout_text
                logging.debug("iface_xml for attach device is %s" % txt)
                result = virsh.attach_device(vm_name,
                                             file_opt=iface.xml,
                                             flagstr=option,
                                             ignore_status=True,
                                             debug=True)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(live_xml)
            check_guestos(iface_list)
            check_numa(vf_driver)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if driver == "mlx4_core":
            # Reload mlx4 driver to default setting
            process.run(
                "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core",
                shell=True)
            process.run(
                "modprobe mlx4_core; modprobe mlx4_ib;  modprobe mlx4_en",
                shell=True)
        else:
            process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        if vf_type == "vf_pool" or vf_type == "macvtap_network":
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name, ignore_status=True)
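
Most of the VF lifecycle in this example reduces to writing the PF's sriov_numvfs attribute under sysfs; a minimal standalone sketch (must run as root; the PF address is an assumption):

import os

PF_SYSFS = "/sys/bus/pci/devices/0000:3b:00.0"  # assumed PF address

def set_numvfs(count):
    # The kernel requires sriov_numvfs to be 0 before a new non-zero
    # value can be written, so reset it first.
    for value in (0, count):
        with open(os.path.join(PF_SYSFS, "sriov_numvfs"), "w") as f:
            f.write(str(value))

set_numvfs(4)
# Each created VF appears as a virtfnN symlink to its own PCI device.
print([os.path.basename(os.readlink(os.path.join(PF_SYSFS, "virtfn%d" % i)))
       for i in range(4)])  # e.g. ['0000:3b:02.0', '0000:3b:02.1', ...]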
Example #5
def run(test, params, env):
    """
    Sriov net failover related test.
    """
    def setup_hotplug_hostdev_iface_with_teaming():
        logging.info("Create hostdev network.")
        net_hostdev_fwd = params.get("net_hostdev_fwd",
                                     '{"mode": "hostdev", "managed": "yes"}')
        net_hostdev_dict = {
            "net_name": net_hostdev_name,
            "net_forward": net_hostdev_fwd,
            "net_forward_pf": '{"dev": "%s"}' % pf_name
        }
        libvirt_network.create_or_del_network(net_hostdev_dict)

        logging.info("Clear up VM interface.")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        iface = interface.Interface("network")
        iface.xml = create_bridge_iface_xml(vm, mac_addr, params)
        virsh.attach_device(vm_name,
                            iface.xml,
                            flagstr='--persistent',
                            debug=True,
                            ignore_status=False)
        vm.start()
        vm.wait_for_serial_login(timeout=180).close()

    def teardown_hotplug_hostdev_iface_with_teaming():
        logging.info("Delete hostdev network.")
        net_hostdev_dict = {"net_name": net_hostdev_name}
        libvirt_network.create_or_del_network(net_hostdev_dict, is_del=True)

    def test_hotplug_hostdev_iface_with_teaming():
        logging.info("Attach a hostdev interface.")
        hostdev_iface_xml = create_hostdev_iface_xml(vm, mac_addr, params)
        virsh.attach_device(vm_name,
                            hostdev_iface_xml,
                            debug=True,
                            ignore_status=False)
        check_ifaces(vm_name, expected_ifaces={"bridge", "hostdev"})

        vm_session = vm.wait_for_serial_login(timeout=240)
        ping_ip = get_ping_dest(vm_session, mac_addr)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)

        logging.info("Detach the hostdev interface.")
        hostdev_iface = interface.Interface("network")
        for ifc in vm_xml.VMXML.new_from_dumpxml(
                vm_name).devices.by_device_tag("interface"):
            if ifc.type_name == "hostdev":
                ifc.del_address()
                hostdev_iface = ifc
        virsh.detach_device(vm_name,
                            hostdev_iface.xml,
                            wait_remove_event=True,
                            debug=True,
                            ignore_status=False)
        check_ifaces(vm_name, expected_ifaces={"hostdev"}, status_error=True)

        check_vm_network_accessed(vm_session,
                                  2,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=False)

        libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)
        logging.info("Re-attach the hostdev interface.")
        virsh.attach_device(vm_name,
                            hostdev_iface.xml,
                            debug=True,
                            ignore_status=False)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)

    def setup_hotplug_hostdev_device_with_teaming():
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vm.start()
        vm.wait_for_serial_login(timeout=240).close()

    def test_hotplug_hostdev_device_with_teaming():
        default_vf_mac = utils_sriov.get_vf_mac(pf_name)
        utils_sriov.set_vf_mac(pf_name, mac_addr)
        logging.info("Attach the bridge interface.")
        brg_iface_xml = create_bridge_iface_xml(vm, mac_addr, params)
        virsh.attach_device(vm_name,
                            brg_iface_xml,
                            debug=True,
                            ignore_status=False)
        # Wait for 10s before attaching the hostdev device
        time.sleep(10)
        logging.info("Attach the hostdev device.")
        hostdev_dev = libvirt.create_hostdev_xml(vf_pci,
                                                 teaming=hostdev_teaming_dict)
        virsh.attach_device(vm_name,
                            hostdev_dev.xml,
                            debug=True,
                            ignore_status=False)
        vm_session = vm.wait_for_serial_login(timeout=240)
        ping_ip = get_ping_dest(vm_session, mac_addr)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)
        logging.info("Detach the hostdev device.")
        virsh.detach_device(vm_name,
                            hostdev_dev.xml,
                            wait_remove_event=True,
                            debug=True,
                            ignore_status=False)
        logging.debug("Recover vf's mac to %s.", default_vf_mac)
        utils_sriov.set_vf_mac(pf_name, default_vf_mac)

        check_hostdev = vm_xml.VMXML.new_from_dumpxml(vm_name)\
            .devices.by_device_tag('hostdev')
        if check_hostdev:
            test.fail("The hostdev device exists after detaching %s." %
                      check_hostdev)
        libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)
        check_vm_network_accessed(vm_session,
                                  2,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=False)

    def setup_save_restore_hostdev_device_with_teaming():
        logging.info("Start a VM with bridge iface and hostdev device.")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface = interface.Interface("network")
        iface.xml = create_bridge_iface_xml(vm, mac_addr, params)
        vmxml.add_device(iface)

        hostdev_dev = libvirt.create_hostdev_xml(vf_pci,
                                                 teaming=hostdev_teaming_dict)
        vmxml.add_device(hostdev_dev)
        vmxml.sync()
        vm.start()
        utils_sriov.set_vf_mac(pf_name, mac_addr)
        vm.wait_for_serial_login(timeout=240).close()

    def test_save_restore_hostdev_device_with_teaming():
        logging.info("Save/restore VM.")
        save_file = os.path.join(data_dir.get_tmp_dir(), "save_file")
        virsh.save(vm_name,
                   save_file,
                   debug=True,
                   ignore_status=False,
                   timeout=10)
        if not libvirt.check_vm_state(vm_name, "shut off"):
            test.fail("The guest should be down after executing 'virsh save'.")
        virsh.restore(save_file, debug=True, ignore_status=False)
        if not libvirt.check_vm_state(vm_name, "running"):
            test.fail(
                "The guest should be running after executing 'virsh restore'.")
        vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login()
        ping_ip = get_ping_dest(vm_session, mac_addr)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)
        logging.info("Detach the hostdev device.")
        hostdev_dev = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name).devices.\
            by_device_tag("hostdev")
        virsh.detach_device(vm_name,
                            hostdev_dev.xml,
                            wait_remove_event=True,
                            debug=True,
                            ignore_status=False)
        check_hostdev = vm_xml.VMXML.new_from_dumpxml(vm_name)\
            .devices.by_device_tag('hostdev')
        if check_hostdev:
            test.fail("The hostdev device exists after detaching %s." %
                      check_hostdev)

        check_vm_network_accessed(vm_session,
                                  2,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=False)
        logging.info("Attach the hostdev device.")
        virsh.attach_device(vm_name,
                            hostdev_dev.xml,
                            debug=True,
                            ignore_status=False)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)

    def setup_save_restore_hostdev_iface_with_teaming():
        logging.info("Create hostdev network.")
        net_hostdev_fwd = params.get("net_hostdev_fwd",
                                     '{"mode": "hostdev", "managed": "yes"}')
        net_hostdev_dict = {
            "net_name": net_hostdev_name,
            "net_forward": net_hostdev_fwd,
            "net_forward_pf": '{"dev": "%s"}' % pf_name
        }
        libvirt_network.create_or_del_network(net_hostdev_dict)
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        iface = interface.Interface("network")
        iface.xml = create_bridge_iface_xml(vm, mac_addr, params)
        vmxml.add_device(iface)
        iface.xml = create_hostdev_iface_xml(vm, mac_addr, params)
        vmxml.add_device(iface)
        vmxml.sync()
        logging.debug("VMXML after updating ifaces: %s.",
                      vm_xml.VMXML.new_from_dumpxml(vm_name))
        vm.start()
        vm_session = vm.wait_for_serial_login(timeout=240)
        ping_ip = get_ping_dest(vm_session, mac_addr)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)

    def teardown_save_restore_hostdev_iface_with_teaming():
        teardown_hotplug_hostdev_iface_with_teaming()

    def test_save_restore_hostdev_iface_with_teaming():
        logging.info("Save/restore VM.")
        save_file = os.path.join(data_dir.get_tmp_dir(), "save_file")
        virsh.save(vm_name,
                   save_file,
                   debug=True,
                   ignore_status=False,
                   timeout=10)
        if not libvirt.check_vm_state(vm_name, "shut off"):
            test.fail("The guest should be down after executing 'virsh save'.")
        virsh.restore(save_file, debug=True, ignore_status=False)
        if not libvirt.check_vm_state(vm_name, "running"):
            test.fail(
                "The guest should be running after executing 'virsh restore'.")

        vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login(timeout=240)
        ping_ip = get_ping_dest(vm_session, mac_addr, False)
        logging.debug(ping_ip)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)

    def check_vm_iface_num(session, exp_num=3):
        """
        Check the number of interfaces

        :param session: The session to the guest
        :param exp_num: The expected number
        :return: True when interfaces' number is equal to exp_num
        """
        p_iface = utils_net.get_remote_host_net_ifs(session)[0]
        logging.debug("Ifaces in VM: %s", p_iface)

        return len(p_iface) == exp_num

    def check_vm_network_accessed(vm_session,
                                  expected_iface_no=3,
                                  ping_dest="8.8.8.8",
                                  timeout=30,
                                  tcpdump_iface=None,
                                  tcpdump_status_error=False):
        """
        Test VM's network by checking ifaces' number and the accessibility

        :param vm_session: The session object to the guest
        :param expected_iface_no: The expected number of ifaces
        :param ping_dest: The destination to ping
        :param timeout: The timeout of the checking
        :param tcpdump_iface: The interface to check
        :param tcpdump_status_error: When True, the tcpdump output should NOT
            include the string "ICMP echo request"; when False, it should
        :raise: test.fail when ifaces' number is incorrect or ping fails.
        """
        if not utils_misc.wait_for(
                lambda: check_vm_iface_num(vm_session, expected_iface_no),
                first=3,
                timeout=timeout):
            test.fail("%d interfaces should be found on the vm." %
                      expected_iface_no)
        if tcpdump_iface:
            cmd = "tcpdump  -i %s icmp" % tcpdump_iface
            tcpdump_session = aexpect.ShellSession('bash')
            tcpdump_session.sendline(cmd)

        if not utils_misc.wait_for(
                lambda: not utils_test.ping(ping_dest,
                                            count=3,
                                            timeout=5,
                                            output_func=logging.debug,
                                            session=vm_session)[0],
                first=5,
                timeout=timeout):
            test.fail("Failed to ping %s." % ping_dest)
        if tcpdump_iface:
            output = tcpdump_session.get_stripped_output()
            logging.debug("tcpdump's output: %s.", output)
            pat_str = "ICMP echo request"
            if re.search(pat_str, output):
                if tcpdump_status_error:
                    test.fail(
                        "Get incorrect tcpdump output: {}, it should not "
                        "include '{}'.".format(output, pat_str))
            else:
                if not tcpdump_status_error:
                    test.fail("Get incorrect tcpdump output: {}, it should "
                              "include '{}'.".format(output, pat_str))

    def get_ping_dest(vm_session, mac_addr, restart_network=True):
        """
        Get an ip address to ping

        :param vm_session: The session object to the guest
        :param mac_addr: mac address of given interface
        :param restart_network:  Whether to restart guest's network
        :return: ip address
        """
        if restart_network:
            utils_misc.cmd_status_output("dhclient -r; sleep 5; dhclient",
                                         shell=True,
                                         session=vm_session)
        vm_iface_info = utils_net.get_linux_iface_info(
            mac_addr, vm_session)['addr_info'][0]['local']
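        # Gateway derivation sketch (hypothetical address):
        #   re.sub(r'\d+$', '1', '192.168.122.15') -> '192.168.122.1'
        # i.e. the VM's own IPv4 address with the last octet replaced by 1,
        # assumed to be the subnet gateway.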
        return re.sub(r'\d+$', '1', vm_iface_info)

    def create_bridge_iface_xml(vm, mac_addr, params):
        """
        Create xml of bridge interface

        :param vm: The vm object
        :param mac_addr: The mac address
        :param params: Dictionary with the test parameters
        :return: The interface xml
        """
        net_bridge_name = params.get("net_bridge_name", "host-bridge")
        iface_bridge_dict = {
            "type": "network",
            "source": "{'network': '%s'}" % net_bridge_name,
            "mac": mac_addr,
            "model": "virtio",
            "teaming": '{"type":"persistent"}',
            "alias": '{"name": "ua-backup0"}'
        }
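        # The dict above is expected to render the persistent half of a
        # virtio/hostdev teaming pair (libvirt >= 6.1.0), roughly:
        #   <teaming type='persistent'/> with <alias name='ua-backup0'/>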
        return libvirt.modify_vm_iface(vm.name, "get_xml", iface_bridge_dict)

    def create_hostdev_iface_xml(vm, mac_addr, params):
        """
        Create xml of hostdev interface

        :param vm: The vm object
        :param mac_addr: The mac address
        :param params: Dictionary with the test parameters
        :return: The interface xml
        """
        net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
        hostdev_iface_dict = {
            "type": "network",
            "source": "{'network': '%s'}" % net_hostdev_name,
            "mac": mac_addr,
            "teaming": '{"type":"transient", "persistent": "ua-backup0"}'
        }
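        # The transient (hostdev) half references the persistent iface by its
        # alias so the guest can fail traffic over between the two, roughly:
        #   <teaming type='transient' persistent='ua-backup0'/>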
        return libvirt.modify_vm_iface(vm.name, "get_xml", hostdev_iface_dict,
                                       4)

    def check_ifaces(vm_name,
                     expected_ifaces={"bridge", "hostdev"},
                     status_error=False):
        """
        Check VM's interfaces

        :param vm_name: The name of VM
        :param expected_ifaces: The expected interfaces
        :param status_error: When True, the expected_ifaces should NOT be present
        :raise: test.fail if the interfaces found do not match the expectation
        """
        if not expected_ifaces:
            return
        else:
            expected_ifaces = set(expected_ifaces)
        vm_ifaces = [
            iface for iface in vm_xml.VMXML.new_from_dumpxml(
                vm_name).devices.by_device_tag("interface")
        ]
        ifaces_net = {iface.get_type_name() for iface in vm_ifaces}
        if expected_ifaces.issubset(ifaces_net) == status_error:
            test.fail(
                "Unable to get expected interface. The interface %s "
                "should%s be %s." %
                (ifaces_net, ' not' if status_error else '', expected_ifaces))
        else:
            logging.debug("{}Found iface(s) as expected: {}.".format(
                'Not ' if status_error else '', expected_ifaces))
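        # Usage sketch (hypothetical): after detaching the transient hostdev
        # half of the team, only the bridge iface should remain, e.g.:
        #   check_ifaces(vm_name, expected_ifaces={"hostdev"},
        #                status_error=True)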

    test_case = params.get("test_case", "")
    run_test = eval("test_%s" % test_case)
    setup_test = eval("setup_%s" % test_case) if "setup_%s" % test_case in \
        locals() else "setup_%s" % test_case
    teardown_test = eval("teardown_%s" % test_case) if "teardown_%s" % \
        test_case in locals() else "teardown_%s" % test_case
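    # Dispatch sketch: "test_<case>"/"setup_<case>" helpers are resolved by
    # name from locals(); when a case-specific setup/teardown does not exist,
    # the bare string is kept and exec_function() (defined elsewhere in this
    # module) is assumed to skip non-callables.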
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])

    driver = params.get("driver", "ixgbe")
    bridge_name = params.get("bridge_name", "br0")
    net_bridge_name = params.get("net_bridge_name", "host-bridge")
    net_bridge_fwd = params.get("net_bridge_fwd", '{"mode": "bridge"}')
    net_hostdev_name = params.get("net_hostdev_name", "hostdev-net")
    hostdev_teaming_dict = params.get("hostdev_device_teaming_dict", '{}')

    default_vf = 0
    try:
        vf_no = int(params.get("vf_no", "4"))
    except ValueError as e:
        test.error(e)

    libvirt_version.is_libvirt_feature_supported(params)

    mac_addr = utils_net.generate_mac_address_simple()
    pf_pci = utils_sriov.get_pf_pci()
    if not pf_pci:
        test.cancel("NO available pf found.")
    pf_name = utils_sriov.get_pf_info_by_pci(pf_pci).get('iface')
    brg_dict = {'pf_name': pf_name, 'bridge_name': bridge_name}
    bridge_dict = {
        "net_name": net_bridge_name,
        "net_forward": net_bridge_fwd,
        "net_bridge": '{"name": "%s"}' % bridge_name
    }
    pf_pci_path = utils_misc.get_pci_path(pf_pci)
    cmd = "cat %s/sriov_numvfs" % (pf_pci_path)
    default_vf = int(
        process.run(cmd, shell=True, verbose=True).stdout_text.strip())
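    # sriov_numvfs under the PF's sysfs path holds the currently enabled VF
    # count; it is recorded so the original value can be restored in the
    # finally block below.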

    new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = new_xml.copy()

    try:
        if not utils_sriov.set_vf(pf_pci_path, vf_no):
            test.error("Failed to set vf.")
        utils_sriov.add_or_del_connection(brg_dict, is_del=False)
        libvirt_network.create_or_del_network(bridge_dict)

        vf_pci = utils_sriov.get_vf_pci_id(pf_pci)
        exec_function(setup_test)
        run_test()

    finally:
        logging.info("Recover test enviroment.")
        utils_sriov.add_or_del_connection(brg_dict, is_del=True)
        libvirt_network.create_or_del_network(bridge_dict, is_del=True)
        if 'pf_pci_path' in locals() and default_vf != vf_no:
            utils_sriov.set_vf(pf_pci_path, default_vf)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        try:
            orig_config_xml.sync()
        except Exception:
            # FIXME: Workaround for 'save'/'managedsave' hanging issue
            utils_libvirtd.Libvirtd().restart()
            orig_config_xml.sync()

        exec_function(teardown_test)
Example #6
def run(test, params, env):
    """
    Test VFs when PF is down
    """
    def setup_default():
        """
        Default setup
        """
        test.log.info("Set pf state to down.")
        pf_iface_obj = utils_net.Interface(pf_name)
        pf_iface_obj.down()

    def teardown_default():
        """
        Default cleanup
        """
        pf_iface_obj = utils_net.Interface(pf_name)
        pf_iface_obj.up()

    def test_at_dt():
        """
        Test attach-detach interfaces
        """
        options = '' if vm.is_alive() else '--config'
        iface_dict = eval(params.get('iface_dict')
                          % utils_sriov.pci_to_addr(vf_pci))
        iface = interface_base.create_iface('hostdev', iface_dict)
        result = virsh.attach_device(vm_name, iface.xml,
                                     flagstr=options, debug=True)
        if not start_vm:
            result = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(result, status_error)
        if error_msg:
            libvirt.check_result(result, error_msg)
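        # '--config' turns the attach into a cold plug when the VM is off, so
        # the device only takes effect on the virsh.start() call above.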

    test_case = params.get("test_case", "")
    run_test = eval("test_%s" % test_case)
    start_vm = "yes" == params.get("start_vm")
    status_error = "yes" == params.get("status_error", "no")
    error_msg = params.get("error_msg")

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    pf_pci = utils_sriov.get_pf_pci()
    if not pf_pci:
        test.cancel("NO available pf found.")
    sriov_base.setup_vf(pf_pci, params)

    vf_pci = utils_sriov.get_vf_pci_id(pf_pci)
    pf_name = utils_sriov.get_pf_info_by_pci(pf_pci).get('iface')

    setup_test = eval("setup_%s" % test_case) if "setup_%s" % test_case in \
        locals() else setup_default
    teardown_test = eval("teardown_%s" % test_case) if "teardown_%s" % \
        test_case in locals() else teardown_default
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        setup_test()
        run_test()

    finally:
        test.log.info("Recover test enviroment.")
        orig_config_xml.sync()
        teardown_test()
Example #7
def run(test, params, env):
    """
    Test interfaces attached from network
    """
    def setup_default():
        """
        Default setup
        """
        pass

    def teardown_default():
        """
        Default cleanup
        """
        pass

    def setup_max_vfs():
        """
        Setup for max_vfs case

        1. Check test environment
        2. Enable VFs
        3. Create networks
        """
        if not utils_misc.compare_qemu_version(4, 0, 0, False):
            test.cancel("This test is supported from qemu-kvm 4.0.0.")
        if len(pf_info) < 2:
            test.cancel("This test requires at least 2 PFs.")

        pf_id_list = get_pf_id_list(pf_info, driver)
        for pf_pci in pf_id_list:
            sriov_base.recover_vf(pf_pci, params)
            sriov_base.setup_vf(pf_pci, params)

        net_info = get_net_dict(pf_info)
        for pf_dev, net_name in net_info.items():
            create_network(net_name, pf_dev, params)

    def teardown_max_vfs():
        """
        Teardown for max_vfs case

        1. Disable VFs
        2. Clean up networks
        """
        pf_id_list = get_pf_id_list(pf_info, driver)
        for pf_pci in pf_id_list:
            sriov_base.recover_vf(pf_pci, params, timeout=240)
        net_info = get_net_dict(pf_info)
        for pf_dev in net_info:
            libvirt_network.create_or_del_network(
                {"net_name": net_info[pf_dev]}, True)

    def test_max_vfs():
        """
        Hotplug MAX VFs to guest

        1. Start vm with 64 vfio interfaces
        2. Check networks
        3. Try to hot plug the 65th hostdev interface
        4. Destroy vm and cold plug 1 hostdev interface
        """
        net_info = get_net_dict(pf_info)
        vf_no = int(params.get("vf_no", "63"))

        logging.debug("Remove VM's interface devices.")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        compare_vm_iface(test, get_vm_iface_num(vm_name), 0)

        logging.info("Cold plug 64 interfaces to VM.")
        opts = "network %s --config" % list(net_info.values())[0]
        for i in range(vf_no):
            virsh.attach_interface(vm_name,
                                   opts,
                                   debug=True,
                                   ignore_status=False)
        net_name_2 = list(net_info.values())[1]
        opts = "network %s --config" % net_name_2
        virsh.attach_interface(vm_name, opts, debug=True, ignore_status=False)
        compare_vm_iface(test, get_vm_iface_num(vm_name), vf_no + 1)

        logging.info("Start VM and check networks.")
        vm.start()
        vm_session = vm.wait_for_serial_login(timeout=240)
        res = vm_session.cmd_status_output(
            'lspci |grep Ether')[1].strip().splitlines()
        compare_vm_iface(test, len(res), vf_no + 1)

        logging.info("Hot Plug the 65th iface.")
        opts_hotplug = "network %s" % net_name_2
        res = virsh.attach_interface(vm_name, opts_hotplug)
        libvirt.check_exit_status(res, True)

        logging.info("Destroy vm and cold plug the 65th hostdev interface.")
        vm.destroy()
        virsh.attach_interface(vm_name, opts, ignore_status=False)

        compare_vm_iface(test, get_vm_iface_num(vm_name), vf_no + 2)
        res = virsh.start(vm_name, debug=True)
        libvirt.check_exit_status(res, True)
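        # Note: cold plugging the 65th hostdev iface succeeds at define time;
        # the device limit is only enforced when the domain starts, hence the
        # expected failure of virsh.start above.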

    def get_net_dict(pf_info):
        """
        Get network dict from pfs info

        :param pf_info: PFs info
        :return: Network parameters
        """
        pf_id_list = get_pf_id_list(pf_info, driver)
        return dict(
            zip([utils_sriov.get_iface_name(pf_pci) for pf_pci in pf_id_list],
                ['hostdevnet' + str(x) for x in range(len(pf_id_list))]))
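        # Sketch of the mapping produced for two PFs (hypothetical iface
        # names): {'ens1f0': 'hostdevnet0', 'ens1f1': 'hostdevnet1'}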

    def compare_vm_iface(test, vm_iface_num, expr_no):
        """
        Compare the number of VM interfaces with the expected number

        :param test: test object
        :param vm_iface_num: The number of vm interface
        :param expr_no: Expected number of ifaces
        """
        if expr_no != vm_iface_num:
            test.fail("The number of vm ifaces is incorrect! Expected: %d, "
                      "Actual: %d." % (expr_no, vm_iface_num))
        else:
            logging.debug("The number of VM ifaces is %d.", vm_iface_num)

    test_case = params.get("test_case", "")
    run_test = eval("test_%s" % test_case)
    setup_test = eval("setup_%s" % test_case) if "setup_%s" % test_case in \
        locals() else setup_default
    teardown_test = eval("teardown_%s" % test_case) if "teardown_%s" % \
        test_case in locals() else teardown_default

    pf_info = utils_sriov.get_pf_info()
    pf_pci = utils_sriov.get_pf_pci()
    driver = utils_sriov.get_pf_info_by_pci(pf_pci).get('driver')

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    orig_config_xml = vmxml.copy()

    try:
        setup_test()
        run_test()

    finally:
        logging.info("Recover test enviroment.")
        if vm.is_alive():
            vm.destroy(gracefully=False)
        orig_config_xml.sync()
        teardown_test()