Example #1
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """
        # Get disk path of scsi_disk
        path_cmd = "udevadm info --name %s | grep /dev/disk/by-path/ | " \
                   "cut -d' ' -f4" % scsi_disk
        disk_path = utils.run(path_cmd).stdout.strip()

        # Add qemu guest agent in guest xml
        vm_xml.VMXML.set_agent_channel(vm_name)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {
                'dev': disk_path
            }})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()
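
A minimal usage sketch for the helper above (not part of the original snippet; the domain and disk names are placeholders):

    # Hypothetical call: wire a scsi_debug-backed disk "sdc" into the guest
    recompose_xml("avocado-vt-vm1", "sdc")
    # The redefined guest XML should now carry an extra
    # <disk type='block' device='lun'> entry plus a virtio-scsi controller.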
Example #2
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk and scsi controller for guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {
                'dev': disk_path
            }})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()
Example #3
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk and scsi controller for guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_path = scsi_disk
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
            **{'attrs': {'dev': disk_path}})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        find_scsi = "no"
        controllers = vmxml.xmltreefile.findall("devices/controller")
        for controller in controllers:
            if controller.get("type") == "scsi":
                find_scsi = "yes"
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        if find_scsi == "no":
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()
Example #4
    def recompose_xml(vm_name, scsi_disk):
        """
        Add scsi disk, guest agent and scsi controller for guest
        :param vm_name: Name of domain
        :param scsi_disk: scsi_debug disk name
        """
        # Get disk path of scsi_disk
        path_cmd = "udevadm info --name %s | grep /dev/disk/by-path/ | " \
                   "cut -d' ' -f4" % scsi_disk
        disk_path = utils.run(path_cmd).stdout.strip()

        # Add qemu guest agent in guest xml
        vm_xml.VMXML.set_agent_channel(vm_name)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Add scsi disk xml
        scsi_disk = Disk(type_name="block")
        scsi_disk.device = "lun"
        scsi_disk.source = scsi_disk.new_disk_source(
                                            **{'attrs': {'dev': disk_path}})
        scsi_disk.target = {'dev': "sdb", 'bus': "scsi"}
        vmxml.add_device(scsi_disk)

        # Add scsi disk controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine guest
        vmxml.sync()
 def add_device(type="usb", index="0", model="qemu-xhci"):
     """
     Add new device.
     """
     newcontroller = Controller("controller")
     newcontroller.type = type
     newcontroller.index = index
     newcontroller.model = model
     logging.debug("New controller is:%s", newcontroller)
     return newcontroller
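
The helper above only builds the Controller object; the caller still has to attach it, roughly like this (a sketch, assuming vmxml is an existing vm_xml.VMXML instance):

    # Hypothetical usage: build a second xHCI controller and redefine the guest
    controller = add_device(type="usb", index="1", model="qemu-xhci")
    vmxml.add_device(controller)
    vmxml.sync()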
def run(test, params, env):
    """
    Test for adding controller for usb.
    """
    # get the params from params
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    index = params.get("index", "1")
    index_conflict = "yes" == params.get("index_conflict", "no")
    model = params.get("model", "nec-xhci")

    status_error = "yes" == params.get("status_error", "no")

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    controllers = vm_xml.get_devices(device_type="controller")
    devices = vm_xml.get_devices()
    for dev in controllers:
        if dev.type == "usb":
            devices.remove(dev)
    controller = Controller("controller")
    controller.type = "usb"
    controller.index = index
    controller.model = model
    devices.append(controller)
    if index_conflict:
        controller_1 = Controller("controller")
        controller_1.type = "usb"
        controller_1.index = index
        devices.append(controller_1)

    vm_xml.set_devices(devices)

    try:
        try:
            vm_xml.sync()
            vm.start()

            if status_error:
                raise error.TestFail(
                    "Add controller successfully in negative case.")
            else:
                try:
                    session = vm.wait_for_login()
                except (LoginError, ShellError) as e:
                    error_msg = "Test failed in positive case.\n error: %s\n" % e
                    raise error.TestFail(error_msg)
                cmd = "dmesg -c | grep %s" % model.split('-')[-1]
                stat_dmesg = session.cmd_status(cmd)
                if stat_dmesg != 0:
                    raise error.TestNAError("Fail to run dmesg in guest")
                session.close()
        except (LibvirtXMLError, VMStartError) as e:
            if not status_error:
                raise error.TestFail("Add controller failed. Detail: %s" % e)
    finally:
        vm_xml_backup.sync()
Example #7
 def _add_pcie_root_port(index):
     """
     Add pcie root port with given index
     :param index: index of port that is going to be added
     :return:
     """
     pcie_root_port = Controller("controller")
     pcie_root_port.type = "pci"
     pcie_root_port.index = index
     pcie_root_port.model = "pcie-root-port"
     vmxml.add_device(pcie_root_port)
     vmxml.sync()
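
Since each pcie-root-port supplies a single hot-pluggable slot, callers typically add several in a row, e.g. (a sketch; the index range is arbitrary):

    # Hypothetical: add three hot-pluggable PCIe slots on a Q35 guest
    for port_index in range(1, 4):
        _add_pcie_root_port(port_index)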
    def set_vm_controller_xml(vmxml):
        """
        Set VM scsi controller xml.

        :param vmxml: Domain xml object.
        """
        # Add disk scsi controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine domain
        vmxml.sync()
    def ppc_controller_update():
        """
        Update controller of ppc vm to 'virtio-scsi' to support 'scsi' type

        :return:
        """
        if params.get('machine_type') == 'pseries' and device_bus == 'scsi':
            if not vmxml.get_controllers(device_bus, 'virtio-scsi'):
                vmxml.del_controller(device_bus)
                ppc_controller = Controller('controller')
                ppc_controller.type = device_bus
                ppc_controller.index = '0'
                ppc_controller.model = 'virtio-scsi'
                vmxml.add_device(ppc_controller)
                vmxml.sync()
    def set_vm_controller_xml(vmxml):
        """
        Set VM scsi controller xml.

        :param vmxml: Domain xml object.
        """
        # Add disk scsi controller
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

        # Redefine domain
        vmxml.sync()
    def add_ctrls(vm_xml, dev_type="pci", dev_index="0", dev_model="pci-root"):
        """
        Add multiple devices

        :param dev_type: the type of the device to be added
        :param dev_index: the maximum index of the device to be added
        :param dev_model: the model of the device to be added
        """
        for inx in range(0, int(dev_index) + 1):
            newcontroller = Controller("controller")
            newcontroller.type = dev_type
            newcontroller.index = inx
            newcontroller.model = dev_model
            logging.debug("New device is added:\n%s", newcontroller)
            vm_xml.add_device(newcontroller)
        vm_xml.sync()
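
Note that add_ctrls treats dev_index as a maximum and creates one controller per index from 0 upwards, so one call lays out a whole series (a sketch, assuming vm_xml is a VMXML object):

    # Hypothetical: define usb controllers with index 0, 1 and 2
    add_ctrls(vm_xml, dev_type="usb", dev_index="2", dev_model="qemu-xhci")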
    def prepare_usb_controller(vmxml, usb_models):
        """
        Add usb controllers into vm's xml.

        :param vmxml: The vm's xml.
        :param usb_models: The usb models to use for the usb controller(s).
        """
        if not usb_models:
            test.error("No usb model provided.")
        # Add disk usb controller(s)
        for usb_model in usb_models:
            usb_controller = Controller("controller")
            usb_controller.type = "usb"
            usb_controller.index = "0"
            usb_controller.model = usb_model
            vmxml.add_device(usb_controller)
        # Redefine domain
        vmxml.sync()
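
Because every controller here is created with index "0", the model list is meant for companion sets that share one bus, e.g. (a sketch):

    # Hypothetical: an ich9 EHCI controller plus its three UHCI companions,
    # all sharing index 0
    prepare_usb_controller(vmxml, ['ich9-ehci1', 'ich9-uhci1',
                                   'ich9-uhci2', 'ich9-uhci3'])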
def run(test, params, env):
    """
    Test for adding controller for usb.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    index = params.get("index", "1")
    index_conflict = "yes" == params.get("index_conflict", "no")
    model = params.get("model", "nec-xhci")

    status_error = "yes" == params.get("status_error", "no")

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    controllers = vm_xml.get_devices(device_type="controller")
    devices = vm_xml.get_devices()
    for dev in controllers:
        if dev.type == "usb":
            devices.remove(dev)
    controller = Controller("controller")
    controller.type = "usb"
    controller.index = index
    controller.model = model
    devices.append(controller)
    if index_conflict:
        controller_1 = Controller("controller")
        controller_1.type = "usb"
        controller_1.index = index
        devices.append(controller_1)

    vm_xml.set_devices(devices)

    try:
        try:
            vm_xml.sync()
            vm.start()

            if status_error:
                raise error.TestFail("Add controller successfully in negative case.")
            else:
                try:
                    session = vm.wait_for_login()
                except (LoginError, ShellError) as e:
                    error_msg = "Test failed in positive case.\n error: %s\n" % e
                    raise error.TestFail(error_msg)
                cmd = "dmesg -c | grep %s" % model.split('-')[-1]
                stat_dmesg = session.cmd_status(cmd)
                if stat_dmesg != 0:
                    raise error.TestNAError("Fail to run dmesg in guest")
                session.close()
        except (LibvirtXMLError, VMStartError) as e:
            if not status_error:
                raise error.TestFail("Add controller failed. Detail: %s" % e)
    finally:
        vm_xml_backup.sync()
def run(test, params, env):
    """
    Test for hotplug usb device.
    """
    # get the params from params

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)
        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError) as e:
        raise error.TestFail("Test failed: %s" % str(e))
Example #18
    def create_pci_device(pci_model, pci_model_name, **kwargs):
        """
        Create a pci/pcie bridge

        :param pci_model: model of pci controller device
        :param pci_model_name: model name of pci controller device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created device object
        """
        pci_bridge = Controller('pci')
        pci_bridge.model = pci_model
        pci_bridge.model_name = {'name': pci_model_name}
        if 'index' in kwargs:
            pci_bridge.index = kwargs['index']
        if 'address' in kwargs:
            pci_bridge.address = pci_bridge.new_controller_address(
                attrs=eval(kwargs['address']))

        logging.debug('pci_bridge: %s', pci_bridge)
        return pci_bridge
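
A hedged call sketch for create_pci_device; note that the 'address' kwarg must be a string holding a dict literal, since the helper passes it through eval():

    # Hypothetical: a pcie-to-pci-bridge at a fixed slot
    bridge = create_pci_device(
        'pcie-to-pci-bridge', 'pcie-pci-bridge', index='5',
        address="{'bus': '0x02', 'slot': '0x00', 'function': '0x0'}")
    vmxml.add_device(bridge)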
Example #19
    def prepare_usb_controller(vmxml, index, addr):
        """
        Add usb controllers into vm's xml.

        :param vmxml: The vm's xml.
        :param index: The index of the usb controller to add.
        :param addr: Dict with the 'bus' and 'slot' of the controller address.
        """
        # Add disk usb controller(s)
        usb_controller = Controller("controller")
        usb_controller.type = "usb"
        usb_controller.index = str(index)
        usb_controller.model = 'qemu-xhci'
        addr_dict = {
            "domain": '0x0000',
            'function': '0x0',
            'bus': addr['bus'],
            'slot': addr['slot']
        }
        usb_controller.address = usb_controller.new_controller_address(
            **{"attrs": addr_dict})
        vmxml.add_device(usb_controller)
        # Redefine domain
        vmxml.sync()
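
A usage sketch for the variant above (the index and bus/slot values are placeholders):

    # Hypothetical: qemu-xhci controller at index 5, PCI bus 0x00, slot 0x0d
    prepare_usb_controller(vmxml, 5, {'bus': '0x00', 'slot': '0x0d'})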
Example #20
    def create_pci_device(pci_model, pci_model_name, **kwargs):
        """
        Create a pci/pcie bridge

        :param pci_model: model of pci controller device
        :param pci_model_name: model name of pci controller device
        :param kwargs: other k-w args that needed to create device
        :return: the newly created device object
        """
        pci_bridge = Controller('pci')
        pci_bridge.model = pci_model
        pci_bridge.model_name = {'name': pci_model_name}
        if 'index' in kwargs:
            pci_bridge.index = kwargs['index']
        if 'address' in kwargs:
            pci_bridge.address = pci_bridge.new_controller_address(
                attrs=eval(kwargs['address']))

        logging.debug('pci_bridge: %s', pci_bridge)
        return pci_bridge
Example #21
def run(test, params, env):
    """
    Test for PCI single function device (NIC or Infiniband)
    passthrough to libvirt guest in hotplug mode.

    a). NIC Or Infiniband:
        1. Get params.
        2. Get the pci device function.
        3. Start guest
        4. prepare device xml to be attached
        5. hotplug the device
        6. check device hotplugged or not
        7. Ping to server_ip from guest
        8. test flood ping
        9. test guest life cycle
        10. test virsh dumpxml
        11. hotunplug the device
        12. test stress to verify the new network device.
    """
    # get the params from params
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    device_name = params.get("libvirt_pci_net_dev_name", "ENTER_YOUR.DEV.NAME")
    pci_id = params.get("libvirt_pci_net_dev_label", "ENTER_YOUR.DEV.LABEL")
    net_ip = params.get("libvirt_pci_net_ip", "ENTER_YOUR.IP")
    server_ip = params.get("libvirt_pci_server_ip",
                           "ENTER_YOUR.SERVER.IP")
    netmask = params.get("libvirt_pci_net_mask", "ENTER_YOUR.MASK")
    stress_val = params.get("stress_val", "1")
    stress = params.get("stress", "no")
    timeout = int(params.get("timeout", "ENTER_YOUR.TIMEOUT.VALUE"))
    suspend_operation = params.get("suspend_operation", "no")
    reboot_operation = params.get("reboot_operation", "no")
    virsh_dumpxml = params.get("virsh_dumpxml", "no")
    virsh_dump = params.get("virsh_dump", "no")
    flood_ping = params.get("flood_ping", "no")
    # Check the parameters from configuration file.
    for each_param in params.values():
        if "ENTER_YOUR" in each_param:
            test.cancel("Please enter the configuration details of %s."
                        % each_param)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    devices = vmxml.get_devices()
    pci_devs = []
    dargs = {'debug': True, 'ignore_status': True}
    controller = Controller("controller")
    controller.type = "pci"
    controller.index = params.get("index", "1")
    controller.model = params.get("model", "pci-root")
    devices.append(controller)
    vmxml.set_devices(devices)
    vmxml.sync()
    if not vm.is_alive():
        vm.start()
        session = vm.wait_for_login()
    if not utils_package.package_install(["ppc64-diag",
                                          "librtas", "powerpc-utils"],
                                         session, 360):
        test.cancel('Fail on dependencies installing')
    if virsh_dump == "yes":
        dump_file = os.path.join(data_dir.get_tmp_dir(), "virshdump.xml")
    output = session.cmd_output("ip link")
    logging.debug("checking for output - %s", output)
    nic_list_before = str(output.splitlines())
    logging.debug("nic_list before hotplug %s", nic_list_before)
    obj = PciAssignable()
    # get all functions id's
    pci_ids = obj.get_same_group_devs(pci_id)
    for val in pci_ids:
        temp = val.replace(":", "_")
        pci_devs.extend(["pci_"+temp])
    pci_val = pci_devs[0].replace(".", "_")
    pci_xml = NodedevXML.new_from_dumpxml(pci_val)
    pci_address = pci_xml.cap.get_address_dict()
    dev = VMXML.get_device_class('hostdev')()
    dev.mode = 'subsystem'
    dev.type = 'pci'
    dev.managed = 'no'
    dev.source = dev.new_source(**pci_address)

    def detach_device(pci_devs, pci_ids):
        # detaching the device from host
        for pci_value, pci_node in zip(pci_devs, pci_ids):
            pci_value = pci_value.replace(".", "_")
            cmd = "lspci -ks %s | grep 'Kernel driver in use' |\
                   awk '{print $5}'" % pci_node
            driver_name = process.run(cmd, shell=True).stdout_text.strip()
            if driver_name == "vfio-pci":
                logging.debug("device alreay detached")
            else:
                if virsh.nodedev_detach(pci_value).exit_status:
                    test.error("Hostdev node detach failed")
                driver_name = process.run(cmd, shell=True).stdout_text.strip()
                if driver_name != "vfio-pci":
                    test.error("driver bind failed after detach")

    def reattach_device(pci_devs, pci_ids):
        # reattach the device to host
        for pci_value, pci_node in zip(pci_devs, pci_ids):
            pci_value = pci_value.replace(".", "_")
            cmd = "lspci -ks %s | grep 'Kernel driver in use' |\
                   awk '{print $5}'" % pci_node
            driver_name = process.run(cmd, shell=True).stdout_text.strip()
            if driver_name != "vfio-pci":
                logging.debug("device alreay attached")
            else:
                if virsh.nodedev_reattach(pci_value).exit_status:
                    test.fail("Hostdev node reattach failed")
                driver_name = process.run(cmd, shell=True).stdout_text.strip()
                if driver_name == "vfio-pci":
                    test.error("driver bind failed after reattach")

    def check_attach_pci():
        session = vm.wait_for_login()
        output = session.cmd_output("ip link")
        nic_list_after = str(output.splitlines())
        logging.debug(nic_list_after)
        return nic_list_after != nic_list_before

    def device_hotplug():
        if not libvirt_version.version_compare(3, 10, 0):
            detach_device(pci_devs, pci_ids)
        # attach the device in hotplug mode
        result = virsh.attach_device(vm_name, dev.xml,
                                     flagstr="--live", debug=True)
        if result.exit_status:
            test.error(result.stdout.strip())
        else:
            logging.debug(result.stdout.strip())
        if not utils_misc.wait_for(check_attach_pci, timeout):
            test.fail("timeout value is not sufficient")

    # detach hot plugged device
    def device_hotunplug():
        result = virsh.detach_device(vm_name, dev.xml,
                                     flagstr="--live", debug=True)
        if result.exit_status:
            test.fail(result.stdout.strip())
        else:
            logging.debug(result.stdout.strip())
        # FIXME: after detaching the device from the guest, it needs some time
        # before any other operation can be performed on the device
        time.sleep(timeout)
        if not libvirt_version.version_compare(3, 10, 0):
            pci_devs.sort()
            reattach_device(pci_devs, pci_ids)

    def test_ping():
        try:
            output = session.cmd_output("lspci -nn | grep %s" % device_name)
            nic_id = str(output).split(' ', 1)[0]
            nic_name = str(utils_misc.get_interface_from_pci_id(nic_id,
                                                                session))
            session.cmd("ip addr flush dev %s" % nic_name)
            session.cmd("ip addr add %s/%s dev %s"
                        % (net_ip, netmask, nic_name))
            session.cmd("ip link set %s up" % nic_name)
            s_ping, o_ping = utils_net.ping(dest=server_ip, count=5,
                                            interface=net_ip)
            logging.info(s_ping)
            logging.info(o_ping)
            if s_ping:
                test.fail("Ping test failed")
        except aexpect.ShellError as detail:
            test.error("Succeeded in setting the IP on the guest, but failed "
                       "to bring up the interface.\n"
                       "Detail: %s." % detail)
def run(test, params, env):
    """
    Test for hotplug usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)
        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError) as e:
        test.fail("Test failed: %s" % str(e))

    def is_hotplug_ok():
        try:
            output = session.cmd_output("fdisk -l | grep -c '^Disk /dev/.* 1 M'")
            if int(output.strip()) != attach_count:
                return False
            else:
                return True
        except ShellTimeoutError as detail:
            test.fail("unhotplug failed: %s, " % detail)

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    try:
        result = None
        dev_xml = None
        opt = "--hmp"
        for i in range(attach_count):
            if usb_type == "storage":
                path = os.path.join(tmp_dir, "%s.img" % i)
                libvirt.create_local_disk("file", path, size="1M", disk_format="qcow2")
                os.chmod(path, 0o666)

            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd = "drive_add"
                    attach_cmd += (" 0 id=drive-usb-%s,if=none,file=%s" % (i, path))

                    result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                    if result.exit_status or (result.stdout.strip().find("OK") == -1):
                        raise process.CmdError(result.command, result)

                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += ("id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s" % (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type, usb_type, i)

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                attributes = {'type_name': "usb", 'bus': "1", 'port': "0"}
                if usb_type == "storage":
                    dev_xml = Disk(type_name="file")
                    dev_xml.device = "disk"
                    dev_xml.source = dev_xml.new_disk_source(**{"attrs": {'file': path}})
                    dev_xml.driver = {"name": "qemu", "type": 'qcow2', "cache": "none"}
                    dev_xml.target = {"dev": 'sdb', "bus": "usb"}
                    dev_xml.address = dev_xml.new_disk_address(**{"attrs": attributes})
                else:
                    if usb_type == "mouse":
                        dev_xml = Input("mouse")
                    elif usb_type == "tablet":
                        dev_xml = Input("tablet")
                    else:
                        dev_xml = Input("keyboard")

                    dev_xml.input_bus = "usb"
                    dev_xml.address = dev_xml.new_input_address(**{"attrs": attributes})

                result = virsh.attach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)

        if status_error and usb_type == "storage":
            if utils_misc.wait_for(is_hotplug_ok, timeout=30):
                # Sometimes we meet an error but the ret in $? is 0.
                test.fail("\nAttach device successfully in negative case."
                          "\nExcept it fail when attach count exceed maximum."
                          "\nDetail: %s" % result)

        for i in range(attach_count):
            attach_cmd = "device_del"
            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd += (" drive-usb-%s" % i)
                else:
                    if usb_type == "mouse":
                        attach_cmd += " mouse"
                    elif usb_type == "tablet":
                        attach_cmd += " tablet"
                    else:
                        attach_cmd += " keyboard"

                result = virsh.qemu_monitor_command(vm_name, attach_cmd, options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                result = virsh.detach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
    except process.CmdError as e:
        if not status_error:
            # live attach of device 'input' is not supported
            ret = result.stderr.find("Operation not supported")
            if usb_type != "storage" and ret > -1:
                pass
            else:
                test.fail("failed to attach device.\nDetail: %s." % result)
    finally:
        session.close()
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        utils_selinux.set_status(backup_sestatus)
        vm_xml_backup.sync()
Example #23
    def setup_controller_xml(index, addr_target=None):
        """
        Prepare controller devices of VM XML.

        :param index: The index of controller
        :param addr_target: The controller address

        """
        ctrl = Controller(type_name=cntlr_type)
        if model:
            ctrl.model = model
        if pcihole:
            ctrl.pcihole64 = pcihole
        if vectors:
            ctrl.vectors = vectors
        if index:
            ctrl.index = index
        if chassisNr:
            ctrl.target = {'chassisNr': chassisNr}
        if model_name:
            ctrl.model_name = {'name': model_name}

        if addr_target:
            match = re.match(
                r"(?P<bus>[0-9]*):(?P<slot>[0-9a-f]*).(?P<function>[0-9])",
                addr_target)
            if match:
                addr_dict = match.groupdict()
                addr_dict['bus'] = hex(int(addr_dict['bus'], 16))
                addr_dict['slot'] = hex(int(addr_dict['slot'], 16))
                addr_dict['function'] = hex(int(addr_dict['function'], 16))
                addr_dict['domain'] = '0x0000'
                ctrl.address = ctrl.new_controller_address(attrs=addr_dict)

        logging.debug("Controller XML is:%s", ctrl)
        vm_xml.add_device(ctrl)

        if cmpnn_cntlr_model is not None:
            for num in range(int(cmpnn_cntlr_num)):
                ctrl = Controller(type_name=cntlr_type)
                ctrl.model = cmpnn_cntlr_model + str(num + 1)
                ctrl.index = index
                logging.debug("Controller XML is:%s", ctrl)
                vm_xml.add_device(ctrl)
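
For the address branch above, addr_target is expected in lspci-style bus:slot.function form, e.g. (a sketch; everything outside the snippet is an assumption):

    # Hypothetical: controller at domain 0000, bus 00, slot 09, function 0
    setup_controller_xml(index="1", addr_target="00:09.0")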
Example #24
    def setup_controller(nic_num, controller_index, ctl_models):
        """
        Create controllers bond to numa node in the guest xml

        :param nic_num: number of nic card bond to numa node
        :param controller_index: index num used to create controllers
        :param ctl_models: controller topology for the NUMA bond
        """
        index = controller_index
        if nic_num == 2:
            ctl_models.append('pcie-switch-upstream-port')
            ctl_models.append('pcie-switch-downstream-port')
            ctl_models.append('pcie-switch-downstream-port')
        for i in range(index):
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = i
            if i == 0:
                controller.model = 'pcie-root'
            else:
                controller.model = 'pcie-root-port'
            vmxml.add_device(controller)
        set_address = False
        for model in ctl_models:
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = index
            controller.model = model
            if set_address or model == "pcie-switch-upstream-port":
                attrs = {
                    'type': 'pci',
                    'domain': '0',
                    'slot': '0',
                    'bus': index - 1,
                    'function': '0'
                }
                controller.address = controller.new_controller_address(
                    **{"attrs": attrs})
                logging.debug(controller)
            if controller.model == "pcie-expander-bus":
                controller.node = "0"
                controller.target = {'busNr': '100'}
                set_address = True
            else:
                set_address = False
            logging.debug(controller)
            vmxml.add_device(controller)
            index += 1
        return index - 1
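
A hedged call sketch: with two NICs the helper appends the switch-port models itself and returns the last index it consumed:

    # Hypothetical: bind two NICs behind an expander bus, starting at index 6
    last_index = setup_controller(2, 6, ['pcie-expander-bus'])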
Example #25
def run(test, params, env):
    # Get variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    img_type = ('yes' == params.get("libvirt_scsi_img_type", "no"))
    cdrom_type = ('yes' == params.get("libvirt_scsi_cdrom_type", "no"))
    partition_type = ('yes' == params.get("libvirt_scsi_partition_type", "no"))
    partition = params.get("libvirt_scsi_partition",
                           "ENTER.YOUR.AVAILABLE.PARTITION")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    # Init a VM instance and a VMXML instance.
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a backup of xml to restore it in cleanup.
    backup_xml = vmxml.copy()
    # Add a scsi controller if there is not.
    controller_devices = vmxml.get_devices("controller")
    scsi_controllers = []
    for device in controller_devices:
        if device.type == "scsi":
            scsi_controllers.append(device)

    if not scsi_controllers:
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)

    # Add disk with bus of scsi into vmxml.
    if partition_type:
        if partition.count("ENTER.YOUR"):
            raise error.TestNAError("Partition for partition test "
                                    "is not configured.")
        partition_disk = Disk(type_name="block")
        partition_disk.device = "disk"
        partition_disk.target = {'dev': "vdg",
                                 'bus': "scsi"}
        partition_disk.source = partition_disk.new_disk_source(
            **{'attrs': {'dev': partition}})
        vmxml.add_device(partition_disk)
    if img_type:
        # Init a QemuImg instance.
        img_name = "libvirt_scsi"
        params['image_name'] = img_name
        image = qemu_storage.QemuImg(params, data_dir.get_tmp_dir(), img_name)
        # Create a image.
        img_path, _ = image.create(params)
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.target = {'dev': "vde",
                           'bus': "scsi"}
        vmxml.add_device(img_disk)
    if cdrom_type:
        # Init a CdromDisk instance.
        cdrom_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_scsi")
        try:
            cdrom = CdromDisk(cdrom_path, data_dir.get_tmp_dir())
            cdrom.close()
        except process.CmdError as detail:
            raise error.TestNAError("Failed to create cdrom disk: %s" % detail)

        cdrom_disk = Disk(type_name="file")
        cdrom_disk.device = "cdrom"
        cdrom_disk.target = {'dev': "vdf",
                             'bus': "scsi"}
        cdrom_disk.source = cdrom_disk.new_disk_source(
            **{'attrs': {'file': cdrom_path}})
        vmxml.add_device(cdrom_disk)
Example #26
    def setup_controller_xml():
        """
        Prepare controller devices of VM XML according to params.
        """
        if cntlr_type is None:
            return

        ctrl = Controller(type_name=cntlr_type)

        if model is not None:
            ctrl.model = model
        if pcihole is not None:
            ctrl.pcihole64 = pcihole
        if vectors is not None:
            ctrl.vectors = vectors
        if index is not None:
            ctrl.index = index
        if addr_str is not None:
            match = re.match(r"(?P<bus>[0-9]*):(?P<slot>[0-9]*).(?P<function>[0-9])", addr_str)
            if match:
                addr_dict = match.groupdict()
                addr_dict['bus'] = hex(int(addr_dict['bus']))
                addr_dict['slot'] = hex(int(addr_dict['slot']))
                addr_dict['function'] = hex(int(addr_dict['function']))
                addr_dict['domain'] = '0x0000'
                ctrl.address = ctrl.new_controller_address(attrs=addr_dict)

        logging.debug("Controller XML is:%s", ctrl)
        vm_xml.add_device(ctrl)

        if usb_cntlr_model is not None:
            ctrl = Controller(type_name='usb')
            ctrl.model = usb_cntlr_model
            if usb_cntlr_addr is not None:
                match = re.match(r"(?P<bus>[0-9]*):(?P<slot>[0-9]*).(?P<function>[0-9])", usb_cntlr_addr)
                if match:
                    addr_dict = match.groupdict()
                    addr_dict['bus'] = hex(int(addr_dict['bus']))
                    addr_dict['slot'] = hex(int(addr_dict['slot']))
                    addr_dict['function'] = hex(int(addr_dict['function']))
                    addr_dict['domain'] = '0x0000'
                    ctrl.address = ctrl.new_controller_address(attrs=addr_dict)
            vm_xml.add_device(ctrl)
Example #27
def run(test, params, env):
    """
    Sriov basic test:

    1.create max vfs;
    2.Check the nodedev info;
    3.Start a guest with vf;
    4.Reboot a guest with vf;
    5.suspend/resume a guest with vf
    """
    def find_pf():
        pci_address = ""
        for pci in pci_dirs:
            temp_iface_name = os.listdir("%s/net" % pci)[0]
            operstate = utils_net.get_net_if_operstate(temp_iface_name)
            if operstate == "up":
                pf_iface_name = temp_iface_name
                pci_address = pci
                break
        if pci_address == "":
            return False
        else:
            return pci_address

    def create_address_dict(pci_id):
        """
            Convert a pci address like 0000:xx:xx.x into an address dict.
        """
        device_domain = pci_id.split(':')[0]
        device_domain = "0x%s" % device_domain
        device_bus = pci_id.split(':')[1]
        device_bus = "0x%s" % device_bus
        device_slot = pci_id.split(':')[-1].split('.')[0]
        device_slot = "0x%s" % device_slot
        device_function = pci_id.split('.')[-1]
        device_function = "0x%s" % device_function
        attrs = {
            'type': 'pci',
            'domain': device_domain,
            'slot': device_slot,
            'bus': device_bus,
            'function': device_function
        }
        return attrs

    def addr_to_pci(addr):
        """
            Convert address dict to pci address: xxxx:xx:xx.x.
        """
        pci_domain = re.findall(r"0x(.+)", addr['domain'])[0]
        pci_bus = re.findall(r"0x(.+)", addr['bus'])[0]
        pci_slot = re.findall(r"0x(.+)", addr['slot'])[0]
        pci_function = re.findall(r"0x(.+)", addr['function'])[0]
        pci_addr = pci_domain + ":" + pci_bus + ":" + pci_slot + "." + pci_function
        return pci_addr
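
The two helpers above are inverses of each other; a quick round trip (a sketch):

    # Hypothetical: colon-form pci address in, address dict out, and back
    attrs = create_address_dict("0000:03:00.1")
    # attrs == {'type': 'pci', 'domain': '0x0000', 'bus': '0x03',
    #           'slot': '0x00', 'function': '0x1'}
    addr_to_pci(attrs)  # -> "0000:03:00.1"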

    def create_hostdev_interface(pci_id, managed, model):
        """
            Create hostdev type interface xml.
        """
        attrs = create_address_dict(pci_id)
        new_iface = Interface('hostdev')
        new_iface.managed = managed
        if model != "":
            new_iface.model = model
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.hostdev_address = new_iface.new_iface_address(
            **{"attrs": attrs})
        chars = string.ascii_letters + string.digits + '-_'
        alias_name = 'ua-' + ''.join(
            random.choice(chars) for _ in list(range(64)))
        new_iface.alias = {'name': alias_name}
        return new_iface

    def create_vfs(vf_num):
        """
            Create max vfs.
        """
        net_device = []
        net_name = []
        test_res = process.run("echo 0 > %s/sriov_numvfs" % pci_address,
                               shell=True)
        pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
        net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        pci_list_before = set(pci_list)
        net_list_before = set(net_list)
        test_res = process.run("echo %d > %s/sriov_numvfs" %
                               (vf_num, pci_address),
                               shell=True)
        if test_res.exit_status != 0:
            test.fail("Fail to create vfs")
        pci_list_sriov = virsh.nodedev_list(
            cap='pci').stdout.strip().splitlines()

        def _vf_init_completed():
            try:
                net_list_sriov = virsh.nodedev_list(
                    cap='net').stdout.strip().splitlines()
                net_list_sriov = set(net_list_sriov)
                net_diff = list(net_list_sriov.difference(net_list_before))
                if len(net_diff) != int(vf_num):
                    net_diff = []
                    return False
                return net_diff
            except process.CmdError:
                test.fail("Getting net list with 'virsh nodedev-list' failed")

        pci_list_sriov = set(pci_list_sriov)
        pci_diff = list(pci_list_sriov.difference(pci_list_before))
        net_diff = utils_misc.wait_for(_vf_init_completed, timeout=60)
        if not net_diff:
            test.fail("Get net list with 'virsh list' failed\n")
        for net in net_diff:
            net = net.split('_')
            length = len(net)
            net = '_'.join(net[1:length - 6])
            net_name.append(net)
        for pci_addr in pci_diff:
            temp_addr = pci_addr.split("_")
            pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
            vf_net_name = os.listdir("%s/%s/net" %
                                     (pci_device_dir, pci_addr))[0]
            net_device.append(vf_net_name)
        logging.debug(sorted(net_name))
        logging.debug(sorted(net_device))
        if sorted(net_name) != sorted(net_device):
            test.fail("The net name get from nodedev-list is wrong\n")

    def get_ip_by_mac(mac_addr, timeout=120):
        """
        Get interface IP address by given MAC address.
        """
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)

        def get_ip():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = ""
            iface_name = utils_net.get_linux_ifname(session, mac_addr)
            if iface_name is None:
                test.fail("no interface with MAC address %s found" % mac_addr)
            session.cmd("pkill -9 dhclient", ignore_all_errors=True)
            session.cmd("dhclient %s " % iface_name, ignore_all_errors=True)
            ip_addr = utils_misc.wait_for(get_ip, 20)
            logging.debug("The ip addr is %s", ip_addr)
        except Exception:
            logging.warning("Find %s with MAC address %s but no ip for it" %
                            (iface_name, mac_addr))
        finally:
            session.close()
        return ip_addr

    def create_nodedev_pci(pci_address):
        """
            Convert xxxx:xx.x to pci_xxxx_xx_xx_x.
        """
        nodedev_addr = pci_address.split(':')[0:2]
        slot_function = pci_address.split(':')[2]
        nodedev_addr.append(slot_function.split('.')[0])
        nodedev_addr.append(slot_function.split('.')[1])
        nodedev_addr.insert(0, "pci")
        nodedev_addr = "_".join(nodedev_addr)
        return nodedev_addr
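
A one-line sketch of the conversion this helper performs:

    # Hypothetical: "0000:03:00.1" becomes the nodedev name "pci_0000_03_00_1"
    create_nodedev_pci("0000:03:00.1")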

    def create_network_interface(name):
        """
            Create network type interface xml.
        """
        new_iface = Interface('network')
        new_iface.source = {'network': name}
        new_iface.model = "virtio"
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        return new_iface

    def create_hostdev_network():
        """
            Create hostdev type with vf pool network xml.
        """
        vf_addr_list = []
        netxml = network_xml.NetworkXML()
        if vf_pool_source == "vf_list":
            for vf in vf_list:
                attrs = create_address_dict(vf)
                new_vf = netxml.new_vf_address(**{'attrs': attrs})
                vf_addr_list.append(new_vf)
            netxml.driver = {'name': 'vfio'}
            netxml.forward = {"mode": "hostdev", "managed": managed}
            netxml.vf_list = vf_addr_list
        else:
            netxml.pf = {"dev": pf_name}
            netxml.forward = {"mode": "hostdev", "managed": managed}
        netxml.name = net_name
        logging.debug(netxml)
        return netxml

    def create_macvtap_network():
        """
            Create macvtap type network xml.
        """
        forward_interface_list = []
        for vf_name in vf_name_list:
            forward_interface = {'dev': vf_name}
            forward_interface_list.append(forward_interface)
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'}
        netxml.forward_interface = forward_interface_list
        logging.debug(netxml)
        return netxml

    def do_operation():
        """
            Perform an operation in the guest OS with the VF and check the OS behavior afterwards.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name,
                                                                    detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                    vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

    def check_info():
        """
            Check the pf or vf info after create vfs.
        """
        if info_type == "pf_info" or info_type == "vf_order":
            nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1])
            xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            if info_type == "pf_info":
                product_info = xml.cap.product_info
                max_count = xml.max_count
                if pci_info.find(product_info) == -1:
                    test.fail(
                        "The product_info show in nodedev-dumpxml is wrong\n")
                if int(max_count) != max_vfs:
                    test.fail(
                        "The maxCount show in nodedev-dumpxml is wrong\n")
            if info_type == "vf_order":
                vf_addr_list = xml.cap.virt_functions
                if len(vf_addr_list) != max_vfs:
                    test.fail(
                        "The num of vf list show in nodedev-dumpxml is wrong\n"
                    )
                addr_list = []
                for vf_addr in vf_addr_list:
                    addr = vf_addr.domain + ":" + vf_addr.bus + ":" + vf_addr.slot + "." + vf_addr.function
                    addr_list.append(addr)
                logging.debug(
                    "The vf addr list show in nodedev-dumpxml is %s\n",
                    addr_list)
                if sorted(addr_list) != addr_list:
                    test.fail(
                        "The vf addr list show in nodedev-dumpxml is not sorted correctly\n"
                    )
        elif info_type == "vf_info":
            vf_addr = vf_list[0]
            nodedev_pci = create_nodedev_pci(vf_addr)
            vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            vf_bus_slot = ':'.join(vf_addr.split(':')[1:])
            res = process.run("lspci -s %s -vv" % vf_bus_slot)
            vf_pci_info = res.stdout_text
            vf_product_info = vf_xml.cap.product_info
            if vf_pci_info.find(vf_product_info) == -1:
                test.fail(
                    "The product_info show in nodedev-dumpxml is wrong\n")
            pf_addr = vf_xml.cap.virt_functions[0]
            pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0]
            pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0]
            pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0]
            pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0]
            pf_pci = pf_addr_domain + ":" + pf_addr_bus + ":" + pf_addr_slot + "." + pf_addr_function
            if pf_pci != pci_id:
                test.fail(
                    "The pf address show in vf nodedev-dumpxml is wrong\n")

    def create_interface():
        """
            Call different function to create interface according to the type
        """
        new_iface = Interface('network')
        if vf_type == "vf":
            new_iface = create_hostdev_interface(vf_addr, managed, model)
        if vf_type == "vf_pool":
            netxml = create_hostdev_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            if not inactive_pool:
                virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        if vf_type == "macvtap":
            new_iface = Interface('direct')
            new_iface.source = {"dev": vf_name, "mode": "passthrough"}
            new_iface.mac_address = utils_net.generate_mac_address_simple()
        if vf_type == "macvtap_network":
            netxml = create_macvtap_network()
            result = virsh.net_define(netxml.xml, ignore_status=True)
            virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        return new_iface

    def detach_interface():
        """
            Detach interface:

            1.Detach interface from xml;
            2.Check the live xml after detach interface;
            3.Check the vf driver after detach interface.
        """
        def _detach_completed():
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail(
                            "The hostdev interface still in the guest xml after detach\n"
                        )
                    break
            driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            logging.debug("The driver after vf detached from guest is %s\n",
                          driver)
            if managed == "no":
                if driver != "vfio-pci":
                    test.fail(
                        "The vf pci driver is not vfio-pci after detached from guest with managed as no\n"
                    )
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            elif driver != origin_driver:
                test.fail(
                    "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n"
                    % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail(
                            "The macvtap interface still exist in the guest xml after detach\n"
                        )
                    break

    def attach_interface():
        """
            Attach interface:

            1.Attach interface from xml;
            2.Check the vf driver after attach interface;
            3.Check the live xml after attach interface;
        """
        if managed == "no":
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name,
                                     file_opt=new_iface.xml,
                                     flagstr=option,
                                     debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)
        get_ip_by_mac(mac_addr, timeout=60)
        device = live_xml.devices
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail(
                            "The driver of the hostdev interface is not vfio\n"
                        )
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            match = "no"
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
            if match != "yes":
                test.fail(
                    "The dev name or mode of macvtap interface is wrong after attach\n"
                )
        return interface

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    operation = params.get("operation")
    driver = params.get("driver", "ixgbe")
    status_error = params.get("status_error", "no") == "yes"
    model = params.get("model", "")
    managed = params.get("managed", "yes")
    attach = params.get("attach", "")
    option = params.get("option", "")
    vf_type = params.get("vf_type", "")
    info_check = params.get("info_check", "no")
    info_type = params.get("info_type", "")
    vf_pool_source = params.get("vf_pool_source", "vf_list")
    loop_times = int(params.get("loop_times", "1"))
    start_vm = "yes" == params.get("start_vm", "yes")
    including_pf = "yes" == params.get("including_pf", "no")
    max_vfs_attached = "yes" == params.get("max_vfs_attached", "no")
    inactive_pool = "yes" == params.get("inactive_pool", "no")
    duplicate_vf = "yes" == params.get("duplicate_vf", "no")
    expected_error = params.get("error_msg", "")

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()
    if max_vfs_attached:
        controller_devices = vmxml.get_devices("controller")
        pci_bridge_controllers = []
        for device in controller_devices:
            logging.debug(device)
            if device.type == 'pci' and device.model == "pci-bridge":
                pci_bridge_controllers.append(device)
        if not pci_bridge_controllers:
            pci_bridge_controller = Controller("controller")
            pci_bridge_controller.type = "pci"
            pci_bridge_controller.index = "1"
            pci_bridge_controller.model = "pci-bridge"
            vmxml.add_device(pci_bridge_controller)
            vmxml.sync()
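            # A minimal sketch of what this adds to the domain XML, assuming
            # libvirt's usual controller serialization (index/model as set
            # above):
            #   <controller type='pci' index='1' model='pci-bridge'/>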

    if start_vm:
        if not vm.is_dead():
            vm.destroy()
        vm.start()
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        session.close()
    else:
        if not vm.is_dead():
            vm.destroy()

    driver_dir = "/sys/bus/pci/drivers/%s" % driver
    pci_dirs = glob.glob("%s/0000*" % driver_dir)
    pci_device_dir = "/sys/bus/pci/devices"
    pci_address = ""
    net_name = "test-net"

    # Prepare interface xml
    try:
        pf_iface_name = ""
        pci_address = utils_misc.wait_for(find_pf, timeout=60)
        if not pci_address:
            test.cancel("no up pf found in the test machine")
        pci_id = pci_address.split("/")[-1]
        pf_name = os.listdir('%s/net' % pci_address)[0]
        bus_slot = ':'.join(pci_address.split(':')[1:])
        pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text
        logging.debug("The pci info of the sriov card is:\n %s", pci_info)
        max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1
        if info_check == 'yes' or max_vfs < 32:
            vf_num = max_vfs
            create_vfs(vf_num)
        else:
            vf_num = max_vfs // 2 + 1
            create_vfs(vf_num)

        vf_list = []
        vf_name_list = []

        for i in range(vf_num):
            vf = os.readlink("%s/virtfn%s" % (pci_address, str(i)))
            vf = os.path.split(vf)[1]
            vf_list.append(vf)
            vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0]
            vf_name_list.append(vf_name)

        if attach == "yes":
            vf_addr = vf_list[0]
            new_iface = create_interface()
            if inactive_pool:
                result = virsh.attach_device(vm_name,
                                             file_opt=new_iface.xml,
                                             flagstr=option,
                                             ignore_status=True,
                                             debug=True)
                utils_test.libvirt.check_exit_status(result, expected_error)
            else:
                mac_addr = new_iface.mac_address
                nodedev_pci_addr = create_nodedev_pci(vf_addr)
                origin_driver = os.readlink(
                    os.path.join(pci_device_dir, vf_addr,
                                 "driver")).split('/')[-1]
                logging.debug(
                    "The driver of vf before attaching to guest is %s\n",
                    origin_driver)
                count = 0
                while count < loop_times:
                    interface = attach_interface()
                    if vf_type in ["vf", "vf_pool"]:
                        vf_addr_attrs = interface.hostdev_address.attrs
                    if operation != "":
                        do_operation()
                    detach_interface()
                    count += 1
                if max_vfs_attached:
                    interface_list = []
                    for vf_addr in vf_list:
                        new_iface = create_interface()
                        mac_addr = new_iface.mac_address
                        nodedev_pci_addr = create_nodedev_pci(vf_addr)
                        attach_interface()
                        interface_list.append(new_iface)
                    count = 0
                    for new_iface in interface_list:
                        vf_addr = vf_list[count]
                        vf_addr_attrs = new_iface.hostdev_address.attrs
                        detach_interface()
                        count += 1
        if info_check == "yes":
            check_info()
        if including_pf:
            vf_list = []
            pf_addr = pci_id
            vf_list.append(pf_addr)
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if duplicate_vf:
            vf_list.append(vf_list[0])
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
            result = virsh.net_create(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        if vf_type == "vf_pool" or vf_type == "macvtap_network":
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name, ignore_status=True)
        backup_xml.sync()
Example #28
def generate_controller(controller_dict):
    controller_xml = Controller("controller")
    controller_xml.model = controller_dict['model']
    controller_xml.type = controller_dict['type']
    controller_xml.index = controller_dict['index']
    return controller_xml
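
A hedged usage sketch of the helper above; the controller_dict keys mirror the attributes it sets, and vmxml stands for an existing VMXML instance (both names are assumptions, not part of the original snippet):

    ctl = generate_controller({'type': 'scsi',
                               'model': 'virtio-scsi',
                               'index': '0'})
    vmxml.add_device(ctl)   # vmxml: an existing VMXML instance
    vmxml.sync()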
Example #29
def run_libvirt_scsi(test, params, env):
    # Get variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    img_type = ('yes' == params.get("libvirt_scsi_img_type", "no"))
    cdrom_type = ('yes' == params.get("libvirt_scsi_cdrom_type", "no"))
    partition_type = ('yes' == params.get("libvirt_scsi_partition_type", "no"))
    partition = params.get("libvirt_scsi_partition",
                           "ENTER.YOUR.AVAILABLE.PARTITION")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    # Init a VM instance and a VMXML instance.
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_dumpxml(vm_name)
    # Keep a backup of xml to restore it in cleanup.
    backup_xml = vmxml.copy()
    # Add a scsi controller if there is not.
    controller_devices = vmxml.get_devices("controller")
    scsi_controller_exists = False
    for device in controller_devices:
        if device.type == "scsi":
            scsi_controller_exists = True
            break
    if not scsi_controller_exists:
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)
    # Add disk with bus of scsi into vmxml.
    if img_type:
        # Init a QemuImg instance.
        img_name = "libvirt_scsi"
        params['image_name'] = img_name
        image = qemu_storage.QemuImg(params, data_dir.get_tmp_dir(), img_name)
        # Create a image.
        img_path, _ = image.create(params)
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {'file': img_path}})
        img_disk.target = {'dev': "vde",
                           'bus': "scsi"}
        vmxml.add_device(img_disk)
    if cdrom_type:
        # Init a CdromDisk instance.
        cdrom_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_scsi")
        cdrom = CdromDisk(cdrom_path, data_dir.get_tmp_dir())
        cdrom.close()
        cdrom_disk = Disk(type_name="file")
        cdrom_disk.device = "cdrom"
        cdrom_disk.target = {'dev': "vdf",
                             'bus': "scsi"}
        cdrom_disk.source = cdrom_disk.new_disk_source(
            **{'attrs': {'file': cdrom_path}})
        vmxml.add_device(cdrom_disk)
    if partition_type:
        if partition.count("ENTER.YOUR"):
            raise error.TestNAError("Partition for partition test "
                                    "is not configured.")
        partition_disk = Disk(type_name="block")
        partition_disk.device = "disk"
        partition_disk.target = {'dev': "vdg",
                                 'bus': "scsi"}
        partition_disk.source = partition_disk.new_disk_source(
            **{'attrs': {'dev': partition}})
        vmxml.add_device(partition_disk)
    # sync the vmxml with VM.
    vmxml.sync()
    # Check the result of scsi disk.
    try:
        try:
            vm.start()
            # Start VM successfully.
            if status_error:
                raise error.TestFail('Starting VM succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case. "
                                     "error: %s" % e)
    finally:
        # clean up.
        backup_xml.sync()
Example #30
def run(test, params, env):
    """
    Sriov basic test:

    1.create max vfs;
    2.Check the nodedev info;
    3.Start a guest with vf;
    4.Reboot a guest with vf;
    5.suspend/resume a guest with vf
    """
    def find_pf():
        pci_address = ""
        for pci in pci_dirs:
            temp_iface_name = os.listdir("%s/net" % pci)[0]
            operstate = utils_net.get_net_if_operstate(temp_iface_name)
            if operstate == "up":
                pf_iface_name = temp_iface_name
                pci_address = pci
                break
        if pci_address == "":
            return False
        else:
            return pci_address

    def create_address_dict(pci_id):
        """
            Use a PCI address of the form xxxx:xx:xx.x to create an address dict.
        """
        device_domain = pci_id.split(':')[0]
        device_domain = "0x%s" % device_domain
        device_bus = pci_id.split(':')[1]
        device_bus = "0x%s" % device_bus
        device_slot = pci_id.split(':')[-1].split('.')[0]
        device_slot = "0x%s" % device_slot
        device_function = pci_id.split('.')[-1]
        device_function = "0x%s" % device_function
        attrs = {'type': 'pci', 'domain': device_domain, 'slot': device_slot,
                 'bus': device_bus, 'function': device_function}
        return attrs

    def addr_to_pci(addr):
        """
            Convert an address dict to a PCI address of the form xxxx:xx:xx.x.
        """
        pci_domain = re.findall(r"0x(.+)", addr['domain'])[0]
        pci_bus = re.findall(r"0x(.+)", addr['bus'])[0]
        pci_slot = re.findall(r"0x(.+)", addr['slot'])[0]
        pci_function = re.findall(r"0x(.+)", addr['function'])[0]
        pci_addr = pci_domain + ":" + pci_bus + ":" + pci_slot + "." + pci_function
        return pci_addr
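    # Worked example (hypothetical address): create_address_dict and
    # addr_to_pci are inverses of each other.
    #   create_address_dict("0000:3b:10.1")
    #     -> {'type': 'pci', 'domain': '0x0000', 'bus': '0x3b',
    #         'slot': '0x10', 'function': '0x1'}
    #   addr_to_pci(that dict) -> "0000:3b:10.1"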

    def create_hostdev_interface(pci_id, managed, model):
        """
            Create hostdev type interface xml.
        """
        attrs = create_address_dict(pci_id)
        new_iface = Interface('hostdev')
        new_iface.managed = managed
        if model != "":
            new_iface.model = model
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.hostdev_address = new_iface.new_iface_address(**{"attrs": attrs})
        chars = string.ascii_letters + string.digits + '-_'
        alias_name = 'ua-' + ''.join(random.choice(chars) for _ in list(range(64)))
        new_iface.alias = {'name': alias_name}
        return new_iface

    def create_vfs(vf_num):
        """
            Create max vfs.
        """
        net_device = []
        net_name = []
        test_res = process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
        net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        pci_list_before = set(pci_list)
        net_list_before = set(net_list)
        test_res = process.run("echo %d > %s/sriov_numvfs" % (vf_num, pci_address), shell=True)
        if test_res.exit_status != 0:
            test.fail("Fail to create vfs")
        pci_list_sriov = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()

        def _vf_init_completed():
            try:
                net_list_sriov = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
                net_list_sriov = set(net_list_sriov)
                net_diff = list(net_list_sriov.difference(net_list_before))
                if len(net_diff) != int(vf_num):
                    net_diff = []
                    return False
                return net_diff
            except process.CmdError:
                test.fail("Get net list with 'virsh nodedev-list' failed\n")

        pci_list_sriov = set(pci_list_sriov)
        pci_diff = list(pci_list_sriov.difference(pci_list_before))
        net_diff = utils_misc.wait_for(_vf_init_completed, timeout=60)
        if not net_diff:
            test.fail("Get net list with 'virsh nodedev-list' failed\n")
        for net in net_diff:
            net = net.split('_')
            length = len(net)
            net = '_'.join(net[1:length-6])
            net_name.append(net)
        for pci_addr in pci_diff:
            temp_addr = pci_addr.split("_")
            pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
            vf_net_name = os.listdir("%s/%s/net" % (pci_device_dir, pci_addr))[0]
            net_device.append(vf_net_name)
        logging.debug(sorted(net_name))
        logging.debug(sorted(net_device))
        if sorted(net_name) != sorted(net_device):
            test.fail("The net name got from nodedev-list is wrong\n")

    def get_ip_by_mac(mac_addr, timeout=120):
        """
        Get interface IP address by given MAC address.
        """
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)

        def get_ip():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = ""
            iface_name = utils_net.get_linux_ifname(session, mac_addr)
            if iface_name is None:
                test.fail("no interface with MAC address %s found" % mac_addr)
            session.cmd("pkill -9 dhclient", ignore_all_errors=True)
            session.cmd("dhclient %s " % iface_name, ignore_all_errors=True)
            ip_addr = utils_misc.wait_for(get_ip, 20)
            logging.debug("The ip addr is %s", ip_addr)
        except Exception:
            logging.warning("Found %s with MAC address %s but no ip for it",
                            iface_name, mac_addr)
        finally:
            session.close()
        return ip_addr

    def create_nodedev_pci(pci_address):
        """
            Convert a PCI address xxxx:xx:xx.x to the nodedev name pci_xxxx_xx_xx_x.
        """
        nodedev_addr = pci_address.split(':')[0:2]
        slot_function = pci_address.split(':')[2]
        nodedev_addr.append(slot_function.split('.')[0])
        nodedev_addr.append(slot_function.split('.')[1])
        nodedev_addr.insert(0, "pci")
        nodedev_addr = "_".join(nodedev_addr)
        return nodedev_addr
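    # Worked example (hypothetical address):
    #   create_nodedev_pci("0000:3b:10.1") -> "pci_0000_3b_10_1"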

    def create_network_interface(name):
        """
            Create network type interface xml.
        """
        new_iface = Interface('network')
        new_iface.source = {'network': name}
        new_iface.model = "virtio"
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        return new_iface

    def create_hostdev_network():
        """
            Create hostdev type with vf pool network xml.
        """
        vf_addr_list = []
        netxml = network_xml.NetworkXML()
        if vf_pool_source == "vf_list":
            for vf in vf_list:
                attrs = create_address_dict(vf)
                new_vf = netxml.new_vf_address(**{'attrs': attrs})
                vf_addr_list.append(new_vf)
            netxml.driver = {'name': 'vfio'}
            netxml.forward = {"mode": "hostdev", "managed": managed}
            netxml.vf_list = vf_addr_list
        else:
            netxml.pf = {"dev": pf_name}
            netxml.forward = {"mode": "hostdev", "managed": managed}
        netxml.name = net_name
        logging.debug(netxml)
        return netxml
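    # With vf_pool_source == "vf_list" the network above is expected to
    # serialize roughly as follows (a sketch of libvirt's hostdev-pool
    # network schema; the address values are hypothetical):
    #   <network>
    #     <name>test-net</name>
    #     <forward mode='hostdev' managed='yes'>
    #       <driver name='vfio'/>
    #       <address type='pci' domain='0x0000' bus='0x3b'
    #                slot='0x10' function='0x1'/>
    #     </forward>
    #   </network>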

    def create_macvtap_network():
        """
            Create macvtap type network xml.
        """
        forward_interface_list = []
        for vf_name in vf_name_list:
            forward_interface = {'dev': vf_name}
            forward_interface_list.append(forward_interface)
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'}
        netxml.forward_interface = forward_interface_list
        logging.debug(netxml)
        return netxml
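    # Expected serialization (a sketch of libvirt's macvtap network
    # schema; the device names are hypothetical):
    #   <network>
    #     <name>test-net</name>
    #     <forward dev='ens1f0v0' mode='passthrough'>
    #       <interface dev='ens1f0v0'/>
    #       <interface dev='ens1f0v1'/>
    #     </forward>
    #   </network>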

    def do_operation():
        """
            Do operation in guest os with vf and check the os behavior after operation.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                    vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

    def check_info():
        """
            Check the pf or vf info after create vfs.
        """
        if info_type == "pf_info" or info_type == "vf_order":
            nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1])
            xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            if info_type == "pf_info":
                product_info = xml.cap.product_info
                max_count = xml.max_count
                if pci_info.find(product_info) == -1:
                    test.fail("The product_info shown in nodedev-dumpxml is wrong\n")
                if int(max_count) != max_vfs:
                    test.fail("The maxCount shown in nodedev-dumpxml is wrong\n")
            if info_type == "vf_order":
                vf_addr_list = xml.cap.virt_functions
                if len(vf_addr_list) != max_vfs:
                    test.fail("The number of vfs shown in nodedev-dumpxml is wrong\n")
                addr_list = []
                for vf_addr in vf_addr_list:
                    addr = vf_addr.domain+":"+vf_addr.bus+":"+vf_addr.slot+"."+vf_addr.function
                    addr_list.append(addr)
                logging.debug("The vf addr list shown in nodedev-dumpxml is %s\n", addr_list)
                if sorted(addr_list) != addr_list:
                    test.fail("The vf addr list shown in nodedev-dumpxml is not sorted correctly\n")
        elif info_type == "vf_info":
            vf_addr = vf_list[0]
            nodedev_pci = create_nodedev_pci(vf_addr)
            vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            vf_bus_slot = ':'.join(vf_addr.split(':')[1:])
            res = process.run("lspci -s %s -vv" % vf_bus_slot)
            vf_pci_info = res.stdout_text
            vf_product_info = vf_xml.cap.product_info
            if vf_pci_info.find(vf_product_info) == -1:
                test.fail("The product_info shown in nodedev-dumpxml is wrong\n")
            pf_addr = vf_xml.cap.virt_functions[0]
            pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0]
            pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0]
            pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0]
            pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0]
            pf_pci = pf_addr_domain+":"+pf_addr_bus+":"+pf_addr_slot+"."+pf_addr_function
            if pf_pci != pci_id:
                test.fail("The pf address shown in vf nodedev-dumpxml is wrong\n")

    def create_interface():
        """
            Call different function to create interface according to the type
        """
        new_iface = Interface('network')
        if vf_type == "vf":
            new_iface = create_hostdev_interface(vf_addr, managed, model)
        if vf_type == "vf_pool":
            netxml = create_hostdev_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            if not inactive_pool:
                virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        if vf_type == "macvtap":
            new_iface = Interface('direct')
            new_iface.source = {"dev": vf_name, "mode": "passthrough"}
            new_iface.mac_address = utils_net.generate_mac_address_simple()
        if vf_type == "macvtap_network":
            netxml = create_macvtap_network()
            result = virsh.net_define(netxml.xml, ignore_status=True)
            virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        return new_iface

    def detach_interface():
        """
            Detach interface:

            1.Detach interface from xml;
            2.Check the live xml after detach interface;
            3.Check the vf driver after detach interface.
        """
        def _detach_completed():
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail("The hostdev interface still in the guest xml after detach\n")
                    break
            driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            logging.debug("The driver after vf detached from guest is %s\n", driver)
            if managed == "no":
                if driver != "vfio-pci":
                    test.fail("The vf pci driver is not vfio-pci after detached from guest with managed as no\n")
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result, expect_error=False)
            elif driver != origin_driver:
                test.fail("The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n" % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail("The macvtap interface still exist in the guest xml after detach\n")
                    break

    def attach_interface():
        """
            Attach interface:

            1.Attach interface from xml;
            2.Check the vf driver after attach interface;
            3.Check the live xml after attach interface;
        """
        if managed == "no":
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name, file_opt=new_iface.xml, flagstr=option, debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        # For option == "--persistent", after VM destroyed and then start, the device should still be there.
        if option == "--persistent":
            virsh.destroy(vm_name)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)
        get_ip_by_mac(mac_addr, timeout=60)
        device = live_xml.devices
        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail("The driver of the hostdev interface is not vfio\n")
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            match = "no"
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
            if match != "yes":
                test.fail("The dev name or mode of macvtap interface is wrong after attach\n")
        return interface

    def setup_controller(nic_num, controller_index, ctl_models):
        """
        Create controllers bound to the numa node in the guest xml

        :param nic_num: number of NIC cards bound to the numa node
        :param controller_index: index num used to create controllers
        :param ctl_models: controller topology for the numa bond
        """
        index = controller_index
        if nic_num == 2:
            ctl_models.append('pcie-switch-upstream-port')
            ctl_models.append('pcie-switch-downstream-port')
            ctl_models.append('pcie-switch-downstream-port')
        for i in range(index):
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = i
            if i == 0:
                controller.model = 'pcie-root'
            else:
                controller.model = 'pcie-root-port'
            vmxml.add_device(controller)
        set_address = False
        for model in ctl_models:
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = index
            controller.model = model
            if set_address or model == "pcie-switch-upstream-port":
                attrs = {'type': 'pci', 'domain': '0', 'slot': '0',
                         'bus': index - 1, 'function': '0'}
                controller.address = controller.new_controller_address(**{"attrs": attrs})
                logging.debug(controller)
            if controller.model == "pcie-expander-bus":
                controller.node = "0"
                controller.target = {'busNr': '100'}
                set_address = True
            else:
                set_address = False
            logging.debug(controller)
            vmxml.add_device(controller)
            index += 1
        return index - 1

    def add_numa(vmxml):
        """
        Add numa node in the guest xml

        :param vmxml: The instance of the VMXML class
        """
        vcpu = vmxml.vcpu
        max_mem = vmxml.max_mem
        max_mem_unit = vmxml.max_mem_unit
        numa_dict = {}
        numa_dict_list = []
        # Compute the memory size for each numa node
        if vcpu == 1:
            numa_dict['id'] = '0'
            numa_dict['cpus'] = '0'
            numa_dict['memory'] = str(max_mem)
            numa_dict['unit'] = str(max_mem_unit)
            numa_dict_list.append(numa_dict)
        else:
            for index in range(2):
                numa_dict['id'] = str(index)
                numa_dict['memory'] = str(max_mem // 2)
                numa_dict['unit'] = str(max_mem_unit)
                if vcpu == 2:
                    numa_dict['cpus'] = str(index)
                else:
                    if index == 0:
                        if vcpu == 3:
                            numa_dict['cpus'] = str(index)
                        if vcpu > 3:
                            numa_dict['cpus'] = "%s-%s" % (index,
                                                           vcpu // 2 - 1)
                    else:
                        numa_dict['cpus'] = "%s-%s" % (vcpu // 2,
                                                       str(vcpu - 1))
                numa_dict_list.append(numa_dict)
                numa_dict = {}
        # Add cpu device with numa node setting in domain xml
        vmxml_cpu = vm_xml.VMCPUXML()
        vmxml_cpu.xml = "<cpu><numa/></cpu>"
        vmxml_cpu.numa_cell = numa_dict_list
        vmxml.cpu = vmxml_cpu
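
    # For a 4-vcpu guest this is expected to produce roughly the following
    # <numa> element (a sketch; the memory figures are illustrative):
    #   <cpu>
    #     <numa>
    #       <cell id='0' cpus='0-1' memory='1048576' unit='KiB'/>
    #       <cell id='1' cpus='2-3' memory='1048576' unit='KiB'/>
    #     </numa>
    #   </cpu>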

    def create_iface_list(bus_id, nic_num, vf_list):
        """
            Create a hostdev interface list bound to the numa node

            :param bus_id: bus id of the pci address, which decides which controller the interface is attached to
            :param nic_num: number of NIC cards bound to the numa node
            :param vf_list: sriov vf list
        """
        iface_list = []
        for num in range(nic_num):
            vf_addr = vf_list[num]
            iface = create_hostdev_interface(vf_addr, managed, model)
            bus_id -= num
            attrs = {'type': 'pci', 'domain': '0', 'slot': '0',
                     'bus': bus_id, 'function': '0'}
            iface.address = iface.new_iface_address(**{"attrs": attrs})
            iface_list.append(iface)
        return iface_list

    def check_guestos(iface_list):
        """
            Check whether a vf bound to the numa node can get an ip successfully in the guest os

            :param iface_list: hostdev interface list
        """
        for iface in iface_list:
            mac_addr = iface.mac_address
            get_ip_by_mac(mac_addr, timeout=60)

    def check_numa(vf_driver):
        """
        Check whether the vf is bound to the correct numa node in the guest os

        :param vf_driver: vf driver
        """
        if vm.serial_console:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        vf_pci = "/sys/bus/pci/drivers/%s" % vf_driver
        vf_dir = session.cmd_output("ls -d %s/00*" % vf_pci).strip().split('\n')
        for vf in vf_dir:
            numa_node = session.cmd_output('cat %s/numa_node' % vf).strip().split('\n')[-1]
            logging.debug("The vf is attached to numa node %s\n", numa_node)
            if numa_node != "0":
                test.fail("The vf is not attached to numa node 0\n")
        session.close()

    def remove_devices(vmxml, device_type):
        """
        Remove all addresses for all devices that have one.

        :param vmxml: The VM XML to be modified
        :param device_type: The device type for removing

        :return: True if success, otherwise, False
        """
        if device_type not in ['address', 'usb']:
            return
        type_dict = {'address': '/devices/*/address',
                     'usb': '/devices/*'}
        try:
            for elem in vmxml.xmltreefile.findall(type_dict[device_type]):
                if device_type == 'usb':
                    if elem.get('bus') == 'usb':
                        vmxml.xmltreefile.remove(elem)
                else:
                    vmxml.xmltreefile.remove(elem)
        except (AttributeError, TypeError) as details:
            test.error("Fail to remove '%s': %s" % (device_type, details))
        vmxml.xmltreefile.write()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    machine_type = params.get("machine_type", "pc")
    operation = params.get("operation")
    driver = params.get("driver", "ixgbe")
    status_error = params.get("status_error", "no") == "yes"
    model = params.get("model", "")
    managed = params.get("managed", "yes")
    attach = params.get("attach", "")
    option = params.get("option", "")
    vf_type = params.get("vf_type", "")
    info_check = params.get("info_check", "no")
    info_type = params.get("info_type", "")
    vf_pool_source = params.get("vf_pool_source", "vf_list")
    loop_times = int(params.get("loop_times", "1"))
    start_vm = "yes" == params.get("start_vm", "yes")
    including_pf = "yes" == params.get("including_pf", "no")
    max_vfs_attached = "yes" == params.get("max_vfs_attached", "no")
    inactive_pool = "yes" == params.get("inactive_pool", "no")
    duplicate_vf = "yes" == params.get("duplicate_vf", "no")
    expected_error = params.get("error_msg", "")
    nic_num = int(params.get("nic_num", "1"))
    nfv = params.get("nfv", "no") == "yes"
    ctl_models = params.get("ctl_models", "").split(' ')
    controller_index = int(params.get("controller_index", "12"))

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()
    if max_vfs_attached:
        controller_devices = vmxml.get_devices("controller")
        pci_bridge_controllers = []
        for device in controller_devices:
            logging.debug(device)
            if device.type == 'pci' and device.model == "pci-bridge":
                pci_bridge_controllers.append(device)
        if not pci_bridge_controllers:
            pci_bridge_controller = Controller("controller")
            pci_bridge_controller.type = "pci"
            pci_bridge_controller.index = "1"
            pci_bridge_controller.model = "pci-bridge"
            vmxml.add_device(pci_bridge_controller)
            vmxml.sync()

    if start_vm:
        if not vm.is_dead():
            vm.destroy()
        vm.start()
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        session.close()
    else:
        if not vm.is_dead():
            vm.destroy()

    driver_dir = "/sys/bus/pci/drivers/%s" % driver
    pci_dirs = glob.glob("%s/0000*" % driver_dir)
    pci_device_dir = "/sys/bus/pci/devices"
    pci_address = ""
    net_name = "test-net"

    # Prepare interface xml
    try:
        pf_iface_name = ""
        pci_address = utils_misc.wait_for(find_pf, timeout=60)
        if not pci_address:
            test.cancel("no up pf found in the test machine")
        pci_id = pci_address.split("/")[-1]
        pf_name = os.listdir('%s/net' % pci_address)[0]
        bus_slot = ':'.join(pci_address.split(':')[1:])
        pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text
        logging.debug("The pci info of the sriov card is:\n %s", pci_info)
        max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1
        if info_check == 'yes' or max_vfs < 32:
            vf_num = max_vfs
            create_vfs(vf_num)
        else:
            vf_num = max_vfs // 2 + 1
            create_vfs(vf_num)

        vf_list = []
        vf_name_list = []

        for i in range(vf_num):
            vf = os.readlink("%s/virtfn%s" % (pci_address, str(i)))
            vf = os.path.split(vf)[1]
            vf_list.append(vf)
            vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0]
            vf_name_list.append(vf_name)

        if attach == "yes" and not nfv:
            vf_addr = vf_list[0]
            new_iface = create_interface()
            if inactive_pool:
                result = virsh.attach_device(vm_name, file_opt=new_iface.xml, flagstr=option,
                                             ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result, expected_error)
            else:
                mac_addr = new_iface.mac_address
                nodedev_pci_addr = create_nodedev_pci(vf_addr)
                origin_driver = os.readlink(os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
                logging.debug("The driver of vf before attaching to guest is %s\n", origin_driver)
                count = 0
                while count < loop_times:
                    interface = attach_interface()
                    if vf_type in ["vf", "vf_pool"]:
                        vf_addr_attrs = interface.hostdev_address.attrs
                    if operation != "":
                        do_operation()
                    detach_interface()
                    count += 1
                if max_vfs_attached:
                    interface_list = []
                    for vf_addr in vf_list:
                        new_iface = create_interface()
                        mac_addr = new_iface.mac_address
                        nodedev_pci_addr = create_nodedev_pci(vf_addr)
                        attach_interface()
                        interface_list.append(new_iface)
                    count = 0
                    for new_iface in interface_list:
                        vf_addr = vf_list[count]
                        vf_addr_attrs = new_iface.hostdev_address.attrs
                        detach_interface()
                        count += 1
        if info_check == "yes":
            check_info()
        if including_pf:
            vf_list = []
            pf_addr = pci_id
            vf_list.append(pf_addr)
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if duplicate_vf:
            vf_list.append(vf_list[0])
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
            result = virsh.net_create(netxml.xml, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if nfv:
            for os_machine_type in (machine_type, vmxml.os.machine):
                if 'q35' not in os_machine_type:
                    test.cancel("nfv only runs with q35 machine type")
            vf_driver = os.readlink(os.path.join(pci_device_dir, vf_list[0], "driver")).split('/')[-1]
            vmxml.remove_all_device_by_type('controller')
            remove_devices(vmxml, 'address')
            remove_devices(vmxml, 'usb')
            add_numa(vmxml)
            bus_id = setup_controller(nic_num, controller_index, ctl_models)
            vmxml.sync()
            logging.debug(vmxml)
            iface_list = create_iface_list(bus_id, nic_num, vf_list)
            for iface in iface_list:
                logging.debug("Hostdev interface xml:\n%s",
                              process.run("cat %s" % iface.xml,
                                          shell=True).stdout_text)
                result = virsh.attach_device(vm_name, file_opt=iface.xml, flagstr=option,
                                             ignore_status=True, debug=True)
                utils_test.libvirt.check_exit_status(result, expect_error=False)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(live_xml)
            check_guestos(iface_list)
            check_numa(vf_driver)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        if vf_type == "vf_pool" or vf_type == "macvtap_network":
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name, ignore_status=True)
        backup_xml.sync()
Example #31
    def setup_controller(nic_num, controller_index, ctl_models):
        """
        Create controllers bound to the numa node in the guest xml

        :param nic_num: number of NIC cards bound to the numa node
        :param controller_index: index num used to create controllers
        :param ctl_models: controller topology for the numa bond
        """
        index = controller_index
        if nic_num == 2:
            ctl_models.append('pcie-switch-upstream-port')
            ctl_models.append('pcie-switch-downstream-port')
            ctl_models.append('pcie-switch-downstream-port')
        for i in range(index):
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = i
            if i == 0:
                controller.model = 'pcie-root'
            else:
                controller.model = 'pcie-root-port'
            vmxml.add_device(controller)
        set_address = False
        for model in ctl_models:
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = index
            controller.model = model
            if set_address or model == "pcie-switch-upstream-port":
                attrs = {'type': 'pci', 'domain': '0', 'slot': '0',
                         'bus': index - 1, 'function': '0'}
                controller.address = controller.new_controller_address(**{"attrs": attrs})
                logging.debug(controller)
            if controller.model == "pcie-expander-bus":
                controller.node = "0"
                controller.target = {'busNr': '100'}
                set_address = True
            else:
                set_address = False
            logging.debug(controller)
            vmxml.add_device(controller)
            index += 1
        return index - 1
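
A hedged usage sketch of setup_controller as it is driven in Example #30; the parameter values are illustrative, and vmxml is assumed to be a q35 guest's VMXML with its original controllers already removed:

    ctl_models = ['pcie-expander-bus', 'pcie-root-port']
    bus_id = setup_controller(nic_num=1, controller_index=12,
                              ctl_models=ctl_models)
    # bus_id is the index of the last controller added; hostdev interfaces
    # can then be given addresses that land on that bus.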
Example #32
def run(test, params, env):
    """
    Test libvirt usb feature based on the following matrix:
        the combined usage of machine type q35/i440fx, pci/pcie
    bus controllers and usb controllers

    bus controller on q35 machine:
        pcie-root,pcie-root-port,pcie-to-pci-bridge,pci-bridge
        pcie-root,pcie-root-port,pcie-switch-upstream-port, pcie-switch-downstream-port
        pcie-root,dmi-to-pci-bridge,pci-bridge
    bus controller on i440fx machine:
        pci-root,pci-bridge

    usb30_controller:
        nec-xhci
        qemu-xhci
    usb20_controller:
        ich9-ehci1,ich9-uhci1,ich9-uhci2,ich9-uhci3

    1. cold-plug/hot-unplug USB host device to/from VM
    2. passthrough host usb device with vid/pid or bus/device hostdev
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    usb_index = params.get("usb_index", "0")
    bus_controller = params.get("bus_controller", "")
    usb_model = params.get("usb_model", "")
    start_timeout = int(params.get("start_timeout", "60"))
    usb_hub = "yes" == params.get("usb_hub", "no")
    status_error = "yes" == params.get("status_error", "no")
    passthrough = "yes" == params.get("passthrough", "no")
    vid_pid = "yes" == params.get("vid_pid", "no")
    bus_dev = "yes" == params.get("bus_dev", "no")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def get_usb_source(lsusb_list):
        """
        Build a dict of the source xml of usb devices based on the output of the lsusb command

        :param lsusb_list: a list of the output from command lsusb
        :return: a dict of the source xml of usb device
        """

        logging.debug("lsusb command result: {}".format(lsusb_list))
        source_list = []
        product_list = []
        src = {}
        for line in lsusb_list:
            source = {}
            product = {}
            # initialize so the sanity check below cannot hit unbound names
            vendor_id = product_id = ""
            if re.search("hub", line, re.IGNORECASE):
                continue
            if len(line.split()[5].split(':')) == 2:
                vendor_id, product_id = line.split()[5].split(':')
            if not (vendor_id and product_id):
                test.fail("vendor/product id is not available")
            product['vendor_id'] = "0x" + vendor_id
            product['product_id'] = "0x" + product_id
            product_list.append(product.copy())
            if vid_pid:
                source = product.copy()
            if bus_dev:
                source['bus'] = line.split()[1]
                source['device'] = line.split()[3].rstrip(':')
            source_list.append(source.copy())
        logging.debug("usb device product dict {}, source dict {}".format(
            product_list, source_list))
        if not source_list or not product_list:
            test.fail("no available usb device in host")
        src['source'] = source_list
        src['product'] = product_list
        return src
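    # Worked example of the lsusb line this parses (an illustrative
    # device, not from the original test):
    #   "Bus 001 Device 003: ID 0951:1666 Kingston DataTraveler"
    # line.split()[1] -> "001" (bus), line.split()[3] -> "003:" (device,
    # trailing ':' stripped), line.split()[5] -> "0951:1666" (vendor:product).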

    def usb_disk_check(session, src_guest=None):
        """
        :param session: a console session of vm
        :param src_guest: a dict of the source xml of usb device from vm
        """

        # check and write the usb disk
        status, output = session.cmd_status_output("udevadm info /dev/sda")
        if status:
            test.fail("no available usb storage device")
        if session.cmd_status("dd if=/dev/zero of=/dev/sda bs=1M count=100",
                              timeout=300):
            test.fail("usb storage device write fail")

        # check whether passthrough the right usb device
        if passthrough and src_guest:
            output = output.strip().splitlines()
            for guest in src_guest['product']:
                # strip only the "0x" prefix; lstrip("0x") would also eat
                # leading zeros of the id itself
                pattern = "ID_MODEL_ID={}".format(guest['product_id'][2:])
                for line in output:
                    if pattern in line:
                        return
            test.fail("passthrough the wrong usb device")

    def usb_device_check(session, src_host=None):
        """
        :param session: a console session of vm
        :param src_host: a dict of the source xml of usb device from host
        """
        if passthrough:
            # check usb device xml
            for addr in src_host['source']:
                if vid_pid:
                    pattern = 'product id="{}"'.format(addr['product_id'])
                if bus_dev:
                    pattern = 'address bus="{}" device="{}"'.format(
                        int(addr['bus']), int(addr['device']))
                vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
                if pattern not in str(vmxml):
                    test.fail("the xml check of usb device fails")

            # check the pid and vid of usb passthrough device in vm
            output = session.get_command_output("lsusb")
            src_guest = get_usb_source(output.strip().splitlines())
            for host in src_host['product']:
                flag = False
                for guest in src_guest['product']:
                    if (guest['product_id'] == host['product_id']
                            and guest['vendor_id'] == host['vendor_id']):
                        flag = True
                        break
                if not flag:
                    test.fail("usb passthrough device check fail")

        # check usb disk /dev/sda
        if passthrough:
            usb_disk_check(session, src_guest)

    try:
        # remove usb controller/device from xml
        controllers = vmxml.get_devices(device_type="controller")
        for dev in controllers:
            if dev.type == "usb" or dev.type == "pci":
                vmxml.del_device(dev)

        hubs = vmxml.get_devices(device_type="hub")
        for hub in hubs:
            if hub.type_name == "usb":
                vmxml.del_device(hub)

        # assemble the xml of pci/pcie bus
        for model in bus_controller.split(','):
            pci_bridge = Controller('pci')
            pci_bridge.type = "pci"
            pci_bridge.model = model
            vmxml.add_device(pci_bridge)

        # assemble the xml of usb controller
        for model in usb_model.split(','):
            controller = Controller("controller")
            controller.type = "usb"
            controller.index = usb_index
            controller.model = model
            vmxml.add_device(controller)

        if usb_hub:
            hub = Hub("usb")
            vmxml.add_device(hub)

        # install essential package usbutils in host
        pkg = 'usbutils'
        if not utils_package.package_install(pkg):
            test.fail("package usbutils installation fail")

        # assemble the xml of usb passthrough device
        if passthrough:
            hostdevs = vmxml.get_devices(device_type="hostdev")
            for dev in hostdevs:
                vmxml.del_device(dev)
            lsusb_list = process.run('lsusb').stdout_text.splitlines()
            src_host = get_usb_source(lsusb_list)
            for addr in src_host['source']:
                dev = vmxml.get_device_class('hostdev')()
                source_xml = dev.Source()
                dev.mode = 'subsystem'
                dev.hostdev_type = 'usb'
                dev.managed = 'no'
                if vid_pid:
                    source_xml.vendor_id = addr['vendor_id']
                    source_xml.product_id = addr['product_id']
                if bus_dev:
                    source_xml.untyped_address = source_xml.new_untyped_address(
                        **addr)
                dev.source = source_xml
                vmxml.add_device(dev)

        # start vm
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login(timeout=start_timeout)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("vm xml after starting up {}".format(vmxml))

        # check usb controller in vm
        for model_type in usb_model.split(','):
            model_type = model_type.split('-')[-1].rstrip("1,2,3")
            logging.debug("check usb controller {} in vm".format(model_type))
            if session.cmd_status("dmesg | grep {}".format(model_type)):
                test.fail("usb controller check fail")

        # install package usbutils in vm
        if not utils_package.package_install(pkg, session):
            test.fail("package usbutils installation fails")

        # check usb device
        usb_device_check(session, src_host)

        if passthrough:
            # detach usb passthrough device from vm
            hostdevs = vmxml.get_devices('hostdev')
            logging.debug("detach usb device {}".format(hostdevs))
            for dev in hostdevs:
                if dev.hostdev_type == "usb":
                    virsh.detach_device(vm_name,
                                        dev.xml,
                                        flagstr="--live",
                                        debug=True,
                                        ignore_status=False)

            # check the hostdev element in xml after detaching
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            hostdevs = vmxml.get_devices('hostdev')
            logging.debug("hostdevs: {}".format(hostdevs))
            for dev in hostdevs:
                if dev.hostdev_type == "usb":
                    test.fail("detach usb device fail")

    finally:
        if 'session' in locals():
            session.close()
        vmxml_backup.sync()
Example #33
def run(test, params, env):
    """
    Sriov basic test:

    1.create max vfs;
    2.Check the nodedev info;
    3.Start a guest with vf;
    4.Reboot a guest with vf;
    5.suspend/resume a guest with vf
    """
    def find_pf():
        pci_address = ""
        for pci in pci_dirs:
            temp_iface_name = os.listdir("%s/net" % pci)[0]
            operstate = utils_net.get_net_if_operstate(temp_iface_name)
            if operstate == "up":
                pf_iface_name = temp_iface_name
                pci_address = pci
                break
        if pci_address == "":
            return False
        else:
            return pci_address

    def create_address_dict(pci_id):
        """
            Use a PCI address of the form xxxx:xx:xx.x to create an address dict.
        """
        device_domain = pci_id.split(':')[0]
        device_domain = "0x%s" % device_domain
        device_bus = pci_id.split(':')[1]
        device_bus = "0x%s" % device_bus
        device_slot = pci_id.split(':')[-1].split('.')[0]
        device_slot = "0x%s" % device_slot
        device_function = pci_id.split('.')[-1]
        device_function = "0x%s" % device_function
        attrs = {
            'type': 'pci',
            'domain': device_domain,
            'slot': device_slot,
            'bus': device_bus,
            'function': device_function
        }
        return attrs

    def addr_to_pci(addr):
        """
            Convert address dict to pci address: xxxxx:xx.x.
        """
        pci_domain = re.findall(r"0x(.+)", addr['domain'])[0]
        pci_bus = re.findall(r"0x(.+)", addr['bus'])[0]
        pci_slot = re.findall(r"0x(.+)", addr['slot'])[0]
        pci_function = re.findall(r"0x(.+)", addr['function'])[0]
        pci_addr = pci_domain + ":" + pci_bus + ":" + pci_slot + "." + pci_function
        return pci_addr

    def create_hostdev_interface(pci_id, managed, model):
        """
            Create hostdev type interface xml.
        """
        attrs = create_address_dict(pci_id)
        new_iface = Interface('hostdev')
        new_iface.managed = managed
        if model != "":
            new_iface.model = model
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        new_iface.hostdev_address = new_iface.new_iface_address(
            **{"attrs": attrs})
        chars = string.ascii_letters + string.digits + '-_'
        alias_name = 'ua-' + ''.join(
            random.choice(chars) for _ in list(range(64)))
        new_iface.alias = {'name': alias_name}
        return new_iface

    def create_vfs(vf_num):
        """
            Create max vfs.
        """
        net_device = []
        net_name = []
        # cleanup env and create vfs
        cmd = "echo 0 > %s/sriov_numvfs" % pci_address
        if driver == "mlx4_core":
            cmd = "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core"
        process.run(cmd, shell=True)
        pci_list = virsh.nodedev_list(cap='pci').stdout.strip().splitlines()
        net_list = virsh.nodedev_list(cap='net').stdout.strip().splitlines()
        pci_list_before = set(pci_list)
        net_list_before = set(net_list)
        cmd = "echo %d > %s/sriov_numvfs" % (vf_num, pci_address)
        if driver == "mlx4_core":
            cmd = "modprobe -v mlx4_core num_vfs=%d port_type_array=2,2 probe_vf=%d" \
                    % (vf_num, vf_num)
        test_res = process.run(cmd, shell=True)
        if test_res.exit_status != 0:
            test.fail("Fail to create vfs")

        def _vf_init_completed():
            try:
                net_list_sriov = virsh.nodedev_list(
                    cap='net').stdout.strip().splitlines()
                net_list_sriov = set(net_list_sriov)
                net_diff = list(net_list_sriov.difference(net_list_before))
                net_count = len(net_diff)
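                # mlx4 is loaded dual-port (port_type_array=2,2), so the PF
                # and each of the vf_num VFs contribute two net devices apiece.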
                if ((driver != "mlx4_core" and net_count != vf_num) or
                    (driver == "mlx4_core" and net_count != 2 * (vf_num + 1))):
                    net_diff = []
                    return False
                return net_diff
            except process.CmdError:
                test.fail(
                    "Get net list with 'virsh nodedev-list' failed\n")

        net_diff = utils_misc.wait_for(_vf_init_completed, timeout=300)
        pci_list_sriov = virsh.nodedev_list(
            cap='pci').stdout.strip().splitlines()
        pci_list_sriov = set(pci_list_sriov)
        pci_diff = list(pci_list_sriov.difference(pci_list_before))
        if not net_diff:
            test.fail("Get net list with 'virsh nodedev-list' failed\n")
        for net in net_diff:
            parts = net.split('_')
            length = len(parts)
            net = '_'.join(parts[1:length - 6])
            mac = ':'.join(parts[length - 6:])
            net_name.append(net)
        for pci_addr in pci_diff:
            temp_addr = pci_addr.split("_")
            pci_addr = ':'.join(temp_addr[1:4]) + '.' + temp_addr[4]
            vf_net_name = os.listdir("%s/%s/net" %
                                     (pci_device_dir, pci_addr))[0]
            net_device.append(vf_net_name)
        logging.debug(sorted(net_name))
        logging.debug(sorted(net_device))
        if driver != "mlx4_core" and sorted(net_name) != sorted(net_device):
            test.fail("The net name get from nodedev-list is wrong\n")

    def get_ip_by_mac(mac_addr, timeout=120):
        """
        Get the interface IP address for a given MAC address.

        :param mac_addr: MAC address of the interface
        :param timeout: seconds to wait for an IP address to show up
        """
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)

        def get_ip():
            return utils_net.get_guest_ip_addr(session, mac_addr)

        try:
            ip_addr = ""
            iface_name = utils_net.get_linux_ifname(session, mac_addr)
            if iface_name is None:
                test.fail("no interface with MAC address %s found" % mac_addr)
            session.cmd("pkill -9 dhclient", ignore_all_errors=True)
            session.cmd("dhclient %s " % iface_name, ignore_all_errors=True)
            ip_addr = utils_misc.wait_for(get_ip, timeout)
            logging.debug("The ip addr is %s", ip_addr)
        except Exception:
            logging.warning("Find %s with MAC address %s but no ip for it" %
                            (iface_name, mac_addr))
        finally:
            session.close()
        return ip_addr

    def create_nodedev_pci(pci_address):
        """
            Convert "xxxx:xx:xx.x" to "pci_xxxx_xx_xx_x".
        """
        nodedev_addr = pci_address.split(':')[0:2]
        slot_function = pci_address.split(':')[2]
        nodedev_addr.append(slot_function.split('.')[0])
        nodedev_addr.append(slot_function.split('.')[1])
        nodedev_addr.insert(0, "pci")
        nodedev_addr = "_".join(nodedev_addr)
        return nodedev_addr

    def create_network_interface(name):
        """
            Create network type interface xml.
        """
        new_iface = Interface('network')
        new_iface.source = {'network': name}
        new_iface.model = "virtio"
        new_iface.mac_address = utils_net.generate_mac_address_simple()
        return new_iface

    def create_hostdev_network():
        """
            Create hostdev type with vf pool network xml.
        """
        vf_addr_list = []
        netxml = network_xml.NetworkXML()
        if vf_pool_source == "vf_list":
            for vf in vf_list:
                attrs = create_address_dict(vf)
                new_vf = netxml.new_vf_address(**{'attrs': attrs})
                vf_addr_list.append(new_vf)
            netxml.driver = {'name': 'vfio'}
            netxml.forward = {"mode": "hostdev", "managed": managed}
            netxml.vf_list = vf_addr_list
        else:
            netxml.pf = {"dev": pf_name}
            netxml.forward = {"mode": "hostdev", "managed": managed}
        netxml.name = net_name
        logging.debug(netxml)
        return netxml

    def create_macvtap_network():
        """
        Create macvtap type network xml.
        """
        forward_interface_list = []
        for vf_name in vf_name_list:
            forward_interface = {'dev': vf_name}
            forward_interface_list.append(forward_interface)
        netxml = network_xml.NetworkXML()
        netxml.name = net_name
        netxml.forward = {'dev': vf_name_list[0], 'mode': 'passthrough'}
        netxml.forward_interface = forward_interface_list
        logging.debug(netxml)
        return netxml

    def do_operation():
        """
            Do operation in guest os with vf and check the os behavior after operation.
        """
        if operation == "resume_suspend":
            try:
                virsh.suspend(vm.name, debug=True, ignore_status=False)
                virsh.resume(vm.name, debug=True, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Suspend-Resume %s with vf failed: %s" % (vm_name,
                                                                    detail)
                test.fail(err_msg)
        if operation == "reboot":
            try:
                if vm.serial_console is not None:
                    vm.cleanup_serial_console()
                    vm.create_serial_console()
                virsh.reboot(vm.name, ignore_status=False)
                get_ip_by_mac(mac_addr, timeout=120)
            except process.CmdError as detail:
                err_msg = "Reboot %s with vf failed: %s" % (vm_name, detail)
                test.fail(err_msg)
        if operation == "save":
            result = virsh.managedsave(vm_name, ignore_status=True, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=True)

        if operation == "restart_libvirtd":
            detach_interface()
            utils_libvirtd.libvirtd_restart()
            interface = attach_interface()

    def check_info():
        """
            Check the pf or vf info after creating vfs.
        """
        if info_type == "pf_info" or info_type == "vf_order":
            nodedev_pci = create_nodedev_pci(pci_address.split("/")[-1])
            xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            if info_type == "pf_info":
                product_info = xml.cap.product_info
                max_count = xml.max_count
                if pci_info.find(product_info) == -1:
                    test.fail(
                        "The product_info show in nodedev-dumpxml is wrong\n")
                if int(max_count) != max_vfs:
                    test.fail(
                        "The maxCount show in nodedev-dumpxml is wrong\n")
            if info_type == "vf_order":
                vf_addr_list = xml.cap.virt_functions
                if len(vf_addr_list) != max_vfs:
                    test.fail(
                        "The num of vf list show in nodedev-dumpxml is wrong\n"
                    )
                addr_list = []
                for vf_addr in vf_addr_list:
                    addr = vf_addr.domain + ":" + vf_addr.bus + ":" + vf_addr.slot + "." + vf_addr.function
                    addr_list.append(addr)
                logging.debug(
                    "The vf addr list show in nodedev-dumpxml is %s\n",
                    addr_list)
                if sorted(addr_list) != addr_list:
                    test.fail(
                        "The vf addr list show in nodedev-dumpxml is not sorted correctly\n"
                    )
        elif info_type == "vf_info":
            vf_addr = vf_list[0]
            nodedev_pci = create_nodedev_pci(vf_addr)
            vf_xml = NodedevXML.new_from_dumpxml(nodedev_pci)
            vf_bus_slot = ':'.join(vf_addr.split(':')[1:])
            res = process.run("lspci -s %s -vv" % vf_bus_slot)
            vf_pci_info = res.stdout_text
            vf_product_info = vf_xml.cap.product_info
            if vf_pci_info.find(vf_product_info) == -1:
                test.fail(
                    "The product_info show in nodedev-dumpxml is wrong\n")
            pf_addr = vf_xml.cap.virt_functions[0]
            pf_addr_domain = re.findall(r"0x(.+)", pf_addr.domain)[0]
            pf_addr_bus = re.findall(r"0x(.+)", pf_addr.bus)[0]
            pf_addr_slot = re.findall(r"0x(.+)", pf_addr.slot)[0]
            pf_addr_function = re.findall(r"0x(.+)", pf_addr.function)[0]
            pf_pci = pf_addr_domain + ":" + pf_addr_bus + ":" + pf_addr_slot + "." + pf_addr_function
            if pf_pci != pci_id:
                test.fail(
                    "The pf address show in vf nodedev-dumpxml is wrong\n")

    def create_interface():
        """
            Call different function to create interface according to the type
        """
        new_iface = Interface('network')
        if vf_type == "vf":
            new_iface = create_hostdev_interface(vf_addr, managed, model)
        if vf_type == "vf_pool":
            netxml = create_hostdev_network()
            virsh.net_define(netxml.xml, ignore_status=True)
            if not inactive_pool:
                virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        if vf_type == "macvtap":
            new_iface = Interface('direct')
            new_iface.source = {"dev": vf_name, "mode": "passthrough"}
            new_iface.mac_address = utils_net.generate_mac_address_simple()
            new_iface.model = "virtio"
        if vf_type == "macvtap_network":
            netxml = create_macvtap_network()
            result = virsh.net_define(netxml.xml, ignore_status=True)
            virsh.net_start(netxml.name)
            new_iface = create_network_interface(netxml.name)
        return new_iface

    def detach_interface():
        """
            Detach interface:

            1.Detach interface from xml;
            2.Check the live xml after detach interface;
            3.Check the vf driver after detach interface.
        """
        def _detach_completed():
            result = virsh.domiflist(vm_name, "", ignore_status=True)
            return result.stdout.find(mac_addr) == -1

        def check_addr_attrs():
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device = live_xml.devices
            hostdev_list = device.by_device_tag("hostdev")
            for hostdev in hostdev_list:
                addr = hostdev.source.untyped_address
                hostdev_addr_attrs = {
                    "domain": addr.domain,
                    "bus": addr.bus,
                    "slot": addr.slot,
                    "function": addr.function
                }
                if hostdev_addr_attrs == vf_addr_attrs:
                    return False
            return True

        result = virsh.detach_device(vm_name, new_iface.xml)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if vf_type == "hostdev":
            check_ret = utils_misc.wait_for(check_addr_attrs, timeout=60)
            if not check_ret:
                test.fail("The hostdev device detach failed from xml\n")
        else:
            utils_misc.wait_for(_detach_completed, timeout=60)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        device = live_xml.devices
        logging.debug("Domain xml after detach interface:\n %s", live_xml)

        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.hostdev_address.attrs == vf_addr_attrs:
                        test.fail(
                            "The hostdev interface still in the guest xml after detach\n"
                        )
                    break
            driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            logging.debug("The driver after vf detached from guest is %s\n",
                          driver)
            if managed == "no":
                if driver != "vfio-pci":
                    test.fail(
                        "The vf pci driver is not vfio-pci after detached from guest with managed as no\n"
                    )
                result = virsh.nodedev_reattach(nodedev_pci_addr)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            elif driver != origin_driver:
                test.fail(
                    "The vf pci driver is not reset to the origin driver after detach from guest: %s vs %s\n"
                    % (driver, origin_driver))
        else:
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if interface.source["dev"] == vf_name:
                        test.fail(
                            "The macvtap interface still exist in the guest xml after detach\n"
                        )
                    break

    def attach_interface():
        """
            Attach interface:

            1.Attach interface from xml;
            2.Check the vf driver after attach interface;
            3.Check the live xml after attach interface;
        """
        if managed == "no":
            result = virsh.nodedev_detach(nodedev_pci_addr)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        logging.debug("attach interface xml:\n %s", new_iface)
        result = virsh.attach_device(vm_name,
                                     new_iface.xml,
                                     flagstr=option,
                                     debug=True)
        utils_test.libvirt.check_exit_status(result, expect_error=False)
        if option == "--config":
            result = virsh.start(vm_name)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        # For option == "--persistent", after VM destroyed and then start, the device should still be there.
        if option == "--persistent":
            virsh.destroy(vm_name)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
        live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug(live_xml)

        if vf_type != "hostdev":
            get_ip_by_mac(mac_addr, timeout=60)

        device = live_xml.devices

        if vf_type == "hostdev":
            hostdev_list = device.by_device_tag("hostdev")
            if len(hostdev_list) == 0:
                test.fail("The hostdev device attach failed from xml\n")
            else:
                for hostdev in hostdev_list:
                    if hostdev.type == "pci":
                        break
                interface = hostdev

        if vf_type == "vf" or vf_type == "vf_pool":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "hostdev":
                    if interface.driver.driver_attr['name'] != 'vfio':
                        test.fail(
                            "The driver of the hostdev interface is not vfio\n"
                        )
                    break
            vf_addr_attrs = interface.hostdev_address.attrs
            pci_addr = addr_to_pci(vf_addr_attrs)
            nic_driver = os.readlink(
                os.path.join(pci_device_dir, vf_addr, "driver")).split('/')[-1]
            if nic_driver != "vfio-pci":
                test.fail("The driver of the hostdev interface is not vfio\n")
        elif vf_type == "macvtap" or vf_type == "macvtap_network":
            for interface in device.by_device_tag("interface"):
                if interface.type_name == "direct":
                    if vf_type == "macvtap":
                        if interface.source["dev"] == new_iface.source["dev"]:
                            match = "yes"
                            vf_name = interface.source["dev"]
                    elif interface.source['dev'] in vf_name_list:
                        match = "yes"
                        vf_name = interface.source["dev"]
                if match != "yes":
                    test.fail(
                        "The dev name or mode of macvtap interface is wrong after attach\n"
                    )
        return interface

    def setup_controller(nic_num, controller_index, ctl_models):
        """
        Create controllers bond to numa node in the guest xml

        :param nic_num: number of nic card bond to numa node
        :param controller_index: index num used to create controllers
        :param ctl_models: controller topology models for the numa bond
        """
        index = controller_index
        if nic_num == 2:
            ctl_models.append('pcie-switch-upstream-port')
            ctl_models.append('pcie-switch-downstream-port')
            ctl_models.append('pcie-switch-downstream-port')
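        # A pcie-switch-upstream-port must plug into a pcie-root-port, and
        # each downstream port into the upstream port; the address assignment
        # in the loop below wires that chain up by controller index.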
        for i in range(index):
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = i
            if i == 0:
                controller.model = 'pcie-root'
            else:
                controller.model = 'pcie-root-port'
            vmxml.add_device(controller)
        set_address = False
        for model in ctl_models:
            controller = Controller("controller")
            controller.type = "pci"
            controller.index = index
            controller.model = model
            if set_address or model == "pcie-switch-upstream-port":
                attrs = {
                    'type': 'pci',
                    'domain': '0',
                    'slot': '0',
                    'bus': index - 1,
                    'function': '0'
                }
                controller.address = controller.new_controller_address(
                    **{"attrs": attrs})
                logging.debug(controller)
            if controller.model == "pcie-expander-bus":
                controller.node = "0"
                controller.target = {'busNr': '100'}
                set_address = True
            else:
                set_address = False
            logging.debug(controller)
            vmxml.add_device(controller)
            index += 1
        return index - 1

    def add_numa(vmxml):
        """
        Add numa node in the guest xml

        :param vmxml: The instance of the VMXML class
        """
        vcpu = vmxml.vcpu
        max_mem = vmxml.max_mem
        max_mem_unit = vmxml.max_mem_unit
        numa_dict = {}
        numa_dict_list = []
        # Compute the memory size for each numa node
        if vcpu == 1:
            numa_dict['id'] = '0'
            numa_dict['cpus'] = '0'
            numa_dict['memory'] = str(max_mem)
            numa_dict['unit'] = str(max_mem_unit)
            numa_dict_list.append(numa_dict)
        else:
            for index in range(2):
                numa_dict['id'] = str(index)
                numa_dict['memory'] = str(max_mem // 2)
                numa_dict['unit'] = str(max_mem_unit)
                if vcpu == 2:
                    numa_dict['cpus'] = str(index)
                else:
                    if index == 0:
                        if vcpu == 3:
                            numa_dict['cpus'] = str(index)
                        if vcpu > 3:
                            numa_dict['cpus'] = "%s-%s" % (index,
                                                           vcpu // 2 - 1)
                    else:
                        numa_dict['cpus'] = "%s-%s" % (vcpu // 2,
                                                       str(vcpu - 1))
                numa_dict_list.append(numa_dict)
                numa_dict = {}
        # Add cpu device with numa node setting in domain xml
        vmxml_cpu = vm_xml.VMCPUXML()
        vmxml_cpu.xml = "<cpu><numa/></cpu>"
        vmxml_cpu.numa_cell = numa_dict_list
        vmxml.cpu = vmxml_cpu

    def create_iface_list(bus_id, nic_num, vf_list):
        """
            Create hostdev interface list bond to numa node

            :param bus_id: bus id in the pci address, deciding which
                           controller the nic is attached to
            :param nic_num: number of nic card bond to numa node
            :param vf_list: sriov vf list
        """
        iface_list = []
        for num in range(nic_num):
            vf_addr = vf_list[num]
            iface = create_hostdev_interface(vf_addr, managed, model)
            bus_id -= num
            attrs = {
                'type': 'pci',
                'domain': '0',
                'slot': '0',
                'bus': bus_id,
                'function': '0'
            }
            iface.address = iface.new_iface_address(**{"attrs": attrs})
            iface_list.append(iface)
        return iface_list

    def check_guestos(iface_list):
        """
            Check whether vfs bound to the numa node can get an ip
            successfully in the guest os

            :param iface_list: hostdev interface list
        """
        for iface in iface_list:
            mac_addr = iface.mac_address
            get_ip_by_mac(mac_addr, timeout=60)

    def check_numa(vf_driver):
        """
        Check whether the vfs are bound to the correct numa node in the guest os

        :param vf_driver: vf driver
        """
        if vm.serial_console:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        vf_pci = "/sys/bus/pci/drivers/%s" % vf_driver
        vf_dir = session.cmd_output("ls -d %s/00*" %
                                    vf_pci).strip().split('\n')
        for vf in vf_dir:
            numa_node = session.cmd_output('cat %s/numa_node' %
                                           vf).strip().split('\n')[-1]
            logging.debug("The vf is attached to numa node %s\n", numa_node)
            if numa_node != "0":
                test.fail("The vf is not attached to numa node 0\n")
        session.close()

    def remove_devices(vmxml, device_type):
        """
        Remove the addresses of all devices, or all usb devices, from the XML.

        :param vmxml: The VM XML to be modified
        :param device_type: The element type to remove ('address' or 'usb')
        """
        if device_type not in ['address', 'usb']:
            return
        type_dict = {'address': '/devices/*/address', 'usb': '/devices/*'}
        try:
            for elem in vmxml.xmltreefile.findall(type_dict[device_type]):
                if device_type == 'usb':
                    if elem.get('bus') == 'usb':
                        vmxml.xmltreefile.remove(elem)
                else:
                    vmxml.xmltreefile.remove(elem)
        except (AttributeError, TypeError) as details:
            test.error("Fail to remove '%s': %s" % (device_type, details))
        vmxml.xmltreefile.write()

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    machine_type = params.get("machine_type", "pc")
    operation = params.get("operation")
    driver = params.get("driver", "ixgbe")
    status_error = params.get("status_error", "no") == "yes"
    model = params.get("model", "")
    managed = params.get("managed", "yes")
    attach = params.get("attach", "")
    option = params.get("option", "")
    vf_type = params.get("vf_type", "")
    dev_type = params.get("dev_type", "")
    info_check = params.get("info_check", "no")
    info_type = params.get("info_type", "")
    vf_pool_source = params.get("vf_pool_source", "vf_list")
    loop_times = int(params.get("loop_times", "1"))
    start_vm = "yes" == params.get("start_vm", "yes")
    including_pf = "yes" == params.get("including_pf", "no")
    max_vfs_attached = "yes" == params.get("max_vfs_attached", "no")
    inactive_pool = "yes" == params.get("inactive_pool", "no")
    duplicate_vf = "yes" == params.get("duplicate_vf", "no")
    expected_error = params.get("error_msg", "")
    nic_num = int(params.get("nic_num", "1"))
    nfv = params.get("nfv", "no") == "yes"
    ctl_models = params.get("ctl_models", "").split(' ')
    controller_index = int(params.get("controller_index", "12"))

    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    vmxml.remove_all_device_by_type('interface')
    vmxml.sync()
    if max_vfs_attached:
        controller_devices = vmxml.get_devices("controller")
        pci_bridge_controllers = []
        for device in controller_devices:
            logging.debug(device)
            if device.type == 'pci' and device.model in ("pci-bridge",
                                                         "pcie-root-port"):
                pci_bridge_controllers.append(device)
        if not pci_bridge_controllers:
            pci_bridge_controller = Controller("controller")
            pci_bridge_controller.type = "pci"
            pci_bridge_controller.index = "1"
            pci_bridge_controller.model = "pci-bridge"
            vmxml.add_device(pci_bridge_controller)
            vmxml.sync()

    if start_vm:
        if not vm.is_dead():
            vm.destroy()
        vm.start()
        if vm.serial_console is not None:
            vm.cleanup_serial_console()
        vm.create_serial_console()
        session = vm.wait_for_serial_login(timeout=240)
        session.close()
    else:
        if not vm.is_dead():
            vm.destroy()
    driver_dir = "/sys/bus/pci/drivers/%s" % driver
    pci_dirs = glob.glob("%s/000*" % driver_dir)
    pci_device_dir = "/sys/bus/pci/devices"
    pci_address = ""
    net_name = "test-net"

    # Prepare interface xml
    try:
        pf_iface_name = ""
        pci_address = utils_misc.wait_for(find_pf, timeout=60)
        if not pci_address:
            test.cancel("no up pf found in the test machine")
        pci_id = pci_address.split("/")[-1]
        pf_name = os.listdir('%s/net' % pci_address)[0]
        bus_slot = ':'.join(pci_address.split(':')[1:])
        pci_info = process.run("lspci -s %s -vv" % bus_slot).stdout_text
        logging.debug("The pci info of the sriov card is:\n %s", pci_info)
        max_vfs = int(re.findall(r"Total VFs: (.+?),", pci_info)[0]) - 1
        if info_check == 'yes' or max_vfs < 32:
            vf_num = max_vfs
        else:
            vf_num = max_vfs // 2 + 1
        create_vfs(vf_num)

        vf_list = []
        vf_name_list = []
        vf_mac_list = []

        for i in range(vf_num):
            vf = os.readlink("%s/virtfn%s" % (pci_address, str(i)))
            vf = os.path.split(vf)[1]
            vf_list.append(vf)
            vf_name = os.listdir('%s/%s/net' % (pci_device_dir, vf))[0]
            with open('%s/%s/net/%s/address' % (pci_device_dir, vf, vf_name),
                      'r') as f:
                vf_mac = f.readline().strip()
            vf_name_list.append(vf_name)
            vf_mac_list.append(vf_mac)

        if attach == "yes" and not nfv:
            vf_addr = vf_list[0]
            if dev_type:
                mac_addr = vf_mac_list[0]
                new_iface = utils_test.libvirt.create_hostdev_xml(
                    vf_addr, managed=managed, xmlfile=False)
            else:
                new_iface = create_interface()
                mac_addr = new_iface.mac_address
            if inactive_pool:
                result = virsh.attach_device(vm_name,
                                             new_iface.xml,
                                             flagstr=option,
                                             ignore_status=True,
                                             debug=True)
                utils_test.libvirt.check_exit_status(result, expected_error)
            else:
                nodedev_pci_addr = create_nodedev_pci(vf_addr)
                origin_driver = os.readlink(
                    os.path.join(pci_device_dir, vf_addr,
                                 "driver")).split('/')[-1]
                logging.debug(
                    "The driver of vf before attaching to guest is %s\n",
                    origin_driver)
                count = 0
                while count < loop_times:
                    interface = attach_interface()
                    if vf_type in ["vf", "vf_pool"]:
                        vf_addr_attrs = interface.hostdev_address.attrs
                    if vf_type == "hostdev":
                        addr = interface.source.untyped_address
                        vf_addr_attrs = {
                            "domain": addr.domain,
                            "bus": addr.bus,
                            "slot": addr.slot,
                            "function": addr.function
                        }
                    if operation != "":
                        do_operation()
                    detach_interface()
                    count += 1
                if max_vfs_attached:
                    interface_list = []
                    for vf_addr in vf_list:
                        new_iface = create_interface()
                        mac_addr = new_iface.mac_address
                        nodedev_pci_addr = create_nodedev_pci(vf_addr)
                        attach_interface()
                        interface_list.append(new_iface)
                    count = 0
                    for new_iface in interface_list:
                        vf_addr = vf_list[count]
                        vf_addr_attrs = new_iface.hostdev_address.attrs
                        detach_interface()
                        count += 1
        if info_check == "yes":
            check_info()
        if including_pf:
            vf_list = []
            pf_addr = pci_id
            vf_list.append(pf_addr)
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if duplicate_vf:
            vf_list.append(vf_list[0])
            netxml = create_hostdev_network()
            result = virsh.net_define(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
            result = virsh.net_create(netxml.xml,
                                      ignore_status=True,
                                      debug=True)
            utils_test.libvirt.check_exit_status(result, expected_error)
        if nfv:
            vf_driver = os.readlink(
                os.path.join(pci_device_dir, vf_list[0],
                             "driver")).split('/')[-1]
            vmxml.remove_all_device_by_type('controller')
            remove_devices(vmxml, 'address')
            remove_devices(vmxml, 'usb')
            osxml = vmxml.os
            if "i440fx" in vmxml.os.machine:
                osxml.machine = "q35"
                vmxml.os = osxml
            add_numa(vmxml)
            bus_id = setup_controller(nic_num, controller_index, ctl_models)
            vmxml.sync()
            logging.debug(vmxml)
            iface_list = create_iface_list(bus_id, nic_num, vf_list)
            for iface in iface_list:
                process.run("cat %s" % iface.xml, shell=True).stdout_text
                result = virsh.attach_device(vm_name,
                                             iface.xml,
                                             flagstr=option,
                                             ignore_status=True,
                                             debug=True)
                utils_test.libvirt.check_exit_status(result,
                                                     expect_error=False)
            result = virsh.start(vm_name, debug=True)
            utils_test.libvirt.check_exit_status(result, expect_error=False)
            live_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug(live_xml)
            check_guestos(iface_list)
            check_numa(vf_driver)
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if driver == "mlx4_core":
            # Reload mlx4 driver to default setting
            process.run(
                "modprobe -r mlx4_en ; modprobe -r mlx4_ib ; modprobe -r mlx4_core",
                shell=True)
            process.run(
                "modprobe mlx4_core; modprobe mlx4_ib;  modprobe mlx4_en",
                shell=True)
        else:
            process.run("echo 0 > %s/sriov_numvfs" % pci_address, shell=True)
        if vf_type == "vf_pool" or vf_type == "macvtap_network":
            virsh.net_destroy(net_name)
            virsh.net_undefine(net_name, ignore_status=True)
Exemple #34
0
    def setup_controller_xml():
        """
        Prepare controller devices of VM XML according to params.
        """

        type = 'pci' if cntlr_type is None else cntlr_type
        curcntlr = cntlr_cur
        while curcntlr < cntlr_num:
            ctrl = Controller(type_name=type)
            if cntlr_node:
                ctrl.node = cntlr_node
            if cntlr_model:
                ctrl.model = cntlr_model
                if cntlr_model == 'pci-bridge':
                    ctrl.model_name = {'name': 'pci-bridge'}
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            elif with_index:
                if cntlr_model == 'pci-bridge':
                    for i in range(
                            1,
                            int(match_new_addr(addr_str[curcntlr])['bus'], 16)
                            + 1):
                        vm_xml.add_device(add_device('pci', str(i),
                                                     'pci-root'))
                    ctrl.index = str(
                        int(match_new_addr(addr_str[curcntlr])['bus'], 16) + 1)
                else:
                    ctrl.index = str(curcntlr)
            if target_index is not None:
                ctrl.target = {'index': target_index}
            elif with_index:
                if cntlr_model == 'pci-bridge':
                    ctrl.target = {
                        'chassisNr':
                        str(
                            int(match_new_addr(addr_str[curcntlr])['bus'], 16)
                            + 1)
                    }
                else:
                    ctrl.target = {'index': str(curcntlr)}
            if addr_str is not None:
                for address in addr_str:
                    ctrl.address = ctrl.new_controller_address(
                        attrs=match_new_addr(address))

            logging.debug("Controller XML is:%s", ctrl)
            vm_xml.add_device(ctrl)
            curcntlr += 1
        if special_num:
            spe_num = int(special_num)
            ctrl = Controller(type_name=type)

            if cntlr_model:
                ctrl.model = cntlr_model
            ctrl.index = spe_num
            ctrl.target = {'index': spe_num}
            if addr_str is not None and cntlr_model != 'pci-root':
                for address in addr_str:
                    ctrl.address = ctrl.new_controller_address(
                        attrs=match_new_addr(address))

            logging.debug("Controller XML is:%s", ctrl)
            vm_xml.add_device(ctrl)
Exemple #35
0
def run(test, params, env):
    """
    Verify hotplug feature for char device
    """

    vm_name = params.get("main_vm", "vm1")
    status_error = "yes" == params.get("status_error", "no")
    char_dev = params.get("char_dev", "file")
    hotplug_type = params.get("hotplug_type", "qmp")
    dup_charid = "yes" == params.get("dup_charid", "no")
    dup_devid = "yes" == params.get("dup_devid", "no")
    diff_devid = "yes" == params.get("diff_devid", "no")

    tmp_dir = os.path.join(test.tmpdir, "hotplug_serial")
    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)
    os.chmod(tmp_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # add controller for each char device
    devices = vm_xml.get_devices()
    controllers = vm_xml.get_devices(device_type="controller")
    for dev in controllers:
        if dev.type == "virtio-serial":
            devices.remove(dev)
    controller = Controller("controller")
    controller.type = "virtio-serial"
    controller.index = 0
    devices.append(controller)
    vm_xml.set_devices(devices)
    vm_xml.sync()

    # start and login vm
    vm = env.get_vm(vm_name)
    vm.start()
    session = vm.wait_for_login()

    def prepare_channel_xml(to_file, char_type, id=0):
        """
        Generate a channel device xml file for the given char type.
        """
        mode = ''
        if char_type == "file":
            channel_type = char_type
            channel_path = os.path.join(tmp_dir, char_type)
        elif char_type == "socket":
            channel_type = 'unix'
            channel_path = os.path.join(tmp_dir, char_type)
            mode = 'bind'
        elif char_type == "pty":
            channel_type = char_type
            channel_path = ("/dev/pts/%s" % id)
        params = {'channel_type_name': channel_type,
                  'source_path': channel_path,
                  'source_mode': mode,
                  'target_type': 'virtio',
                  'target_name': char_type}
        channel_xml = utlv.create_channel_xml(params, alias=True, address=True)
        shutil.copyfile(channel_xml.xml, to_file)

    def hotplug_device(type, char_dev, id=0):
        tmp_file = os.path.join(tmp_dir, char_dev)
        if type == "qmp":
            char_add_opt = "chardev-add "
            dev_add_opt = "device_add virtserialport,chardev="
            if char_dev == "file":
                char_add_opt += "file,path=%s,id=file" % tmp_file
                dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
            elif char_dev == "socket":
                char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
                dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
            elif char_dev == "pty":
                char_add_opt += ("pty,path=/dev/pts/%s,id=pty" % id)
                dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
            result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
            if result.exit_status:
                raise error.TestError('Failed to add chardev %s to %s. Result:\n %s'
                                      % (char_dev, vm_name, result))
            result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
            if result.exit_status:
                raise error.TestError('Failed to add device %s to %s. Result:\n %s'
                                      % (char_dev, vm_name, result))
        elif type == "attach":
            xml_file = os.path.join(tmp_dir, "xml_%s" % char_dev)
            if char_dev in ["file", "socket"]:
                prepare_channel_xml(xml_file, char_dev)
            elif char_dev == "pty":
                prepare_channel_xml(xml_file, char_dev, id)
            result = virsh.attach_device(vm_name, xml_file)
            # serial device was introduced by the following commit:
            # http://libvirt.org/git/?p=libvirt.git;a=commit;h=b63ea467617e3cbee4282ab2e5e780b4119cef3d
            if "unknown device type" in result.stderr:
                raise error.TestNAError('Failed to attach %s to %s. Result:\n %s'
                                        % (char_dev, vm_name, result))
        return result

    def dup_hotplug(type, char_dev, id, dup_charid=False, dup_devid=False, diff_devid=False):
        tmp_file = os.path.join(tmp_dir, char_dev)
        if type == "qmp":
            char_add_opt = "chardev-add "
            dev_add_opt = "device_add virtserialport,chardev="
            if char_dev == "file":
                if dup_charid:
                    char_add_opt += "file,path=%s,id=file" % tmp_file
                if dup_devid:
                    dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
                if diff_devid:
                    dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file1"
            elif char_dev == "socket":
                if dup_charid:
                    char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
                if dup_devid:
                    dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
                if diff_devid:
                    dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket1"
            elif char_dev == "pty":
                if dup_charid:
                    char_add_opt += "pty,path=/dev/pts/%s,id=pty" % id
                if dup_devid:
                    dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
                if diff_devid:
                    dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty1"
            if dup_charid:
                result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
            if dup_devid or diff_devid:
                result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
        elif type == "attach":
            if dup_devid:
                result = hotplug_device(type, char_dev, id)
        return result

    def confirm_hotplug_result(char_dev, id=0):
        tmp_file = os.path.join(tmp_dir, char_dev)
        serial_file = os.path.join("/dev/virtio-ports", char_dev)
        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        h_o = result.stdout.strip()
        if not h_o.count("name = \"%s\"" % char_dev):
            raise error.TestFail("Cann't find device(%s) from:\n%s" % (char_dev, h_o))
        if char_dev == "file":
            session.cmd("echo test > %s" % serial_file)
            f = open(tmp_file, "r")
            r_o = f.read()
            f.close()
        elif char_dev == "socket":
            session.cmd("echo test > /tmp/file")
            sock = socket.socket(socket.AF_UNIX)
            sock.connect(tmp_file)
            session.cmd("dd if=/tmp/file of=%s" % serial_file)
            r_o = sock.recv(1024)
        elif char_dev == "pty":
            session.cmd("echo test > /tmp/file")
            session.cmd("dd if=/tmp/file of=%s &" % serial_file)
            dev_file = "/dev/pts/%s" % id
            if not os.path.exists(dev_file):
                raise error.TestFail("%s doesn't exist." % dev_file)
            p = subprocess.Popen(["/usr/bin/cat", dev_file],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            session.cmd("echo test >> /tmp/file &")
            while True:
                r_o = p.stdout.readline()
                if r_o or p.poll():
                    break
                time.sleep(0.2)
            p.kill()
        if not r_o.count("test"):
            err_info = "%s device file doesn't match 'test':%s" % (char_dev, r_o)
            raise error.TestFail(err_info)

    def unhotplug_serial_device(type, char_dev):
        if type == "qmp":
            del_dev_opt = "device_del %s" % char_dev
            del_char_opt = "chardev-remove %s" % char_dev
            result = virsh.qemu_monitor_command(vm_name, del_dev_opt, "--hmp")
            if result.exit_status:
                raise error.TestError('Failed to del device %s from %s.Result:\n%s'
                                      % (char_dev, vm_name, result))
            result = virsh.qemu_monitor_command(vm_name, del_char_opt, "--hmp")
        elif type == "attach":
            xml_file = os.path.join(tmp_dir, "xml_%s" % char_dev)
            result = virsh.detach_device(vm_name, xml_file)

    def confirm_unhotplug_result(char_dev):
        serial_file = os.path.join("/dev/virtio-ports", char_dev)
        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        uh_o = result.stdout.strip()
        if uh_o.count("chardev = \"%s\"" % char_dev):
            raise error.TestFail("Still can get serial device(%s) from: '%s'"
                                 % (char_dev, uh_o))
        if os.path.exists(serial_file):
            raise error.TestFail("File '%s' still exists after unhotplug" % serial_file)

    # run test case
    try:
        if char_dev in ['file', 'socket']:
            # if char_dev is file or socket, it doesn't need pts index
            pts_id = 0
        else:
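            # pick the next pts index after the current maximum so the
            # hotplugged pty lands on a predictable, currently unused slot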
            pts_id = str(utils_misc.aton(utils_misc.get_dev_pts_max_id()) + 1)
            if os.path.exists("/dev/pts/%s" % pts_id):
                raise error.TestError('invalid pts index(%s) provided.' % pts_id)
        if status_error:
            hotplug_device(hotplug_type, char_dev, pts_id)
            ret = dup_hotplug(hotplug_type, char_dev, pts_id, dup_charid, dup_devid, diff_devid)
            dup_o = ret.stdout.strip()
            if hotplug_type == "qmp":
                # although the command has failed, ret.exit_status will still be 0.
                err_o1 = "Duplicate ID"
                err_o2 = "Parsing chardev args failed"
                err_o3 = "Property 'virtserialport.chardev' can't"
                if (err_o1 not in dup_o) and (err_o2 not in dup_o) and (err_o3 not in dup_o):
                    raise error.TestFail("Expect fail, but run successfully:\n%s" % ret)
            else:
                if "chardev already exists" not in dup_o:
                    logging.info("Expect fail,but run successfully:\n%s" % ret)
        else:
            if char_dev != "all":
                #1.hotplug serial device
                hotplug_device(hotplug_type, char_dev, pts_id)

                #2.confirm hotplug result
                confirm_hotplug_result(char_dev, pts_id)

                #3.unhotplug serial device
                unhotplug_serial_device(hotplug_type, char_dev)

                #4.confirm unhotplug result
                confirm_unhotplug_result(char_dev)
            else:
                #1.hotplug serial device
                hotplug_device(hotplug_type, "file")
                hotplug_device(hotplug_type, "socket")
                hotplug_device(hotplug_type, "pty", pts_id)

                #2.confirm hotplug result
                confirm_hotplug_result("file")
                confirm_hotplug_result("socket")
                confirm_hotplug_result("pty", pts_id)

                #3.unhotplug serial device
                unhotplug_serial_device(hotplug_type, "file")
                unhotplug_serial_device(hotplug_type, "socket")
                unhotplug_serial_device(hotplug_type, "pty")

                #4.confirm unhotplug result
                confirm_unhotplug_result("file")
                confirm_unhotplug_result("socket")
                confirm_unhotplug_result("pty")
    finally:
        vm_xml_backup.sync()
        if os.path.exists(tmp_dir):
            shutil.rmtree(tmp_dir)
                iface.model = "virtio"
                del iface.address

            if bootdisk_snapshot != "":
                disk.snapshot = bootdisk_snapshot

            disk.target = {"dev": bootdisk_target, "bus": bootdisk_bus}
            device_source = disk.source.attrs["file"]

            del disk.address
            vmxml.devices = xml_devices
            vmxml.define()

        # Add virtio_scsi controller.
        if virtio_scsi_controller:
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            ctl_model = params.get("virtio_scsi_controller_model")
            if ctl_model:
                scsi_controller.model = ctl_model
            if virtio_scsi_controller_driver != "":
                driver_dict = {}
                for driver_option in virtio_scsi_controller_driver.split(','):
                    if driver_option != "":
                        d = driver_option.split('=')
                        driver_dict.update({d[0].strip(): d[1].strip()})
                scsi_controller.driver = driver_dict
            vmxml.del_controller("scsi")
            vmxml.add_device(scsi_controller)
Exemple #37
0
def run(test, params, env):
    """
    Test for hotplug usb device.
    """
    # get the params from params
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)

    usb_type = params.get("usb_type", "kbd")
    attach_type = params.get("attach_type", "attach_device")
    attach_count = int(params.get("attach_count", "1"))
    if usb_type == "storage":
        model = params.get("model", "nec-xhci")
        index = params.get("index", "1")
    status_error = ("yes" == params.get("status_error", "no"))

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status("permissive")

    if usb_type == "storage":
        controllers = vm_xml.get_devices(device_type="controller")
        devices = vm_xml.get_devices()
        for dev in controllers:
            if dev.type == "usb" and dev.index == "1":
                devices.remove(dev)
        controller = Controller("controller")
        controller.type = "usb"
        controller.index = index
        controller.model = model
        devices.append(controller)
        vm_xml.set_devices(devices)

    try:
        session = vm.wait_for_login()
    except (LoginError, VMError, ShellError) as e:
        test.fail("Test failed: %s" % str(e))

    def is_hotplug_ok():
        try:
            output = session.cmd_output(
                "fdisk -l | grep -c '^Disk /dev/.* 1 M'")
            return int(output.strip()) == attach_count
        except ShellTimeoutError as detail:
            test.fail("hotplug check failed: %s" % detail)

    tmp_dir = os.path.join(data_dir.get_tmp_dir(), "usb_hotplug_files")
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)

    try:
        result = None
        dev_xml = None
        opt = "--hmp"
        for i in range(attach_count):
            if usb_type == "storage":
                path = os.path.join(tmp_dir, "%s.img" % i)
                libvirt.create_local_disk("file",
                                          path,
                                          size="1M",
                                          disk_format="qcow2")
                os.chmod(path, 0o666)

            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd = "drive_add"
                    attach_cmd += (" 0 id=drive-usb-%s,if=none,file=%s" %
                                   (i, path))

                    result = virsh.qemu_monitor_command(vm_name,
                                                        attach_cmd,
                                                        options=opt)
                    if result.exit_status or (result.stdout.strip().find("OK")
                                              == -1):
                        raise process.CmdError(result.command, result)

                    attach_cmd = "device_add usb-storage,"
                    attach_cmd += (
                        "id=drive-usb-%s,bus=usb1.0,drive=drive-usb-%s" %
                        (i, i))
                else:
                    attach_cmd = "device_add"
                    attach_cmd += " usb-%s,bus=usb1.0,id=%s%s" % (usb_type,
                                                                  usb_type, i)

                result = virsh.qemu_monitor_command(vm_name,
                                                    attach_cmd,
                                                    options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                attributes = {'type_name': "usb", 'bus': "1", 'port': "0"}
                if usb_type == "storage":
                    dev_xml = Disk(type_name="file")
                    dev_xml.device = "disk"
                    dev_xml.source = dev_xml.new_disk_source(
                        **{"attrs": {
                            'file': path
                        }})
                    dev_xml.driver = {
                        "name": "qemu",
                        "type": 'qcow2',
                        "cache": "none"
                    }
                    dev_xml.target = {"dev": 'sdb', "bus": "usb"}
                    dev_xml.address = dev_xml.new_disk_address(
                        **{"attrs": attributes})
                else:
                    if usb_type == "mouse":
                        dev_xml = Input("mouse")
                    elif usb_type == "tablet":
                        dev_xml = Input("tablet")
                    else:
                        dev_xml = Input("keyboard")

                    dev_xml.input_bus = "usb"
                    dev_xml.address = dev_xml.new_input_address(
                        **{"attrs": attributes})

                result = virsh.attach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)

        if status_error and usb_type == "storage":
            if utils_misc.wait_for(is_hotplug_ok, timeout=30):
                # Sometimes we meet an error but the ret in $? is 0.
                test.fail("\nAttach device successfully in negative case."
                          "\nExcept it fail when attach count exceed maximum."
                          "\nDetail: %s" % result)

        for i in range(attach_count):
            attach_cmd = "device_del"
            if attach_type == "qemu_monitor":
                if usb_type == "storage":
                    attach_cmd += (" drive-usb-%s" % i)
                else:
                    if usb_type == "mouse":
                        attach_cmd += " mouse"
                    elif usb_type == "tablet":
                        attach_cmd += " tablet"
                    else:
                        attach_cmd += " keyboard"

                result = virsh.qemu_monitor_command(vm_name,
                                                    attach_cmd,
                                                    options=opt)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
            else:
                result = virsh.detach_device(vm_name, dev_xml.xml)
                if result.exit_status:
                    raise process.CmdError(result.command, result)
    except process.CmdError as e:
        if not status_error:
            # live attach of device 'input' is not supported
            ret = result.stderr.find("Operation not supported")
            if usb_type != "storage" and ret > -1:
                pass
            else:
                test.fail("failed to attach device.\nDetail: %s." % result)
    finally:
        session.close()
        if os.path.isdir(tmp_dir):
            shutil.rmtree(tmp_dir)
        utils_selinux.set_status(backup_sestatus)
        vm_xml_backup.sync()
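
The negative storage path above gates on an is_hotplug_ok callback that is defined earlier in this test and not shown here. A helper in that spirit might simply poll the guest for the expected number of emulated devices; the sketch below is hypothetical (session, attach_count and the "QEMU USB" match string are assumptions, not the test's actual helper):

    def is_hotplug_ok():
        """Poll lsusb inside the guest for the hotplugged QEMU USB devices."""
        try:
            output = session.cmd_output("lsusb")
            return output.count("QEMU USB") >= attach_count
        except aexpect.ShellError:
            return False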
Exemple #38
0
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare tgtd and secret config.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def check_save_restore(save_file):
        """
        Test domain save and restore.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Test domain snapshot operation.
        """
        snapshot1 = "s1"
        snapshot2 = "s2"

        ret = virsh.snapshot_create_as(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(
            vm_name, "%s --disk-only --diskspec vda,"
            "file=/tmp/testvm-snap1" % snapshot2)
        libvirt.check_exit_status(ret, True)

        ret = virsh.snapshot_create_as(
            vm_name, "%s --memspec file=%s,snapshot=external"
            " --diskspec vda,file=/tmp/testvm-snap2" % (snapshot2, snapshot2))
        libvirt.check_exit_status(ret, True)

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param target: Disk dev in VM.
        :param old_parts: Partitions present before the disk was attached.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                added_part = added_parts[0]
            if not added_part:
                logging.error("Cann't see added partition in VM")
                return False

            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_qemu_cmd():
        """
        Check qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if driver_iothread:
            cmd += " | grep iothread=iothread%s" % driver_iothread

        if process.system(cmd, ignore_status=True, shell=True):
            test.fail("Can't see disk option '%s' " "in command line" % cmd)

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")

    # Controller specific attributes.
    cntlr_type = params.get('controller_type', None)
    cntlr_model = params.get('controller_model', None)
    cntlr_index = params.get('controller_index', None)
    controller_addr_options = params.get('controller_addr_options', None)

    driver_iothread = params.get("driver_iothread")

    # iscsi options.
    iscsi_target = params.get("iscsi_target")
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1")
    uuid = params.get("uuid", "")
    auth_uuid = "yes" == params.get("auth_uuid", "")
    auth_usage = "yes" == params.get("auth_usage", "")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    test_save_snapshot = "yes" == params.get("test_save_snapshot", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")

    secret_uuid = ""

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_place_in_location = params.get("auth_place_in_location")
            if 'source' in auth_place_in_location and not libvirt_version.version_compare(
                    3, 9, 0):
                test.cancel(
                    "place auth in source is not supported in current libvirt version"
                )
            auth_type = params.get("auth_type")
            secret_usage_target = params.get("secret_usage_target")
            secret_usage_type = params.get("secret_usage_type")
            chap_user = params.get("iscsi_user")
            chap_passwd = params.get("iscsi_password")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
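            # The regex captures the second whitespace-delimited token of
            # virsh's "Secret <uuid> created" output; an equivalent, simpler
            # parse under the same assumption about the output format:
            #   secret_uuid = ret.stdout.strip().split()[1]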
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size=emulated_size,
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip=iscsi_host)

        # If we use the qcow2 disk format, we should format the iscsi disk
        # first.
        if device_format == "qcow2":
            cmd = (
                "qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s" %
                (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size))
            process.run(cmd, shell=True)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}

        # For lun type devices, the iothread attribute needs to be set on the
        # controller.
        if driver_iothread:
            if device != "lun":
                driver_dict.update({"iothread": driver_iothread})
            vmxml.iothreads = int(driver_iothread)

        disk_xml.driver = driver_dict
        # Check if we want to use a fake uuid.
        if not uuid:
            uuid = secret_uuid
        auth_dict = {}
        if auth_uuid:
            auth_dict = {
                "auth_user": chap_user,
                "secret_type": secret_usage_type,
                "secret_uuid": uuid
            }
        elif auth_usage:
            auth_dict = {
                "auth_user": chap_user,
                "secret_type": secret_usage_type,
                "secret_usage": secret_usage_target
            }
        disk_source = disk_xml.new_disk_source(
            **{
                "attrs": {
                    "protocol": "iscsi",
                    "name": "%s/%s" % (iscsi_target, lun_num)
                },
                "hosts": [{
                    "name": iscsi_host,
                    "port": iscsi_port
                }]
            })
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if 'source' in auth_place_in_location:
                disk_source.auth = disk_auth
            if 'disk' in auth_place_in_location:
                disk_xml.auth = disk_auth

        disk_xml.source = disk_source
        # Sync VM xml.
        vmxml.add_device(disk_xml)

        # After virtio 1.0 is enabled, lun type devices need to use virtio-scsi
        # instead of virtio, so an additional controller is needed.
        # Add controller.
        if device == "lun":
            ctrl = Controller(type_name=cntlr_type)
            if cntlr_model is not None:
                ctrl.model = cntlr_model
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            ctrl_addr_dict = {}
            for addr_option in controller_addr_options.split(','):
                if addr_option != "":
                    addr_part = addr_option.split('=')
                    ctrl_addr_dict.update(
                        {addr_part[0].strip(): addr_part[1].strip()})
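            # e.g. controller_addr_options = "bus=0x00,slot=0x09" yields
            # ctrl_addr_dict == {'bus': '0x00', 'slot': '0x09'} (illustrative
            # values; the real options come from the test configuration).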
            ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict)

            # If driver_iothread is true, we need to add the iothread
            # attribute to the controller.
            if driver_iothread:
                ctrl_driver_dict = {}
                ctrl_driver_dict.update({"iothread": driver_iothread})
                ctrl.driver = ctrl_driver_dict
            logging.debug("Controller XML is:%s", ctrl)
            if cntlr_type:
                vmxml.del_controller(cntlr_type)
            else:
                vmxml.del_controller("scsi")
            vmxml.add_device(ctrl)

        try:
            # Start the VM and check status.
            vmxml.sync()
            vm.start()
            if status_error:
                test.fail("VM started unexpectedly.")

            # Check Qemu command line
            if test_qemu_cmd:
                check_qemu_cmd()

        except virt_vm.VMStartError as e:
            if status_error:
                if re.search(uuid, str(e)):
                    pass
            else:
                test.fail("VM failed to start." "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            # Check partitions in VM.
            if check_partitions:
                if not check_in_vm(device_target, old_parts):
                    test.fail("Check disk partitions in VM failed")
            # Test domain save/restore/snapshot.
            if test_save_snapshot:
                save_file = os.path.join(data_dir.get_tmp_dir(),
                                         "%.save" % vm_name)
                check_save_restore(save_file)
                check_snapshot()
                if os.path.exists(save_file):
                    os.remove(save_file)

    finally:
        # Delete snapshots.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
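
The example above asserts every virsh result through libvirt.check_exit_status(ret, expect_error). A minimal sketch of those semantics, assuming the avocado-vt convention that expect_error=True inverts the expectation (illustrative only, not the library source):

    def check_exit_status(result, expect_error=False):
        """Fail when the command result does not match the expectation."""
        if not expect_error and result.exit_status != 0:
            raise AssertionError("Command failed: %s" % result.stderr)
        if expect_error and result.exit_status == 0:
            raise AssertionError("Command succeeded unexpectedly")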
Exemple #39
0
def run(test, params, env):
    """
    Test watchdog device:

    1.Add watchdog device to the guest xml.
    2.Start the guest.
    3.Trigger the watchdog in the guest.
    4.Confirm the guest status.
    """

    def trigger_watchdog(model):
        """
        Trigger watchdog

        :param model: action when watchdog triggered
        """
        watchdog_device = "device %s" % model
        if action == "dump":
            watchdog_action = "watchdog-action pause"
        else:
            watchdog_action = "watchdog-action %s" % action
        if not hotplug_test:
            vm_pid = vm.get_pid()
            with open("/proc/%s/cmdline" % vm_pid) as vm_cmdline_file:
                vm_cmdline = vm_cmdline_file.read()
                vm_cmdline = vm_cmdline.replace('\x00', ' ')
                if not all(option in vm_cmdline for option in (watchdog_device, watchdog_action)):
                    test.fail("Can not find %s or %s in qemu cmd line"
                              % (watchdog_device, watchdog_action))
        cmd = "gsettings set org.gnome.settings-daemon.plugins.power button-power shutdown"
        session.cmd(cmd, ignore_all_errors=True)
        try:
            if model == "ib700":
                try:
                    session.cmd("modprobe ib700wdt")
                except aexpect.ShellCmdError:
                    session.close()
                    test.fail("Failed to load module ib700wdt")
            session.cmd("dmesg | grep -i %s && lsmod | grep %s" % (model, model))
            session.cmd("echo 1 > /dev/watchdog")
        except aexpect.ShellCmdError as e:
            session.close()
            test.fail("Failed to trigger watchdog: %s" % e)

    def watchdog_attached(vm_name):
        """
        Confirm whether watchdog device is attached to vm by checking domain dumpxml

        :param vm_name: vm name
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        # Use "is not None": a childless Element is falsy in ElementTree.
        return vmxml.xmltreefile.find('devices/watchdog') is not None

    def confirm_guest_status():
        """
        Confirm the guest status after watchdog triggered
        """
        def _booting_completed():
            session = vm.wait_for_login()
            status = None
            second_boot_time = None
            try:
                status, second_boot_time = session.cmd_status_output("uptime --since")
                logging.debug("The second boot time is %s", second_boot_time)
            except (aexpect.ShellStatusError, aexpect.ShellProcessTerminatedError) as e:
                logging.error("Exception caught:%s", e)

            session.close()
            return (second_boot_time is not None
                    and second_boot_time > first_boot_time)

        def _inject_nmi():
            session = vm.wait_for_login()
            status, output = session.cmd_status_output("dmesg | grep -i nmi")
            session.close()
            if status == 0:
                logging.debug(output)
                return True
            return False

        def _inject_nmi_event():
            virsh_session.send_ctrl("^C")
            output = virsh_session.get_stripped_output()
            if "inject-nmi" not in output:
                return False
            return True

        def _check_dump_file(dump_path, domain_id):
            dump_file = glob.glob('%s%s-*' % (dump_path, domain_id))
            if len(dump_file):
                logging.debug("Find the auto core dump file:\n%s", dump_file[0])
                os.remove(dump_file[0])
                return True
            return False

        if action in ["poweroff", "shutdown"]:
            if not utils_misc.wait_for(lambda: vm.state() == "shut off", 180, 10):
                test.fail("Guest not shutdown after watchdog triggered")
            else:
                logging.debug("Guest is in shutdown state after watchdog triggered")
        elif action == "reset":
            if not utils_misc.wait_for(_booting_completed, 600, 10):
                test.fail("Guest not reboot after watchdog triggered")
            else:
                logging.debug("Guest is rebooted after watchdog triggered")
        elif action == "pause":
            if utils_misc.wait_for(lambda: vm.state() == "paused", 180, 10):
                logging.debug("Guest is in paused status after watchdog triggered.")
                cmd_output = virsh.domstate(vm_name, '--reason').stdout.strip()
                logging.debug("Check guest status: %s\n", cmd_output)
                if cmd_output != "paused (watchdog)":
                    test.fail("The domstate is not correct after dump by watchdog")
            else:
                test.fail("Guest not pause after watchdog triggered")
        elif action == "none":
            if utils_misc.wait_for(lambda: vm.state() == "shut off", 180, 10):
                test.fail("Guest shutdown unexpectedly")
            else:
                logging.debug("Guest is not in shutoff state since watchdog action is none.")
        elif action == "inject-nmi":
            if not utils_misc.wait_for(_inject_nmi, 180, 10):
                test.fail("Guest not receive inject-nmi after watchdog triggered\n")
            elif not utils_misc.wait_for(_inject_nmi_event, 180, 10):
                test.fail("No inject-nmi watchdog event caught")
            else:
                logging.debug("Guest received inject-nmi and inject-nmi watchdog event "
                              " has been caught.")
            virsh_session.close()
        elif action == "dump":
            domain_id = vm.get_id()
            dump_path = "/var/lib/libvirt/qemu/dump/"
            if not utils_misc.wait_for(lambda: _check_dump_file(dump_path, domain_id), 180, 10):
                test.fail("No auto core dump file found after watchdog triggered")
            else:
                logging.debug("VM core has been dumped after watchdog triggered.")

    name_length = params.get("name_length", "default")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(params["main_vm"])
    model = params.get("model")
    action = params.get("action")
    model_test = params.get("model_test") == "yes"
    hotplug_test = params.get("hotplug_test") == "yes"
    hotunplug_test = params.get("hotunplug_test") == "yes"
    machine_type = params.get("machine_type")

    if machine_type == "q35" and model == "ib700":
        test.cancel("ib700wdt watchdog device is not supported "
                    "on guest with q35 machine type")

    # Backup xml file
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Rename the guest name to the length defined in the config file
    if name_length != "default":
        origin_name = vm_name
        name_length = int(params.get("name_length", "1"))
        vm_name = ''.join([random.choice(string.ascii_letters+string.digits)
                           for _ in range(name_length)])
        vm_xml.VMXML.vm_rename(vm, vm_name)
        # Generate the renamed xml file
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Coldplug a pcie-to-pci-bridge into the vm xml for q35 guests, as the
    # i6300esb watchdog device can only be plugged into a conventional PCI slot
    if (machine_type == 'q35'
            and not vmxml.get_controllers(controller_type='pci', model='pcie-to-pci-bridge')):
        logging.debug("Add pcie-root-port and pcie-to-pci-bridge controller to vm")
        pcie_root_port = Controller("pci")
        pcie_pci_bridge = Controller("pci")
        pcie_root_port.model = "pcie-root-port"
        pcie_pci_bridge.model = "pcie-to-pci-bridge"
        pcie_root_port.model_name = {'name': 'pcie-root-port'}
        pcie_pci_bridge.model_name = {'name': 'pcie-pci-bridge'}
        vmxml.add_device(pcie_root_port)
        vmxml.add_device(pcie_pci_bridge)
        vmxml.sync()

    if hotplug_test:
        vm.start()
        session = vm.wait_for_login()

    # Add watchdog device to domain
    vmxml.remove_all_device_by_type('watchdog')
    watchdog_dev = Watchdog()
    watchdog_dev.model_type = model
    watchdog_dev.action = action
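    # Libvirt only accepts user-defined device aliases that carry the "ua-"
    # prefix; the random suffix below just keeps the alias unique and long.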
    chars = string.ascii_letters + string.digits + '-_'
    alias_name = 'ua-' + ''.join(random.choice(chars) for _ in list(range(64)))
    watchdog_dev.alias = {'name': alias_name}

    try:
        if model_test or hotunplug_test:
            vmxml.add_device(watchdog_dev)
            vmxml.sync()
            try:
                vm.start()
            except Exception:
                test.fail("VM startup after adding watchdog device failed!")

        elif hotplug_test:
            watchdog_xml = watchdog_dev.xml
            attach_result = virsh.attach_device(vm_name, watchdog_xml,
                                                ignore_status=False, debug=True)
            if not utils_misc.wait_for(lambda: watchdog_attached(vm.name), 60):
                test.fail("Failed to hotplug watchdog device.")
        session = vm.wait_for_login()

        # No need to trigger watchdog after hotunplug
        if hotunplug_test:
            cur_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            cur_watchdog = cur_xml.xmltreefile.find('devices/watchdog')
            cur_watchdog_xml = Watchdog.new_from_element(cur_watchdog).xml
            detach_result = virsh.detach_device(vm_name, cur_watchdog_xml,
                                                ignore_status=True, debug=True)
            if detach_result.exit_status:
                test.fail("i6300esb watchdog device can NOT be detached successfully, "
                          "result:\n%s" % detach_result)
            elif not utils_misc.wait_for(lambda: not watchdog_attached(vm.name), 60):
                test.fail("Failed to hotunplug watchdog device.")
            return

        if action == "reset":
            status, first_boot_time = session.cmd_status_output("uptime --since")
            logging.info("The first boot time is %s\n", first_boot_time)
        if action == "inject-nmi":
            virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC, auto_close=True)
            event_cmd = "event --event watchdog --all --loop"
            virsh_session.sendline(event_cmd)
        trigger_watchdog(model)
        confirm_guest_status()
    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if name_length != "default":
            vm_xml.VMXML.vm_rename(vm, origin_name)
        backup_xml.sync()
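
confirm_guest_status above repeats one polling pattern for several watchdog actions. A compact helper in that spirit, reusing the utils_misc.wait_for already imported by the test (a sketch; timeout is positional and step a keyword in the avocado-vt signature):

    def wait_for_state(vm, state, timeout=180, step=10):
        """Poll vm.state() until it equals the expected state or timeout."""
        return utils_misc.wait_for(lambda: vm.state() == state,
                                   timeout, step=step)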
Exemple #40
0
def run(test, params, env):
    """
    Verify hotplug feature for char device
    """

    vm_name = params.get("main_vm", "vm1")
    status_error = "yes" == params.get("status_error", "no")
    char_dev = params.get("char_dev", "file")
    hotplug_type = params.get("hotplug_type", "qmp")
    dup_charid = "yes" == params.get("dup_charid", "no")
    dup_devid = "yes" == params.get("dup_devid", "no")
    diff_devid = "yes" == params.get("diff_devid", "no")
    xml_file = params.get("xml_file", "/tmp/xml_file")

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # add controller for each char device
    devices = vm_xml.get_devices()
    controllers = vm_xml.get_devices(device_type="controller")
    for dev in controllers:
        if dev.type == "virtio-serial":
            devices.remove(dev)
    controller = Controller("controller")
    controller.type = "virtio-serial"
    controller.index = 0
    devices.append(controller)
    vm_xml.set_devices(devices)
    vm_xml.sync()

    # start and login vm
    vm = env.get_vm(vm_name)
    vm.start()
    session = vm.wait_for_login()

    def create_channel_xml(vm_name, char_type, id=0):
        """
        Create an XML string containing the channel information.
        """
        channel_type = char_type
        if char_type == "file":
            channel_path = ("/tmp/%s" % char_type)
            channel_source = {'path': channel_path}
            channel_target = {'type': 'virtio', 'name': 'file'}
        if char_type == "socket":
            channel_type = 'unix'
            channel_path = ("/tmp/%s" % char_type)
            channel_source = {'mode': 'bind', 'path': channel_path}
            channel_target = {'type': 'virtio', 'name': 'socket'}
        if char_type == "pty":
            channel_path = ("/dev/pts/%s" % id)
            channel_source = {'path': channel_path}
            channel_target = {'type': 'virtio', 'name': 'pty'}

        channel_alias = {'name': char_type}
        channel_address = {'type': 'virtio-serial', 'controller': '0', 'bus': '0'}
        channel_params = {'type_name': channel_type, 'source': channel_source,
                          'target': channel_target, 'alias': channel_alias,
                          'address': channel_address}
        channelxml = channel.Channel.new_from_dict(channel_params)
        logging.debug("Channel XML:\n%s", channelxml)

        with open(channelxml.xml) as xmlf:
            xml_lines = xmlf.read()
        return xml_lines

    def hotplug_device(type, char_dev, id=0):
        tmp_file = "/tmp/%s" % char_dev
        if type == "qmp":
            char_add_opt = "chardev-add "
            dev_add_opt = "device_add virtserialport,chardev="
            if char_dev == "file":
                char_add_opt += "file,path=/tmp/file,id=file"
                dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
            elif char_dev == "socket":
                char_add_opt += "socket,path=/tmp/socket,server,nowait,id=socket"
                dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
            elif char_dev == "pty":
                char_add_opt += ("pty,path=/dev/pts/%s,id=pty" % id)
                dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
            result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
            if result.exit_status:
                raise error.TestError('Failed to add chardev %s to %s. Result:\n %s'
                                      % (char_dev, vm_name, result))
            result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
            if result.exit_status:
                raise error.TestError('Failed to add device %s to %s. Result:\n %s'
                                      % (char_dev, vm_name, result))
        elif type == "attach":
            if char_dev in ["file", "socket"]:
                xml_info = create_channel_xml(vm_name, char_dev)
            elif char_dev == "pty":
                xml_info = create_channel_xml(vm_name, char_dev, id)
            f = open(xml_file, "w")
            f.write(xml_info)
            f.close()
            if os.path.exists(tmp_file):
                os.chmod(tmp_file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            result = virsh.attach_device(vm_name, xml_file)
        return result

    def dup_hotplug(type, char_dev, id, dup_charid=False, dup_devid=False, diff_devid=False):
        if type == "qmp":
            char_add_opt = "chardev-add "
            dev_add_opt = "device_add virtserialport,chardev="
            if char_dev == "file":
                if dup_charid:
                    char_add_opt += "file,path=/tmp/file,id=file"
                if dup_devid:
                    dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
                if diff_devid:
                    dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file1"
            elif char_dev == "socket":
                if dup_charid:
                    char_add_opt += "socket,path=/tmp/socket,server,nowait,id=socket"
                if dup_devid:
                    dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
                if diff_devid:
                    dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket1"
            elif char_dev == "pty":
                if dup_charid:
                    char_add_opt += "pty,path=/dev/pts/%s,id=pty" % id
                if dup_devid:
                    dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
                if diff_devid:
                    dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty1"
            if dup_charid:
                result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
            if dup_devid or diff_devid:
                result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
        elif type == "attach":
            if dup_devid:
                result = hotplug_device(type, char_dev, id)
        return result

    def confirm_hotplug_result(char_dev, id=0):
        tmp_file = "/tmp/%s" % char_dev
        serial_file = "/dev/virtio-ports/%s" % char_dev

        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        h_o = result.stdout.strip()
        if not h_o.count("name = \"%s\"" % char_dev):
            raise error.TestFail("Cann't find device(%s) from:\n%s" % (char_dev, h_o))
        if char_dev == "file":
            session.cmd("echo test > %s" % serial_file)
            f = open(tmp_file, "r")
            r_o = f.read()
            f.close()
        elif char_dev == "socket":
            session.cmd("echo test > /tmp/file")
            sock = socket.socket(socket.AF_UNIX)
            sock.connect(tmp_file)
            session.cmd("dd if=/tmp/file of=%s" % serial_file)
            r_o = sock.recv(1024)
        elif char_dev == "pty":
            session.cmd("echo test > /tmp/file")
            session.cmd("dd if=/tmp/file of=%s &" % serial_file)
            dev_file = "/dev/pts/%s" % id
            if not os.path.exists(dev_file):
                raise error.TestFail("%s doesn't exist." % dev_file)
            p = subprocess.Popen(["/usr/bin/cat", dev_file],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            session.cmd("echo test >> /tmp/file &")
            while True:
                r_o = p.stdout.readline()
                if r_o or p.poll():
                    break
                time.sleep(0.2)
            p.kill()
        if not r_o.count("test"):
            err_info = "%s device file doesn't match 'test':%s" % (char_dev, r_o)
            raise error.TestFail(err_info)

    def unhotplug_serial_device(type, char_dev):
        if type == "qmp":
            del_dev_opt = "device_del %s" % char_dev
            del_char_opt = "chardev-remove %s" % char_dev
            result = virsh.qemu_monitor_command(vm_name, del_dev_opt, "--hmp")
            if result.exit_status:
                raise error.TestError('Failed to del device %s from %s. Result:\n%s'
                                      % (char_dev, vm_name, result))
            result = virsh.qemu_monitor_command(vm_name, del_char_opt, "--hmp")
        elif type == "attach":
            result = virsh.detach_device(vm_name, xml_file)

    def confirm_unhotplug_result(char_dev):
        serial_file = "/dev/virtio-ports/%s" % char_dev
        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        uh_o = result.stdout.strip()
        if uh_o.count("chardev = \"%s\"" % char_dev):
            raise error.TestFail("Still can get serial device(%s) from: '%s'"
                                 % (char_dev, uh_o))
        if os.path.exists(serial_file):
            raise error.TestFail("File '%s' still exists after unhotplug" % serial_file)

    # run test case
    try:
        if char_dev in ['file', 'socket']:
            # if char_dev is file or socket, it doesn't need a pts index
            pts_id = 0
        else:
            pts_id = str(utils_misc.aton(utils_misc.get_dev_pts_max_id()) + 1)
            if os.path.exists("/dev/pts/%s" % pts_id):
                raise error.TestError('invalid pts index(%s) provided.' % pts_id)
        if status_error:
            hotplug_device(hotplug_type, char_dev, pts_id)
            ret = dup_hotplug(hotplug_type, char_dev, pts_id, dup_charid, dup_devid, diff_devid)
            dup_o = ret.stdout.strip()
            if hotplug_type == "qmp":
                # although the command failed, ret.exit_status will still be 0.
                err_o1 = "Duplicate ID"
                err_o2 = "Parsing chardev args failed"
                err_o3 = "Property 'virtserialport.chardev' can't"
                if (err_o1 not in dup_o) and (err_o2 not in dup_o) and (err_o3 not in dup_o):
                    raise error.TestFail("Expected failure, but it ran successfully:\n%s" % ret)
            else:
                if "chardev already exists" not in dup_o:
                    logging.info("Expected failure, but it ran successfully:\n%s", ret)
        else:
            if char_dev != "all":
                #1.hotplug serial device
                hotplug_device(hotplug_type, char_dev, pts_id)

                #2.confirm hotplug result
                confirm_hotplug_result(char_dev, pts_id)

                #3.unhotplug serial device
                unhotplug_serial_device(hotplug_type, char_dev)

                #4.confirm unhotplug result
                confirm_unhotplug_result(char_dev)
            else:
                #1.hotplug serial device
                hotplug_device(hotplug_type, "file")
                hotplug_device(hotplug_type, "socket")
                hotplug_device(hotplug_type, "pty", pts_id)

                #2.confirm hotplug result
                confirm_hotplug_result("file")
                confirm_hotplug_result("socket")
                confirm_hotplug_result("pty", pts_id)

                #3.unhotplug serial device
                unhotplug_serial_device(hotplug_type, "file")
                unhotplug_serial_device(hotplug_type, "socket")
                unhotplug_serial_device(hotplug_type, "pty")

                #4.confirm unhotplug result
                confirm_unhotplug_result("file")
                confirm_unhotplug_result("socket")
                confirm_unhotplug_result("pty")
    finally:
        vm_xml_backup.sync()
        if os.path.exists(xml_file):
            os.remove(xml_file)
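
The socket branch of confirm_hotplug_result above reads guest output back through the host-side chardev. A standalone sketch of that host-side check (assuming the chardev was added with socket,path=/tmp/socket,server,nowait as in hotplug_device):

    import socket

    def read_from_chardev_socket(path="/tmp/socket", nbytes=1024):
        """Connect to the qemu chardev socket and read what the guest wrote."""
        sock = socket.socket(socket.AF_UNIX)
        sock.connect(path)
        try:
            return sock.recv(nbytes)  # raw bytes written by the guest
        finally:
            sock.close()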
Exemple #41
0
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare tgtd and secret config.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def check_save_restore(save_file):
        """
        Test domain save and restore.
        """
        # Save the domain.
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Restore the domain.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def check_snapshot():
        """
        Test domain snapshot operation.
        """
        snapshot1 = "s1"
        snapshot2 = "s2"

        ret = virsh.snapshot_create_as(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --disk-only --diskspec vda,"
                                       "file=/tmp/testvm-snap1"
                                       % snapshot2)
        libvirt.check_exit_status(ret, True)

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --memspec file=%s,snapshot=external"
                                       " --diskspec vda,file=/tmp/testvm-snap2"
                                       % (snapshot2, snapshot2))
        libvirt.check_exit_status(ret, True)

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param target: Disk dev in VM.
        :param old_parts: Partitions present before the disk was attached.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
            elif target.startswith("sd"):
                added_part = added_parts[0]
            if not added_part:
                logging.error("Cann't see added partition in VM")
                return False

            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def check_qemu_cmd():
        """
        Check qemu-kvm command line options
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        if driver_iothread:
            cmd += " | grep iothread=iothread%s" % driver_iothread

        if process.system(cmd, ignore_status=True, shell=True):
            test.fail("Can't see disk option '%s' "
                      "in command line" % cmd)

    def check_auth_plaintext(vm_name, password):
        """
        Check if libvirt passed the plaintext of the chap authentication
        password to qemu.
        :param vm_name: The name of vm to be checked.
        :param password: The plaintext of password used for chap authentication.
        :return: True if using plaintext, False if not.
        """
        cmd = ("ps -ef | grep -v grep | grep qemu-kvm | grep %s | grep %s"
               % (vm_name, password))
        return process.system(cmd, ignore_status=True, shell=True) == 0

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")

    # Controller specific attributes.
    cntlr_type = params.get('controller_type', None)
    cntlr_model = params.get('controller_model', None)
    cntlr_index = params.get('controller_index', None)
    controller_addr_options = params.get('controller_addr_options', None)

    driver_iothread = params.get("driver_iothread")

    # iscsi options.
    iscsi_target = params.get("iscsi_target")
    iscsi_host = params.get("iscsi_host")
    iscsi_port = params.get("iscsi_port")
    emulated_size = params.get("iscsi_image_size", "1")
    uuid = params.get("uuid", "")
    auth_uuid = "yes" == params.get("auth_uuid", "")
    auth_usage = "yes" == params.get("auth_usage", "")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    test_save_snapshot = "yes" == params.get("test_save_snapshot", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")

    secret_uuid = ""

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        chap_user = ""
        chap_passwd = ""
        if auth_uuid or auth_usage:
            auth_place_in_location = params.get("auth_place_in_location")
            if 'source' in auth_place_in_location and not libvirt_version.version_compare(3, 9, 0):
                test.cancel("place auth in source is not supported in current libvirt version")
            auth_type = params.get("auth_type")
            secret_usage_target = params.get("secret_usage_target")
            secret_usage_type = params.get("secret_usage_type")
            chap_user = params.get("iscsi_user")
            chap_passwd = params.get("iscsi_password")

            sec_xml = secret_xml.SecretXML("no", "yes")
            sec_xml.description = "iSCSI secret"
            sec_xml.auth_type = auth_type
            sec_xml.auth_username = chap_user
            sec_xml.usage = secret_usage_type
            sec_xml.target = secret_usage_target
            sec_xml.xmltreefile.write()

            ret = virsh.secret_define(sec_xml.xml)
            libvirt.check_exit_status(ret)

            secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                     ret.stdout.strip())[0].lstrip()
            logging.debug("Secret uuid %s", secret_uuid)
            if secret_uuid == "":
                test.error("Failed to get secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            ret = virsh.secret_set_value(secret_uuid, secret_string,
                                         **virsh_dargs)
            libvirt.check_exit_status(ret)

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size=emulated_size,
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=iscsi_host)

        # If we use the qcow2 disk format, we should format the iscsi disk
        # first.
        if device_format == "qcow2":
            cmd = ("qemu-img create -f qcow2 iscsi://%s:%s/%s/%s %s"
                   % (iscsi_host, iscsi_port, iscsi_target, lun_num, emulated_size))
            process.run(cmd, shell=True)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device

        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}

        # For lun type devices, the iothread attribute needs to be set on the
        # controller.
        if driver_iothread:
            if device != "lun":
                driver_dict.update({"iothread": driver_iothread})
            vmxml.iothreads = int(driver_iothread)

        disk_xml.driver = driver_dict
        # Check if we want to use a fake uuid.
        if not uuid:
            uuid = secret_uuid
        auth_dict = {}
        if auth_uuid:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_uuid": uuid}
        elif auth_usage:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_usage": secret_usage_target}
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {"protocol": "iscsi",
                         "name": "%s/%s" % (iscsi_target, lun_num)},
               "hosts": [{"name": iscsi_host, "port": iscsi_port}]})
        if auth_dict:
            disk_auth = disk_xml.new_auth(**auth_dict)
            if 'source' in auth_place_in_location:
                disk_source.auth = disk_auth
            if 'disk' in auth_place_in_location:
                disk_xml.auth = disk_auth

        disk_xml.source = disk_source
        # Sync VM xml.
        vmxml.add_device(disk_xml)

        # After virtio 1.0 is enabled, lun type devices need to use virtio-scsi
        # instead of virtio, so an additional controller is needed.
        # Add controller.
        if device == "lun":
            ctrl = Controller(type_name=cntlr_type)
            if cntlr_model is not None:
                ctrl.model = cntlr_model
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            ctrl_addr_dict = {}
            for addr_option in controller_addr_options.split(','):
                if addr_option != "":
                    addr_part = addr_option.split('=')
                    ctrl_addr_dict.update({addr_part[0].strip(): addr_part[1].strip()})
            ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict)

            # If driver_iothread is true, we need to add the iothread
            # attribute to the controller.
            if driver_iothread:
                ctrl_driver_dict = {}
                ctrl_driver_dict.update({"iothread": driver_iothread})
                ctrl.driver = ctrl_driver_dict
            logging.debug("Controller XML is:%s", ctrl)
            if cntlr_type:
                vmxml.del_controller(cntlr_type)
            else:
                vmxml.del_controller("scsi")
            vmxml.add_device(ctrl)

        try:
            # Start the VM and check status.
            vmxml.sync()
            vm.start()
            if status_error:
                test.fail("VM started unexpectedly.")

            # Check Qemu command line
            if test_qemu_cmd:
                check_qemu_cmd()

        except virt_vm.VMStartError as e:
            if status_error:
                if re.search(uuid, str(e)):
                    pass
            else:
                test.fail("VM failed to start."
                          "Error: %s" % str(e))
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            # Check partitions in VM.
            if check_partitions:
                if not check_in_vm(device_target, old_parts):
                    test.fail("Check disk partitions in VM failed")
            # Test domain save/restore/snapshot.
            if test_save_snapshot:
                save_file = os.path.join(data_dir.get_tmp_dir(), "%.save" % vm_name)
                check_save_restore(save_file)
                check_snapshot()
                if os.path.exists(save_file):
                    os.remove(save_file)
            # Test that libvirt doesn't pass the plaintext chap password to
            # qemu; this behavior is implemented since libvirt 4.3.0-1.
            if (libvirt_version.version_compare(4, 3, 0) and
                    (auth_uuid or auth_usage) and
                    chap_passwd):
                if check_auth_plaintext(vm_name, chap_passwd):
                    test.fail("Libvirt should not pass the plaintext of the "
                              "chap password to qemu-kvm.")

    finally:
        # Delete snapshots.
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Delete the tmp files.
        libvirt.setup_or_cleanup_iscsi(is_setup=False)

        # Clean up secret
        if secret_uuid:
            virsh.secret_undefine(secret_uuid)
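
For reference, the secret value set above is just the base64 encoding of the CHAP password; a worked example with only the standard library (the password value is illustrative):

    import base64

    secret_string = base64.b64encode("redhat".encode("utf-8")).decode("utf-8")
    # secret_string == "cmVkaGF0"; virsh secret-set-value stores this string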
                         "secret_type": secret_usage_type,
                         "secret_uuid": uuid}
        elif auth_usage:
            auth_dict = {"auth_user": chap_user,
                         "secret_type": secret_usage_type,
                         "secret_usage": secret_usage_target}
        if auth_dict:
            disk_xml.auth = disk_xml.new_auth(**auth_dict)
        # Sync VM xml.
        vmxml.add_device(disk_xml)

        # After virtio 1.0 is enabled, lun type device need use virtio-scsi
        # instead of virtio, so addtional controller is needed.
        # Add controller.
        if device == "lun":
            ctrl = Controller(type_name=cntlr_type)
            if cntlr_model is not None:
                ctrl.model = cntlr_model
            if cntlr_index is not None:
                ctrl.index = cntlr_index
            ctrl_addr_dict = {}
            for addr_option in controller_addr_options.split(','):
                if addr_option != "":
                    addr_part = addr_option.split('=')
                    ctrl_addr_dict.update({addr_part[0].strip(): addr_part[1].strip()})
            ctrl.address = ctrl.new_controller_address(attrs=ctrl_addr_dict)

            # If driver_iothread is true, need add iothread attribute in controller.
            if driver_iothread:
                ctrl_driver_dict = {}
                ctrl_driver_dict.update({"iothread": driver_iothread})
Exemple #43
0
def run(test, params, env):
    """
    Verify hotplug feature for char device
    """

    vm_name = params.get("main_vm", "vm1")
    status_error = "yes" == params.get("status_error", "no")
    char_dev = params.get("char_dev", "file")
    hotplug_type = params.get("hotplug_type", "qmp")
    dup_charid = "yes" == params.get("dup_charid", "no")
    dup_devid = "yes" == params.get("dup_devid", "no")
    diff_devid = "yes" == params.get("diff_devid", "no")

    tmp_dir = os.path.join(test.tmpdir, "hotplug_serial")
    if not os.path.exists(tmp_dir):
        os.mkdir(tmp_dir)
    os.chmod(tmp_dir, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)

    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()

    # add controller for each char device
    devices = vm_xml.get_devices()
    controllers = vm_xml.get_devices(device_type="controller")
    for dev in controllers:
        if dev.type == "virtio-serial":
            devices.remove(dev)
    controller = Controller("controller")
    controller.type = "virtio-serial"
    controller.index = 0
    devices.append(controller)
    vm_xml.set_devices(devices)
    vm_xml.sync()

    # start and login vm
    vm = env.get_vm(vm_name)
    vm.start()
    session = vm.wait_for_login()

    def prepare_channel_xml(to_file, char_type, id=0):
        params = {}
        mode = ''
        if char_type == "file":
            channel_type = char_type
            channel_path = os.path.join(tmp_dir, char_type)
        elif char_type == "socket":
            channel_type = 'unix'
            channel_path = os.path.join(tmp_dir, char_type)
            mode = 'bind'
        elif char_type == "pty":
            channel_type = char_type
            channel_path = ("/dev/pts/%s" % id)
        params = {'channel_type_name': channel_type,
                  'source_path': channel_path,
                  'source_mode': mode,
                  'target_type': 'virtio',
                  'target_name': char_type}
        channel_xml = utlv.create_channel_xml(params, alias=True, address=True)
        shutil.copyfile(channel_xml.xml, to_file)

    def hotplug_device(type, char_dev, id=0):
        tmp_file = os.path.join(tmp_dir, char_dev)
        if type == "qmp":
            char_add_opt = "chardev-add "
            dev_add_opt = "device_add virtserialport,chardev="
            if char_dev == "file":
                char_add_opt += "file,path=%s,id=file" % tmp_file
                dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
            elif char_dev == "socket":
                char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
                dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
            elif char_dev == "pty":
                char_add_opt += ("pty,path=/dev/pts/%s,id=pty" % id)
                dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
            result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
            if result.exit_status:
                test.error('Failed to add chardev %s to %s. Result:\n %s'
                           % (char_dev, vm_name, result))
            result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
            if result.exit_status:
                test.error('Failed to add device %s to %s. Result:\n %s'
                           % (char_dev, vm_name, result))
        elif type == "attach":
            xml_file = os.path.join(tmp_dir, "xml_%s" % char_dev)
            if char_dev in ["file", "socket"]:
                prepare_channel_xml(xml_file, char_dev)
            elif char_dev == "pty":
                prepare_channel_xml(xml_file, char_dev, id)
            result = virsh.attach_device(vm_name, xml_file)
            # serial device was introduced by the following commit:
            # http://libvirt.org/git/?p=libvirt.git;a=commit;h=b63ea467617e3cbee4282ab2e5e780b4119cef3d
            if "unknown device type" in result.stderr:
                test.cancel('Failed to attach %s to %s. Result:\n %s'
                            % (char_dev, vm_name, result))
        return result

    def dup_hotplug(type, char_dev, id, dup_charid=False, dup_devid=False, diff_devid=False):
        tmp_file = os.path.join(tmp_dir, char_dev)
        if type == "qmp":
            char_add_opt = "chardev-add "
            dev_add_opt = "device_add virtserialport,chardev="
            if char_dev == "file":
                if dup_charid:
                    char_add_opt += "file,path=%s,id=file" % tmp_file
                if dup_devid:
                    dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file"
                if diff_devid:
                    dev_add_opt += "file,name=file,bus=virtio-serial0.0,id=file1"
            elif char_dev == "socket":
                if dup_charid:
                    char_add_opt += "socket,path=%s,server,nowait,id=socket" % tmp_file
                if dup_devid:
                    dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket"
                if diff_devid:
                    dev_add_opt += "socket,name=socket,bus=virtio-serial0.0,id=socket1"
            elif char_dev == "pty":
                if dup_charid:
                    char_add_opt += "pty,path=/dev/pts/%s,id=pty" % id
                if dup_devid:
                    dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty"
                if diff_devid:
                    dev_add_opt += "pty,name=pty,bus=virtio-serial0.0,id=pty1"
            if dup_charid:
                result = virsh.qemu_monitor_command(vm_name, char_add_opt, "--hmp")
            if dup_devid or diff_devid:
                result = virsh.qemu_monitor_command(vm_name, dev_add_opt, "--hmp")
        elif type == "attach":
            if dup_devid:
                result = hotplug_device(type, char_dev, id)
        return result

    def confirm_hotplug_result(char_dev, id=0):
        tmp_file = os.path.join(tmp_dir, char_dev)
        serial_file = os.path.join("/dev/virtio-ports", char_dev)
        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        h_o = result.stdout.strip()
        if not h_o.count("name = \"%s\"" % char_dev):
            test.fail("Can't find device (%s) in:\n%s" % (char_dev, h_o))
        if char_dev == "file":
            session.cmd("echo test > %s" % serial_file)
            with open(tmp_file, "r") as f:
                r_o = f.read()
        elif char_dev == "socket":
            session.cmd("echo test > /tmp/file")
            sock = socket.socket(socket.AF_UNIX)
            sock.connect(tmp_file)
            session.cmd("dd if=/tmp/file of=%s" % serial_file)
            r_o = sock.recv(1024).decode()
        elif char_dev == "pty":
            session.cmd("echo test > /tmp/file")
            session.cmd("dd if=/tmp/file of=%s &" % serial_file)
            dev_file = "/dev/pts/%s" % id
            if not os.path.exists(dev_file):
                test.fail("%s doesn't exist." % dev_file)
            p = subprocess.Popen(["/usr/bin/cat", dev_file], universal_newlines=True,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            session.cmd("echo test >> /tmp/file &")
            while True:
                r_o = p.stdout.readline()
                if r_o or p.poll():
                    break
                time.sleep(0.2)
            p.kill()
        if not r_o.count("test"):
            err_info = "%s device file doesn't match 'test':%s" % (char_dev, r_o)
            test.fail(err_info)

    def unhotplug_serial_device(type, char_dev):
        if type == "qmp":
            del_dev_opt = "device_del %s" % char_dev
            del_char_opt = "chardev-remove %s" % char_dev
            result = virsh.qemu_monitor_command(vm_name, del_dev_opt, "--hmp")
            if result.exit_status:
                test.error('Failed to delete device %s from %s. Result:\n%s'
                           % (char_dev, vm_name, result))
            result = virsh.qemu_monitor_command(vm_name, del_char_opt, "--hmp")
        elif type == "attach":
            xml_file = os.path.join(tmp_dir, "xml_%s" % char_dev)
            result = virsh.detach_device(vm_name, xml_file)

    def confirm_unhotplug_result(char_dev):
        serial_file = os.path.join("/dev/virtio-ports", char_dev)
        result = virsh.qemu_monitor_command(vm_name, "info qtree", "--hmp")
        uh_o = result.stdout.strip()
        if uh_o.count("chardev = \"%s\"" % char_dev):
            test.fail("Still can get serial device(%s) from: '%s'"
                      % (char_dev, uh_o))
        if os.path.exists(serial_file):
            test.fail("File '%s' still exists after unhotplug" % serial_file)

    # run test case
    try:
        if char_dev in ['file', 'socket']:
            # if char_dev is file or socket, it doesn't need pts index
            pts_id = 0
        else:
            pts_id = str(utils_misc.aton(utils_misc.get_dev_pts_max_id()) + 1)
            if os.path.exists("/dev/pts/%s" % pts_id):
                test.error('Invalid pts index (%s) provided.' % pts_id)
        if status_error:
            hotplug_device(hotplug_type, char_dev, pts_id)
            ret = dup_hotplug(hotplug_type, char_dev, pts_id, dup_charid, dup_devid, diff_devid)
            dup_o = ret.stdout.strip()
            if hotplug_type == "qmp":
                # even though the command failed, ret.exit_status is still 0,
                # so check the error text in the output instead
                err_o1 = "Duplicate ID"
                err_o2 = "Parsing chardev args failed"
                err_o3 = "Property 'virtserialport.chardev' can't"
                if (err_o1 not in dup_o) and (err_o2 not in dup_o) and (err_o3 not in dup_o):
                    test.fail("Expect fail, but run successfully:\n%s" % ret)
            else:
                if "chardev already exists" not in dup_o:
                    logging.info("Expected failure, but it ran successfully:\n%s", ret)
        else:
            if char_dev != "all":
                #1.hotplug serial device
                hotplug_device(hotplug_type, char_dev, pts_id)

                #2.confirm hotplug result
                confirm_hotplug_result(char_dev, pts_id)

                #3.unhotplug serial device
                unhotplug_serial_device(hotplug_type, char_dev)

                #4.confirm unhotplug result
                confirm_unhotplug_result(char_dev)
            else:
                #1.hotplug serial device
                hotplug_device(hotplug_type, "file")
                hotplug_device(hotplug_type, "socket")
                hotplug_device(hotplug_type, "pty", pts_id)

                #2.confirm hotplug result
                confirm_hotplug_result("file")
                confirm_hotplug_result("socket")
                confirm_hotplug_result("pty", pts_id)

                #3.unhotplug serial device
                unhotplug_serial_device(hotplug_type, "file")
                unhotplug_serial_device(hotplug_type, "socket")
                unhotplug_serial_device(hotplug_type, "pty")

                #4.confirm unhotplug result
                confirm_unhotplug_result("file")
                confirm_unhotplug_result("socket")
                confirm_unhotplug_result("pty")
    finally:
        vm_xml_backup.sync()
        if os.path.exists(tmp_dir):
            shutil.rmtree(tmp_dir)
Exemple #44
def run(test, params, env):
    """
    Test pci/pcie-to-pci bridge

    Hotplug an interface to the pci/pcie-to-pci bridge, then check the xml
    and inside the vm.
    Hot-unplug the interface, then check the xml and inside the vm.
    """
    vm_name = params.get('main_vm')
    pci_model = params.get('pci_model', 'pci')
    hotplug = 'yes' == params.get('hotplug', 'no')

    pci_model_name = params.get('pci_model_name')
    pci_br_has_device = 'yes' == params.get('pci_br_has_device', 'no')
    sound_dev_model_type = params.get('sound_dev_model_type', '')
    sound_dev_address = params.get('sound_dev_address', '')
    iface_model = params.get('iface_model', '')
    iface_source = params.get('iface_source', '')

    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = vmxml.copy()
    vm = env.get_vm(vm_name)

    try:

        # Check if there is a pci/pcie-to-pci bridge, if so,
        # just use the existing pci/pcie-to-pci-bridge to test
        ori_pci_br = [
            dev for dev in vmxml.get_devices('controller')
            if dev.type == 'pci' and dev.model == pci_model
        ]

        # If there is not a pci/pcie-to-pci bridge to test,
        # create one and add to vm
        if not ori_pci_br:
            logging.info('No %s on vm, create one', pci_model)
            pci_bridge = Controller('pci')
            pci_bridge.model = pci_model
            pci_bridge.model_name = {'name': pci_model_name}
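            # For reference, the controller XML built here is roughly
            # (assumed example):
            #   <controller type='pci' model='pcie-to-pci-bridge'>
            #     <model name='pcie-pci-bridge'/>
            #   </controller>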

            vmxml.add_device(pci_bridge)
            vmxml.sync()
            logging.debug(virsh.dumpxml(vm_name))

        # Check if pci/pcie-to-pci bridge is successfully added
        vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
        cur_pci_br = [
            dev for dev in vmxml.get_devices('controller')
            if dev.type == 'pci' and dev.model == pci_model
        ]
        if not cur_pci_br:
            test.error('Failed to add %s controller to vm xml' % pci_model)

        pci_br = cur_pci_br[0]
        logging.debug(pci_br)
        pci_br_index = pci_br.index

        # If test scenario requires another pci device on pci/pcie-to-pci
        # bridge before hotplug, add a sound device and make sure
        # the 'bus' is same with pci bridge index
        if pci_br_has_device:
            sound_dev = Sound()
            sound_dev.model_type = sound_dev_model_type
            sound_dev.address = eval(sound_dev_address % pci_br_index)
            logging.debug(sound_dev.address)
            vmxml.add_device(sound_dev)
            vmxml.sync()

        # Test hotplug scenario
        if hotplug:
            vm.start()
            vm.wait_for_login().close()

            # Create interface to be hotplugged
            logging.info('Create interface to be hotplugged')
            iface = Interface('network')
            iface.model = iface_model
            iface.source = eval(iface_source)
            mac = utils_net.generate_mac_address_simple()
            iface.mac_address = mac
            logging.debug(iface)
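            # The interface XML to attach is roughly (assumed example, with
            # iface_source taken from the test params):
            #   <interface type='network'>
            #     <mac address='52:54:00:xx:xx:xx'/>
            #     <source network='default'/>
            #     <model type='virtio'/>
            #   </interface>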

            result = virsh.attach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            xml_after_attach = VMXML.new_from_dumpxml(vm_name)
            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address is successfully attached
            iface_list = [
                iface for iface in xml_after_attach.get_devices('interface')
                if iface.mac_address == mac
            ]

            logging.debug('iface list after attach: %s', iface_list)
            if not iface_list:
                test.error('Failed to attach interface %s' % iface)

            # Check inside vm
            def check_inside_vm(session, expect=True):
                ip_output = session.cmd('ip a')
                logging.debug(ip_output)

                # True when the presence of the mac matches the expectation
                return (mac in ip_output) == expect

            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, True),
                                       timeout=60,
                                       step=5):
                test.fail('Check interface inside vm failed, '
                          'interface not successfully attached: '
                          'mac address %s not found' % mac)
            session.close()

            # Test hotunplug
            result = virsh.detach_device(vm_name, iface.xml, debug=True)
            libvirt.check_exit_status(result)

            logging.debug(virsh.dumpxml(vm_name))

            # Check if the iface with given mac address has been
            # successfully detached
            xml_after_detach = VMXML.new_from_dumpxml(vm_name)
            iface_list_after_detach = [
                iface for iface in xml_after_detach.get_devices('interface')
                if iface.mac_address == mac
            ]

            logging.debug('iface list after detach: %s',
                          iface_list_after_detach)
            if iface_list_after_detach:
                test.fail('Failed to detach device: %s' % iface)

            # Check again inside vm
            session = vm.wait_for_serial_login()
            if not utils_misc.wait_for(lambda: check_inside_vm(session, False),
                                       timeout=60,
                                       step=5):
                test.fail('Check interface inside vm failed, '
                          'interface not successfully detached: '
                          'mac address %s still found' % mac)
            session.close()

    finally:
        bkxml.sync()
Exemple #45
def run(test, params, env):
    """
    Please insert a usb disk into the host machine before the test.

    Test the libvirt usb feature based on the following matrix:
        combinations of machine type (q35/i440fx), pci/pcie
    bus controller and usb controller

    bus controller on q35 machine:
        pcie-root,pcie-root-port,pcie-to-pci-bridge,pci-bridge
        pcie-root,pcie-root-port,pcie-switch-upstream-port, pcie-switch-downstream-port
        pcie-root,dmi-to-pci-bridge,pci-bridge
    bus controller on i440fx machine:
        pci-root,pci-bridge

    usb30_controller:
        nec-xhci
        qemu-xhci
    usb20_controller:
        ich9-ehci1,ich9-uhci1,ich9-uhci2,ich9-uhci3

    Test scenarios:
    1. by default, cold-plug/hot-unplug usb host device to/from guest
    2. passthrough usb host device with vid/pid or bus/device hostdev
    3. cold-plug/unplug usb host device to/from guest
    4. hot-plug/unplug usb host device to/from guest
    5. by default, cold-plug/hot-unplug usb redirdev device to/from guest
    6. add usb redirdev device by type spicevm or tcp
    7. hot-plug/unplug usb redirdev device to/from guest
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    usb_index = params.get("usb_index", "0")
    bus_controller = params.get("bus_controller", "")
    usb_model = params.get("usb_model", "")
    start_timeout = int(params.get("start_timeout", "60"))
    device_name = params.get("device_name", "")
    device_type = params.get("device_type", "")
    device_mode = params.get("device_mode", "")
    port_num = params.get("port_num", "")
    pkgs_host = params.get("pkgs_host", "")
    pkgs_guest = params.get("pkgs_guest", "")
    usb_hub = "yes" == params.get("usb_hub", "no")
    status_error = "yes" == params.get("status_error", "no")
    vid_pid = "yes" == params.get("vid_pid", "no")
    bus_dev = "yes" == params.get("bus_dev", "no")
    hotplug = "yes" == params.get("hotplug", "no")
    coldunplug = "yes" == params.get("coldunplug", "no")
    usb_alias = "yes" == params.get("usb_alias", "no")
    redirdev_alias = "yes" == params.get("redirdev_alias", "no")
    set_addr = params.get("set_addr", "yes")
    ctrl_addr_domain = params.get("ctrl_addr_domain", "")
    ctrl_addr_slot = params.get("ctrl_addr_slot", "")
    ctrl_addr_function = params.get("ctrl_addr_function", "")

    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    def get_usb_source(lsusb_list, session=None):
        """
        calculate a dict of the source xml of usb device based on the output from command lsusb

        :param lsusb_list: a list of the output from command lsusb
        :param session: a console session of guest
        :return: a dict of the source xml of usb device
        """

        logging.debug("lsusb command result: {}".format(lsusb_list))
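        # A typical lsusb line looks like:
        #   Bus 001 Device 003: ID 0951:1666 Kingston Technology ...
        # so line.split()[1] is the bus number, [3] the device number
        # (with a trailing ':') and [5] the vendor:product id pair.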
        source_list = []
        product_list = []
        for line in lsusb_list:
            source = {}
            product = {}
            src = {}
            # filter out the usb hub device without vendor/product id
            if re.search("hub", line, re.IGNORECASE):
                continue
            vendor_id, product_id = None, None
            if len(line.split()[5].split(':')) == 2:
                vendor_id, product_id = line.split()[5].split(':')
            if not (vendor_id and product_id):
                test.fail("vendor/product id is not available")
            # filter out the remaining usb hub devices not caught above
            cmd = "lsusb -v -d {}:{}".format(vendor_id, product_id)
            if session:
                output = session.get_command_output(cmd)
            else:
                output = process.run(cmd).stdout_text
            if "hub" in output:
                continue
            product['vendor_id'] = "0x" + vendor_id
            product['product_id'] = "0x" + product_id
            product_list.append(product.copy())
            if vid_pid:
                source = product.copy()
            if bus_dev:
                source['bus'] = int(line.split()[1])
                source['device'] = int(line.split()[3].rstrip(':'))
            source_list.append(source.copy())
        logging.debug("usb device product dict {}, source dict {}".format(
            product_list, source_list))
        if not source_list or not product_list:
            test.fail("no available usb device in host")
        src['source'] = source_list
        src['product'] = product_list
        return src

    def usb_disk_check(session, src_guest):
        """
        check usb storage disks passed from host with dd operation and product id

        :param session: a console session of guest
        :param src_guest: a dict of the source xml of usb device from guest
        """

        # check and write the usb disk
        status, output = session.cmd_status_output("udevadm info /dev/sda")
        if status:
            test.fail("no available usb storage device")
        if session.cmd_status("dd if=/dev/zero of=/dev/sda bs=1M count=100",
                              timeout=300):
            test.fail("usb storage device write fail")

        # check whether the guest got the right usb device
        output = output.strip().splitlines()
        for guest in src_guest['product']:
            pattern = "ID_MODEL_ID={}".format(guest['product_id'].lstrip("0x"))
            for line in output:
                if pattern in line:
                    return
        test.fail("usb device {} is NOT found in output {}".format(
            src_guest['product'], output))

    def usb_device_check(session, src_host):
        """
        check usb devices passed from host with xml file, output of lsusb, and
        usb storage disk.

        :param session: a console session of guest
        :param src_host: a dict of the source xml of usb device from host
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        output = session.get_command_output("lsusb")

        # check usb device xml
        for addr in src_host['source']:
            if device_name == "redirdev":
                pattern = 'redirdev bus="usb" type="{}"'.format(device_type)
            if device_name == "hostdev":
                if vid_pid:
                    pattern = 'product id="{}"'.format(addr['product_id'])
                if bus_dev:
                    pattern = 'address bus="{}" device="{}"'.format(
                        int(addr['bus']), int(addr['device']))
            if pattern not in str(vmxml):
                test.fail("the xml check of usb device fails")

        if device_name == "hostdev" or device_type == "tcp":
            # check the pid and vid of usb passthrough device in guest
            src_guest = get_usb_source(output.strip().splitlines(), session)
            for host in src_host['product']:
                flag = False
                for guest in src_guest['product']:
                    if (guest['product_id'] == host['product_id']
                            and guest['vendor_id'] == host['vendor_id']):
                        flag = True
                        break
                if not flag:
                    test.fail("the check of usb device in guest fails")

            # check usb disk /dev/sda
            usb_disk_check(session, src_guest)

    def check_alias(device_alias):
        """
        check usb controller alias from qemu command line with xml config file

        :param device_alias: a {model:alias} dict of the usb controller or
                             a {port:alias} dict of the usb redirdev device
        """
        output = process.run("ps -ef | grep {}".format(vm_name),
                             shell=True).stdout_text
        logging.debug('"ps -ef | grep {}" output {}'.format(vm_name, output))
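        # e.g. for model 'ich9-ehci1' the qemu option matched below is
        # roughly (assumed example):
        #   -device ich9-usb-ehci1,id=ua-usb0<random_id>
        # while the ich9-uhci companions are matched via masterbus=.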
        if usb_alias:
            for model in usb_model.split(','):
                device = (model if model == "qemu-xhci" else ('-').join(
                    [model.split('-')[0], "usb",
                     model.split('-')[1]]))
                pattern = ("masterbus={}".format(device_alias['ich9-ehci1'])
                           if "ich9-uhci" in model else "id={}".format(
                               device_alias[model]))
                pattern = "-device {},".format(device) + pattern
                logging.debug("usb controller model {}, pattern {}".format(
                    model, pattern))
                if not re.search(pattern, output):
                    test.fail("the check of controller alias fails")
        if redirdev_alias:
            for alias in device_alias.values():
                pattern = "-device usb-redir,chardev=char{0},id={0}".format(
                    alias)
                if not re.search(pattern, output):
                    test.fail("the check of controller alias fails")

    try:
        # remove usb controller/device from xml
        controllers = vmxml.get_devices(device_type="controller")
        for dev in controllers:
            if dev.type == "usb" or dev.type == "pci":
                vmxml.del_device(dev)

        # clean device address when the address type of device is pci
        for element in vmxml.xmltreefile.findall("/devices/*/address"):
            if element.get('type') == "pci":
                vmxml.xmltreefile.remove(element)
        vmxml.xmltreefile.write()

        hubs = vmxml.get_devices(device_type="hub")
        for hub in hubs:
            if hub.type_name == "usb":
                vmxml.del_device(hub)

        # assemble the xml of pci/pcie bus
        for model in bus_controller.split(','):
            pci_bridge = Controller('pci')
            pci_bridge.type = "pci"
            pci_bridge.model = model
            vmxml.add_device(pci_bridge)
        # find the pci endpoint's name that usb controller will attach
        pci_endpoint = bus_controller.split(",")[-1]
        # find the pci's index that usb controller will attach
        pci_index_for_usb_controller = len(bus_controller.split(",")) - 1

        device_alias = {}
        random_id = process.run("uuidgen").stdout_text.strip()
        # assemble the xml of usb controller
        for i, model in enumerate(usb_model.split(',')):
            controller = Controller("controller")
            controller.type = "usb"
            controller.index = usb_index
            controller.model = model
            if usb_alias:
                alias_str = "ua-usb" + str(i) + random_id
                device_alias[model] = alias_str
                alias = {"name": alias_str}
                if "ich9" not in model:
                    controller.index = i
                controller.alias = alias
            # for 'usb_all' case, will not set addr
            if set_addr == "yes":
                ctrl_addr_dict = {
                    'type': 'pci',
                    'domain': ctrl_addr_domain,
                    'bus': '0x0' + str(pci_index_for_usb_controller),
                    'slot': ctrl_addr_slot,
                    'function': ctrl_addr_function
                }
                if "uhci" in controller.model:
                    ctrl_addr_dict['function'] = "0x0" + str(i)
                # pcie-switch-downstream-port only supports slot 0
                if pci_endpoint == "pcie-switch-downstream-port":
                    ctrl_addr_dict['slot'] = "0x00"
                controller.address = controller.new_controller_address(
                    attrs=ctrl_addr_dict)
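                # The resulting address element is roughly (assumed example):
                #   <address type='pci' domain='0x0000' bus='0x01'
                #            slot='0x00' function='0x0'/>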
            vmxml.add_device(controller)

        if usb_hub:
            hub = Hub("usb")
            vmxml.add_device(hub)

        # install essential package usbutils in host
        for pkg in pkgs_host.split(','):
            if not utils_package.package_install(pkg):
                test.fail("package {} installation failed".format(pkg))

        # prepare to assemble the xml of usb device
        devs = vmxml.get_devices(device_name)
        for dev in devs:
            if dev.type == device_type:
                vmxml.del_device(dev)
        lsusb_list = process.run('lsusb').stdout_text.splitlines()
        src_host = get_usb_source(lsusb_list)
        dev_list = []

        # assemble the xml of usb passthrough device
        if device_name == "hostdev":
            for addr in src_host['source']:
                device_xml = vmxml.get_device_class(device_name)()
                device_xml.type = device_type
                source_xml = device_xml.Source()
                device_xml.mode = device_mode
                device_xml.managed = 'no'
                if vid_pid:
                    source_xml.vendor_id = addr['vendor_id']
                    source_xml.product_id = addr['product_id']
                if bus_dev:
                    source_xml.untyped_address = source_xml.new_untyped_address(
                        **addr)
                device_xml.source = source_xml
                if hotplug:
                    dev_list.append(device_xml)
                else:
                    vmxml.add_device(device_xml)

        # assemble the xml of usb redirdev device
        if device_name == "redirdev":
            for i, addr in enumerate(src_host['product']):
                device_xml = vmxml.get_device_class(device_name)()
                device_xml.type = device_type
                device_xml.bus = "usb"
                if device_type == "tcp":
                    source_xml = device_xml.Source()
                    source_xml.mode = device_mode
                    source_xml.host = "localhost"
                    port = str(int(port_num) + i)
                    source_xml.service = port
                    source_xml.tls = "no"
                    device_xml.source = source_xml
                    # start usbredirserver
                    vendor_id = addr['vendor_id'].lstrip("0x")
                    product_id = addr['product_id'].lstrip("0x")
                    ps = process.SubProcess(
                        "usbredirserver -p {} {}:{}".format(
                            port, vendor_id, product_id),
                        shell=True)
                    server_id = ps.start()
                if redirdev_alias:
                    alias_str = "ua-redir" + str(i) + random_id
                    device_alias[port] = alias_str
                    alias = {"name": alias_str}
                    device_xml.alias = alias
                if hotplug:
                    dev_list.append(device_xml)
                else:
                    vmxml.add_device(device_xml)

        # start guest
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login(timeout=start_timeout)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug("vm xml after starting up {}".format(vmxml))

        # check usb controller in guest
        for model_type in usb_model.split(','):
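            # e.g. 'ich9-uhci1' -> 'uhci' and 'nec-xhci' -> 'xhci'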
            model_type = model_type.split('-')[-1].rstrip("1,2,3")
            logging.debug(
                "check usb controller {} in guest".format(model_type))
            if session.cmd_status("dmesg | grep {}".format(model_type)):
                test.fail("usb controller check failed")
        if usb_alias or redirdev_alias:
            check_alias(device_alias)

        # install package usbutils in guest
        for pkg in pkgs_guest.split(','):
            if not utils_package.package_install(pkg, session):
                test.fail("package {} installation fails in guest".format(pkg))

        # hotplug usb device
        if hotplug:
            for dev in dev_list:
                virsh.attach_device(vm_name,
                                    dev.xml,
                                    flagstr="--live",
                                    debug=True,
                                    ignore_status=False)
                if device_name == "hostdev":
                    utils_misc.wait_for(
                        lambda: not session.cmd_status(
                            "lsusb | grep {}".format(
                                dev.source.product_id.lstrip("0x"))),
                        10)
            vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            logging.debug("vmxml after attaching {}".format(vmxml))

        # check usb device
        usb_device_check(session, src_host)

        # detach usb device from guest
        devs = vmxml.get_devices(device_name)
        if coldunplug:
            vm.destroy()

        for dev in devs:
            if dev.type == device_type:
                if coldunplug:
                    vmxml.del_device(dev)
                else:
                    virsh.detach_device(vm_name,
                                        dev.xml,
                                        flagstr="--live",
                                        debug=True,
                                        ignore_status=False)

        # check the usb device element in xml after detaching
        if coldunplug:
            vmxml.sync()
            vm.start()
            vm.wait_for_login(timeout=start_timeout).close()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        devs = vmxml.get_devices(device_name)
        for dev in devs:
            if dev.type == device_type:
                test.fail("failed to detach usb device")

    finally:
        if 'session' in locals():
            session.close()
        if 'server_id' in locals():
            process.run("killall usbredirserver")
        vmxml_backup.sync()
Exemple #46
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi target
    3. Create an iscsi pool (for the volume disk case)
    4. Create an iscsi network disk XML
    5. Attach disk with the XML file and check the disk inside the VM
    6. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    vg_name = params.get("virt_disk_vg_name", "vg_test_0")
    lv_name = params.get("virt_disk_lv_name", "lv_test_0")
    driver_packed = params.get("driver_packed", "on")
    disk_packed = "yes" == params.get("disk_packed", "no")
    scsi_packed = "yes" == params.get("scsi_packed", "no")

    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk is not supported in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk is not supported in"
                        " current libvirt version.")
    if pool_type == "iscsi-direct":
        if not libvirt_version.version_compare(4, 7, 0):
            test.cancel("iscsi-direct pool is not supported in"
                        " current libvirt version.")
    if ((disk_packed or scsi_packed)
            and not libvirt_version.version_compare(6, 3, 0)):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")
    # Back up the VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Fix no more PCI slots issue in certain cases.
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    machine_type = params.get("machine_type", "pc")
    if machine_type == 'q35':
        vm_dump_xml.remove_all_device_by_type('controller')
        machine_list = vm_dump_xml.os.machine.split("-")
        vm_dump_xml.set_os_attrs(
            **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
        q35_pcie_dict0 = {
            'controller_model': 'pcie-root',
            'controller_type': 'pci',
            'controller_index': 0
        }
        q35_pcie_dict1 = {
            'controller_model': 'pcie-root-port',
            'controller_type': 'pci'
        }
        vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
        # Add enough controllers to allow attaching disks multiple times
        for i in list(range(1, 12)):
            q35_pcie_dict1.update({'controller_index': "%d" % i})
            vm_dump_xml.add_device(
                libvirt.create_controller_xml(q35_pcie_dict1))
        vm_dump_xml.sync()

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Failed to get the newly created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        if disk_type == "block":
            iscsi_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=True,
                image_size="1G",
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        else:
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size='1G',
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = pool_target
            if chap_auth:
                pool_src_xml.auth_type = "chap"
                pool_src_xml.auth_username = chap_user
                pool_src_xml.secret_usage = secret_usage_target
                poolxml.set_source(pool_src_xml)
            if pool_type == "iscsi-direct":
                iscsi_initiator = params.get('iscsi_initiator')
                pool_src_xml.iqn_name = iscsi_initiator
                poolxml.set_source(pool_src_xml)
            # Create iscsi/iscsi-direct pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            xml = virsh.pool_dumpxml(disk_src_pool)
            logging.debug("Pool '%s' XML:\n%s", disk_src_pool, xml)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
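                    # vol_list[0] is the 'Name Path' header row of the
                    # vol-list output, so return the first real volume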
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            if pool_type == "iscsi":
                process.run('qemu-img create -f qcow2 %s %s' %
                            (vol_path, '100M'),
                            shell=True,
                            verbose=True)
            else:
                # Get iscsi URL to create a qcow2 volume disk
                disk_path = ("iscsi://[%s]/%s/%s" %
                             (disk_src_host, iscsi_target, lun_num))
                blk_source = "/mnt/test.qcow2"
                process.run('qemu-img create -f qcow2 %s %s' %
                            (blk_source, '100M'),
                            shell=True,
                            verbose=True)
                process.run('qemu-img convert -O qcow2 %s %s' %
                            (blk_source, disk_path),
                            shell=True,
                            verbose=True)

        # Create block device
        if disk_type == "block":
            logging.debug("iscsi dev name: %s", iscsi_target)
            lv_utils.vg_create(vg_name, iscsi_target)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
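        # For reference, a network-type iscsi disk built from these
        # parameters is roughly (assumed example):
        #   <disk type='network' device='disk'>
        #     <source protocol='iscsi' name='<iscsi_target>/<lun_num>'>
        #       <host name='127.0.0.1' port='3260'/>
        #     </source>
        #     <target dev='vdb' bus='virtio'/>
        #   </disk>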
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            if pool_type == "iscsi":
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2',
                    'source_mode': disk_src_mode
                }
            # the iscsi-direct pool doesn't include the source_mode option
            else:
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2'
                }
        elif disk_type == "block":
            disk_params_src = {
                'source_file': device_source,
                'driver_type': 'raw'
            }
            # Start guest with packed attribute in disk
            if disk_packed:
                disk_params_src['driver_packed'] = driver_packed
            # Start guest with packed attribute in scsi controller
            if scsi_packed:
                scsi_controller = Controller("controller")
                scsi_controller.type = "scsi"
                scsi_controller.model = "virtio-scsi"
                scsi_controller.driver = {'packed': driver_packed}
                vm_dump_xml.add_device(scsi_controller)
                vm_dump_xml.sync()
        else:
            test.cancel("Unsupported disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth and disk_type != "volume":
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait until the domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            # virsh snapshot-revert is not supported on combined internal and
            # external snapshots; see more details at
            # https://bugzilla.redhat.com/show_bug.cgi?id=1733173
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            virsh.snapshot_create_as(vm_name,
                                     snapshot_name2,
                                     ignore_status=False,
                                     debug=True)

            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "start_with_packed":
            expect_xml_line = "packed=\"%s\"" % driver_packed
            libvirt.check_dumpxml(vm, expect_xml_line)
            expect_qemu_line = "packed=%s" % driver_packed
            libvirt.check_qemu_cmd_line(expect_qemu_line)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupported operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name,
                                       disk_target,
                                       wait_remove_event=True)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if disk_type == "block":
                clean_up_lvm(iscsi_target, vg_name, lv_name)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
Exemple #47
                iface.model = "virtio"
                del iface.address

            if bootdisk_snapshot != "":
                disk.snapshot = bootdisk_snapshot

            disk.target = {"dev": bootdisk_target, "bus": bootdisk_bus}
            device_source = disk.source.attrs["file"]

            del disk.address
            vmxml.devices = xml_devices
            vmxml.define()

        # Add virtio_scsi controller.
        if virtio_scsi_controller:
            scsi_controller = Controller("controller")
            scsi_controller.type = "scsi"
            scsi_controller.index = "0"
            scsi_controller.model = "virtio-scsi"
            if virtio_scsi_controller_driver != "":
                driver_dict = {}
                for driver_option in virtio_scsi_controller_driver.split(','):
                    if driver_option != "":
                        d = driver_option.split('=')
                        driver_dict.update({d[0].strip(): d[1].strip()})
                scsi_controller.driver = driver_dict
            vmxml.add_device(scsi_controller)

        # Test usb devices.
        usb_devices = {}
        if add_usb_device:
Exemple #48
def run_libvirt_scsi(test, params, env):
    # Get variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    img_type = ('yes' == params.get("libvirt_scsi_img_type", "no"))
    cdrom_type = ('yes' == params.get("libvirt_scsi_cdrom_type", "no"))
    partition_type = ('yes' == params.get("libvirt_scsi_partition_type", "no"))
    partition = params.get("libvirt_scsi_partition",
                           "ENTER.YOUR.AVAILABLE.PARTITION")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    # Init a VM instance and a VMXML instance.
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    # Keep a backup of xml to restore it in cleanup.
    backup_xml = vmxml.copy()
    # Add a scsi controller if there is not.
    controller_devices = vmxml.get_devices("controller")
    scsi_controller_exists = False
    for device in controller_devices:
        if device.type == "scsi":
            scsi_controller_exists = True
            break
    if not scsi_controller_exists:
        scsi_controller = Controller("controller")
        scsi_controller.type = "scsi"
        scsi_controller.index = "0"
        scsi_controller.model = "virtio-scsi"
        vmxml.add_device(scsi_controller)
    # Add disk with bus of scsi into vmxml.
    if img_type:
        # Init a QemuImg instance.
        img_name = "libvirt_scsi"
        params['image_name'] = img_name
        image = qemu_storage.QemuImg(params, data_dir.get_tmp_dir(), img_name)
        # Create a image.
        img_path, _ = image.create(params)
        img_disk = Disk(type_name="file")
        img_disk.device = "disk"
        img_disk.source = img_disk.new_disk_source(
            **{'attrs': {
                'file': img_path
            }})
        img_disk.target = {'dev': "vde", 'bus': "scsi"}
        vmxml.add_device(img_disk)
    if cdrom_type:
        # Init a CdromDisk instance.
        cdrom_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_scsi")
        cdrom = CdromDisk(cdrom_path, data_dir.get_tmp_dir())
        cdrom.close()
        cdrom_disk = Disk(type_name="file")
        cdrom_disk.device = "cdrom"
        cdrom_disk.target = {'dev': "vdf", 'bus': "scsi"}
        cdrom_disk.source = cdrom_disk.new_disk_source(
            **{'attrs': {
                'file': cdrom_path
            }})
        vmxml.add_device(cdrom_disk)
    if partition_type:
        if partition.count("ENTER.YOUR"):
            raise error.TestNAError("Partition for partition test "
                                    "is not configured.")
        partition_disk = Disk(type_name="block")
        partition_disk.device = "disk"
        partition_disk.target = {'dev': "vdg", 'bus': "scsi"}
        partition_disk.source = partition_disk.new_disk_source(
            **{'attrs': {
                'dev': partition
            }})
        vmxml.add_device(partition_disk)
    # sync the vmxml with VM.
    vmxml.sync()
    # Check the result of scsi disk.
    try:
        try:
            vm.start()
            # Start VM successfully.
            if status_error:
                raise error.TestFail('Starting VM succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case. "
                                     "Error: %s" % e)
    finally:
        # clean up.
        backup_xml.sync()
Exemple #49
def set_domain_disk(vmxml, blk_source, params, test):
    """
    Replace the domain disk with new setup device or download image

    :param vmxml: The instance of VMXML class
    :param blk_source: The domain disk image path
    :param params: Avocado params object
    :param test: Avocado test object
    """
    disk_type = params.get("disk_type", "file")
    boot_dev = params.get("boot_dev", "hd")
    target_dev = params.get("target_dev", "vdb")
    device_bus = params.get("device_bus", "virtio")
    disk_img = params.get("disk_img")
    image_size = params.get("image_size", "3G")
    vol_name = params.get("vol_name")
    disk_format = params.get("disk_format", "qcow2")
    driver_type = params.get("driver_type", "qcow2")
    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    source_protocol = params.get("source_protocol", "")
    boot_iso_file = os.path.join(data_dir.get_tmp_dir(), "boot.iso")
    non_release_os_url = params.get("non_release_os_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(),
                                      "non_released_os.qcow2")
    release_os_url = params.get("release_os_url", "")
    download_released_file_path = os.path.join(data_dir.get_tmp_dir(),
                                               "released_os.qcow2")
    brick_path = os.path.join(test.virtdir, "gluster-pool")
    usb_index = params.get("usb_index", "0")
    bus_controller = params.get("bus_controller", "")
    usb_controller = params.get("usb_controller", "")
    usb_model = params.get("usb_model", "")

    global cleanup_iscsi
    global cleanup_gluster
    disk_params = {
        'disk_type': disk_type,
        'target_dev': target_dev,
        'target_bus': device_bus,
        'driver_type': driver_type
    }
    if source_protocol == 'iscsi':
        if disk_type == 'block':
            if release_os_url:
                blk_source = download_released_file_path
            kwargs = {'image_size': image_size, 'disk_format': disk_format}
            iscsi_target = prepare_iscsi_disk(blk_source, **kwargs)
            if iscsi_target is None:
                test.error("Failed to create iscsi disk")
            else:
                cleanup_iscsi = True
                disk_params.update({'source_file': iscsi_target})
    elif source_protocol == 'usb':

        # assemble the xml of usb controller
        controllers = vmxml.get_devices(device_type="controller")
        for dev in controllers:
            if dev.type == "usb":
                vmxml.del_device(dev)

        for model in usb_model.split(','):
            controller = Controller("controller")
            controller.type = "usb"
            controller.index = usb_index
            controller.model = model
            vmxml.add_device(controller)

        # prepare virtual disk device
        dir_name = os.path.dirname(blk_source)
        device_name = os.path.join(dir_name, "usb_virtual_disk.qcow2")
        cmd = ("qemu-img convert -O {} {} {}".format(disk_format, blk_source,
                                                     device_name))
        process.run(cmd, shell=True)
        disk_params.update({'source_file': device_name})

    elif source_protocol == 'gluster':
        if disk_type == 'network':
            if release_os_url:
                blk_source = download_released_file_path
            host_ip = prepare_gluster_disk(blk_source,
                                           test,
                                           brick_path=brick_path,
                                           **params)
            if host_ip is None:
                test.error("Failed to create glusterfs disk")
            else:
                cleanup_gluster = True
            source_name = "%s/%s" % (vol_name, disk_img)
            disk_params.update({
                'source_name': source_name,
                'source_host_name': host_ip,
                'source_host_port': '24007',
                'source_protocol': source_protocol
            })
    elif source_protocol == 'rbd':
        if disk_type == 'network':
            if release_os_url:
                blk_source = download_released_file_path
            disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
            disk_cmd = ("qemu-img convert -O %s %s %s" %
                        (disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False)
            disk_params.update({
                'source_name': disk_src_name,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port,
                'source_protocol': source_protocol
            })
    elif non_release_os_url:
        disk_params.update({'source_file': download_file_path})
    elif boot_dev == "cdrom":
        disk_params.update({
            'device_type': 'cdrom',
            'source_file': boot_iso_file
        })
    elif release_os_url:
        disk_params.update({'source_file': download_released_file_path})
    else:
        disk_params.update({'source_file': blk_source})

    new_disk = Disk(type_name=disk_type)
    with open(create_disk_xml(disk_params)) as disk_xml_file:
        new_disk.xml = disk_xml_file.read()
    vmxml.remove_all_disk()
    vmxml.add_device(new_disk)
Exemple #50
def run(test, params, env):
    """
    Test for PCI single function device(NIC or Infiniband)
    passthrough to libvirt guest in hotplug mode.

    a). NIC or Infiniband:
        1. Get params.
        2. Get the PCI device function.
        3. Start the guest.
        4. Prepare the device XML to be attached.
        5. Hotplug the device.
        6. Check whether the device was hotplugged.
        7. Ping server_ip from the guest.
        8. Test flood ping.
        9. Test guest life cycle.
        10. Test virsh dumpxml.
        11. Hotunplug the device.
        12. Test stress to verify the new network device.
    """
    # Get the test parameters
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    device_name = params.get("libvirt_pci_net_dev_name", "ENTER_YOUR.DEV.NAME")
    pci_id = params.get("libvirt_pci_net_dev_label", "ENTER_YOUR.DEV.LABEL")
    net_ip = params.get("libvirt_pci_net_ip", "ENTER_YOUR.IP")
    server_ip = params.get("libvirt_pci_server_ip",
                           "ENTER_YOUR.SERVER.IP")
    netmask = params.get("libvirt_pci_net_mask", "ENTER_YOUR.MASK")
    stress_val = params.get("stress_val", "1")
    stress = params.get("stress", "no")
    timeout = params.get("timeout", "ENTER_YOUR.TIMEOUT.VALUE")
    suspend_operation = params.get("suspend_operation", "no")
    reboot_operation = params.get("reboot_operation", "no")
    virsh_dumpxml = params.get("virsh_dumpxml", "no")
    virsh_dump = params.get("virsh_dump", "no")
    flood_ping = params.get("flood_ping", "no")
    # Check the parameters from configuration file.
    for each_param in params.values():
        if "ENTER_YOUR" in each_param:
            test.cancel("Please enter the configuration details of %s."
                        % each_param)
    # Convert only after the placeholder check, so a missing value cancels
    # the test instead of raising ValueError
    timeout = int(timeout)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    devices = vmxml.get_devices()
    pci_devs = []
    dargs = {'debug': True, 'ignore_status': True}
    controller = Controller("controller")
    controller.type = "pci"
    controller.index = params.get("index", "1")
    controller.model = params.get("model", "pci-root")
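    # The appended controller renders roughly as (sketch, with the default
    # index and model):
    #   <controller type='pci' index='1' model='pci-root'/>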
    devices.append(controller)
    vmxml.set_devices(devices)
    vmxml.sync()
    if not vm.is_alive():
        vm.start()
    session = vm.wait_for_login()
    if not utils_package.package_install(["ppc64-diag",
                                          "librtas", "powerpc-utils"],
                                         session, 360):
        test.cancel("Failed to install dependencies")
    if virsh_dump == "yes":
        dump_file = os.path.join(data_dir.get_tmp_dir(), "virshdump.xml")
    output = session.cmd_output("ip link")
    logging.debug("checking for output - %s", output)
    nic_list_before = str(output.splitlines())
    logging.debug("nic_list before hotplug %s", nic_list_before)
    obj = PciAssignable()
    # get the IDs of all functions in the same group
    pci_ids = obj.get_same_group_devs(pci_id)
    for val in pci_ids:
        temp = val.replace(":", "_")
        pci_devs.extend(["pci_"+temp])
    pci_val = pci_devs[0].replace(".", "_")
    pci_xml = NodedevXML.new_from_dumpxml(pci_val)
    pci_address = pci_xml.cap.get_address_dict()
    dev = VMXML.get_device_class('hostdev')()
    dev.mode = 'subsystem'
    dev.hostdev_type = 'pci'
    dev.managed = 'no'
    dev.source = dev.new_source(**pci_address)
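    # The assembled hostdev element resembles (sketch; the address values
    # come from the node device XML dumped above):
    #   <hostdev mode='subsystem' type='pci' managed='no'>
    #     <source>
    #       <address domain='0x0000' bus='0x01' slot='0x00' function='0x0'/>
    #     </source>
    #   </hostdev>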

    def detach_device(pci_devs, pci_ids):
        # detaching the device from host
        for pci_value, pci_node in zip(pci_devs, pci_ids):
            pci_value = pci_value.replace(".", "_")
            cmd = "lspci -ks %s | grep 'Kernel driver in use' |\
                   awk '{print $5}'" % pci_node
            driver_name = to_text(process.system_output(cmd, shell=True).strip())
            if driver_name == "vfio-pci":
                logging.debug("device alreay detached")
            else:
                if virsh.nodedev_detach(pci_value).exit_status:
                    test.error("Hostdev node detach failed")
                driver_name = to_text(process.system_output(cmd, shell=True).strip())
                if driver_name != "vfio-pci":
                    test.error("driver bind failed after detach")

    def reattach_device(pci_devs, pci_ids):
        # reattach the device to host
        for pci_value, pci_node in zip(pci_devs, pci_ids):
            pci_value = pci_value.replace(".", "_")
            cmd = "lspci -ks %s | grep 'Kernel driver in use' |\
                   awk '{print $5}'" % pci_node
            driver_name = to_text(process.system_output(cmd, shell=True).strip())
            if driver_name != "vfio-pci":
                logging.debug("device alreay attached")
            else:
                if virsh.nodedev_reattach(pci_value).exit_status:
                    test.fail("Hostdev node reattach failed")
                driver_name = to_text(process.system_output(cmd, shell=True).strip())
                if driver_name == "vfio-pci":
                    test.error("driver bind failed after reattach")

    def check_attach_pci():
        session = vm.wait_for_login()
        output = session.cmd_output("ip link")
        nic_list_after = str(output.splitlines())
        logging.debug(nic_list_after)
        return nic_list_after != nic_list_before

    def device_hotplug():
        if not libvirt_version.version_compare(3, 10, 0):
            detach_device(pci_devs, pci_ids)
        # attach the device in hotplug mode
        result = virsh.attach_device(vm_name, dev.xml,
                                     flagstr="--live", debug=True)
        if result.exit_status:
            test.error(result.stderr.strip())
        else:
            logging.debug(result.stdout.strip())
        if not utils_misc.wait_for(check_attach_pci, timeout):
            test.fail("timeout value is not sufficient")

    # detach hot plugged device
    def device_hotunplug():
        result = virsh.detach_device(vm_name, dev.xml,
                                     flagstr="--live", debug=True)
        if result.exit_status:
            test.fail(result.stderr.strip())
        else:
            logging.debug(result.stdout.strip())
        # FIXME: after detaching the device from the guest, it needs some
        # time before any other operation can be performed on it
        time.sleep(timeout)
        if not libvirt_version.version_compare(3, 10, 0):
            pci_devs.sort()
            reattach_device(pci_devs, pci_ids)

    def test_ping():
        try:
            output = session.cmd_output("lspci -nn | grep %s" % device_name)
            nic_id = str(output).split(' ', 1)[0]
            nic_name = str(utils_misc.get_interface_from_pci_id(nic_id,
                                                                session))
            session.cmd("ip addr flush dev %s" % nic_name)
            session.cmd("ip addr add %s/%s dev %s"
                        % (net_ip, netmask, nic_name))
            session.cmd("ip link set %s up" % nic_name)
            s_ping, o_ping = utils_net.ping(dest=server_ip, count=5,
                                            interface=net_ip)
            logging.info(s_ping)
            logging.info(o_ping)
            if s_ping:
                test.fail("Ping test failed")
        except aexpect.ShellError as detail:
            test.error("Failed to set the IP address or bring up the "
                       "interface on the guest.\n"
                       "Detail: %s." % detail)