Example 1
def test_nic_fibre_group(vm, params):
    """
    Try to attach nic and fibre device at same time in iommu group to vm.

    1.Get original available interfaces&disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface&disk in vm.
    """
    nic_pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    fibre_pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    if nic_pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid Ethernet pci device id.")
    if fibre_pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid Fibre pci device id.")
    disk_check = "yes" == params.get("fibre_pci_disk_check", "no")

    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    before_pci_fibres = vm.get_pci_devices("Fibre")
    before_disks = vm.get_disks()
    logging.debug("Ethernet PCI devices before:%s", before_pci_nics)
    logging.debug("Ethernet interfaces before:%s", before_interfaces)
    logging.debug("Fibre PCI devices before:%s", before_pci_fibres)
    logging.debug("Disks before:%s", before_disks)
    vm.destroy()

    prepare_devices(nic_pci_id, "Ethernet")
    prepare_devices(fibre_pci_id, "Fibre")
    try:
        nicxmlfile = utlv.create_hostdev_xml(nic_pci_id)
        virsh.attach_device(domain_opt=vm.name,
                            file_opt=nicxmlfile,
                            flagstr="--config",
                            debug=True,
                            ignore_status=False)
        fibrexmlfile = utlv.create_hostdev_xml(fibre_pci_id)
        virsh.attach_device(domain_opt=vm.name,
                            file_opt=fibrexmlfile,
                            flagstr="--config",
                            debug=True,
                            ignore_status=False)
        vm.start()
    except (error.CmdError, virt_vm.VMStartError), detail:
        cleanup_devices(nic_pci_id, "Ethernet")
        cleanup_devices(fibre_pci_id, "Fibre")
        raise error.TestFail("New device does not work well: %s" % detail)
Example 2
def test_nic_fibre_group(vm, params):
    """
    Try to attach nic and fibre device at same time in iommu group to vm.

    1.Get original available interfaces&disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface&disk in vm.
    """
    nic_pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    fibre_pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    if nic_pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid Ethernet pci device id.")
    if fibre_pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid Fibre pci device id.")
    disk_check = "yes" == params.get("fibre_pci_disk_check", "no")

    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    before_pci_fibres = vm.get_pci_devices("Fibre")
    before_disks = vm.get_disks()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    logging.debug("Fibre PCI devices before:%s",
                  before_pci_fibres)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    prepare_devices(nic_pci_id, "Ethernet")
    prepare_devices(fibre_pci_id, "Fibre")
    try:
        nicxmlfile = utlv.create_hostdev_xml(nic_pci_id)
        virsh.attach_device(domain_opt=vm.name, file_opt=nicxmlfile,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        fibrexmlfile = utlv.create_hostdev_xml(fibre_pci_id)
        virsh.attach_device(domain_opt=vm.name, file_opt=fibrexmlfile,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        vm.start()
    except (error.CmdError, virt_vm.VMStartError), detail:
        cleanup_devices(nic_pci_id, "Ethernet")
        cleanup_devices(fibre_pci_id, "Fibre")
        raise error.TestFail("New device does not work well: %s" % detail)
Example 3
def test_win_fibre_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added disk in vm.
    """
    pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    device_type = "Fibre"
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")

    # Login vm to get disks before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_disks = get_windows_disks(vm)
    logging.debug("Disks before:%s", before_disks)
    vm.destroy()

    xmlfile = utlv.create_hostdev_xml(pci_id)
    prepare_devices(pci_id, device_type)
    try:
        virsh.attach_device(domain_opt=vm.name,
                            file_opt=xmlfile,
                            flagstr="--config",
                            debug=True,
                            ignore_status=False)
        vm.start()
    except (error.CmdError, virt_vm.VMStartError), detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
Example 4
def test_win_fibre_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added disk in vm.
    """
    pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    device_type = "Fibre"
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")

    # Login vm to get disks before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_disks = get_windows_disks(vm)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    xmlfile = utlv.create_hostdev_xml(pci_id)
    prepare_devices(pci_id, device_type)
    try:
        virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        vm.start()
    except (error.CmdError, virt_vm.VMStartError), detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
Example 5
def add_hostdev_device(vm_name, pci):
    """
    Add hostdev device to VM

    :param vm_name: Name of VM
    :param pci: PCI ID of the device
    """
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    hostdev_dev = libvirt.create_hostdev_xml(pci)
    vmxml.add_device(hostdev_dev)
    vmxml.sync()
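For orientation, here is a minimal, self-contained sketch (not the actual avocado-vt implementation) of what a helper like create_hostdev_xml has to produce: a PCI id such as 0000:03:00.0 split into the domain/bus/slot/function attributes of a libvirt <hostdev> passthrough element. The PCI address used below is a hypothetical placeholder.

def pci_id_to_hostdev_xml(pci_id):
    """Simplified stand-in for create_hostdev_xml: turn a PCI id such as
    '0000:03:00.0' into a libvirt <hostdev> passthrough snippet."""
    domain, bus, slot_func = pci_id.split(":")
    slot, function = slot_func.split(".")
    return (
        "<hostdev mode='subsystem' type='pci' managed='yes'>\n"
        "  <source>\n"
        "    <address domain='0x%s' bus='0x%s' slot='0x%s' function='0x%s'/>\n"
        "  </source>\n"
        "</hostdev>" % (domain, bus, slot, function)
    )

# Hypothetical PCI address used only for illustration:
print(pci_id_to_hostdev_xml("0000:03:00.0"))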
Example 6
    def setup_save_restore_hostdev_device_with_teaming():
        logging.info("Start a VM with bridge iface and hostdev device.")
        libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface')
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        iface = interface.Interface("network")
        iface.xml = create_bridge_iface_xml(vm, mac_addr, params)
        vmxml.add_device(iface)

        hostdev_dev = libvirt.create_hostdev_xml(vf_pci,
                                                 teaming=hostdev_teaming_dict)
        vmxml.add_device(hostdev_dev)
        vmxml.sync()
        vm.start()
        utils_sriov.set_vf_mac(pf_name, mac_addr)
        vm.wait_for_serial_login(timeout=240).close()
Example 7
    def add_hostdev_device(vm_name, pf_pci, params):
        """
        Add a hostdev device to the guest

        :param vm_name: VM's name
        :param pf_pci: The PF's pci
        :param params: The parameters used
        """
        vf_pci = utils_sriov.get_vf_pci_id(pf_pci)
        hostdev_teaming_dict = params.get("hostdev_device_teaming_dict", '{}')
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        hostdev_dev = libvirt.create_hostdev_xml(vf_pci,
                                                 teaming=hostdev_teaming_dict)
        vmxml.add_device(hostdev_dev)
        vmxml.sync()
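As a point of reference, the teaming argument ultimately maps to a <teaming> child on the attached hostdev; a hedged illustration of that failover element follows. The PCI address and the ua-backup0 alias are placeholders, and the exact dict keys expected by the helper are an assumption.

# Hedged illustration of the transient (VF hostdev) half of a virtio
# failover pair; the PCI address and 'ua-backup0' alias are placeholders.
TEAMED_HOSTDEV_EXAMPLE = """
<hostdev mode='subsystem' type='pci' managed='yes'>
  <source>
    <address domain='0x0000' bus='0x03' slot='0x00' function='0x1'/>
  </source>
  <teaming type='transient' persistent='ua-backup0'/>
</hostdev>
"""
print(TEAMED_HOSTDEV_EXAMPLE)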
Example 8
def test_nic_single(vm, params):
    """
    Try to attach device in iommu group to vm with adding only this
    device to iommu group.

    1.Get original available interfaces before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    vm.destroy()

    xmlfile = utlv.create_hostdev_xml(pci_id)

    # Add only this device to corresponding iommu group
    prepare_devices(pci_id, device_type, only=True)
    try:
        virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        vm.start()
        # Start successfully, but not expected.
        vm.destroy(gracefully=False)
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("Start vm succesfully after attaching single "
                             "device to iommu group.Not expected.")
    except (error.CmdError, virt_vm.VMStartError), detail:
        logging.debug("Expected:New device does not work well: %s" % detail)
Example 9
def test_win_fibre_group(test, vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added disk in vm.
    """
    pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    device_type = "Fibre"
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")

    # Login vm to get disks before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_disks = get_windows_disks(vm)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    xmlfile = utlv.create_hostdev_xml(pci_id)
    prepare_devices(test, pci_id, device_type)
    try:
        virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("New device does not work well: %s" % detail)

    # Get devices in vm again after attaching
    after_disks = get_windows_disks(vm)
    logging.debug("Disks after:%s",
                  after_disks)
    new_disk = "".join(list(set(before_disks) ^ set(after_disks)))
    try:
        if not new_disk:
            test.fail("Cannot find attached host device in vm.")
        # TODO: Support to configure windows partition
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
Example 10
def test_win_fibre_group(test, vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added disk in vm.
    """
    pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    device_type = "Fibre"
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")

    # Login vm to get disks before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_disks = get_windows_disks(vm)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    xmlfile = utlv.create_hostdev_xml(pci_id)
    prepare_devices(test, pci_id, device_type)
    try:
        virsh.attach_device(vm.name, xmlfile.xml,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("New device does not work well: %s" % detail)

    # Get devices in vm again after attaching
    after_disks = get_windows_disks(vm)
    logging.debug("Disks after:%s",
                  after_disks)
    new_disk = "".join(list(set(before_disks) ^ set(after_disks)))
    try:
        if not new_disk:
            test.fail("Cannot find attached host device in vm.")
        # TODO: Support to configure windows partition
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
Example 11
def test_nic_single(vm, params):
    """
    Try to attach device in iommu group to vm with adding only this
    device to iommu group.

    1.Get original available interfaces before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s", before_pci_nics)
    logging.debug("Ethernet interfaces before:%s", before_interfaces)
    vm.destroy()

    # Add only this device to corresponding iommu group
    prepare_devices(pci_id, device_type, only=True)
    try:
        xmlfile = utlv.create_hostdev_xml(pci_id)
        virsh.attach_device(domain_opt=vm.name,
                            file_opt=xmlfile,
                            flagstr="--config",
                            debug=True,
                            ignore_status=False)
        vm.start()
        # Start successfully, but not expected.
        vm.destroy(gracefully=False)
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("Start vm succesfully after attaching single "
                             "device to iommu group.Not expected.")
    except (error.CmdError, virt_vm.VMStartError), detail:
        logging.debug("Expected:New device does not work well: %s" % detail)
Example 12
    def test_hotplug_hostdev_device_with_teaming():
        default_vf_mac = utils_sriov.get_vf_mac(pf_name)
        utils_sriov.set_vf_mac(pf_name, mac_addr)
        logging.info("Attach the bridge interface.")
        brg_iface_xml = create_bridge_iface_xml(vm, mac_addr, params)
        virsh.attach_device(vm_name,
                            brg_iface_xml,
                            debug=True,
                            ignore_status=False)
        # Wait for 10s before attaching the hostdev device
        time.sleep(10)
        logging.info("Attach the hostdev device.")
        hostdev_dev = libvirt.create_hostdev_xml(vf_pci,
                                                 teaming=hostdev_teaming_dict)
        virsh.attach_device(vm_name,
                            hostdev_dev.xml,
                            debug=True,
                            ignore_status=False)
        vm_session = vm.wait_for_serial_login(timeout=240)
        ping_ip = get_ping_dest(vm_session, mac_addr)
        check_vm_network_accessed(vm_session,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=True)
        logging.info("Detach the hostdev device.")
        virsh.detach_device(vm_name,
                            hostdev_dev.xml,
                            wait_remove_event=True,
                            debug=True,
                            ignore_status=False)
        logging.debug("Recover vf's mac to %s.", default_vf_mac)
        utils_sriov.set_vf_mac(pf_name, default_vf_mac)

        check_hostdev = vm_xml.VMXML.new_from_dumpxml(vm_name)\
            .devices.by_device_tag('hostdev')
        if check_hostdev:
            test.fail("The hostdev device exists after detaching %s." %
                      check_hostdev)
        libvirt_vfio.check_vfio_pci(vf_pci, status_error=True)
        check_vm_network_accessed(vm_session,
                                  2,
                                  ping_dest=ping_ip,
                                  tcpdump_iface=bridge_name,
                                  tcpdump_status_error=False)
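For completeness, the persistent (bridge/virtio) half built by create_bridge_iface_xml is expected to pair with the hostdev above through its alias; a hedged illustration follows, where the bridge name, MAC address and alias are placeholders.

# Hedged illustration of the persistent (bridge/virtio) half of the
# failover pair; 'br0', the MAC address and the alias are placeholders.
BRIDGE_IFACE_EXAMPLE = """
<interface type='bridge'>
  <mac address='52:54:00:aa:bb:cc'/>
  <source bridge='br0'/>
  <model type='virtio'/>
  <teaming type='persistent'/>
  <alias name='ua-backup0'/>
</interface>
"""
print(BRIDGE_IFACE_EXAMPLE)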
Example 13
def test_nic_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available interfaces before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s", before_pci_nics)
    logging.debug("Ethernet interfaces before:%s", before_interfaces)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name,
                                file_opt=xmlfile,
                                flagstr="--config",
                                debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (error.CmdError, virt_vm.VMStartError), detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
Example 14
def test_nic_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available interfaces before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (error.CmdError, virt_vm.VMStartError), detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
Example 15
def test_fibre_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added disk in vm.
    """
    pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    device_type = "Fibre"
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")
    disk_check = "yes" == params.get("fibre_pci_disk_check", "no")

    # Login vm to get disks before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_fibres = vm.get_pci_devices("Fibre")
    before_disks = vm.get_disks()
    logging.debug("Fibre PCI devices before:%s",
                  before_pci_fibres)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (process.CmdError, virt_vm.VMStartError), detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
Example 16
def run(test, params, env):
    """
    Test detach-device-alias command with
    --config, --live, --current

    1. Test hostdev device detach
    2. Test scsi controller device detach
    3. Test redirect device detach
    4. Test channel devices detach
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    detach_options = params.get("detach_alias_options", "")
    detach_check_xml = params.get("detach_check_xml")
    # hostdev device params
    hostdev_type = params.get("detach_hostdev_type", "")
    hostdev_managed = params.get("detach_hostdev_managed")
    # controller params
    contr_type = params.get("detach_controller_type")
    contr_model = params.get("detach_controller_mode")
    # redirdev params
    redir_type = params.get("detach_redirdev_type")
    redir_bus = params.get("detach_redirdev_bus")
    # channel params
    channel_type = params.get("detach_channel_type")
    channel_target = eval(params.get("detach_channel_target", "{}"))

    device_alias = "ua-" + str(uuid.uuid4())

    def get_usb_info():
        """
        Get local host usb info

        :return: usb vendor and product id
        """
        install_cmd = process.run("yum install usbutils* -y", shell=True)
        result = process.run("lsusb|awk '{print $6\":\"$2\":\"$4}'",
                             shell=True)
        if not result.exit_status:
            return result.stdout_text.rstrip(':')
        else:
            test.error("Can not get usb hub info for testing")

    # backup xml
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    if not vm.is_alive():
        vm.start()
    # wait for vm start successfully
    vm.wait_for_login()

    if hostdev_type:
        if hostdev_type in ["usb", "scsi"]:
            if hostdev_type == "usb":
                pci_id = get_usb_info()
            elif hostdev_type == "scsi":
                source_disk = libvirt.create_scsi_disk(scsi_option="",
                                                       scsi_size="8")
                pci_id = get_scsi_info(source_disk)
            device_xml = libvirt.create_hostdev_xml(pci_id=pci_id,
                                                    dev_type=hostdev_type,
                                                    managed=hostdev_managed,
                                                    alias=device_alias)
        else:
            test.error("Hostdev type %s not handled by test."
                       " Please check code." % hostdev_type)
    if contr_type:
        controllers = vmxml.get_controllers(contr_type)
        contr_index = len(controllers) + 1
        contr_dict = {
            "controller_type": contr_type,
            "controller_model": contr_model,
            "controller_index": contr_index,
            "contr_alias": device_alias
        }
        device_xml = libvirt.create_controller_xml(contr_dict)
        detach_check_xml = detach_check_xml % contr_index

    if redir_type:
        device_xml = libvirt.create_redirdev_xml(redir_type, redir_bus,
                                                 device_alias)

    if channel_type:
        channel_params = {'channel_type_name': channel_type}
        channel_params.update(channel_target)
        device_xml = libvirt.create_channel_xml(channel_params,
                                                device_alias).xml

    try:
        dump_option = ""
        if "--config" in detach_options:
            dump_option = "--inactive"

        # Attach xml to domain
        logging.info("Attach xml is %s" %
                     process.run("cat %s" % device_xml).stdout_text)
        virsh.attach_device(vm_name,
                            device_xml,
                            flagstr=detach_options,
                            debug=True,
                            ignore_status=False)
        domxml_at = virsh.dumpxml(vm_name, dump_option,
                                  debug=True).stdout.strip()
        if detach_check_xml not in domxml_at:
            test.error("Can not find %s in domxml after attach" %
                       detach_check_xml)

        # Detach xml with alias
        result = virsh.detach_device_alias(vm_name,
                                           device_alias,
                                           detach_options,
                                           debug=True)
        time.sleep(10)
        libvirt.check_exit_status(result)
        domxml_dt = virsh.dumpxml(vm_name, dump_option,
                                  debug=True).stdout.strip()
        if detach_check_xml in domxml_dt:
            test.fail("Still can find %s in domxml" % detach_check_xml)
    finally:
        backup_xml.sync()
        if hostdev_type == "scsi":
            libvirt.delete_scsi_disk()
Example 17
def test_fibre_group(test, vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added disk in vm.
    """
    pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    device_type = "Fibre"
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")
    disk_check = "yes" == params.get("fibre_pci_disk_check", "no")

    # Login vm to get disks before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_fibres = vm.get_pci_devices("Fibre")
    before_disks = vm.get_disks()
    logging.debug("Fibre PCI devices before:%s",
                  before_pci_fibres)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(test, pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(vm.name, xmlfile.xml,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("New device does not work well: %s" % detail)

    # VM shouldn't login under boot order 1
    if boot_order:
        try:
            boot_timeout = int(params.get("boot_timeout", 60))
            vm.wait_for_login(timeout=boot_timeout)
            test.fail("Login vm successfully, but not expected.")
        except remote.LoginTimeoutError:
            logging.debug("Expected failure.")
            vm.destroy(gracefully=False)
            cleanup_devices(pci_id, device_type)
            return

    # Get devices in vm again after attaching
    after_pci_fibres = vm.get_pci_devices("Fibre")
    after_disks = vm.get_disks()
    logging.debug("Fibre PCI devices after:%s",
                  after_pci_fibres)
    logging.debug("Disks after:%s",
                  after_disks)
    new_pci = "".join(list(set(before_pci_fibres) ^ set(after_pci_fibres)))
    new_disk = "".join(list(set(before_disks) ^ set(after_disks)))
    try:
        if not new_pci:
            test.fail("Cannot find attached host device in vm.")
        if disk_check:
            after_disks = vm.get_disks()
            if not new_disk:
                test.fail("Cannot find attached host device in vm.")
            # Config disk for new disk device
            format_disk(test, vm, new_disk, "10M")
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
Example 18
def test_nic_group(test, vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available interfaces before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")
    attach_iface = "yes" == params.get("attach_iface", "no")
    vm_status = params.get("vm_status", "")
    ext_opt = params.get("attach_options", "")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    if not vm_status == "running":
        vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(test, pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        elif attach_iface:
            options = "hostdev " + pci_id + " " + ext_opt
            virsh.attach_interface(vm.name, options,
                                   debug=True, ignore_status=False)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(vm.name, xmlfile.xml,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        logging.debug("VMXML with disk boot:\n%s", vmxml)
        iface_list = vmxml.get_iface_all()
        for node in list(iface_list.values()):
            if node.get('type') == 'hostdev':
                if "managed" in ext_opt:
                    if not node.get('managed') == "yes":
                        test.fail("Managed option can not"
                                  " be found in domain xml")
        if not vm.is_alive():
            vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("New device does not work well: %s" % detail)

    # VM shouldn't login under boot order 1
    if boot_order:
        try:
            boot_timeout = int(params.get("boot_timeout", 60))
            vm.wait_for_login(timeout=boot_timeout)
            test.fail("Login vm successfully, but not expected.")
        except remote.LoginTimeoutError:
            logging.debug("Expected failure.")
            vm.destroy(gracefully=False)
            cleanup_devices(pci_id, device_type)
            return

    # Get devices in vm again after attaching
    after_pci_nics = vm.get_pci_devices("Ethernet")
    after_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices after:%s",
                  after_pci_nics)
    logging.debug("Ethernet interfaces after:%s",
                  after_interfaces)
    new_pci = "".join(list(set(before_pci_nics) ^ set(after_pci_nics)))
    new_interface = "".join(list(set(before_interfaces) ^ set(after_interfaces)))
    try:
        if not new_pci or not new_interface:
            test.fail("Cannot find attached host device in vm.")
        # Config network for new interface
        config_network(test, vm, new_interface, nic_ip, nic_mask, nic_gateway)
        # Check interface
        execute_ttcp(test, vm, params)
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
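The attach_iface branch above builds an option string for virsh attach-interface; a hedged sketch of roughly the equivalent CLI call is shown below. The domain name and PCI address are placeholders, and the trailing flags simply stand in for the attach_options param.

from avocado.utils import process

# Roughly the CLI form of the attach_iface branch above; 'demo-vm' and the
# PCI address are placeholders, "--managed --config" stands in for the
# attach_options params value.
process.run("virsh attach-interface demo-vm hostdev 0000:03:00.0 "
            "--managed --config", shell=True, ignore_status=True)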
Example 19
def test_nic_fibre_group(test, vm, params):
    """
    Try to attach nic and fibre device at same time in iommu group to vm.

    1.Get original available interfaces&disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface&disk in vm.
    """
    nic_pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    fibre_pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    if nic_pci_id.count("EXAMPLE"):
        test.cancel("Invalid Ethernet pci device id.")
    if fibre_pci_id.count("EXAMPLE"):
        test.cancel("Invalid Fibre pci device id.")
    disk_check = "yes" == params.get("fibre_pci_disk_check", "no")

    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    before_pci_fibres = vm.get_pci_devices("Fibre")
    before_disks = vm.get_disks()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    logging.debug("Fibre PCI devices before:%s",
                  before_pci_fibres)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    prepare_devices(test, nic_pci_id, "Ethernet")
    prepare_devices(test, fibre_pci_id, "Fibre")
    try:
        nicxmlfile = utlv.create_hostdev_xml(nic_pci_id)
        virsh.attach_device(domain_opt=vm.name, file_opt=nicxmlfile,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        fibrexmlfile = utlv.create_hostdev_xml(fibre_pci_id)
        virsh.attach_device(domain_opt=vm.name, file_opt=fibrexmlfile,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(nic_pci_id, "Ethernet")
        cleanup_devices(fibre_pci_id, "Fibre")
        test.fail("New device does not work well: %s" % detail)

    # Get nic devices in vm again after attaching
    after_pci_nics = vm.get_pci_devices("Ethernet")
    after_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices after:%s",
                  after_pci_nics)
    logging.debug("Ethernet interfaces after:%s",
                  after_interfaces)
    # Get disk devices in vm again after attaching
    after_pci_fibres = vm.get_pci_devices("Fibre")
    after_disks = vm.get_disks()
    logging.debug("Fibre PCI devices after:%s",
                  after_pci_fibres)
    logging.debug("Disks after:%s",
                  after_disks)

    new_pci_nic = "".join(list(set(before_pci_nics) ^ set(after_pci_nics)))
    new_interface = "".join(list(set(before_interfaces) ^ set(after_interfaces)))
    new_pci_fibre = "".join(list(set(before_pci_fibres) ^ set(after_pci_fibres)))
    new_disk = "".join(list(set(before_disks) ^ set(after_disks)))

    try:
        if not new_pci_nic or not new_interface:
            test.fail("Cannot find attached host device in vm.")
        # Config network for new interface
        config_network(test, vm, new_interface, nic_ip, nic_mask, nic_gateway)
        # Check interface
        execute_ttcp(test, vm, params)

        if not new_pci_fibre:
            test.fail("Cannot find attached host device in vm.")
        if disk_check:
            if not new_disk:
                test.fail("Cannot find attached host device in vm.")
            # Config disk for new disk device
            format_disk(test, vm, new_disk, "10M")
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(nic_pci_id, "Ethernet")
        cleanup_devices(fibre_pci_id, "Fibre")
Example 20
def run(test, params, env):
    """
    Test only ppc hosts
    """
    if 'ppc64le' not in platform.machine().lower():
        test.cancel('This case is for ppc only.')
    vm_name = params.get('main_vm', 'EXAMPLE')
    status_error = 'yes' == params.get('status_error', 'no')
    case = params.get('case', '')
    error_msg = params.get('error_msg', '')

    # Backup vm xml
    bk_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

        # Assign address to panic device
        if case == 'panic_address':

            # Check if there is already a panic device on vm, remove it if true
            origin_panic = vmxml.get_devices('panic')
            if origin_panic:
                for dev in origin_panic:
                    vmxml.del_device(dev)
                vmxml.sync()

            # Create panic device to add to vm
            panic_dev = Panic()
            panic_dev.model = 'pseries'
            panic_dev.addr_type = 'isa'
            panic_dev.addr_iobase = '0x505'
            logging.debug(panic_dev)
            vmxml.add_device(panic_dev)
            if version_compare(7, 0, 0):
                cmd_result = virsh.define(vmxml.xml, debug=True)
            else:
                vmxml.sync()
                cmd_result = virsh.start(vm_name,
                                         debug=True,
                                         ignore_status=True)

        # Get Ethernet pci devices
        if case == 'unavail_pci_device':
            lspci = process.run('lspci|grep Ethernet',
                                shell=True).stdout_text.splitlines()
            pci_ids = [line.split()[0] for line in lspci]
            logging.debug(pci_ids)
            max_id = max([int(pci_id.split('.')[-1]) for pci_id in pci_ids])
            prefix = pci_ids[-1].split('.')[0]

            # Create fake pci ids
            for i in range(5):
                max_id += 1
                # function must be <= 7
                if max_id > 7:
                    break
                new_pci_id = '.'.join([prefix, str(max_id)])
                new_pci_xml = libvirt.create_hostdev_xml(new_pci_id)
                vmxml.add_device(new_pci_xml)
            vmxml.sync()
            logging.debug('Vm xml after adding unavailable pci devices: \n%s',
                          vmxml)

        # Check result if there's a result to check
        if 'cmd_result' in locals():
            libvirt.check_exit_status(cmd_result, status_error)
            if error_msg:
                libvirt.check_result(cmd_result, [error_msg])

    finally:
        # In case vm disappeared after test
        if case == 'unavail_pci_device':
            virsh.define(bk_xml.xml, debug=True)
        else:
            bk_xml.sync()
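For context, a hedged sketch of the panic element the 'panic_address' case assembles before defining or starting the guest; the exact serialization produced by the Panic() device class may differ.

# Hedged illustration of the pseries panic device with an explicit ISA
# address, as assembled by the 'panic_address' case above.
PANIC_EXAMPLE = """
<panic model='pseries'>
  <address type='isa' iobase='0x505'/>
</panic>
"""
print(PANIC_EXAMPLE)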
Example 21
def test_nic_single(test, vm, params):
    """
    Try to attach device in iommu group to vm with adding only this
    device to iommu group.

    1.Get original available interfaces before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    vm.destroy()

    # Add only this device to corresponding iommu group
    prepare_devices(test, pci_id, device_type, only=True)
    try:
        xmlfile = utlv.create_hostdev_xml(pci_id)
        virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        vm.start()
        # Start successfully, but not expected.
        vm.destroy(gracefully=False)
        cleanup_devices(pci_id, device_type)
        test.fail("Start vm succesfully after attaching single "
                  "device to iommu group.Not expected.")
    except (process.CmdError, virt_vm.VMStartError) as detail:
        logging.debug("Expected:New device does not work well: %s" % detail)

    # Reattaching all devices in iommu group
    prepare_devices(test, pci_id, device_type)
    try:
        vm.start()
    except Exception as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("Start vm failed after attaching all"
                  "device to iommu group:%s" % detail)

    # Get devices in vm again after attaching
    after_pci_nics = vm.get_pci_devices("Ethernet")
    after_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices after:%s",
                  after_pci_nics)
    logging.debug("Ethernet interfaces after:%s",
                  after_interfaces)
    new_pci = "".join(list(set(before_pci_nics) ^ set(after_pci_nics)))
    new_interface = "".join(list(set(before_interfaces) ^ set(after_interfaces)))
    try:
        if not new_pci or not new_interface:
            test.fail("Cannot find attached host device in vm.")
        # Config network for new interface
        config_network(test, vm, new_interface, nic_ip, nic_mask, nic_gateway)
        # Check interface
        execute_ttcp(test, vm, params)
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
Example 22
def run(test, params, env):
    """
    Test detach-device-alias command with
    --config, --live, --current

    1. Test hostdev device detach
    2. Test scsi controller device detach
    3. Test redirect device detach
    4. Test channel devices detach
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    detach_options = params.get("detach_alias_options", "")
    detach_check_xml = params.get("detach_check_xml")
    # hostdev device params
    hostdev_type = params.get("detach_hostdev_type", "")
    hostdev_managed = params.get("detach_hostdev_managed")
    # controller params
    contr_type = params.get("detach_controller_type")
    contr_model = params.get("detach_controller_mode")
    # redirdev params
    redir_type = params.get("detach_redirdev_type")
    redir_bus = params.get("detach_redirdev_bus")
    # channel params
    channel_type = params.get("detach_channel_type")
    channel_target = eval(params.get("detach_channel_target", "{}"))
    # watchdog params
    watchdog_type = params.get("detach_watchdog_type")
    watchdog_dict = eval(params.get('watchdog_dict', '{}'))

    device_alias = "ua-" + str(uuid.uuid4())

    def check_detached_xml_noexist():
        """
        Check detached xml does not exist in the guest dumpxml

        :return: True if it does not exist, False if still exists
        """
        domxml_dt = virsh.dumpxml(vm_name, dump_option).stdout_text.strip()
        if detach_check_xml not in domxml_dt:
            return True
        else:
            return False

    def get_usb_info():
        """
        Get local host usb info

        :return: usb vendor and product id
        """
        install_cmd = process.run("yum install usbutils* -y", shell=True)
        result = process.run("lsusb|awk '{print $6\":\"$2\":\"$4}'",
                             shell=True)
        if not result.exit_status:
            return result.stdout_text.rstrip(':')
        else:
            test.error("Can not get usb hub info for testing")

    # backup xml
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    device_xml = None
    attach_device = True

    if not vm.is_alive():
        vm.start()
    # wait for vm start successfully
    vm.wait_for_login()

    if hostdev_type:
        if hostdev_type in ["usb", "scsi"]:
            if hostdev_type == "usb":
                pci_id = get_usb_info()
            elif hostdev_type == "scsi":
                source_disk = libvirt.create_scsi_disk(scsi_option="",
                                                       scsi_size="8")
                pci_id = get_scsi_info(source_disk)
            device_xml = libvirt.create_hostdev_xml(pci_id=pci_id,
                                                    dev_type=hostdev_type,
                                                    managed=hostdev_managed,
                                                    alias=device_alias)
        else:
            test.error("Hostdev type %s not handled by test."
                       " Please check code." % hostdev_type)
    if contr_type:
        controllers = vmxml.get_controllers(contr_type)
        contr_index = len(controllers) + 1
        contr_dict = {
            "controller_type": contr_type,
            "controller_model": contr_model,
            "controller_index": contr_index,
            "contr_alias": device_alias
        }
        device_xml = libvirt.create_controller_xml(contr_dict)
        detach_check_xml = detach_check_xml % contr_index

    if redir_type:
        device_xml = libvirt.create_redirdev_xml(redir_type, redir_bus,
                                                 device_alias)

    if channel_type:
        channel_params = {'channel_type_name': channel_type}
        channel_params.update(channel_target)
        device_xml = libvirt.create_channel_xml(channel_params, device_alias)

    if watchdog_type:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.remove_all_device_by_type('watchdog')

        device_xml_file = Watchdog()
        device_xml_file.update({"alias": {"name": device_alias}})
        device_xml_file.setup_attrs(**watchdog_dict)
        vmxml.devices = vmxml.devices.append(device_xml_file)
        vmxml.xmltreefile.write()
        vmxml.sync()

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        logging.debug('The vmxml after attached watchdog is:%s', vmxml)

        if not vm.is_alive():
            vm.start()
        vm.wait_for_login().close()

        attach_device = False

    try:
        dump_option = ""
        wait_event = True
        if "--config" in detach_options:
            dump_option = "--inactive"
            wait_event = False

        # Attach xml to domain
        if attach_device:
            logging.info("Attach xml is %s" %
                         process.run("cat %s" % device_xml.xml).stdout_text)
            virsh.attach_device(vm_name,
                                device_xml.xml,
                                flagstr=detach_options,
                                debug=True,
                                ignore_status=False)

        domxml_at = virsh.dumpxml(vm_name, dump_option,
                                  debug=True).stdout.strip()
        if detach_check_xml not in domxml_at:
            test.error("Can not find %s in domxml after attach" %
                       detach_check_xml)

        # Detach xml with alias
        result = virsh.detach_device_alias(vm_name,
                                           device_alias,
                                           detach_options,
                                           wait_for_event=wait_event,
                                           event_timeout=20,
                                           debug=True)
        libvirt.check_exit_status(result)
        if not utils_misc.wait_for(
                check_detached_xml_noexist,
                60,
                step=2,
                text="Repeatedly search guest dumpxml with detached xml"):
            test.fail("Still can find %s in domxml" % detach_check_xml)
    finally:
        backup_xml.sync()
        if hostdev_type == "scsi":
            libvirt.delete_scsi_disk()
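For orientation, roughly the virsh CLI calls that the attach/detach wrappers above map to; the domain name, device XML path and alias below are placeholders.

from avocado.utils import process

# Placeholder domain name, device XML path and alias, for illustration only;
# roughly what virsh.attach_device() and virsh.detach_device_alias() wrap.
vm_name = "demo-vm"
device_alias = "ua-0000"
process.run("virsh attach-device %s /tmp/device.xml --config" % vm_name,
            shell=True, ignore_status=True)
process.run("virsh detach-device-alias %s %s --config"
            % (vm_name, device_alias), shell=True, ignore_status=True)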
Example 23
def test_nic_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available interfaces before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")
    attach_iface = "yes" == params.get("attach_iface", "no")
    vm_status = params.get("vm_status", "")
    ext_opt = params.get("attach_options", "")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    if not vm_status == "running":
        vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        elif attach_iface:
            options = "hostdev " + pci_id + " " + ext_opt
            virsh.attach_interface(vm.name, options,
                                   debug=True, ignore_status=False)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        logging.debug("VMXML with disk boot:\n%s", vmxml)
        iface_list = vmxml.get_iface_all()
        for node in iface_list.values():
            if node.get('type') == 'hostdev':
                if "managed" in ext_opt:
                    if not node.get('managed') == "yes":
                        raise error.TestFail("Managed option can not"
                                             " be found in domain xml")
        if not vm.is_alive():
            vm.start()
    except (process.CmdError, virt_vm.VMStartError), detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
Example 24
def test_fibre_group(test, vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added disk in vm.
    """
    pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    device_type = "Fibre"
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")
    disk_check = "yes" == params.get("fibre_pci_disk_check", "no")

    # Login vm to get disks before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_fibres = vm.get_pci_devices("Fibre")
    before_disks = vm.get_disks()
    logging.debug("Fibre PCI devices before:%s",
                  before_pci_fibres)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(test, pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("New device does not work well: %s" % detail)

    # VM shouldn't login under boot order 1
    if boot_order:
        try:
            boot_timeout = int(params.get("boot_timeout", 60))
            vm.wait_for_login(timeout=boot_timeout)
            test.fail("Login vm successfully, but not expected.")
        except remote.LoginTimeoutError:
            logging.debug("Expected failure.")
            vm.destroy(gracefully=False)
            cleanup_devices(pci_id, device_type)
            return

    # Get devices in vm again after attaching
    after_pci_fibres = vm.get_pci_devices("Fibre")
    after_disks = vm.get_disks()
    logging.debug("Fibre PCI devices after:%s",
                  after_pci_fibres)
    logging.debug("Disks after:%s",
                  after_disks)
    new_pci = "".join(list(set(before_pci_fibres) ^ set(after_pci_fibres)))
    new_disk = "".join(list(set(before_disks) ^ set(after_disks)))
    try:
        if not new_pci:
            test.fail("Cannot find attached host device in vm.")
        if disk_check:
            after_disks = vm.get_disks()
            if not new_disk:
                test.fail("Cannot find attached host device in vm.")
            # Config disk for new disk device
            format_disk(test, vm, new_disk, "10M")
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
Example 25
def test_nic_fibre_group(test, vm, params):
    """
    Try to attach nic and fibre device at same time in iommu group to vm.

    1.Get original available interfaces&disks before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface&disk in vm.
    """
    nic_pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    fibre_pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    if nic_pci_id.count("EXAMPLE"):
        test.cancel("Invalid Ethernet pci device id.")
    if fibre_pci_id.count("EXAMPLE"):
        test.cancel("Invalid Fibre pci device id.")
    disk_check = "yes" == params.get("fibre_pci_disk_check", "no")

    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    before_pci_fibres = vm.get_pci_devices("Fibre")
    before_disks = vm.get_disks()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    logging.debug("Fibre PCI devices before:%s",
                  before_pci_fibres)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    prepare_devices(test, nic_pci_id, "Ethernet")
    prepare_devices(test, fibre_pci_id, "Fibre")
    try:
        nicxmlfile = utlv.create_hostdev_xml(nic_pci_id)
        virsh.attach_device(vm.name, nicxmlfile.xml,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        fibrexmlfile = utlv.create_hostdev_xml(fibre_pci_id)
        virsh.attach_device(vm.name, fibrexmlfile.xml,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(nic_pci_id, "Ethernet")
        cleanup_devices(fibre_pci_id, "Fibre")
        test.fail("New device does not work well: %s" % detail)

    # Get nic devices in vm again after attaching
    after_pci_nics = vm.get_pci_devices("Ethernet")
    after_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices after:%s",
                  after_pci_nics)
    logging.debug("Ethernet interfaces after:%s",
                  after_interfaces)
    # Get disk devices in vm again after attaching
    after_pci_fibres = vm.get_pci_devices("Fibre")
    after_disks = vm.get_disks()
    logging.debug("Fibre PCI devices after:%s",
                  after_pci_fibres)
    logging.debug("Disks after:%s",
                  after_disks)

    new_pci_nic = "".join(list(set(before_pci_nics) ^ set(after_pci_nics)))
    new_interface = "".join(list(set(before_interfaces) ^ set(after_interfaces)))
    new_pci_fibre = "".join(list(set(before_pci_fibres) ^ set(after_pci_fibres)))
    new_disk = "".join(list(set(before_disks) ^ set(after_disks)))

    try:
        if not new_pci_nic or not new_interface:
            test.fail("Cannot find attached host device in vm.")
        # Config network for new interface
        config_network(test, vm, new_interface, nic_ip, nic_mask, nic_gateway)
        # Check interface
        execute_ttcp(test, vm, params)

        if not new_pci_fibre:
            test.fail("Cannot find attached host device in vm.")
        if disk_check:
            if not new_disk:
                test.fail("Cannot find the attached disk in vm.")
            # Config disk for new disk device
            format_disk(test, vm, new_disk, "10M")
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(nic_pci_id, "Ethernet")
        cleanup_devices(fibre_pci_id, "Fibre")
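
config_network() is another helper that lives outside these excerpts. The sketch
below is one way it could work, assuming the guest has iproute2 and that
vm.wait_for_login() returns a shell session; the shell commands and the
config_network_sketch name are assumptions, not the original code.

def config_network_sketch(test, vm, iface, ip_addr, netmask, gateway=None):
    """Bring up the hotplugged interface inside the guest and assign an IP."""
    if not ip_addr:
        test.cancel("No IP configured for the new interface.")
    session = vm.wait_for_login()
    try:
        session.cmd("ip link set %s up" % iface)
        # iproute2 accepts a dotted netmask after the slash, e.g. 255.255.0.0.
        session.cmd("ip addr add %s/%s dev %s" % (ip_addr, netmask, iface))
        if gateway:
            session.cmd("ip route replace default via %s dev %s"
                        % (gateway, iface))
        # Sanity check that the address really landed on the interface.
        output = session.cmd_output("ip addr show %s" % iface)
        if ip_addr not in output:
            test.fail("Failed to configure %s on %s: %s"
                      % (ip_addr, iface, output))
    finally:
        session.close()
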
Example n. 26
def test_nic_single(test, vm, params):
    """
    Try to attach device in iommu group to vm with adding only this
    device to iommu group.

    1.Get original available interfaces before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    vm.destroy()

    # Add only this device to corresponding iommu group
    prepare_devices(test, pci_id, device_type, only=True)
    try:
        xmlfile = utlv.create_hostdev_xml(pci_id)
        virsh.attach_device(vm.name, xmlfile.xml,
                            flagstr="--config", debug=True,
                            ignore_status=False)
        vm.start()
        # Start successfully, but not expected.
        vm.destroy(gracefully=False)
        cleanup_devices(pci_id, device_type)
        test.fail("Start vm succesfully after attaching single "
                  "device to iommu group.Not expected.")
    except (process.CmdError, virt_vm.VMStartError) as detail:
        logging.debug("Expected:New device does not work well: %s" % detail)

    # Reattaching all devices in iommu group
    prepare_devices(test, pci_id, device_type)
    try:
        vm.start()
    except Exception as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("Start vm failed after attaching all"
                  "device to iommu group:%s" % detail)

    # Get devices in vm again after attaching
    after_pci_nics = vm.get_pci_devices("Ethernet")
    after_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices after:%s",
                  after_pci_nics)
    logging.debug("Ethernet interfaces after:%s",
                  after_interfaces)
    new_pci = "".join(list(set(before_pci_nics) ^ set(after_pci_nics)))
    new_interface = "".join(list(set(before_interfaces) ^ set(after_interfaces)))
    try:
        if not new_pci or not new_interface:
            test.fail("Cannot find attached host device in vm.")
        # Config network for new interface
        config_network(test, vm, new_interface, nic_ip, nic_mask, nic_gateway)
        # Check interface
        execute_ttcp(test, vm, params)
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
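
format_disk(), used by the disk checks above, is also defined outside these
excerpts. The following is a hedged sketch of how a freshly passed-through
Fibre Channel disk could be exercised inside the guest; the device naming
(new_disk as a bare name such as 'sdb'), the parted/mkfs commands and the mount
point are illustrative assumptions rather than the original implementation.

def format_disk_sketch(test, vm, disk, size="10M"):
    """Create a small partition on the hotplugged disk, format and mount it."""
    session = vm.wait_for_login()
    try:
        dev = "/dev/%s" % disk          # assumes disk is a bare name like 'sdb'
        session.cmd("parted -s %s mklabel msdos" % dev)
        session.cmd("parted -s %s mkpart primary ext4 1M %s" % (dev, size))
        session.cmd("mkfs.ext4 %s1" % dev)
        session.cmd("mkdir -p /mnt/hostdev_test")
        session.cmd("mount %s1 /mnt/hostdev_test" % dev)
        # Write and read back a file to prove the disk is actually usable.
        session.cmd("echo hostdev > /mnt/hostdev_test/probe && "
                    "grep -q hostdev /mnt/hostdev_test/probe")
    except Exception as detail:
        test.fail("Operating on the new disk %s failed: %s" % (disk, detail))
    finally:
        session.cmd_status("umount /mnt/hostdev_test")
        session.close()
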
Example n. 27
def test_nic_group(test, vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available interfaces before attaching.
    2.Attaching hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")
    attach_iface = "yes" == params.get("attach_iface", "no")
    vm_status = params.get("vm_status", "")
    ext_opt = params.get("attach_options", "")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    if vm_status != "running":
        vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(test, pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        elif attach_iface:
            options = "hostdev " + pci_id + " " + ext_opt
            virsh.attach_interface(vm.name, options,
                                   debug=True, ignore_status=False)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        logging.debug("VMXML with disk boot:\n%s", vmxml)
        iface_list = vmxml.get_iface_all()
        for node in list(iface_list.values()):
            if node.get('type') == 'hostdev':
                if "managed" in ext_opt:
                    if node.get('managed') != "yes":
                        test.fail("Managed option cannot be "
                                  "found in domain xml")
        if not vm.is_alive():
            vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("New device does not work well: %s" % detail)

    # With the hostdev set as the boot device, the guest OS should not come up,
    # so the login attempt is expected to time out.
    if boot_order:
        try:
            boot_timeout = int(params.get("boot_timeout", 60))
            vm.wait_for_login(timeout=boot_timeout)
            test.fail("Login vm successfully, but not expected.")
        except remote.LoginTimeoutError:
            logging.debug("Expected failure.")
            vm.destroy(gracefully=False)
            cleanup_devices(pci_id, device_type)
            return

    # Get devices in vm again after attaching
    after_pci_nics = vm.get_pci_devices("Ethernet")
    after_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices after:%s",
                  after_pci_nics)
    logging.debug("Ethernet interfaces after:%s",
                  after_interfaces)
    new_pci = "".join(list(set(before_pci_nics) ^ set(after_pci_nics)))
    new_interface = "".join(list(set(before_interfaces) ^ set(after_interfaces)))
    try:
        if not new_pci or not new_interface:
            test.fail("Cannot find attached host device in vm.")
        # Config network for new interface
        config_network(test, vm, new_interface, nic_ip, nic_mask, nic_gateway)
        # Check interface
        execute_ttcp(test, vm, params)
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
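
execute_ttcp() (named after the classic ttcp throughput tool) is referenced
throughout these examples but not included. As a stand-in, a plain connectivity
check like the one below could confirm that the passed-through NIC actually
carries traffic; reusing the nic_pci_gateway parameter as the ping target is an
assumption, not what the original helper does.

def check_connectivity_sketch(test, vm, params):
    """Ping the configured gateway through the hotplugged interface."""
    target = params.get("nic_pci_gateway")
    if not target:
        test.cancel("No gateway configured to ping through the new NIC.")
    session = vm.wait_for_login()
    try:
        status, output = session.cmd_status_output("ping -c 4 %s" % target)
        if status:
            test.fail("Ping %s through the attached NIC failed: %s"
                      % (target, output))
        logging.debug("Connectivity check passed:\n%s", output)
    finally:
        session.close()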