Example #1
File: vfio.py Project: ldoktor/tp-libvirt
def test_nic_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available interfaces before attaching.
    2.Attach hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s", before_pci_nics)
    logging.debug("Ethernet interfaces before:%s", before_interfaces)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name,
                                file_opt=xmlfile,
                                flagstr="--config",
                                debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (error.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
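
Note: prepare_devices() and cleanup_devices() are called in every example but defined elsewhere in vfio.py. A minimal sketch of what they plausibly do, assuming the usual virsh nodedev-detach/nodedev-reattach flow for VFIO passthrough (the pci_id_to_nodedev helper and both bodies below are illustrative assumptions, not the project's code):

from virttest import virsh


def pci_id_to_nodedev(pci_id):
    # Hypothetical helper: map a PCI address such as "0000:03:00.0" to the
    # libvirt node-device name "pci_0000_03_00_0" that nodedev-detach expects.
    return "pci_" + pci_id.replace(":", "_").replace(".", "_")


def prepare_devices(pci_id, device_type):
    # Detach the device from its host driver so it can be bound to
    # vfio-pci and assigned to the guest (sketch, not the real helper).
    virsh.nodedev_detach(pci_id_to_nodedev(pci_id), debug=True)


def cleanup_devices(pci_id, device_type):
    # Give the device back to the host driver after the test (sketch).
    virsh.nodedev_reattach(pci_id_to_nodedev(pci_id), debug=True)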
Example #2
def test_nic_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available interfaces before attaching.
    2.Attach hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (error.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
Example #3
def test_fibre_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available disks before attaching.
    2.Attach hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added disk in vm.
    """
    pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    device_type = "Fibre"
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")
    disk_check = "yes" == params.get("fibre_pci_disk_check", "no")

    # Login vm to get disks before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_fibres = vm.get_pci_devices("Fibre")
    before_disks = vm.get_disks()
    logging.debug("Fibre PCI devices before:%s",
                  before_pci_fibres)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
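
For reference, utlv.create_hostdev_xml(pci_id) builds the <hostdev> element that virsh attach-device consumes. For a PCI function such as 0000:03:00.0 the attached XML looks roughly like the following (typical libvirt PCI-hostdev XML kept as a Python string; not the helper's verbatim output):

# Roughly the XML that create_hostdev_xml() feeds to "virsh attach-device"
# for PCI passthrough (illustrative; the helper's exact output may differ).
HOSTDEV_XML = """
<hostdev mode='subsystem' type='pci' managed='yes'>
  <source>
    <address domain='0x0000' bus='0x03' slot='0x00' function='0x0'/>
  </source>
</hostdev>
"""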
Example #4
def test_fibre_group(test, vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available disks before attaching.
    2.Attach hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added disk in vm.
    """
    pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    device_type = "Fibre"
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")
    disk_check = "yes" == params.get("fibre_pci_disk_check", "no")

    # Login vm to get disks before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_fibres = vm.get_pci_devices("Fibre")
    before_disks = vm.get_disks()
    logging.debug("Fibre PCI devices before:%s",
                  before_pci_fibres)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(test, pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(vm.name, xmlfile.xml,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("New device does not work well: %s" % detail)

    # VM shouldn't login under boot order 1
    if boot_order:
        try:
            boot_timeout = int(params.get("boot_timeout", 60))
            vm.wait_for_login(timeout=boot_timeout)
            test.fail("Login vm successfully, but not expected.")
        except remote.LoginTimeoutError:
            logging.debug("Expected failure.")
            vm.destroy(gracefully=False)
            cleanup_devices(pci_id, device_type)
            return

    # Get devices in vm again after attaching
    after_pci_fibres = vm.get_pci_devices("Fibre")
    after_disks = vm.get_disks()
    logging.debug("Fibre PCI devices after:%s",
                  after_pci_fibres)
    logging.debug("Disks after:%s",
                  after_disks)
    new_pci = "".join(list(set(before_pci_fibres) ^ set(after_pci_fibres)))
    new_disk = "".join(list(set(before_disks) ^ set(after_disks)))
    try:
        if not new_pci:
            test.fail("Cannot find attached host device in vm.")
        if disk_check:
            after_disks = vm.get_disks()
            if not new_disk:
                test.fail("Cannot find attached host device in vm.")
            # Config disk for new disk device
            format_disk(test, vm, new_disk, "10M")
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
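
format_disk(test, vm, new_disk, "10M") is also defined elsewhere in vfio.py. A plausible sketch, assuming it logs into the guest and creates a small filesystem on the new disk to prove the passed-through HBA works (the body is an assumption; vm.wait_for_login() and session.cmd() are standard virttest calls):

def format_disk(test, vm, disk, size):
    # Hypothetical reconstruction: partition the newly attached disk and
    # put a filesystem on it to verify the device is usable in the guest.
    session = vm.wait_for_login()
    try:
        session.cmd("parted -s /dev/%s mklabel msdos" % disk)
        session.cmd("parted -s /dev/%s mkpart primary 0 %s" % (disk, size))
        session.cmd("mkfs.ext4 /dev/%s1" % disk)
    except Exception as detail:
        test.fail("Formatting attached disk failed: %s" % detail)
    finally:
        session.close()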
Example #5
def test_nic_group(test, vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available interfaces before attaching.
    2.Attach hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")
    attach_iface = "yes" == params.get("attach_iface", "no")
    vm_status = params.get("vm_status", "")
    ext_opt = params.get("attach_options", "")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    if not vm_status == "running":
        vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(test, pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        elif attach_iface:
            options = "hostdev " + pci_id + " " + ext_opt
            virsh.attach_interface(vm.name, options,
                                   debug=True, ignore_status=False)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(vm.name, xmlfile.xml,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        logging.debug("VMXML with disk boot:\n%s", vmxml)
        iface_list = vmxml.get_iface_all()
        for node in list(iface_list.values()):
            if node.get('type') == 'hostdev':
                if "managed" in ext_opt:
                    if not node.get('managed') == "yes":
                        test.fail("Managed option can not"
                                  " be found in domain xml")
        if not vm.is_alive():
            vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("New device does not work well: %s" % detail)

    # VM shouldn't login under boot order 1
    if boot_order:
        try:
            boot_timeout = int(params.get("boot_timeout", 60))
            vm.wait_for_login(timeout=boot_timeout)
            test.fail("Login vm successfully, but not expected.")
        except remote.LoginTimeoutError:
            logging.debug("Expected failure.")
            vm.destroy(gracefully=False)
            cleanup_devices(pci_id, device_type)
            return

    # Get devices in vm again after attaching
    after_pci_nics = vm.get_pci_devices("Ethernet")
    after_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices after:%s",
                  after_pci_nics)
    logging.debug("Ethernet interfaces after:%s",
                  after_interfaces)
    new_pci = "".join(list(set(before_pci_nics) ^ set(after_pci_nics)))
    new_interface = "".join(list(set(before_interfaces) ^ set(after_interfaces)))
    try:
        if not new_pci or not new_interface:
            test.fail("Cannot find attached host device in vm.")
        # Config network for new interface
        config_network(test, vm, new_interface, nic_ip, nic_mask, nic_gateway)
        # Check interface
        execute_ttcp(test, vm, params)
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
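
config_network() and execute_ttcp() are likewise helpers from vfio.py that this listing does not show. A rough sketch of the network-configuration half, assuming a guest login session and classic ifconfig/route tooling inside the guest (illustrative only):

def config_network(test, vm, iface, ip, mask, gateway):
    # Hypothetical reconstruction: bring up the passed-through NIC in the
    # guest and assign the address from nic_pci_ip/nic_pci_mask/nic_pci_gateway.
    session = vm.wait_for_login()
    try:
        session.cmd("ifconfig %s %s netmask %s up" % (iface, ip, mask))
        if gateway:
            session.cmd("route add default gw %s dev %s" % (gateway, iface))
    except Exception as detail:
        test.fail("Configuring %s failed: %s" % (iface, detail))
    finally:
        session.close()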
Example #6
def test_nic_group(vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available interfaces before attaching.
    2.Attach hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        raise error.TestNAError("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")
    attach_iface = "yes" == params.get("attach_iface", "no")
    vm_status = params.get("vm_status", "")
    ext_opt = params.get("attach_options", "")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    if not vm_status == "running":
        vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        elif attach_iface:
            options = "hostdev " + pci_id + " " + ext_opt
            virsh.attach_interface(vm.name, options,
                                   debug=True, ignore_status=False)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        logging.debug("VMXML with disk boot:\n%s", vmxml)
        iface_list = vmxml.get_iface_all()
        for node in iface_list.values():
            if node.get('type') == 'hostdev':
                if "managed" in ext_opt:
                    if not node.get('managed') == "yes":
                        raise error.TestFail("Managed option can not"
                                             " be found in domain xml")
        if not vm.is_alive():
            vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        raise error.TestFail("New device does not work well: %s" % detail)
Example #7
def test_fibre_group(test, vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available disks before attaching.
    2.Attach hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added disk in vm.
    """
    pci_id = params.get("fibre_pci_id", "FIBRE:PCI.EXAMPLE")
    device_type = "Fibre"
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")
    disk_check = "yes" == params.get("fibre_pci_disk_check", "no")

    # Login vm to get disks before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_fibres = vm.get_pci_devices("Fibre")
    before_disks = vm.get_disks()
    logging.debug("Fibre PCI devices before:%s",
                  before_pci_fibres)
    logging.debug("Disks before:%s",
                  before_disks)
    vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(test, pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        logging.debug("VMXML with disk boot:\n%s", virsh.dumpxml(vm.name))
        vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("New device does not work well: %s" % detail)

    # VM shouldn't login under boot order 1
    if boot_order:
        try:
            boot_timeout = int(params.get("boot_timeout", 60))
            vm.wait_for_login(timeout=boot_timeout)
            test.fail("Login vm successfully, but not expected.")
        except remote.LoginTimeoutError:
            logging.debug("Expected failure.")
            vm.destroy(gracefully=False)
            cleanup_devices(pci_id, device_type)
            return

    # Get devices in vm again after attaching
    after_pci_fibres = vm.get_pci_devices("Fibre")
    after_disks = vm.get_disks()
    logging.debug("Fibre PCI devices after:%s",
                  after_pci_fibres)
    logging.debug("Disks after:%s",
                  after_disks)
    new_pci = "".join(list(set(before_pci_fibres) ^ set(after_pci_fibres)))
    new_disk = "".join(list(set(before_disks) ^ set(after_disks)))
    try:
        if not new_pci:
            test.fail("Cannot find attached host device in vm.")
        if disk_check:
            after_disks = vm.get_disks()
            if not new_disk:
                test.fail("Cannot find attached host device in vm.")
            # Config disk for new disk device
            format_disk(test, vm, new_disk, "10M")
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
Example #8
def test_nic_group(test, vm, params):
    """
    Try to attach device in iommu group to vm.

    1.Get original available interfaces before attaching.
    2.Attach hostdev in iommu group to vm.
    3.Start vm and check it.
    4.Check added interface in vm.
    """
    pci_id = params.get("nic_pci_id", "ETH:PCI.EXAMPLE")
    if pci_id.count("EXAMPLE"):
        test.cancel("Invalid pci device id.")

    device_type = "Ethernet"
    nic_ip = params.get("nic_pci_ip")
    nic_mask = params.get("nic_pci_mask", "255.255.0.0")
    nic_gateway = params.get("nic_pci_gateway")
    attach_iface = "yes" == params.get("attach_iface", "no")
    vm_status = params.get("vm_status", "")
    ext_opt = params.get("attach_options", "")

    # Login vm to get interfaces before attaching pci device.
    if vm.is_dead():
        vm.start()
    before_pci_nics = vm.get_pci_devices("Ethernet")
    before_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices before:%s",
                  before_pci_nics)
    logging.debug("Ethernet interfaces before:%s",
                  before_interfaces)
    if not vm_status == "running":
        vm.destroy()

    boot_order = int(params.get("boot_order", 0))
    prepare_devices(test, pci_id, device_type)
    try:
        if boot_order:
            utlv.alter_boot_order(vm.name, pci_id, boot_order)
        elif attach_iface:
            options = "hostdev " + pci_id + " " + ext_opt
            virsh.attach_interface(vm.name, options,
                                   debug=True, ignore_status=False)
        else:
            xmlfile = utlv.create_hostdev_xml(pci_id)
            virsh.attach_device(domain_opt=vm.name, file_opt=xmlfile,
                                flagstr="--config", debug=True,
                                ignore_status=False)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
        logging.debug("VMXML with disk boot:\n%s", vmxml)
        iface_list = vmxml.get_iface_all()
        for node in list(iface_list.values()):
            if node.get('type') == 'hostdev':
                if "managed" in ext_opt:
                    if not node.get('managed') == "yes":
                        test.fail("Managed option can not"
                                  " be found in domain xml")
        if not vm.is_alive():
            vm.start()
    except (process.CmdError, virt_vm.VMStartError) as detail:
        cleanup_devices(pci_id, device_type)
        test.fail("New device does not work well: %s" % detail)

    # VM shouldn't login under boot order 1
    if boot_order:
        try:
            boot_timeout = int(params.get("boot_timeout", 60))
            vm.wait_for_login(timeout=boot_timeout)
            test.fail("Login vm successfully, but not expected.")
        except remote.LoginTimeoutError:
            logging.debug("Expected failure.")
            vm.destroy(gracefully=False)
            cleanup_devices(pci_id, device_type)
            return

    # Get devices in vm again after attaching
    after_pci_nics = vm.get_pci_devices("Ethernet")
    after_interfaces = vm.get_interfaces()
    logging.debug("Ethernet PCI devices after:%s",
                  after_pci_nics)
    logging.debug("Ethernet interfaces after:%s",
                  after_interfaces)
    new_pci = "".join(list(set(before_pci_nics) ^ set(after_pci_nics)))
    new_interface = "".join(list(set(before_interfaces) ^ set(after_interfaces)))
    try:
        if not new_pci or not new_interface:
            test.fail("Cannot find attached host device in vm.")
        # Config network for new interface
        config_network(test, vm, new_interface, nic_ip, nic_mask, nic_gateway)
        # Check interface
        execute_ttcp(test, vm, params)
    finally:
        if vm.is_alive():
            vm.destroy()
        cleanup_devices(pci_id, device_type)
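
None of these functions is invoked directly: in tp-libvirt the module exposes a single run(test, params, env) entry point that the avocado-vt runner calls, and it dispatches to a test_* function based on a cfg parameter. A minimal sketch of such a dispatcher (env.get_vm() and params["main_vm"] are standard avocado-vt conventions; the "vfio_case" parameter name is a hypothetical stand-in):

def run(test, params, env):
    # Standard avocado-vt entry point: look up the VM under test and
    # dispatch to the requested case. "vfio_case" is a made-up knob.
    vm = env.get_vm(params["main_vm"])
    case = params.get("vfio_case", "nic_group")
    if case == "nic_group":
        test_nic_group(test, vm, params)
    elif case == "fibre_group":
        test_fibre_group(test, vm, params)
    else:
        test.cancel("Unknown vfio case: %s" % case)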