Example #1
def run_virsh_migrate_stress(test, params, env):
    """
    Test migration under stress.
    """
    vm_names = params.get("migration_vms").split()
    if len(vm_names) < 2:
        raise error.TestNAError("Provide enough vms for migration first.")

    # Migrated vms' instance
    vms = []
    for vm_name in vm_names:
        vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))

    load_vm_names = params.get("load_vms").split()
    # vms for load
    load_vms = []
    for vm_name in load_vm_names:
        load_vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))

    cpu = int(params.get("smp", 1))
    memory = int(params.get("mem")) * 1024
    stress_type = params.get("migration_stress_type")
    migration_type = params.get("migration_type")
    start_migration_vms = "yes" == params.get("start_migration_vms", "yes")
    dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")
    src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
    thread_timeout = int(params.get("thread_timeout", 120))
    remote_host = params.get("remote_ip")
    username = params.get("remote_user", "root")
    password = params.get("remote_pwd")
    prompt = params.get("shell_prompt", r"[\#\$]")

    for vm in vms:
        # Keep vm dead for edit
        if vm.is_alive():
            vm.destroy()
        set_cpu_memory(vm.name, cpu, memory)

    try:
        if start_migration_vms:
            for vm in vms:
                vm.start()
                vm.wait_for_login()
                # TODO: recover vm if start failed?
        # TODO: set ssh-autologin automatically
        do_migration(vms, src_uri, dest_uri, load_vms, stress_type,
                     migration_type, thread_timeout)
        # Check network of vms on destination
        for vm in vms:
            check_dest_vm_network(vm, remote_host, username, password, prompt)
    finally:
        for vm in vms:
            cleanup_dest(vm, None, dest_uri)
            if vm.is_alive():
                vm.destroy()
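
Note: this first example already shows the lifecycle skeleton that recurs throughout the examples below: build libvirt_vm.VM objects from params, keep a domain shut off while its definition is edited, start it and wait for login, and always clean up in a finally block. A minimal sketch of that skeleton follows; the virttest import path and the bare structure are assumptions distilled from the snippets on this page, not a verified implementation.

# Minimal lifecycle sketch, assuming a virt-test style `test`, `params` and
# `env` are available and that libvirt_vm comes from the virttest package.
from virttest import libvirt_vm

def exercise_vm(vm_name, test, params, env):
    vm = libvirt_vm.VM(vm_name, params, test.bindir, env.get("address_cache"))
    if vm.is_alive():
        vm.destroy()              # keep the domain shut off while it is edited
    try:
        vm.start()
        vm.wait_for_login()       # block until the guest accepts a login
        # ... exercise the feature under test here ...
    finally:
        if vm.is_alive():
            vm.destroy()          # always leave the host clean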
Example #2
def run(test, params, env):
    """
    Test svirt in virt-clone.
    """
    VIRT_CLONE = None
    try:
        VIRT_CLONE = utils_misc.find_command("virt-clone")
    except ValueError:
        raise error.TestNAError("No virt-clone command found.")

    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_virt_clone_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_virt_clone_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_virt_clone_vm_sec_model", "selinux")
    sec_label = params.get("svirt_virt_clone_vm_sec_label", None)
    sec_relabel = params.get("svirt_virt_clone_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Get variables about image.
    img_label = params.get('svirt_virt_clone_disk_label')
    # Label the disks of VM with img_label.
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        backup_labels_of_disks[disk_path] = utils_selinux.get_context_of_file(
            filename=disk_path)
        utils_selinux.set_context_of_file(filename=disk_path,
                                          context=img_label)
    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the context of the VM.
    vmxml.set_seclabel([sec_dict])
    vmxml.sync()

    clone_name = ("%s-clone" % vm.name)
    try:
        cmd = ("%s --original %s --name %s --auto-clone" %
               (VIRT_CLONE, vm.name, clone_name))
        cmd_result = utils.run(cmd, ignore_status=True)
        if cmd_result.exit_status:
            raise error.TestFail("Failed to execute virt-clone command. "
                                 "Detail: %s." % cmd_result)
    finally:
        # clean up
        for path, label in backup_labels_of_disks.items():
            utils_selinux.set_context_of_file(filename=path, context=label)
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if not virsh.domstate(clone_name).exit_status:
            libvirt_vm.VM(clone_name, params, None, None).remove_with_storage()
Example #3
def run(test, params, env):
    """
    Test VFIO function by attaching pci device into virtual machine.

    Make sure you know that this test needs to unbind the default driver
    of the whole iommu group to vfio-pci during the test, so all the
    devices in the tested iommu group should be free.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    test_type = params.get("test_type")

    if vm.is_alive():
        vm.destroy()
    new_vm_name = "%s_vfiotest" % vm.name
    if not utlv.define_new_vm(vm.name, new_vm_name):
        test.fail("Define new vm failed.")

    try:
        new_vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                               vm.address_cache)

        if "yes" == params.get("primary_boot", "no"):
            params['boot_order'] = 1
        else:
            params['boot_order'] = 0

        testcase = globals()["test_%s" % test_type]
        testcase(test, new_vm, params)
    finally:
        if new_vm.is_alive():
            new_vm.destroy()
        cleanup_vm(new_vm.name)
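
The globals()["test_%s" % test_type] lookup above is how this example routes a single run() entry point to one of several module-level test functions. A minimal, self-contained sketch of that dispatch pattern; the test_hotplug/test_coldplug names are hypothetical placeholders, not functions from the original module.

def test_hotplug(test, vm, params):
    pass  # one concrete test case (placeholder)

def test_coldplug(test, vm, params):
    pass  # another concrete test case (placeholder)

def dispatch(test, vm, params):
    test_type = params.get("test_type")          # e.g. "hotplug"
    testcase = globals()["test_%s" % test_type]  # resolves to test_hotplug
    testcase(test, vm, params)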
Example #4
    def __init__(self, test, params, env):
        self.vm = None
        self.test = test
        self.env = env
        self.params = params
        self.name = params.get('main_vm')
        self.os_version = params.get("os_version")
        self.os_type = params.get('os_type', 'linux')
        self.target = params.get('target')
        self.username = params.get('vm_user', 'root')
        self.password = params.get('vm_pwd')
        self.nic_index = params.get('nic_index', 0)
        self.export_name = params.get('export_name')
        self.delete_vm = 'yes' == params.get('vm_cleanup', 'yes')
        self.virsh_session_id = params.get("virsh_session_id")
        self.windows_root = params.get("windows_root", r"C:\WINDOWS")
        self.output_method = params.get("output_method")
        # Need to create the session after creating the instance
        self.session = None

        if self.name is None:
            logging.error("vm name does not exist")

        # libvirt is the default target
        if self.target == "libvirt" or self.target is None:
            self.vm = lvirt.VM(self.name, self.params, self.test.bindir,
                               self.env.get("address_cache"))
            self.pv = libvirt.PoolVolumeTest(test, params)
        elif self.target == "ovirt":
            self.vm = ovirt.VMManager(self.name, self.params, self.test.bindir,
                                      self.env.get("address_cache"))
        else:
            raise ValueError("Doesn't support %s target now" % self.target)
Example #5
 def __init__(self, vm_name, test, params, env):
     self.vm_name = vm_name
     self.vm = libvirt_vm.VM(vm_name, params, test.bindir,
                             env.get("address_cache"))
     self.virsh_instance = None
     self.migration_cmd = None
     self.virsh_migrate_timeout = int(params.get("virsh_migrate_timeout", 60))
     self.vm_ip = None
Example #6
File: libguestfs.py, Project: ypu/virt-test
 def __init__(self, vm, params):
     self.params = params
     self.oldvm = vm
     # Many commands will create a new vm or disk, so init it here
     self.newvm = libvirt_vm.VM("VTNEWVM", vm.params, vm.root_dir,
                                vm.address_cache)
     # Prepare the disk for the created vm
     self.indisk = get_primary_disk(vm)
     self.outdisk = None
Example #7
def run_virsh_migrate_stress(test, params, env):
    """
    Test migration under stress.
    """
    vm_names = params.get("migration_vms").split()
    if not vm_names:
        raise error.TestNAError("Provide your vms for migration first.")

    # Migrated vms' instance
    vms = []
    for vm_name in vm_names:
        vms.append(libvirt_vm.VM(vm_name, params, test.bindir,
                                 env.get("address_cache")))

    load_vm_names = params.get("load_vms").split()
    # vms for load
    load_vms = []
    for vm_name in load_vm_names:
        load_vms.append(libvirt_vm.VM(vm_name, params, test.bindir,
                        env.get("address_cache")))

    cpu = int(params.get("vm_cpu", 1))
    memory = int(params.get("vm_memory", 1048576))
    stress_type = params.get("migration_stress_type")
    dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")

    for vm in vms:
        # Keep vm dead for edit
        if vm.is_alive():
            vm.destroy()
        set_cpu_memory(vm.name, cpu, memory)

    try:
        for vm in vms:
            vm.start()
            vm.wait_for_login()
            # TODO: recover vm if start failed?
        # TODO: set ssh-autologin automatically
        do_migration(vms, dest_uri, load_vms, stress_type)
    finally:
        for vm in vms:
            cleanup_dest(vm, None, dest_uri)
            if vm.is_alive():
                vm.destroy()
Example #8
def run(test, params, env):
    """
    Test multi function of vm devices.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # To avoid a dirty environment after starting the new vm
    if vm.is_alive():
        vm.destroy()
    new_vm_name = params.get("mf_updated_new_vm")
    define_new_vm(vm_name, new_vm_name)
    # Create a new vm object for convenience
    new_vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    try:
        # Get parameters
        disk_count = int(params.get("mf_added_devices_count", 1))
        disk_size = params.get("mf_added_devices_size", "50M")
        status_error = "yes" == params.get("status_error", "no")
        target_list = []
        index = 0
        while len(target_list) < disk_count:
            target_dev = "vd%s" % chr(ord('a') + index)
            if not device_exists(new_vm, target_dev):
                target_list.append(target_dev)
            index += 1

        disk_params = {}
        disk_params['addr_multifunction'] = params.get("mf_addr_multifunction")
        disk_params['addr_type'] = params.get("mf_addr_type")
        # According to the disk count, increase target_dev vdb->vdc->vdd...
        for target_dev in target_list:
            result = attach_additional_device(new_vm_name, disk_size,
                                              target_dev, disk_params)
            if result.exit_status:
                if status_error:
                    # Attach fail is expected.
                    # TODO: check output of fail info
                    logging.info("Failed as expected.")
                    return
                else:
                    raise error.TestFail("Attach device %s failed." %
                                         target_dev)
            else:
                if status_error:
                    raise error.TestFail("Attach %s successfully "
                                         "but not expected." % target_dev)
        logging.debug("New VM XML:\n%s", new_vm.get_xml())

        # Login to check attached devices
        for target_dev in target_list:
            check_disk(new_vm, target_dev, disk_size)
    finally:
        if new_vm.is_alive():
            new_vm.destroy()
        cleanup_vm(new_vm_name)
Example #9
 def cleanup_fs():
     """
     Clean up file system by restart and shutdown normally
     """
     vm = libvirt_vm.VM(vm_name, params, test.bindir,
                        env.get('address_cache'))
     if vm.is_dead():
         vm.start()
     # Sleep 1 minute to wait for the guest to fully boot up
     time.sleep(60)
     vm.shutdown()
Example #10
 def wrapper(*args, **kwargs):
     vm = libvirt_vm.VM(vm_name, params, test.bindir,
                        env.get('address_cache'))
     if vm.is_dead():
         LOG.info('VM is down. Starting it now.')
         vm.start()
     session = vm.wait_for_login()
     kwargs['session'] = session
     kwargs['vm'] = vm
     func(*args, **kwargs)
     if session:
         session.close()
     vm.shutdown()
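
Example #10 shows only the inner wrapper; func, vm_name, params, test, env and LOG must come from an enclosing scope. One plausible way to assemble the full decorator is sketched below, with a try/finally added so the session and guest are cleaned up even if the wrapped function raises. The with_running_vm name and the surrounding structure are assumptions, not part of the original source.

import functools

def with_running_vm(func):
    # vm_name, params, test, env and LOG are assumed to exist in this scope.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        vm = libvirt_vm.VM(vm_name, params, test.bindir,
                           env.get('address_cache'))
        if vm.is_dead():
            LOG.info('VM is down. Starting it now.')
            vm.start()
        session = vm.wait_for_login()
        kwargs['session'] = session
        kwargs['vm'] = vm
        try:
            func(*args, **kwargs)
        finally:
            if session:
                session.close()
            vm.shutdown()
    return wrapper

# Usage: the wrapped test receives a live session and vm through kwargs.
@with_running_vm
def check_kernel(session=None, vm=None, **kwargs):
    session.cmd_status_output('uname -r')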
Example #11
 def check_boot():
     """
     Check if guest can boot up after configuration
     """
     try:
         vm = libvirt_vm.VM(vm_name, params, test.bindir,
                            env.get('address_cache'))
         if vm.is_alive():
             vm.shutdown()
         LOG.info('Booting up %s' % vm_name)
         vm.start()
         vm.wait_for_login()
         vm.shutdown()
         LOG.info('%s is down' % vm_name)
     except Exception as e:
         test.error('Bootup guest and login failed: %s' % str(e))
Example #12
def run(test, params, env):
    """
    Test migration with option --copy-storage-all or --copy-storage-inc.
    """
    vm = env.get_vm(params.get("migrate_main_vm"))
    disk_type = params.get("copy_storage_type", "file")
    if disk_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to GiB
    file_size = int(file_size) / 1073741824

    # Set the pool target using the source of the first disk
    params["precreation_pool_target"] = os.path.dirname(file_path)

    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
    remote_user = params.get("migrate_dest_user", "root")
    remote_passwd = params.get("migrate_dest_pwd")
    if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
        raise error.TestNAError("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Attach additional disks to vm if disk count is bigger than 1
    disks_count = int(params.get("added_disks_count", 1)) - 1
    if disks_count:
        new_vm_name = "%s_smtest" % vm.name
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    vms = [vm]
    if vm.is_dead():
        vm.start()

    # Check if image pre-creation is supported.
    support_precreation = False
    try:
        if qemu_test("drive-mirror") and qemu_test("nbd-server"):
            support_precreation = True
    except exceptions.TestError as e:
        logging.debug(e)
Example #13
def run(test, params, env):
    """
    DiskXML has an attribute named discard for fstrim operations.
    (Only supported since a specific libvirt version.)
    These are test cases for it:
    """
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    if vm.is_dead():
        vm.start()
        vm.wait_for_login()
    bf_disks = get_vm_disks(vm)
    vm.destroy()

    # Create a new vm for test, undefine it at last
    new_vm_name = "%s_discardtest" % vm.name
    if not utlv.define_new_vm(vm.name, new_vm_name):
        raise error.TestError("Define new vm failed.")
    try:
        new_vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                               vm.address_cache)
    except Exception as detail:
        raise error.TestError("Create new vm failed:%s" % detail)
Example #14
def run(test, params, env):
    """
    Test migration with option --copy-storage-all or --copy-storage-inc.
    """
    vm = env.get_vm(params.get("main_vm"))
    disk_type = params.get("copy_storage_type", "file")
    if disk_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to GiB
    file_size = int(file_size) / 1073741824

    remote_host = params.get("remote_ip", "REMOTE.EXAMPLE")
    local_host = params.get("local_ip", "LOCAL.EXAMPLE")
    remote_user = params.get("remote_user", "root")
    remote_passwd = params.get("remote_pwd", "PASSWORD.EXAMPLE")
    if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
        raise error.TestNAError("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Attach additional disks to vm if disk count is bigger than 1
    disks_count = int(params.get("added_disks_count", 1)) - 1
    if disks_count:
        new_vm_name = "%s_smtest" % vm.name
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    vms = [vm]
    if vm.is_dead():
        vm.start()

    # Abnormal parameters
    migrate_again = "yes" == params.get("migrate_again", "no")
    abnormal_type = params.get("abnormal_type")

    try:
        rdm = utils_test.RemoteDiskManager(params)
        vgname = params.get("sm_vg_name", "SMTEST")
        added_disks_list = []
        if disk_type == "lvm":
            target1 = target2 = ""  # For cleanup
            # Create volume group with iscsi
            # For local, target is a device name
            target1 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True,
                                                  emulated_image="emulated_iscsi1")
            lv_utils.vg_create(vgname, target1)
            logging.debug("Created VG %s", vgname)
            # For remote, target is real target name
            target2 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=False,
                                                  emulated_image="emulated_iscsi2")
            logging.debug("Created target: %s", target2)
            # Login on remote host
            remote_device = rdm.iscsi_login_setup(local_host, target2)
            if not rdm.create_vg(vgname, remote_device):
                raise error.TestError("Create VG %s on %s failed."
                                      % (vgname, remote_host))

        all_disks = utlv.attach_disks(vm, file_path, vgname, params)
        # Reserve for cleanup
        added_disks_list = all_disks.keys()
        all_disks[file_path] = file_size
        logging.debug("All disks need to be migrated:%s", all_disks)

        if abnormal_type == "occupied_disk":
            occupied_path = rdm.occupy_space(disk_type, file_size,
                                             file_path, vgname, timeout=600)
        if not abnormal_type == "not_exist_file":
            for disk, size in all_disks.items():
                if disk == file_path:
                    rdm.create_image("file", disk, size, None, None)
                else:
                    rdm.create_image(disk_type, disk, size, vgname,
                                     os.path.basename(disk))

        fail_flag = False
        try:
            logging.debug("Start migration...")
            copied_migration(vms, params)
            if migrate_again:
                fail_flag = True
                raise error.TestFail("Migration succeed, but not expected!")
            else:
                return
        except error.TestFail:
            if not migrate_again:
                raise

            if abnormal_type == "occupied_disk":
                rdm.remove_path(disk_type, occupied_path)
            elif abnormal_type == "not_exist_file":
                for disk, size in all_disks.items():
                    if disk == file_path:
                        rdm.create_image("file", disk, size, None, None)
                    else:
                        rdm.create_image(disk_type, disk, size, vgname,
                                         os.path.basename(disk))
            elif abnormal_type == "migration_interupted":
                params["thread_timeout"] = 120
            # Raise after cleanup
            if fail_flag:
                raise

            # Migrate it again to confirm failed reason
            copied_migration(vms, params)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        if disks_count and vm.name == new_vm_name:
            vm.undefine()
        for disk in added_disks_list:
            utlv.delete_local_disk(disk_type, disk)
            rdm.remove_path(disk_type, disk)
        rdm.remove_path("file", file_path)
        if disk_type == "lvm":
            rdm.remove_vg(vgname)
            rdm.iscsi_login_setup(local_host, target2, is_login=False)
            try:
                lv_utils.vg_remove(vgname)
            except:
                pass    # let it go to confirm cleanup iscsi device
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated_iscsi1")
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated_iscsi2")
Example #15
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
    first_nic = vmxml.get_devices(device_type="interface")[0]
    clean_clone_vm()

    # Clone guest to guest_clone
    dargs = {}
    dargs['files'] = [clone_image]
    dargs['ignore_status'] = True
    clone_result = lgf.virt_clone_cmd(vm_name, newname=vm_clone_name, **dargs)
    if clone_result.exit_status:
        raise error.TestFail("virt-clone failed:%s" %
                             clone_result.stderr.strip())
    try:
        # Modify the network to make sure the cloned guest can be logged into.
        modify_network(vm_clone_name, first_nic)
        new_vm = libvirt_vm.VM(vm_clone_name, params, vm.root_dir,
                               vm.address_cache)
        prepare_action(new_vm)
        test_image = clone_image

        if sysprep_type == "resize":
            img_size = image_info_dict['vsize'] / 1024 / 1024 / 1024
            resize_image = "%s_resize.img" % clone_image
            utils.run("qemu-img create -f raw %s %dG" % (resize_image,
                                                         (img_size + 1)))
            lgf.virt_resize_cmd(clone_image,
                                resize_image,
                                timeout=600,
                                debug=True)
            modify_source(vm_clone_name, target, resize_image)
            test_image = resize_image
        elif sysprep_type == "sparsify":
Example #16
def run(test, params, env):
    """
    1. prepare a fc lun with one of following methods
        - create a scsi pool&vol
        - create a vhba
    2. prepare the virtual disk xml, as one of following
        - source = /dev/disk/by-path
        - source = /dev/mapper/mpathX
        - source = pool&vol format
    3. start a vm with above disk as vdb
    4. create disk-only snapshot of vdb
    5. check the snapshot-list and snapshot file's existence
    6. mount vdb and touch file to it
    7. revert the snapshot and check file's existence
    8. delete snapshot
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "file")
    disk_size = params.get("disk_size", "100M")
    device_target = params.get("device_target", "vdb")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    vd_format = params.get("vd_format", "")
    snapshot_dir = params.get("snapshot_dir", "/tmp")
    snapshot_name = params.get("snapshot_name", "s1")
    pool_name = params.get("pool_name", "")
    pool_target = params.get("pool_target", "/dev")
    snapshot_disk_only = "yes" == params.get("snapshot_disk_only", "no")
    new_vhbas = []
    current_vhbas = []
    new_vhba = []
    path_to_blk = ""
    lun_sl = []
    new_disk = ""
    pool_ins = None
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    vm = env.get_vm(vm_name)
    online_hbas = utils_npiv.find_hbas("hba")
    if not online_hbas:
        raise exceptions.TestSkipError("There are no online hba cards.")
    old_mpath_conf = utils_npiv.prepare_multipath_conf(
        conf_path=mpath_conf_path, replace_existing=True)
    first_online_hba = online_hbas[0]
    old_vhbas = utils_npiv.find_hbas("vhba")
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    virt_vm = libvirt_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)
    old_disks = virt_vm.get_disks()

    if vm.is_alive():
        vm.destroy(gracefully=False)
    if pool_name:
        pool_ins = libvirt_storage.StoragePool()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    try:
        # prepare a fc lun
        if vd_format in ['scsi_vol']:
            if pool_ins.pool_exists(pool_name):
                raise exceptions.TestFail("Pool %s already exist" % pool_name)
            prepare_scsi_pool(pool_name, wwnn, wwpn, first_online_hba,
                              pool_target)
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT)
            if not utils_npiv.is_vhbas_added(old_vhbas):
                raise exceptions.TestFail("vHBA not successfully created")
            current_vhbas = utils_npiv.find_hbas("vhba")
            new_vhba = list(set(current_vhbas).difference(set(old_vhbas)))[0]
            new_vhbas.append(new_vhba)
            new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
            utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                timeout=_TIMEOUT)
            new_blks = get_blks_by_scsi(new_vhba_scsibus)
            if not new_blks:
                raise exceptions.TestFail(
                    "block device not found with scsi_%s" % new_vhba_scsibus)
            vol_list = utlv.get_vol_list(pool_name,
                                         vol_check=True,
                                         timeout=_TIMEOUT * 3)
            path_to_blk = list(vol_list.values())[0]
        elif vd_format in ['mpath', 'by_path']:
            old_mpath_devs = utils_npiv.find_mpath_devs()
            new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": first_online_hba,
                "scsi_wwnn": wwnn,
                "scsi_wwpn": wwpn
            })
            utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                                timeout=_TIMEOUT * 2)
            if not new_vhba:
                raise exceptions.TestFail("vHBA not successfully generated.")
            new_vhbas.append(new_vhba)
            if vd_format == "mpath":
                utils_misc.wait_for(
                    lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
                    timeout=_TIMEOUT * 5)
                if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
                    raise exceptions.TestFail("mpath dev not generated.")
                cur_mpath_devs = utils_npiv.find_mpath_devs()
                new_mpath_devs = list(
                    set(cur_mpath_devs).difference(set(old_mpath_devs)))
                logging.debug("The newly added mpath dev is: %s",
                              new_mpath_devs)
                path_to_blk = "/dev/mapper/" + new_mpath_devs[0]
            elif vd_format == "by_path":
                new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
                utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                                    timeout=_TIMEOUT)
                new_blks = get_blks_by_scsi(new_vhba_scsibus)
                if not new_blks:
                    raise exceptions.TestFail("blk dev not found with scsi_%s" %
                                              new_vhba_scsibus)
                first_blk_dev = new_blks[0]
                utils_misc.wait_for(lambda: get_symbols_by_blk(first_blk_dev),
                                    timeout=_TIMEOUT)
                lun_sl = get_symbols_by_blk(first_blk_dev)
                if not lun_sl:
                    raise exceptions.TestFail(
                        "lun symbolic links not found in "
                        "/dev/disk/by-path/ for %s" % first_blk_dev)
                lun_dev = lun_sl[0]
                path_to_blk = os.path.join(_BY_PATH_DIR, lun_dev)
            else:
                pass
        else:
            raise exceptions.TestSkipError("It is not specified how to pass "
                                           "the virtual disk to the VM.")

        # create qcow2 file on the block device with specified size
        if path_to_blk:
            cmd = "qemu-img create -f qcow2 %s %s" % (path_to_blk, disk_size)
            try:
                process.run(cmd, shell=True)
            except process.cmdError as detail:
                raise exceptions.TestFail(
                    "Failed to create qcow2 on blk dev: %s" % detail)
        else:
            raise exceptions.TestFail("Don't have a valid path to blk dev.")

        # prepare disk xml
        if "vol" in vd_format:
            vol_list = utlv.get_vol_list(pool_name,
                                         vol_check=True,
                                         timeout=_TIMEOUT * 3)
            test_vol = list(vol_list.keys())[0]
            disk_params = {
                'type_name': disk_type,
                'target_dev': device_target,
                'target_bus': target_bus,
                'source_pool': pool_name,
                'source_volume': test_vol,
                'driver_type': driver_type
            }
        else:
            disk_params = {
                'type_name': disk_type,
                'device': disk_device,
                'driver_name': driver_name,
                'driver_type': driver_type,
                'source_file': path_to_blk,
                'target_dev': device_target,
                'target_bus': target_bus
            }
        if vm.is_alive():
            vm.destroy(gracefully=False)
        new_disk = disk.Disk()
        new_disk.xml = open(utlv.create_disk_xml(disk_params)).read()

        # start vm with the virtual disk
        vmxml.devices = vmxml.devices.append(new_disk)
        vmxml.sync()
        vm.start()
        session = vm.wait_for_login()
        cur_disks = virt_vm.get_disks()
        mount_disk = "".join(list(set(old_disks) ^ set(cur_disks)))

        # mkfs and mount disk in vm, create a file on that disk.
        if not mount_disk:
            logging.debug("old_disk: %s, new_disk: %s", old_disks, cur_disks)
            raise exceptions.TestFail("No new disk found in vm.")
        mkfs_and_mount(session, mount_disk)
        create_file_in_vm(session, "/mnt/before_snapshot.txt", "before")

        # virsh snapshot-create-as vm s --disk-only --diskspec vda,file=path
        if snapshot_disk_only:
            vm_blks = list(vm.get_disk_devices().keys())
            options = "%s --disk-only" % snapshot_name
            for vm_blk in vm_blks:
                snapshot_file = snapshot_dir + "/" + vm_blk + "." + snapshot_name
                if os.path.exists(snapshot_file):
                    os.remove(snapshot_file)
                options = options + " --diskspec %s,file=%s" % (vm_blk,
                                                                snapshot_file)
        else:
            options = snapshot_name
        utlv.check_exit_status(virsh.snapshot_create_as(vm_name, options))

        # check virsh snapshot-list
        logging.debug("Running: snapshot-list %s", vm_name)
        snapshot_list = virsh.snapshot_list(vm_name)
        logging.debug("snapshot list is: %s", snapshot_list)
        if not snapshot_list:
            raise exceptions.TestFail("snapshots not found after creation.")

        # snapshot-revert doesn't support external snapshots for now, so
        # only check this with internal snapshots.
        if not snapshot_disk_only:
            create_file_in_vm(session, "/mnt/after_snapshot.txt", "after")
            logging.debug("Running: snapshot-revert %s %s", vm_name,
                          snapshot_name)
            utlv.check_exit_status(
                virsh.snapshot_revert(vm_name, snapshot_name))
            session = vm.wait_for_login()
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/after_snapshot.txt")
            logging.debug("file exist = %s, file content = %s", file_existence,
                          file_content)
            if file_existence:
                raise exceptions.TestFail("The file created "
                                          "after snapshot still exists.")
            file_existence, file_content = get_file_in_vm(
                session, "/mnt/before_snapshot.txt")
            logging.debug("file exist = %s, file content = %s", file_existence,
                          file_content)
            if ((not file_existence) or (file_content.strip() != "before")):
                raise exceptions.TestFail("The file created "
                                          "before snapshot is lost.")
        # delete snapshots
        # if diskonly, delete --metadata and remove files
        # if not diskonly, delete snapshot
        if snapshot_disk_only:
            options = "--metadata"
        else:
            options = ""
        for snap in snapshot_list:
            logging.debug("deleting snapshot %s with options %s", snap,
                          options)
            result = virsh.snapshot_delete(vm_name, snap, options)
            logging.debug("result of snapshot-delete: %s",
                          result.stdout.strip())
            if snapshot_disk_only:
                vm_blks = list(vm.get_disk_devices().keys())
                for vm_blk in vm_blks:
                    snapshot_file = snapshot_dir + "/" + vm_blk + "." + snap
                    if os.path.exists(snapshot_file):
                        os.remove(snapshot_file)
        snapshot_list = virsh.snapshot_list(vm_name)
        if snapshot_list:
            raise exceptions.TestFail("Snapshot not deleted: %s" %
                                      snapshot_list)
    except Exception as detail:
        raise exceptions.TestFail("exception happened: %s" % detail)
    finally:
        logging.debug("Start to clean up env...")
        vmxml_backup.sync()
        if pool_ins and pool_ins.pool_exists(pool_name):
            virsh.pool_destroy(pool_name)
        for new_vhba in new_vhbas:
            virsh.nodedev_destroy(new_vhba)
        utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
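
For reference, the --disk-only branch above builds the virsh snapshot-create-as options string with one --diskspec entry per guest disk. A quick illustration of what that string, and the resulting virsh invocation, looks like for a hypothetical guest with disks vda and vdb, snapshot_dir="/tmp" and snapshot_name="s1":

vm_blks = ["vda", "vdb"]                    # hypothetical disk targets
snapshot_dir, snapshot_name = "/tmp", "s1"
options = "%s --disk-only" % snapshot_name
for vm_blk in vm_blks:
    snapshot_file = "%s/%s.%s" % (snapshot_dir, vm_blk, snapshot_name)
    options += " --diskspec %s,file=%s" % (vm_blk, snapshot_file)
# options ends up as:
#   "s1 --disk-only --diskspec vda,file=/tmp/vda.s1 --diskspec vdb,file=/tmp/vdb.s1"
# which virsh.snapshot_create_as(vm_name, options) passes through as:
#   virsh snapshot-create-as <vm_name> s1 --disk-only \
#       --diskspec vda,file=/tmp/vda.s1 --diskspec vdb,file=/tmp/vdb.s1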
Example #17
def run(test, params, env):
    """
    Convert a local vm disk to local libvirt(KVM).
    """
    # VM info
    vm_name = params.get("v2v_vm")

    # Remote host parameters
    remote_hostname = params.get("remote_hostname")
    username = params.get("remote_username", "root")
    password = params.get("remote_passwd")
    remote_hypervisor = params.get("remote_hypervisor")

    # Local pool parameters
    pool_type = params.get("pool_type", "dir")
    block_device = params.get("block_device", "/dev/BLOCK/EXAMPLE")
    if pool_type in ['disk', 'partition', 'lvm'] and \
            re.search("EXAMPLE", block_device):
        raise error.TestNAError("Please set correct block device.")
    pool_name = params.get("pool_name", "v2v_test")
    target_path = params.get("target_path", "pool_path")
    vg_name = params.get("volume_group_name", "vg_v2v")
    local_tmp_path = params.get("local_tmp_path", data_dir.get_tmp_dir())
    # If target_path is not an absolute path, join it to the tmp dir
    if os.path.dirname(target_path) == "":
        target_path = os.path.join(data_dir.get_tmp_dir(), target_path)

    # dir pool needs an existing path
    if pool_type == "dir":
        if not os.path.exists(target_path):
            os.mkdir(target_path)

    # V2V parameters
    input = params.get("input_method")
    files = params.get("config_files")
    network = params.get("network", "default")

    # Options for result checking
    ignore_virtio = "yes" == params.get("ignore_virtio", "no")

    # Create remote uri for remote host
    # Remote virt-v2v uri's instance
    ruri = utils_v2v.Uri(remote_hypervisor)
    remote_uri = ruri.get_uri(remote_hostname)

    ssh_key.setup_ssh_key(remote_hostname,
                          user=username,
                          port=22,
                          password=password)

    # Check remote vms
    remote_vm = libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache"))
    remote_vm.connect_uri = remote_uri
    if not remote_vm.exists():
        raise error.TestFail("Couldn't find vm '%s' to be converted "
                             "on remote uri '%s'." % (vm_name, remote_uri))

    # Copy remote vm's disk to local and create xml file for it
    tmp_xml_file = copy_remote_vm(remote_vm, local_tmp_path, remote_hostname,
                                  username, password)

    # Local storage pool's instance
    lsp = libvirt_storage.StoragePool()
    try:
        # Create storage pool for test
        if pool_type == "dir":
            if not create_dir_pool(lsp, pool_name, target_path):
                raise error.TestFail("Prepare directory storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "partition":
            if not create_partition_pool(lsp, pool_name, block_device,
                                         target_path):
                raise error.TestFail("Prepare partition storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "lvm":
            if not create_lvm_pool(lsp, pool_name, block_device, vg_name,
                                   target_path):
                raise error.TestFail("Prepare lvm storage pool for "
                                     "virt-v2v failed.")
        elif pool_type == "disk":
            if not create_disk_pool(lsp, pool_name, block_device, target_path):
                raise error.TestFail("Prepare disk storage pool for "
                                     "virt-v2v failed.")

        # Maintain a single params for v2v to avoid duplicate parameters
        v2v_params = {
            "hostname": remote_hostname,
            "username": username,
            "password": password,
            "hypervisor": remote_hypervisor,
            "storage": pool_name,
            "network": network,
            "target": "libvirtxml",
            "vms": tmp_xml_file,
            "input": input,
            "files": files
        }
        try:
            result = utils_v2v.v2v_cmd(v2v_params)
            logging.debug(result)
        except error.CmdError as detail:
            raise error.TestFail("Virt v2v failed:\n%s" % str(detail))

        # v2v may be successful, but devices' driver may be not virtio
        error_info = []
        # Check v2v vm on local host
        # Update parameters for local hypervisor and vm
        params['vms'] = vm_name
        params['target'] = "libvirt"
        vm_check = utils_v2v.LinuxVMCheck(test, params, env)
        try:
            if not vm_check.is_disk_virtio():
                error_info.append("Error:disk type was not converted to "
                                  "virtio.")
            if not vm_check.is_net_virtio():
                error_info.append("Error:nic type was not converted to "
                                  "virtio.")
        except (remote.LoginError, virt_vm.VMError) as detail:
            error_info.append(str(detail))
Example #18
def run(test, params, env):
    """
    Test command: virsh pool-define-as; pool-build; pool-start; vol-create-as;
    vol-list; attach-device; login; mount and dd; reboot; check persistence;
    detach-device; pool-destroy; pool-undefine; clear lv,vg and pv;
    Create a libvirt npiv pool from a vHBA's device mapper device and create
    a volume out of the newly created pool and attach it to a guest, mount it,
    reboot and check persistence after reboot.

    Pre-requisite :
    Host should have a vHBA associated with a mpath device
    """

    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pool_type = params.get("pool_type", "dir")
    scsi_wwpn = params.get("scsi_wwpn", "WWPN_EXAMPLE")
    scsi_wwnn = params.get("scsi_wwnn", "WWNN_EXAMPLE")
    pool_target = params.get("pool_target", "pool_target")
    target_device = params.get("disk_target_dev", "vda")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    frmt = params.get("volume_format", 'qcow2')
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    mount_disk = None
    test_unit = None

    if 'EXAMPLE' in scsi_wwnn or 'EXAMPLE' in scsi_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    if not vm.is_alive():
        vm.start()
    pool_extra_args = ""
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)
    process.run("service multipathd restart", shell=True)
    online_hbas_list = nodedev.find_hbas("hba")
    first_online_hba = online_hbas_list[0]
    old_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the old mpath devs are: %s" % old_mpath_devs)
    new_vhbas = nodedev.nodedev_create_from_xml({
        "nodedev_parent": first_online_hba,
        "scsi_wwnn": scsi_wwnn,
        "scsi_wwpn": scsi_wwpn
    })
    logging.info("Newly created vHBA %s" % new_vhbas)
    process.run("service multipathd restart", shell=True)

    utils_misc.wait_for(lambda: nodedev.is_mpath_devs_added(old_mpath_devs),
                        timeout=5)

    cur_mpath_devs = nodedev.find_mpath_devs()
    logging.debug("the current mpath devs are: %s" % cur_mpath_devs)
    new_mpath_devs = list(set(cur_mpath_devs).difference(set(old_mpath_devs)))

    logging.debug("newly added mpath devs are: %s" % new_mpath_devs)
    if not new_mpath_devs:
        raise exceptions.TestFail("No newly added mpath devices found, "
                                  "please check your FC settings")
    source_dev = os.path.join('/dev/mapper/', new_mpath_devs[0])
    logging.debug("We are going to use \"%s\" as our source device"
                  " to create a logical pool" % source_dev)

    cmd = "parted %s mklabel msdos -s" % source_dev
    cmd_result = process.run(cmd, shell=True)
    utlv.check_exit_status(cmd_result)

    if source_dev:
        pool_extra_args = ' --source-dev %s' % source_dev
    else:
        raise exceptions.TestFail(
            "The vHBA %s does not have any associated mpath device" %
            new_vhbas)

    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    # if there are no online hba cards on the host, skip the case
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    try:
        cmd_result = virsh.pool_define_as(pool_name,
                                          pool_type,
                                          pool_target,
                                          pool_extra_args,
                                          ignore_status=True,
                                          debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_build(pool_name)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)

        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        cmd_result = virsh.vol_create_as(volume_name,
                                         pool_name,
                                         volume_capacity,
                                         allocation,
                                         frmt,
                                         "",
                                         debug=True)
        utlv.check_exit_status(cmd_result)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        logging.debug('Volume list %s', vol_list)
        for unit in vol_list:
            test_unit = vol_list[unit]
            logging.debug(unit)

        disk_params = {
            'type_name': "file",
            'target_dev': target_device,
            'target_bus': "virtio",
            'source_file': test_unit,
            'driver_name': "qemu",
            'driver_type': "raw"
        }
        disk_xml = utlv.create_disk_xml(disk_params)
        session = vm.wait_for_login()

        bf_disks = libvirt_vm.get_disks()

        attach_success = virsh.attach_device(vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()

        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        output = session.cmd_status_output('lsblk', timeout=15)
        logging.debug("%s", output[1])

        session.cmd_status_output('mkfs.ext4 %s' % mount_disk)
        if mount_disk:
            logging.info("%s", mount_disk)
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Can not find mounted device")
        session.close()

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("Mount output: %s", output[1])
        if '/mnt' in output[1]:
            logging.debug("Mount successful across reboot")
        session.close()

        status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(status)

    finally:
        vm.destroy(gracefully=False)
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if test_unit:
            process.system('lvremove -f %s' % test_unit, verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            nodedev.vhbas_cleanup(new_vhbas.split())
        process.run("service multipathd restart", shell=True)
Example #19
def run(test, params, env):
    """
    Test qemu-monitor-command blockjobs by migrating with option
    --copy-storage-all or --copy-storage-inc.
    """
    if not libvirt_version.version_compare(1, 0, 1):
        raise error.TestNAError("Blockjob functions - "
                                "complete, pause, resume are "
                                "not supported in the current libvirt version.")

    vm = env.get_vm(params.get("main_vm"))
    cpu_size = int(params.get("cpu_size", "1"))
    memory_size = int(params.get("memory_size", "1048576"))
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert to GiB
    file_size = int(file_size) / 1073741824
    image_format = utils_test.get_image_info(file_path)["format"]

    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    remote_user = params.get("remote_user", "root")
    remote_passwd = params.get("migrate_dest_pwd", "PASSWORD.EXAMPLE")
    if remote_host.count("EXAMPLE"):
        raise error.TestNAError("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Define a new vm with modified cpu/memory
    new_vm_name = "%s_blockjob" % vm.name
    if vm.is_alive():
        vm.destroy()
    utlv.define_new_vm(vm.name, new_vm_name)
    try:
        set_cpu_memory(new_vm_name, cpu_size, memory_size)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    except:  # Make sure created vm is cleaned up
        virsh.remove_domain(new_vm_name)
        raise

    rdm_params = {
        "remote_ip": remote_host,
        "remote_user": remote_user,
        "remote_pwd": remote_passwd
    }
    rdm = utils_test.RemoteDiskManager(rdm_params)

    try:
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
        vm.start()

        rdm.create_image("file",
                         file_path,
                         file_size,
                         None,
                         None,
                         img_frmt=image_format)

        logging.debug("Start migration...")
        copied_migration(vm, params, params.get("qmp_blockjob_type"),
                         primary_target)
    finally:
        # Recover created vm
        if vm.is_alive():
            vm.destroy()
        if vm.name == new_vm_name:
            vm.undefine()
        rdm.remove_path("file", file_path)
        rdm.runner.session.close()
Example #20
def run(test, params, env):
    """
    1. prepare a vHBA
    2. find the nodedev's lun name
    3. prepare the lun dev's xml
    4. start vm
    5. attach disk xml to vm
    6. login vm and check the disk
    7. detach the virtual disk
    8. check the blkdev gone
    9. cleanup env.
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    wwpn = params.get("wwpn", "WWPN_EXAMPLE")
    wwnn = params.get("wwnn", "WWNN_EXAMPLE")
    disk_device = params.get("disk_device", "disk")
    device_type = params.get("device_type", "file")
    device_target = params.get("device_target", "vdb")
    lun_dir_method = params.get("lun_dir_method", "by-path")
    driver_name = params.get("driver_name", "qemu")
    driver_type = params.get("driver_type", "raw")
    target_bus = params.get("target_bus", "virtio")
    readonly = params.get("readonly", "no")
    new_vhbas = []
    blk_dev = ""
    lun_dev = ""
    lun_dev_path = ""
    lun_sl = []
    new_disk = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)
    vm = env.get_vm(vm_name)
    try:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        old_disk_count = vmxml.get_disk_count(vm_name)
        # Prepare vHBA
        online_hbas = utils_npiv.find_hbas("hba")
        old_vhbas = utils_npiv.find_hbas("vhba")
        if not online_hbas:
            raise exceptions.TestSkipError("Host doesn't have online hba!")
        old_mpath_conf = utils_npiv.prepare_multipath_conf(
            conf_path=mpath_conf_path, replace_existing=True)
        first_online_hba = online_hbas[0]
        new_vhba = utils_npiv.nodedev_create_from_xml({
            "nodedev_parent": first_online_hba,
            "scsi_wwnn": wwnn,
            "scsi_wwpn": wwpn
        })
        utils_misc.wait_for(lambda: utils_npiv.is_vhbas_added(old_vhbas),
                            timeout=_TIMEOUT)
        if not utils_npiv.is_vhbas_added(old_vhbas):
            raise exceptions.TestFail("vHBA is not successfully created.")
        new_vhbas.append(new_vhba)
        new_vhba_scsibus = re.sub(r"\D", "", new_vhba)
        # Get the new block device generated by the new vHBA
        utils_misc.wait_for(lambda: get_blks_by_scsi(new_vhba_scsibus),
                            timeout=_TIMEOUT)
        blk_devs = get_blks_by_scsi(new_vhba_scsibus)
        if not blk_devs:
            raise exceptions.TestFail("block device not found with scsi_%s" %
                                      new_vhba_scsibus)
        first_blk_dev = blk_devs[0]
        # Get the symbolic link of the device in /dev/disk/by-[path|uuid|id]
        logging.debug("first_blk_dev = %s, lun_dir_method = %s" %
                      (first_blk_dev, lun_dir_method))
        utils_misc.wait_for(
            lambda: get_symbols_by_blk(first_blk_dev, lun_dir_method),
            timeout=_TIMEOUT)
        lun_sl = get_symbols_by_blk(first_blk_dev, lun_dir_method)
        if not lun_sl:
            raise exceptions.TestFail("lun symbolic links not found under "
                                      "/dev/disk/%s/ for block device %s." %
                                      (lun_dir_method, first_blk_dev))
        lun_dev = lun_sl[0]
        lun_dev_path = "/dev/disk/" + lun_dir_method + "/" + lun_dev
        # Prepare xml of virtual disk
        disk_params = {
            'type_name': device_type,
            'device': disk_device,
            'driver_name': driver_name,
            'driver_type': driver_type,
            'source_file': lun_dev_path,
            'target_dev': device_target,
            'target_bus': target_bus,
            'readonly': readonly
        }
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        if not vm.is_alive():
            vm.start()
        session = vm.wait_for_login()
        libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                               vm.address_cache)
        old_disks = libvirt_vm.get_disks()
        # Attach disk
        dev_attach_status = virsh.attach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(dev_attach_status)

        cur_disk_count = vmxml.get_disk_count(vm_name)
        cur_disks = libvirt_vm.get_disks()
        if cur_disk_count <= old_disk_count:
            raise exceptions.TestFail("Failed to attach disk: %s" %
                                      lun_disk_xml)
        new_disk = "".join(list(set(old_disks) ^ set(cur_disks)))
        logging.debug("Attached device in vm:%s", new_disk)
        # Check disk in VM
        output = session.cmd_status_output('mkfs.ext4 -F %s' % new_disk)
        logging.debug("mkfs.ext4 the disk in vm, result: %s", output[1])
        if not check_vm_disk(session, new_disk, readonly):
            raise exceptions.TestFail("Failed check the disk in vm.")
        session.cmd_status_output('umount %s' % new_disk)
        # Detach disk
        dev_detach_status = virsh.detach_device(vm_name, disk_xml, debug=True)
        utlv.check_exit_status(dev_detach_status)
        cur_disks = libvirt_vm.get_disks()
        if cur_disks != old_disks:
            raise exceptions.TestFail("Detach disk failed.")
        session.close()

    finally:
        utils_npiv.vhbas_cleanup(new_vhbas)
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync()
        process.system('service multipathd restart', verbose=True)
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
Example #21
def run(test, params, env):
    """
    Test migration under stress.
    """
    vm_names = params.get("migration_vms").split()
    if len(vm_names) < 2:
        raise error.TestNAError("Provide enough vms for migration first.")

    src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
    if src_uri.count('///') or src_uri.count('EXAMPLE'):
        raise error.TestNAError("The src_uri '%s' is invalid" % src_uri)

    dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        raise error.TestNAError("The dest_uri '%s' is invalid" % dest_uri)

    # Migrated vms' instance
    vms = []
    for vm_name in vm_names:
        vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))

    load_vm_names = params.get("load_vms").split()
    # vms for load
    load_vms = []
    for vm_name in load_vm_names:
        load_vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))
    params['load_vms'] = load_vms

    cpu = int(params.get("smp", 1))
    memory = int(params.get("mem")) * 1024
    stress_type = params.get("migration_stress_type")
    vm_bytes = params.get("stress_vm_bytes")
    stress_args = params.get("stress_args")
    migration_type = params.get("migration_type")
    start_migration_vms = "yes" == params.get("start_migration_vms", "yes")
    thread_timeout = int(params.get("thread_timeout", 120))
    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    prompt = params.get("shell_prompt", r"[\#\$]")

    # Set vm_bytes for start_cmd
    mem_total = utils_memory.memtotal()
    vm_reserved = len(vms) * memory
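    # "half" uses half of the host memory left after the vms' allocation (values are in KiB);
    # "shortage" requests 512 MiB (524288 KiB) more than what is left, to force memory pressure.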
    if vm_bytes == "half":
        vm_bytes = (mem_total - vm_reserved) // 2
    elif vm_bytes == "shortage":
        vm_bytes = mem_total - vm_reserved + 524288
    if vm_bytes is not None:
        params["stress_args"] = stress_args % vm_bytes

    for vm in vms:
        # Keep vm dead for edit
        if vm.is_alive():
            vm.destroy()
        set_cpu_memory(vm.name, cpu, memory)

    try:
        vm_ipaddr = {}
        if start_migration_vms:
            for vm in vms:
                vm.start()
                vm.wait_for_login()
                vm_ipaddr[vm.name] = vm.get_address()
                # TODO: recover vm if start failed?
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        do_stress_migration(vms, src_uri, dest_uri, stress_type,
                            migration_type, params, thread_timeout)
        # Check network of vms on destination
        if start_migration_vms and migration_type != "cross":
            for vm in vms:
                check_dest_vm_network(vm, vm_ipaddr[vm.name], remote_host,
                                      username, password, prompt)
    finally:
        logging.debug("Cleanup vms...")
        for vm_name in vm_names:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get("address_cache"))
            utlv.MigrationTest().cleanup_dest_vm(vm, None, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        env.clean_objects()
Example #22
def run(test, params, env):
    """
    Test command: virsh rename.

    The command can rename a domain.
    1.Prepare test environment.
    2.Perform virsh rename operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    # Get specific parameter value
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    domuuid = vm.get_uuid()
    vm_ref = params.get("domrename_vm_ref", "name")
    status_error = "yes" == params.get("status_error", "no")
    new_name = params.get("vm_new_name", "new")
    pre_vm_state = params.get("domrename_vm_state", "shutoff")
    domain_option = params.get("dom_opt", "")
    new_name_option = params.get("newname_opt", "")
    add_vm = "yes" == params.get("add_vm", "no")

    # Replace the variables
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    if new_name == "vm2_name":
        vm2_name = ("%s" % vm_name[:-1]) + "2"
        new_name = vm2_name

    # Build input params
    dom_param = ' '.join([domain_option, vm_ref])
    new_name_param = ' '.join([new_name_option, new_name])

    if vm.is_alive():
        vm.destroy()

    # Backup for recovery.
    vmxml_backup = libvirt_xml.vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    logging.debug("vm xml is %s", vmxml_backup)

    # Clone additional vms if needed
    if add_vm:
        try:
            utils_path.find_command("virt-clone")
        except utils_path.CmdNotFoundError:
            if not utils_package.package_install(["virt-install"]):
                test.cancel("Failed to install virt-install on host")
        ret_clone = utils_libguestfs.virt_clone_cmd(vm_name,
                                                    vm2_name,
                                                    True,
                                                    timeout=360)
        if ret_clone.exit_status:
            test.fail("Error occured when clone a second vm!")
        vm2 = libvirt_vm.VM(vm2_name, vm.params, vm.root_dir, vm.address_cache)
        virsh.dom_list("--name --all", debug=True)

    # Create object instance for renamed domain
    new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache)

    # Prepare vm state
    if pre_vm_state not in ["shutoff", "autostart"]:
        vm.start()
        if pre_vm_state == "paused":
            vm.pause()
            logging.debug("Domain state is now: %s", vm.state())
        elif pre_vm_state == "managed_saved":
            vm.managedsave()
        elif pre_vm_state == "with_snapshot":
            virsh.snapshot_create_as(vm_name, "snap1 --disk-only", debug=True)
            vm.destroy(gracefully=False)

    try:
        if pre_vm_state == "autostart":
            virsh.autostart(dom_param, "", debug=True)
            virsh.dom_list("--all --autostart", debug=True)
            logging.debug("files under '/etc/libvirt/qemu/autostart/' are %s",
                          os.listdir('/etc/libvirt/qemu/autostart/'))

        result = virsh.domrename(dom_param,
                                 new_name_param,
                                 ignore_status=True,
                                 debug=True)

        # Raise unexpected pass or fail
        libvirt.check_exit_status(result, status_error)

        # Return expected failure for negative tests
        if status_error:
            logging.debug("Expected failure: %s", result.stderr)
            return

        # Checkpoints after domrename succeed
        else:
            list_ret = virsh.dom_list("--name --all",
                                      debug=True).stdout.strip().splitlines()
            domname_ret = virsh.domname(domuuid, debug=True).stdout.strip()
            if new_name not in list_ret or vm_name in list_ret:
                test.fail("New name does not affect in virsh list")
            if domname_ret != new_name:
                test.fail(
                    "New name is not reflected in 'virsh domname <uuid>' output")

            if pre_vm_state != "autostart":
                # Try to start vm with the new name
                new_vm.start()
            else:
                utils_libvirtd.libvirtd_restart()
                list_autostart = virsh.dom_list("--autostart",
                                                debug=True).stdout
                logging.debug(
                    "files under '/etc/libvirt/qemu/autostart/' are %s",
                    os.listdir('/etc/libvirt/qemu/autostart/'))
                process.run("file /etc/libvirt/qemu/autostart/%s.xml" %
                            vm_name,
                            verbose=True)
                if new_name not in list_autostart:
                    test.fail(
                        "Domain isn't autostarted after restart libvirtd,"
                        "or becomes a never 'autostart' one.")

    finally:
        # Remove additional vms
        if add_vm and vm2.exists() and result.exit_status:
            virsh.remove_domain(vm2_name, "--remove-all-storage")

        # Undefine newly renamed domain
        if new_vm.exists():
            if new_vm.is_alive():
                new_vm.destroy(gracefully=False)
            new_vm.undefine()

        # Recover domain state
        if pre_vm_state != "shutoff":
            if pre_vm_state == "with_snapshot":
                libvirt.clean_up_snapshots(vm_name)
            else:
                if pre_vm_state == "managed_saved":
                    vm.start()
                vm.destroy(gracefully=False)

        # Restore VM
        vmxml_backup.sync()
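Example #23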
def run(test, params, env):
    """
    Test migration with option --copy-storage-all or --copy-storage-inc.
    """
    vm = env.get_vm(params.get("migrate_main_vm"))
    disk_type = params.get("copy_storage_type", "file")
    if disk_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "lvm"
    cp_mig = None
    primary_target = vm.get_first_disk_devices()["target"]
    file_path, file_size = vm.get_device_size(primary_target)
    # Convert bytes to GiB
    file_size = int(file_size) // 1073741824

    # Set the pool target using the source of the first disk
    params["precreation_pool_target"] = os.path.dirname(file_path)

    remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
    local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
    remote_user = params.get("migrate_dest_user", "root")
    remote_passwd = params.get("migrate_dest_pwd")
    if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
        test.cancel("Config remote or local host first.")
    # Config ssh autologin for it
    ssh_key.setup_ssh_key(remote_host, remote_user, remote_passwd, port=22)

    # Attach additional disks to the vm if the requested disk count is greater than 1
    disks_count = int(params.get("added_disks_count", 1)) - 1
    if disks_count:
        new_vm_name = "%s_smtest" % vm.name
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    vms = [vm]
    vms_ip = {}
    for vm in vms:
        if vm.is_dead():
            vm.start()
        vm.wait_for_login().close()
        vms_ip[vm.name] = vm.get_address()
    # Check if image pre-creation is supported.
    support_precreation = False
    try:
        if qemu_test("drive-mirror") and qemu_test("nbd-server"):
            support_precreation = True
    except exceptions.TestError as e:
        logging.debug(e)
    params["support_precreation"] = support_precreation
    # Abnormal parameters
    migrate_again = "yes" == params.get("migrate_again", "no")
    abnormal_type = params.get("abnormal_type")
    added_disks_list = []
    rdm = None
    src_libvirt_file = None
    try:
        rdm = utils_test.RemoteDiskManager(params)
        vgname = params.get("sm_vg_name", "SMTEST")
        pool_created = False

        if disk_type == "lvm":
            target1 = target2 = ""  # For cleanup
            # Create volume group with iscsi
            # For local, target is a device name
            target1 = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=True,
                                                  emulated_image="emulated-iscsi1")
            lv_utils.vg_create(vgname, target1)
            logging.debug("Created VG %s", vgname)
            # For remote, target is real target name
            target2, _ = utlv.setup_or_cleanup_iscsi(is_setup=True, is_login=False,
                                                     emulated_image="emulated-iscsi2")
            logging.debug("Created target: %s", target2)
            # Login on remote host
            remote_device = rdm.iscsi_login_setup(local_host, target2)
            if not rdm.create_vg(vgname, remote_device):
                test.error("Create VG %s on %s failed."
                           % (vgname, remote_host))

        all_disks = utlv.attach_disks(vm, file_path, vgname, params)
        # Reserve for cleanup
        added_disks_list = list(all_disks.keys())
        all_disks[file_path] = file_size
        logging.debug("All disks need to be migrated:%s", all_disks)

        if abnormal_type == "occupied_disk":
            occupied_path = rdm.occupy_space(disk_type, file_size,
                                             file_path, vgname, timeout=600)
        if abnormal_type != "not_exist_file":
            for disk, size in list(all_disks.items()):
                if disk == file_path:
                    if support_precreation:
                        pool_created = create_destroy_pool_on_remote(test, "create",
                                                                     params)
                        if not pool_created:
                            test.error("Create pool on remote " +
                                       "host '%s' failed."
                                       % remote_host)
                    else:
                        rdm.create_image("file", disk, size, None,
                                         None, img_frmt='qcow2')
                else:
                    sparse = disk_type != 'lvm'
                    rdm.create_image(disk_type, disk, size, vgname,
                                     os.path.basename(disk),
                                     sparse=sparse, timeout=120)

        fail_flag = False
        remove_dict = {
            "do_search": '{"%s": "ssh:/"}' % params.get("migrate_dest_uri")}
        src_libvirt_file = libvirt_config.remove_key_for_modular_daemon(
            remove_dict)
        try:
            logging.debug("Start migration...")
            cp_mig = copied_migration(test, vms, vms_ip, params)
            # Check the new disk can be working well with I/O after migration
            utils_disk.check_remote_vm_disks({'server_ip': remote_host,
                                              'server_user': remote_user,
                                              'server_pwd': remote_passwd,
                                              'vm_ip': vms_ip[vm.name],
                                              'vm_pwd': params.get('password')})

            if migrate_again:
                fail_flag = True
                test.fail("Migration succeed, but not expected!")
            else:
                return
        except exceptions.TestFail:
            if not migrate_again:
                raise

            if abnormal_type == "occupied_disk":
                rdm.remove_path(disk_type, occupied_path)
            elif abnormal_type == "not_exist_file":
                for disk, size in list(all_disks.items()):
                    if disk == file_path:
                        rdm.create_image("file", disk, size, None,
                                         None, img_frmt='qcow2')
                    else:
                        rdm.create_image(disk_type, disk, size, vgname,
                                         os.path.basename(disk))
            elif abnormal_type == "migration_interupted":
                params["thread_timeout"] = 120
            # Raise after cleanup
            if fail_flag:
                raise

            # Migrate it again to confirm failed reason
            params["status_error"] = "no"
            cp_mig = copied_migration(test, vms, vms_ip, params)
    finally:
        # Recover created vm
        if cp_mig:
            cp_mig.cleanup_dest_vm(vm, None, params.get("migrate_dest_uri"))
        if vm.is_alive():
            vm.destroy()

        if src_libvirt_file:
            src_libvirt_file.restore()

        if disks_count and vm.name == new_vm_name:
            vm.undefine()
        for disk in added_disks_list:
            if disk_type == 'file':
                utlv.delete_local_disk(disk_type, disk)
            else:
                lvname = os.path.basename(disk)
                utlv.delete_local_disk(disk_type, disk, vgname, lvname)
            rdm.remove_path(disk_type, disk)
        rdm.remove_path("file", file_path)
        if pool_created:
            pool_destroyed = create_destroy_pool_on_remote(test, "destroy", params)
            if not pool_destroyed:
                test.error("Destroy pool on remote host '%s' failed."
                           % remote_host)

        if disk_type == "lvm":
            rdm.remove_vg(vgname)
            rdm.iscsi_login_setup(local_host, target2, is_login=False)
            try:
                lv_utils.vg_remove(vgname)
            except Exception:
                pass    # let it go to confirm cleanup iscsi device
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated-iscsi1")
            utlv.setup_or_cleanup_iscsi(is_setup=False,
                                        emulated_image="emulated-iscsi2")
Example #24
def run(test, params, env):
    """
    DiskXML has an attribute named discard for fstrim operations.
    (Only supported on sufficiently new libvirt versions.)
    These are test cases for it:
    """
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    if vm.is_dead():
        vm.start()
        vm.wait_for_login()
    bf_disks = get_vm_disks(vm)
    vm.destroy()

    # Create a new vm for test, undefine it at last
    new_vm_name = "%s_discardtest" % vm.name
    if not utlv.define_new_vm(vm.name, new_vm_name):
        test.error("Define new vm failed.")
    try:
        new_vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                               vm.address_cache)
    except Exception as detail:
        test.error("Create new vm failed:%s" % detail)

    disk_type = params.get("disk_type", "file")
    discard_device = params.get("discard_device", "/DEV/EXAMPLE")
    fstrim_type = params.get("fstrim_type", "fstrim_cmd")
    try:
        if disk_type == "file":
            device_dir = data_dir.get_tmp_dir()
            params["image_name"] = "discard_test"
            params["image_format"] = "raw"
            params["image_size"] = "1G"
            qs = qemu_storage.QemuImg(params, device_dir, "")
            device_path, _ = qs.create(params)
        else:
            if not discard_device.count("/DEV/EXAMPLE"):
                create_iscsi = False
            else:
                create_iscsi = True
                discard_device = create_iscsi_device(test)
            device_path = create_volume(discard_device)

        discard_type = params.get("discard_type", "ignore")
        target_bus = params.get("storage_target_bus", "virtio")
        target_dev = params.get("storage_target_dev", "vdb")
        status_error = "yes" == params.get("status_error", "no")
        xmlfile = create_disk_xml(disk_type, device_path, discard_type,
                                  target_dev, target_bus)
        virsh.attach_device(new_vm_name, xmlfile,
                            flagstr="--persistent", ignore_status=False)
        if fstrim_type == "qemu-guest-agent":
            channelfile = prepare_channel_xml(new_vm_name)
            virsh.attach_device(new_vm_name, channelfile,
                                flagstr="--persistent", ignore_status=False)
        logging.debug("New VMXML:\n%s", virsh.dumpxml(new_vm_name))

        # Verify attached device in vm
        if new_vm.is_dead():
            new_vm.start()
        new_vm.wait_for_login()
        af_disks = get_vm_disks(new_vm)
        logging.debug("\nBefore:%s\nAfter:%s", bf_disks, af_disks)
        # Get new disk name in vm
        new_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not new_disk:
            test.fail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", new_disk)

        # Occupy space on the new disk
        frmt_type = params.get("discard_format", "ext4")
        if fstrim_type == "mount_with_discard":
            mount_options = "discard"
        else:
            mount_options = None

        bf_cpy = get_disk_capacity(test, disk_type, imagefile=device_path,
                                   lvname="lvthin")
        logging.debug("Disk size before using:%s", bf_cpy)
        occupy_disk(new_vm, new_disk, "500", frmt_type, mount_options)
        bf_fstrim_cpy = get_disk_capacity(test, disk_type, imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("Disk size after used:%s", bf_fstrim_cpy)
        do_fstrim(test, fstrim_type, new_vm, status_error)
        af_fstrim_cpy = get_disk_capacity(test, disk_type, imagefile=device_path,
                                          lvname="lvthin")
        logging.debug("\nBefore occupying disk:%s\n"
                      "After occupied disk:%s\n"
                      "After fstrim operation:%s",
                      bf_cpy, bf_fstrim_cpy, af_fstrim_cpy)
        # Check results
        if fstrim_type in ["fstrim_cmd", "qemu-guest-agent"]:
            if not sig_delta(bf_fstrim_cpy, af_fstrim_cpy) and \
                    not status_error:
                test.fail("Manual 'fstrims' didn't work.")
        elif fstrim_type == "mount_with_discard":
            if sig_delta(bf_cpy, bf_fstrim_cpy) and not status_error:
                test.fail("Automatic 'fstrims' didn't work.")
    finally:
        if new_vm.is_alive():
            new_vm.destroy()
        new_vm.undefine()
        if disk_type == "block":
            try:
                lv_utils.lv_remove("vgthin", "lvthin")
            except exceptions.TestError as detail:
                logging.debug(str(detail))
            try:
                lv_utils.vg_remove("vgthin")
            except exceptions.TestError as detail:
                logging.debug(str(detail))
            process.run("pvremove -f %s" % discard_device, ignore_status=True,
                        shell=True)
            if create_iscsi:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
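Example #25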
    def verify_migration_speed(test, params, env):
        """
        Check whether the migration speed setting takes effect by migrating twice with different bandwidths.
        """
        vms = env.get_all_vms()
        src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
        dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")

        if not len(vms):
            raise error.TestNAError("Please provide migrate_vms for test.")

        if src_uri.count('///') or src_uri.count('EXAMPLE'):
            raise error.TestNAError("The src_uri '%s' is invalid" % src_uri)

        if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
            raise error.TestNAError("The dest_uri '%s' is invalid" % dest_uri)

        remote_host = params.get("migrate_dest_host")
        username = params.get("migrate_dest_user", "root")
        password = params.get("migrate_dest_pwd")
        # Config ssh autologin for remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        # Check migrated vms' state
        for vm in vms:
            if vm.is_dead():
                vm.start()

        load_vm_names = params.get("load_vms").split()
        # vms for load
        load_vms = []
        for vm_name in load_vm_names:
            load_vms.append(
                libvirt_vm.VM(vm_name, params, test.bindir,
                              env.get("address_cache")))
        params["load_vms"] = load_vms

        bandwidth = int(params.get("bandwidth", "4"))
        stress_type = params.get("stress_type", "load_vms_booting")
        migration_type = params.get("migration_type", "orderly")
        thread_timeout = int(params.get("thread_timeout", "60"))
        delta = float(params.get("allowed_delta", "0.1"))
        virsh_migrate_timeout = int(params.get("virsh_migrate_timeout", "60"))
        # virsh migrate options
        virsh_migrate_options = "--live --unsafe --timeout %s" % virsh_migrate_timeout
        # Migrate vms to remote host
        mig_first = utlv.MigrationTest()
        virsh_dargs = {"debug": True}
        for vm in vms:
            set_get_speed(vm.name, bandwidth, virsh_dargs=virsh_dargs)
            vm.wait_for_login()
        utils_test.load_stress(stress_type, vms, params)
        mig_first.do_migration(vms,
                               src_uri,
                               dest_uri,
                               migration_type,
                               options=virsh_migrate_options,
                               thread_timeout=thread_timeout)
        for vm in vms:
            mig_first.cleanup_dest_vm(vm, None, dest_uri)
            # Keep it clean for second migration
            if vm.is_alive():
                vm.destroy()

        # Migrate vms again with new bandwidth
        second_bandwidth = params.get("second_bandwidth", "times")
        if second_bandwidth == "half":
            second_bandwidth = bandwidth / 2
            speed_times = 2
        elif second_bandwidth == "times":
            second_bandwidth = bandwidth * 2
            speed_times = 0.5
        elif second_bandwidth == "same":
            second_bandwidth = bandwidth
            speed_times = 1

        # Migrate again
        for vm in vms:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
            set_get_speed(vm.name, second_bandwidth, virsh_dargs=virsh_dargs)
        utils_test.load_stress(stress_type, vms, params)
        mig_second = utlv.MigrationTest()
        mig_second.do_migration(vms,
                                src_uri,
                                dest_uri,
                                migration_type,
                                options=virsh_migrate_options,
                                thread_timeout=thread_timeout)
        for vm in vms:
            mig_second.cleanup_dest_vm(vm, None, dest_uri)

        fail_info = []

        for vm in vms:
            first_time = mig_first.mig_time[vm.name]
            second_time = mig_second.mig_time[vm.name]
            logging.debug(
                "Migration time for %s:\n"
                "Time with Bandwidth '%s' first: %s\n"
                "Time with Bandwidth '%s' second: %s", vm.name, bandwidth,
                first_time, second_bandwidth, second_time)
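            # If migration time scales inversely with bandwidth, first_time * speed_times should
            # roughly equal second_time; 'shift' is the relative deviation from that expectation.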
            shift = float(abs(first_time * speed_times -
                              second_time)) / float(second_time)
            logging.debug("Shift:%s", shift)
            if delta < shift:
                fail_info.append(
                    "Time spent migrating %s is outside the allowed delta." % vm.name)

        # Check again for speed result
        if len(fail_info):
            raise error.TestFail(fail_info)
Example #26
def run(test, params, env):
    """
    Test command: virsh pool-define;pool-start;vol-list pool;
    attach-device LUN to guest; mount the device, dd; unmount;
    reboot guest; mount the device, dd again; pool-destroy; pool-undefine;

    Create a libvirt npiv pool from an XML file. The test needs to have a wwpn
    and wwnn of a vhba in host which is zoned & mapped to a SAN controller.

    Pre-requisite:
    Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped to
    SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("pool_target_device", "sdc")
    pool_wwnn = params.get("pool_wwnn", "WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "WWPN_EXAMPLE")
    test_unit = None
    mount_disk = None

    if 'EXAMPLE' in pool_wwnn or 'EXAMPLE' in pool_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if not vm.is_alive():
        vm.start()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir, vm.address_cache)

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    online_hbas_list = nodedev.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)

    # If there are no online HBA cards on the host, the test is skipped
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    else:
        if pool_adapter_parent == "":
            pool_adapter_parent = online_hbas_list[0]

    kwargs = {
        'source_path': source_path,
        'source_name': source_name,
        'source_format': source_format,
        'pool_adapter_type': pool_adapter_type,
        'pool_adapter_parent': pool_adapter_parent,
        'pool_wwnn': pool_wwnn,
        'pool_wwpn': pool_wwpn
    }

    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    old_vhbas = nodedev.find_hbas("vhba")
    try:
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)
        utils_misc.wait_for(lambda: nodedev.is_vhbas_added(old_vhbas),
                            _DELAY_TIME)
        virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
        virsh.pool_destroy(pool_name)
    except Exception as e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image,
                         **kwargs)
        raise exceptions.TestError(
            "Error occurred when prepare pool xml:\n %s" % e)
Example #27
def run(test, params, env):
    """
    Test command: virsh pool-define; pool-define-as; pool-start;
    vol-list pool; attach-device LUN to guest; mount the device;
    dd to the mounted device; unmount; pool-destroy; pool-undefine;

    Pre-requisite:
    Host needs to have a wwpn and wwnn of a vHBA which is zoned and mapped to
    SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = params.get("pre_def_pool", "no")
    define_pool = params.get("define_pool", "no")
    define_pool_as = params.get("define_pool_as", "no")
    pool_create_as = params.get("pool_create_as", "no")
    need_pool_build = params.get("need_pool_build", "no")
    need_vol_create = params.get("need_vol_create", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("disk_target_dev", "sdc")
    pool_wwnn = params.get("pool_wwnn", "POOL_WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "POOL_WWPN_EXAMPLE")
    vhba_wwnn = params.get("vhba_wwnn", "VHBA_WWNN_EXAMPLE")
    vhba_wwpn = params.get("vhba_wwpn", "VHBA_WWPN_EXAMPLE")
    volume_name = params.get("volume_name", "imagefrommapper.qcow2")
    volume_capacity = params.get("volume_capacity", '1G')
    allocation = params.get("allocation", '1G')
    vol_format = params.get("volume_format", 'raw')
    attach_method = params.get("attach_method", "hot")
    test_unit = None
    mount_disk = None
    pool_kwargs = {}
    pool_extra_args = ""
    emulated_image = "emulated-image"
    disk_xml = ""
    new_vhbas = []
    source_dev = ""
    mpath_vol_path = ""
    old_mpath_conf = ""
    mpath_conf_path = "/etc/multipath.conf"
    original_mpath_conf_exist = os.path.exists(mpath_conf_path)

    if pool_type == "scsi":
        if ('EXAMPLE' in pool_wwnn) or ('EXAMPLE' in pool_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for npiv scsi pool.")
    if pool_type == "logical":
        if ('EXAMPLE' in vhba_wwnn) or ('EXAMPLE' in vhba_wwpn):
            raise exceptions.TestSkipError(
                    "No wwpn and wwnn provided for vhba.")
    online_hbas_list = utils_npiv.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)
    old_mpath_conf = utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                                       replace_existing=True)
    if not online_hbas_list:
        raise exceptions.TestSkipError(
            "Host doesn't have online hba cards")
    old_vhbas = utils_npiv.find_hbas("vhba")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()
    if not vm.is_alive():
        vm.start()
    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)
    pool_ins = libvirt_storage.StoragePool()
    if pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    if pool_type == "scsi":
        if define_pool == "yes":
            if pool_adapter_parent == "":
                pool_adapter_parent = online_hbas_list[0]
            pool_kwargs = {'source_path': source_path,
                           'source_name': source_name,
                           'source_format': source_format,
                           'pool_adapter_type': pool_adapter_type,
                           'pool_adapter_parent': pool_adapter_parent,
                           'pool_wwnn': pool_wwnn,
                           'pool_wwpn': pool_wwpn}
    elif pool_type == "logical":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not sucessfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*5)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        source_dev = "/dev/mapper/" + new_mpath_devs[0]
        logging.debug("We are going to use \"%s\" as our source device"
                      " to create a logical pool", source_dev)
        try:
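            # Write an msdos (MBR) disk label in script mode (-s) before using the device as the pool source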
            cmd = "parted %s mklabel msdos -s" % source_dev
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when parted mklable")
        if define_pool_as == "yes":
            pool_extra_args = ""
            if source_dev:
                pool_extra_args = ' --source-dev %s' % source_dev
    elif pool_type == "mpath":
        if (not vhba_wwnn) or (not vhba_wwpn):
            raise exceptions.TestFail("No wwnn/wwpn provided to create vHBA.")
        old_mpath_devs = utils_npiv.find_mpath_devs()
        new_vhba = utils_npiv.nodedev_create_from_xml({
                "nodedev_parent": online_hbas_list[0],
                "scsi_wwnn": vhba_wwnn,
                "scsi_wwpn": vhba_wwpn})
        utils_misc.wait_for(
            lambda: utils_npiv.is_vhbas_added(old_vhbas), timeout=_DELAY_TIME*2)
        if not new_vhba:
            raise exceptions.TestFail("vHBA not sucessfully generated.")
        new_vhbas.append(new_vhba)
        utils_misc.wait_for(
            lambda: utils_npiv.is_mpath_devs_added(old_mpath_devs),
            timeout=_DELAY_TIME*2)
        if not utils_npiv.is_mpath_devs_added(old_mpath_devs):
            raise exceptions.TestFail("mpath dev not generated.")
        cur_mpath_devs = utils_npiv.find_mpath_devs()
        new_mpath_devs = list(set(cur_mpath_devs).difference(
            set(old_mpath_devs)))
        logging.debug("The newly added mpath dev is: %s", new_mpath_devs)
        mpath_vol_path = "/dev/mapper/" + new_mpath_devs[0]
        try:
            cmd = "parted %s mklabel msdos -s" % mpath_vol_path
            cmd_result = process.run(cmd, shell=True)
        except Exception as e:
            raise exceptions.TestError("Error occurred when parted mklable")
    if pre_def_pool == "yes":
        try:
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type,
                         pool_target, emulated_image,
                         **pool_kwargs)
            utils_misc.wait_for(
                    lambda: utils_npiv.is_vhbas_added(old_vhbas),
                    _DELAY_TIME*2)
            virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
            virsh.pool_destroy(pool_name)
        except Exception as e:
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
            raise exceptions.TestError(
                "Error occurred when prepare pool xml:\n %s" % e)
        if os.path.exists(pool_xml_f):
            with open(pool_xml_f, 'r') as f:
                logging.debug("Create pool from file: %s", f.read())
    try:
        # define/create/start the pool
        if (pre_def_pool == "yes") and (define_pool == "yes"):
            pool_define_status = virsh.pool_define(pool_xml_f,
                                                   ignore_status=True,
                                                   debug=True)
            utlv.check_exit_status(pool_define_status)
        if define_pool_as == "yes":
            pool_define_as_status = virsh.pool_define_as(
                pool_name, pool_type,
                pool_target, pool_extra_args,
                ignore_status=True, debug=True
                )
            utlv.check_exit_status(pool_define_as_status)
        if pool_create_as == "yes":
            if pool_type != "scsi":
                raise exceptions.TestSkipError("pool-create-as only needs to "
                                               "be covered by scsi pool for "
                                               "NPIV test.")
            cmd = "virsh pool-create-as %s %s \
                   --adapter-wwnn %s --adapter-wwpn %s \
                   --adapter-parent %s --target %s"\
                   % (pool_name, pool_type, pool_wwnn, pool_wwpn,
                      online_hbas_list[0], pool_target)
            cmd_status = process.system(cmd, verbose=True)
            if cmd_status:
                raise exceptions.TestFail("pool-create-as scsi pool failed.")
        if need_pool_build == "yes":
            pool_build_status = virsh.pool_build(pool_name, "--overwrite")
            utlv.check_exit_status(pool_build_status)

        pool_ins = libvirt_storage.StoragePool()
        if not pool_ins.pool_exists(pool_name):
            raise exceptions.TestFail("define or create pool failed.")
        else:
            if not pool_ins.is_pool_active(pool_name):
                pool_start_status = virsh.pool_start(pool_name)
                utlv.check_exit_status(pool_start_status)
                utlv.check_actived_pool(pool_name)
                pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
                logging.debug("Pool detail: %s", pool_detail)

        # create vol if required
        if need_vol_create == "yes":
            vol_create_as_status = virsh.vol_create_as(
                    volume_name, pool_name,
                    volume_capacity, allocation,
                    vol_format, "", debug=True
                    )
            utlv.check_exit_status(vol_create_as_status)
        virsh.pool_refresh(pool_name)
        vol_list = utlv.get_vol_list(pool_name, vol_check=True,
                                     timeout=_DELAY_TIME*3)
        logging.debug('Volume list is: %s', vol_list)

        # use test_unit to save the first vol in pool
        if pool_type == "mpath":
            cmd = "virsh vol-list %s | grep \"%s\" |\
                   awk '{FS=\" \"} {print $1}'" % (pool_name, mpath_vol_path)
            cmd_result = process.run(cmd, shell=True)
            status = cmd_result.exit_status
            output = cmd_result.stdout_text.strip()
            if cmd_result.exit_status:
                raise exceptions.TestFail("vol-list pool %s failed", pool_name)
            if not output:
                raise exceptions.TestFail("Newly added mpath dev not in pool.")
            test_unit = output
            logging.info(
                "Using %s to attach to a guest", test_unit)
        else:
            test_unit = list(vol_list.keys())[0]
            logging.info(
                "Using the first volume %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()

        # prepare disk xml which will be hot/cold attached to vm
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': vol_format}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
        copyfile(lun_disk_xml, disk_xml)
        with open(lun_disk_xml) as xml_file:
            disk_xml_str = xml_file.read()
        logging.debug("The disk xml is: %s", disk_xml_str)

        # hot attach disk xml to vm
        if attach_method == "hot":
            copyfile(lun_disk_xml, disk_xml)
            dev_attach_status = virsh.attach_device(vm_name, disk_xml,
                                                    debug=True)
            # Pool/vol virtual disk is not supported by mpath pool yet.
            if dev_attach_status.exit_status and pool_type == "mpath":
                raise exceptions.TestSkipError("mpath pool vol is not "
                                               "supported in virtual disk yet,"
                                               "the error message is: %s",
                                               dev_attach_status.stderr)
                session.close()
            utlv.check_exit_status(dev_attach_status)
        # cold attach disk xml to vm
        elif attach_method == "cold":
            if vm.is_alive():
                vm.destroy(gracefully=False)
            new_disk = disk.Disk()
            new_disk.xml = disk_xml_str
            vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            vmxml.devices = vmxml.devices.append(new_disk)
            vmxml.sync()
            logging.debug(vmxml)
            try:
                vm.start()
            except virt_vm.VMStartError as e:
                logging.debug(e)
                if pool_type == "mpath":
                    raise exceptions.TestSkipError("'mpath' pools for backing "
                                                   "'volume' disks isn't "
                                                   "supported for now")
                else:
                    raise exceptions.TestFail("Failed to start vm")
            session = vm.wait_for_login()
        else:
            pass

        # checking attached disk in vm
        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")
        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        # detach disk from vm
        dev_detach_status = virsh.detach_device(vm_name, disk_xml,
                                                debug=True)
        utlv.check_exit_status(dev_detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if os.path.exists(disk_xml):
            data_dir.clean_tmp_files()
            logging.debug("Cleanup disk xml")
        if pre_def_pool == "yes":
            # Do not apply cleanup_pool for logical pool, logical pool will
            # be cleaned below
            pvt.cleanup_pool(pool_name, pool_type, pool_target,
                             emulated_image, **pool_kwargs)
        if (test_unit and
                (need_vol_create == "yes" and (pre_def_pool == "no")) and
                (pool_type == "logical")):
            process.system('lvremove -f %s/%s' % (pool_name, test_unit),
                           verbose=True)
            process.system('vgremove -f %s' % pool_name, verbose=True)
            process.system('pvremove -f %s' % source_dev, verbose=True)
        if new_vhbas:
            utils_npiv.vhbas_cleanup(new_vhbas)
        # Restart multipathd, this is to avoid bz1399075
        if source_dev:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(source_dev),
                                _DELAY_TIME*5, 0.0, 5.0)
        elif mpath_vol_path:
            utils_misc.wait_for(lambda: utils_npiv.restart_multipathd(mpath_vol_path),
                                _DELAY_TIME*5, 0.0, 5.0)
        else:
            utils_npiv.restart_multipathd()
        if old_mpath_conf:
            utils_npiv.prepare_multipath_conf(conf_path=mpath_conf_path,
                                              conf_content=old_mpath_conf,
                                              replace_existing=True)
        if not original_mpath_conf_exist and os.path.exists(mpath_conf_path):
            os.remove(mpath_conf_path)
Example #28
def run(test, params, env):
    """
    Test the tpm virtual devices
    1. prepare a guest with different tpm devices
    2. check whether the guest can be started
    3. check the xml and qemu cmd line, even swtpm for vtpm
    4. check tpm usage in guest os
    """
    # Tpm passthrough supported since libvirt 1.0.5.
    if not libvirt_version.version_compare(1, 0, 5):
        test.cancel("Tpm device is not supported "
                    "on current libvirt version.")
    # Tpm passthrough supported since qemu 2.12.0-49.
    if not utils_misc.compare_qemu_version(2, 9, 0, is_rhev=False):
        test.cancel("Tpm device is not supported "
                    "on current qemu version.")

    tpm_model = params.get("tpm_model")
    backend_type = params.get("backend_type")
    backend_version = params.get("backend_version")
    device_path = params.get("device_path")
    tpm_num = int(params.get("tpm_num", 1))
    # After first start of vm with vtpm, do operations, check it still works
    vm_operate = params.get("vm_operate")
    # Sub-operation(e.g.domrename) under vm_operate(e.g.restart)
    vm_oprt = params.get("vm_oprt")
    secret_uuid = params.get("secret_uuid")
    secret_value = params.get("secret_value")
    # Change encryption state: from plain to encrypted, or reverse.
    encrypt_change = params.get("encrypt_change")
    prepare_secret = ("yes" == params.get("prepare_secret", "no"))
    remove_dev = ("yes" == params.get("remove_dev", "no"))
    multi_vms = ("yes" == params.get("multi_vms", "no"))
    # Remove swtpm state file
    rm_statefile = ("yes" == params.get("rm_statefile", "no"))
    test_suite = ("yes" == params.get("test_suite", "no"))
    restart_libvirtd = ("yes" == params.get("restart_libvirtd", "no"))
    no_backend = ("yes" == params.get("no_backend", "no"))
    status_error = ("yes" == params.get("status_error", "no"))
    err_msg = params.get("xml_errmsg", "")
    loader = params.get("loader", "")
    nvram = params.get("nvram", "")
    uefi_disk_url = params.get("uefi_disk_url", "")
    download_file_path = os.path.join(data_dir.get_tmp_dir(), "uefi_disk.qcow2")

    # Check tpm chip on host for passthrough testing
    if backend_type == "passthrough":
        dmesg_info = process.getoutput("dmesg|grep tpm -wi", shell=True)
        logging.debug("dmesg info about tpm:\n %s", dmesg_info)
        dmesg_error = re.search("No TPM chip found|TPM is disabled", dmesg_info)
        if dmesg_error:
            test.cancel(dmesg_error.group())
        else:
            # Try to check host tpm chip version
            tpm_v = None
            if re.search("2.0 TPM", dmesg_info):
                tpm_v = "2.0"
                if not utils_package.package_install("tpm2-tools"):
                    # package_install() return 'True' if succeed
                    test.error("Failed to install tpm2-tools on host")
            else:
                if re.search("1.2 TPM", dmesg_info):
                    tpm_v = "1.2"
                # If "1.2 TPM" or no version info in dmesg, try to test a tpm1.2 at first
                if not utils_package.package_install("tpm-tools"):
                    test.error("Failed to install tpm-tools on host")
    # Check host env for vtpm testing
    elif backend_type == "emulator":
        if not utils_misc.compare_qemu_version(4, 0, 0, is_rhev=False):
            test.cancel("vtpm(emulator backend) is not supported "
                        "on current qemu version.")
        # Install swtpm pkgs on host for vtpm emulation
        if not utils_package.package_install("swtpm*"):
            test.error("Failed to install swtpm swtpm-tools on host")

    def replace_os_disk(vm_xml, vm_name, nvram):
        """
        Replace os(nvram) and disk(uefi) for x86 vtpm test

        :param vm_xml: current vm's xml
        :param vm_name: current vm name
        :param nvram: nvram file path of vm
        """
        # Add loader, nvram in <os>
        nvram = nvram.replace("<VM_NAME>", vm_name)
        dict_os_attrs = {"loader_readonly": "yes",
                         "secure": "yes",
                         "loader_type": "pflash",
                         "loader": loader,
                         "nvram": nvram}
        vm_xml.set_os_attrs(**dict_os_attrs)
        logging.debug("Set smm=on in VMFeaturesXML")
        # Add smm in <features>
        features_xml = vm_xml.features
        features_xml.smm = "on"
        vm_xml.features = features_xml
        vm_xml.sync()
        # Replace disk with an uefi image
        if not utils_package.package_install("wget"):
            test.error("Failed to install wget on host")
        if uefi_disk_url.count("EXAMPLE"):
            test.error("Please provide the URL %s" % uefi_disk_url)
        else:
            download_cmd = ("wget %s -O %s" % (uefi_disk_url, download_file_path))
            process.system(download_cmd, verbose=False, shell=True)
        vm = env.get_vm(vm_name)
        uefi_disk = {'disk_source_name': download_file_path}
        libvirt.set_vm_disk(vm, uefi_disk)

    vm_names = params.get("vms").split()
    vm_name = vm_names[0]
    vm = env.get_vm(vm_name)
    vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml.copy()
    os_xml = getattr(vm_xml, "os")
    host_arch = platform.machine()
    if backend_type == "emulator" and host_arch == 'x86_64':
        if not utils_package.package_install("OVMF"):
            test.error("Failed to install OVMF or edk2-ovmf pkgs on host")
        if os_xml.xmltreefile.find('nvram') is None:
            replace_os_disk(vm_xml, vm_name, nvram)
            vm_xml = VMXML.new_from_inactive_dumpxml(vm_name)
    if vm.is_alive():
        vm.destroy()

    vm2 = None
    if multi_vms:
        if len(vm_names) > 1:
            vm2_name = vm_names[1]
            vm2 = env.get_vm(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
            vm2_xml_backup = vm2_xml.copy()
        else:
            # Clone additional vms if needed
            try:
                utils_path.find_command("virt-clone")
            except utils_path.CmdNotFoundError:
                if not utils_package.package_install(["virt-install"]):
                    test.cancel("Failed to install virt-install on host")
            vm2_name = "vm2_" + utils_misc.generate_random_string(5)
            ret_clone = utils_libguestfs.virt_clone_cmd(vm_name, vm2_name,
                                                        True, timeout=360, debug=True)
            if ret_clone.exit_status:
                test.error("Need more than one domains, but error occured when virt-clone.")
            vm2 = vm.clone(vm2_name)
            vm2_xml = VMXML.new_from_inactive_dumpxml(vm2_name)
        if vm2.is_alive():
            vm2.destroy()

    service_mgr = service.ServiceManager()

    def check_dumpxml(vm_name):
        """
        Check whether the added devices are shown in the guest xml

        :param vm_name: current vm name
        """
        logging.info("------Checking guest dumpxml------")
        if tpm_model:
            pattern = '<tpm model="%s">' % tpm_model
        else:
            # The default tpm model is "tpm-tis"
            pattern = '<tpm model="tpm-tis">'
        # Check tpm model
        xml_after_adding_device = VMXML.new_from_dumpxml(vm_name)
        logging.debug("xml after add tpm dev is %s", xml_after_adding_device)
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s tpm device xml "
                      "in the guest xml file." % tpm_model)
        # Check backend type
        pattern = '<backend type="%s"' % backend_type
        if pattern not in astring.to_text(xml_after_adding_device):
            test.fail("Can not find the %s backend type xml for tpm dev "
                      "in the guest xml file." % backend_type)
        # Check backend version
        if backend_version:
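            # 'none' means no explicit backend version was set; the check assumes libvirt then reports version 2.0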
            check_ver = backend_version if backend_version != 'none' else '2.0'
            pattern = '"emulator" version="%s"' % check_ver
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s backend version xml for tpm dev "
                          "in the guest xml file." % check_ver)
        # Check device path
        if backend_type == "passthrough":
            pattern = '<device path="/dev/tpm0"'
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s device path xml for tpm dev "
                          "in the guest xml file." % device_path)
        # Check encryption secret
        if prepare_secret:
            pattern = '<encryption secret="%s" />' % encryption_uuid
            if pattern not in astring.to_text(xml_after_adding_device):
                test.fail("Can not find the %s secret uuid xml for tpm dev "
                          "in the guest xml file." % encryption_uuid)
        logging.info('------PASS on guest dumpxml check------')

    def check_qemu_cmd_line(vm, vm_name, domid):
        """
        Check whether the added devices are shown in the qemu cmd line

        :param vm: current vm
        :param vm_name: current vm name
        :param domid: domain id for checking vtpm socket file
        """
        logging.info("------Checking qemu cmd line------")
        if not vm.get_pid():
            test.fail('VM pid file missing.')
        with open('/proc/%s/cmdline' % vm.get_pid()) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Qemu cmd line info:\n %s", cmdline)
        # Check tpm model
        pattern_list = ["-device.%s" % tpm_model]
        # Check backend type
        if backend_type == "passthrough":
            dev_num = re.search(r"\d+", device_path).group()
            backend_segment = "id=tpm-tpm%s" % dev_num
        else:
            # emulator backend
            backend_segment = "id=tpm-tpm0,chardev=chrtpm"
        pattern_list.append("-tpmdev.%s,%s" % (backend_type, backend_segment))
        # Check chardev socket for vtpm
        if backend_type == "emulator":
            pattern_list.append("-chardev.socket,id=chrtpm,"
                                "path=.*/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name))
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                if not remove_dev:
                    test.fail("Can not find the %s for tpm device "
                              "in qemu cmd line." % pattern)
            elif remove_dev:
                test.fail("%s still exists after remove vtpm and restart" % pattern)
        logging.info("------PASS on qemu cmd line check------")

    def check_swtpm(domid, domuuid, vm_name):
        """
        Check swtpm cmdline and files for vtpm.

        :param domid: domain id for checking vtpm files
        :param domuuid: domain uuid for checking vtpm state file
        :param vm_name: current vm name
        """
        logging.info("------Checking swtpm cmdline and files------")
        # Check swtpm cmdline
        swtpm_pid = utils_misc.get_pid("%s-swtpm.pid" % vm_name)
        if not swtpm_pid:
            if not remove_dev:
                test.fail('swtpm pid file missing.')
            else:
                return
        elif remove_dev:
            test.fail('swtpm pid file still exists after remove vtpm and restart')
        with open('/proc/%s/cmdline' % swtpm_pid) as cmdline_file:
            cmdline = cmdline_file.read()
            logging.debug("Swtpm cmd line info:\n %s", cmdline)
        pattern_list = ["--daemon", "--ctrl", "--tpmstate", "--log", "--tpm2", "--pid"]
        if prepare_secret:
            pattern_list.extend(["--key", "--migration-key"])
        for pattern in pattern_list:
            if not re.search(pattern, cmdline):
                test.fail("Can not find the %s for tpm device "
                          "in swtpm cmd line." % pattern)
        # Check swtpm files
        file_list = ["/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.sock" % (domid, vm_name)]
        file_list.append("/var/lib/libvirt/swtpm/%s/tpm2" % domuuid)
        file_list.append("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm_name)
        file_list.append("/var/run/libvirt/qemu/swtpm/%s-%s-swtpm.pid" % (domid, vm_name))
        for swtpm_file in file_list:
            if not os.path.exists(swtpm_file):
                test.fail("Swtpm file: %s does not exist" % swtpm_file)
        logging.info("------PASS on Swtpm cmdline and files check------")

    def get_tpm2_tools_cmd(session=None):
        """
        Get tpm2-tools pkg version and return corresponding getrandom cmd

        :param session: guest console session (None to run the check on the host)
        :return: tpm2_getrandom cmd usage
        """
        cmd = 'rpm -q tpm2-tools'
        get_v_tools = session.cmd(cmd) if session else process.run(cmd).stdout_text
        v_tools_list = get_v_tools.strip().split('-')
        if session:
            logging.debug("The tpm2-tools version is %s", v_tools_list[2])
        v_tools = int(v_tools_list[2].split('.')[0])
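        # tpm2-tools reworked its CLI in the 4.x series, so the getrandom
        # syntax depends on the major version detected above.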
        return "tpm2_getrandom 8" if v_tools < 4 else "tpm2_getrandom -T device:/dev/tpm0 8 --hex"

    def get_host_tpm_bef(tpm_v):
        """
        Test the host tpm function and identify its real version before passthrough.
        Since dmesg sometimes doesn't include the tpm info, use tpm-tools or
        tpm2-tools to probe the device.

        :param tpm_v: host tpm version get from dmesg info
        :return: host tpm version
        """
        logging.info("------Checking host tpm device before passthrough------")
        # Try tcsd tool for suspected tpm1.2 chip on host
        tpm_real_v = tpm_v
        if tpm_v != "2.0":
            if not service_mgr.start('tcsd'):
                # service_mgr.start() returns True on success
                if tpm_v == "1.2":
                    test.fail("Host tcsd.service start failed")
                else:
                    # tpm_v got nothing from dmesg; log the failure here and
                    # fall through to the next 'if' to try the tpm2.0 tools.
                    logging.info("Host tcsd.service start failed")
            else:
                tpm_real_v = "1.2"
                logging.info("Host tpm version info:")
                result = process.run("tpm_version", ignore_status=False)
                logging.debug("[host]# tpm_version\n %s", result.stdout)
                time.sleep(2)
                service_mgr.stop('tcsd')
        if tpm_v != "1.2":
            # Try tpm2.0 tools
            if not utils_package.package_install("tpm2-tools"):
                test.error("Failed to install tpm2-tools on host")
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.cancel("Both tcsd and tpm2-tools can not work, "
                            "pls check your host tpm version and test env.")
            else:
                tpm_real_v = "2.0"
        logging.info("------PASS on host tpm device check------")
        return tpm_real_v

    def test_host_tpm_aft(tpm_real_v):
        """
        Test host tpm function after passthrough

        :param tpm_real_v: real host tpm version identified by the earlier check
        """
        logging.info("------Checking host tpm device after passthrough------")
        if tpm_real_v == "1.2":
            if service_mgr.start('tcsd'):
                time.sleep(2)
                service_mgr.stop('tcsd')
                test.fail("Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        elif tpm_real_v == "2.0":
            tpm2_getrandom_cmd = get_tpm2_tools_cmd()
            if not process.run(tpm2_getrandom_cmd, ignore_status=True).exit_status:
                test.fail("Host tpm should not work after passthrough to guest.")
            else:
                logging.info("Expected failure: Tpm is being used by guest.")
        logging.info("------PASS on host tpm device check------")

    def test_guest_tpm(expect_version, session, expect_fail):
        """
        Test tpm function in guest

        :param expect_version: expected guest tpm version: the host's version for
                               passthrough, or the version set on the emulator backend
        :param session: Guest session to be tested
        :param expect_fail: whether the guest tpm is expected to fail to work
        """
        logging.info("------Checking guest tpm device work------")
        if expect_version == "1.2":
            # Install tpm-tools and test by tcsd method
            if not utils_package.package_install(["tpm-tools"], session, 360):
                test.error("Failed to install tpm-tools package in guest")
            else:
                status, output = session.cmd_status_output("systemctl start tcsd")
                logging.debug("Command output: %s", output)
                if status:
                    if expect_fail:
                        test.cancel("tpm-crb passthrough only works with host tpm2.0, "
                                    "but your host tpm version is 1.2")
                    else:
                        test.fail("Failed to start tcsd.service in guest")
                else:
                    dev_output = session.cmd_output("ls /dev/|grep tpm")
                    logging.debug("Command output: %s", dev_output)
                    status, output = session.cmd_status_output("tpm_version")
                    logging.debug("Command output: %s", output)
                    if status:
                        test.fail("Guest tpm can not work")
        else:
            # If expect_version is tpm2.0, install and test by tpm2-tools
            if not utils_package.package_install(["tpm2-tools"], session, 360):
                test.error("Failed to install tpm2-tools package in guest")
            else:
                tpm2_getrandom_cmd = get_tpm2_tools_cmd(session)
                status1, output1 = session.cmd_status_output("ls /dev/|grep tpm")
                logging.debug("Command output: %s", output1)
                status2, output2 = session.cmd_status_output(tpm2_getrandom_cmd)
                logging.debug("Command output: %s", output2)
                if status1 or status2:
                    if not expect_fail:
                        test.fail("Guest tpm can not work")
                    else:
                        d_status, d_output = session.cmd_status_output("date")
                        if d_status:
                            test.fail("Guest OS doesn't work well")
                        logging.debug("Command output: %s", d_output)
                elif expect_fail:
                    test.fail("Expect fail but guest tpm still works")
        logging.info("------PASS on guest tpm device work check------")

    def run_test_suite_in_guest(session):
        """
        Run kernel test suite for guest tpm.

        :param session: Guest session to be tested
        """
        logging.info("------Checking kernel test suite for guest tpm------")
        boot_info = session.cmd('uname -r').strip().split('.')
        kernel_version = '.'.join(boot_info[:2])
        # Download the test suite matching the current guest kernel version
        parent_path = "https://cdn.kernel.org/pub/linux/kernel"
        # Compare (major, minor) numerically to avoid float("5.10") < 5.3 surprises
        if tuple(int(x) for x in boot_info[:2]) < (5, 3):
            major_version = "5"
            file_version = "5.3"
        else:
            major_version = boot_info[0]
            file_version = kernel_version
        src_url = "%s/v%s.x/linux-%s.tar.xz" % (parent_path, major_version, file_version)
        download_cmd = "wget %s -O %s" % (src_url, "/root/linux.tar.xz")
        output = session.cmd_output(download_cmd, timeout=480)
        logging.debug("Command output: %s", output)
        # Install necessary pkgs to build the test suite
        if not utils_package.package_install(["tar", "make", "gcc", "rsync", "python2"], session, 360):
            test.fail("Failed to install specified pkgs in guest OS.")
        # Extract the downloaded test suite
        status, output = session.cmd_status_output("tar xvJf /root/linux.tar.xz -C /root")
        if status:
            test.fail("Extracting the test suite failed: %s" % output)
        # Force the test suite scripts to run with python2, which they expect here
        test_path = "/root/linux-%s/tools/testing/selftests" % file_version
        sed_cmd = "sed -i 's/python -m unittest/python2 -m unittest/g' %s/tpm2/test_*.sh" % test_path
        output = session.cmd_output(sed_cmd)
        logging.debug("Command output: %s", output)
        # Build and run the tpm2 tests of the kernel test suite
        status, output = session.cmd_status_output("make -C %s TARGETS=tpm2 run_tests" % test_path, timeout=360)
        logging.debug("Command output: %s", output)
        if status:
            test.fail("Failed to run test suite in guest OS.")
        for test_sh in ["test_smoke.sh", "test_space.sh"]:
            pattern = "ok .* selftests: tpm2: %s" % test_sh
            if not re.search(pattern, output) or ("not ok" in output):
                test.fail("test suite check failed.")
        logging.info("------PASS on kernel test suite check------")

    def reuse_by_vm2(tpm_dev):
        """
        Try to add the same tpm to a second guest while it is being used by the first one.

        :param tpm_dev: tpm device to be added into the second guest's xml
        """
        logging.info("------Trying to add the same tpm to a second domain------")
        vm2_xml.remove_all_device_by_type('tpm')
        vm2_xml.add_device(tpm_dev)
        vm2_xml.sync()
        ret = virsh.start(vm2_name, ignore_status=True, debug=True)
        if backend_type == "passthrough":
            if ret.exit_status:
                logging.info("Expected failure when try to passthrough a tpm"
                             " that being used by another guest")
                return
            test.fail("Reuse a passthroughed tpm should not succeed.")
        elif ret.exit_status:
            # emulator backend
            test.fail("Vtpm for each guest should not interfere with each other")

    try:
        tpm_real_v = None
        sec_uuids = []
        new_name = ""
        virsh_dargs = {"debug": True, "ignore_status": False}
        vm_xml.remove_all_device_by_type('tpm')
        tpm_dev = Tpm()
        if tpm_model:
            tpm_dev.tpm_model = tpm_model
        if not no_backend:
            backend = tpm_dev.Backend()
            if backend_type != 'none':
                backend.backend_type = backend_type
                if backend_type == "passthrough":
                    tpm_real_v = get_host_tpm_bef(tpm_v)
                    logging.debug("The host tpm real version is %s", tpm_real_v)
                    if device_path:
                        backend.device_path = device_path
                if backend_type == "emulator":
                    if backend_version != 'none':
                        backend.backend_version = backend_version
                    if prepare_secret:
                        auth_sec_dict = {"sec_ephemeral": "no",
                                         "sec_private": "yes",
                                         "sec_desc": "sample vTPM secret",
                                         "sec_usage": "vtpm",
                                         "sec_name": "VTPM_example"}
                        encryption_uuid = libvirt.create_secret(auth_sec_dict)
                        if secret_value != 'none':
                            virsh.secret_set_value(encryption_uuid, "open sesame", encode=True, debug=True)
                        sec_uuids.append(encryption_uuid)
                        if encrypt_change != 'encrpt':
                            # plain_to_encrypt will not add encryption on first start
                            if secret_uuid == 'invalid':
                                encryption_uuid = encryption_uuid[:-1]
                            backend.encryption_secret = encryption_uuid
                        if secret_uuid == "change":
                            auth_sec_dict["sec_desc"] = "sample2 vTPM secret"
                            auth_sec_dict["sec_name"] = "VTPM_example2"
                            new_encryption_uuid = libvirt.create_secret(auth_sec_dict)
                            virsh.secret_set_value(new_encryption_uuid, "open sesame", encode=True, debug=True)
                            sec_uuids.append(new_encryption_uuid)
                    if secret_uuid == 'nonexist':
                        backend.encryption_secret = "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee"
            tpm_dev.backend = backend
        logging.debug("tpm dev xml to add is:\n %s", tpm_dev)
        for num in range(tpm_num):
            vm_xml.add_device(tpm_dev, True)
        ret = virsh.define(vm_xml.xml, ignore_status=True, debug=True)
        expected_match = ""
        if not err_msg:
            expected_match = "Domain .*%s.* defined from %s" % (vm_name, vm_xml.xml)
        libvirt.check_result(ret, err_msg, "", False, expected_match)
        if err_msg:
            # Stop the test when the expected failure is hit
            return
        if vm_operate != "restart":
            check_dumpxml(vm_name)
        # When no tpm model is specified (default model case), there is no need to start the guest
        if tpm_model:
            expect_fail = False
            try:
                vm.start()
            except VMStartError as detail:
                if secret_value == 'none' or secret_uuid == 'nonexist':
                    logging.debug("Expected failure: %s", detail)
                    return
                else:
                    test.fail(detail)
            domuuid = vm.get_uuid()
            if vm_operate or restart_libvirtd:
                # Make sure the OS works before the vm operation or libvirtd restart
                session = vm.wait_for_login()
                test_guest_tpm("2.0", session, False)
                session.close()
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()
                swtpm_statedir = "/var/lib/libvirt/swtpm/%s" % domuuid
                if vm_operate == "resume":
                    virsh.suspend(vm_name, **virsh_dargs)
                    time.sleep(3)
                    virsh.resume(vm_name, **virsh_dargs)
                elif vm_operate == "snapshot":
                    virsh.snapshot_create_as(vm_name, "sp1 --memspec file=/tmp/testvm_sp1", **virsh_dargs)
                elif vm_operate in ["restart", "create"]:
                    vm.destroy()
                    if vm_operate == "create":
                        virsh.undefine(vm_name, options="--nvram", **virsh_dargs)
                        if os.path.exists(swtpm_statedir):
                            test.fail("Swtpm state dir: %s still exist after vm undefine" % swtpm_statedir)
                        virsh.create(vm_xml.xml, **virsh_dargs)
                    else:
                        if vm_oprt == "domrename":
                            new_name = "vm_" + utils_misc.generate_random_string(5)
                            virsh.domrename(vm_name, new_name, **virsh_dargs)
                            new_vm = libvirt_vm.VM(new_name, vm.params, vm.root_dir, vm.address_cache)
                            vm = new_vm
                            vm_name = new_name
                        elif secret_value == 'change':
                            logging.info("Changing secret value...")
                            virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True)
                        elif not restart_libvirtd:
                            # remove_dev, or apply other xml changes across the restart
                            vm_xml.remove_all_device_by_type('tpm')
                            if secret_uuid == "change" or encrypt_change:
                                # Change the secret uuid, or switch the encryption
                                # state between plain and encrypted (either direction)
                                if encrypt_change == 'plain':
                                    # Change from encrypted to plain: redefine a tpm dev without encryption
                                    tpm_dev = Tpm()
                                    tpm_dev.tpm_model = tpm_model
                                    backend = tpm_dev.Backend()
                                    backend.backend_type = backend_type
                                    backend.backend_version = backend_version
                                else:
                                    # Use a new secret's uuid
                                    if secret_uuid == "change":
                                        encryption_uuid = new_encryption_uuid
                                    backend.encryption_secret = encryption_uuid
                                tpm_dev.backend = backend
                                logging.debug("The new tpm dev xml to add for restart vm is:\n %s", tpm_dev)
                                vm_xml.add_device(tpm_dev, True)
                            if encrypt_change in ['encrpt', 'plain']:
                                # Avoid sync(): its undefine step would remove the swtpm state file
                                vm_xml.define()
                            else:
                                vm_xml.sync()
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s", swtpm_statefile)
                            os.remove(swtpm_statefile)
                        ret = virsh.start(vm_name, ignore_status=True, debug=True)
                        libvirt.check_exit_status(ret, status_error)
                        if status_error and ret.exit_status != 0:
                            return
                    if not remove_dev:
                        check_dumpxml(vm_name)
                elif vm_operate == 'managedsave':
                    virsh.managedsave(vm_name, **virsh_dargs)
                    time.sleep(5)
                    if secret_value == 'change':
                        logging.info("Changing secret value...")
                        virsh.secret_set_value(encryption_uuid, "new sesame", encode=True, debug=True)
                        if rm_statefile:
                            swtpm_statefile = "%s/tpm2/tpm2-00.permall" % swtpm_statedir
                            logging.debug("Removing state file: %s", swtpm_statefile)
                            os.remove(swtpm_statefile)
                    ret = virsh.start(vm_name, ignore_status=True, debug=True)
                    libvirt.check_exit_status(ret, status_error)
                    if status_error and ret.exit_status != 0:
                        return
            domid = vm.get_id()
            check_qemu_cmd_line(vm, vm_name, domid)
            if backend_type == "passthrough":
                if tpm_real_v == "1.2" and tpm_model == "tpm-crb":
                    expect_fail = True
                expect_version = tpm_real_v
                test_host_tpm_aft(tpm_real_v)
            else:
                # emulator backend
                if remove_dev:
                    expect_fail = True
                expect_version = backend_version
                check_swtpm(domid, domuuid, vm_name)
            session = vm.wait_for_login()
            if test_suite:
                run_test_suite_in_guest(session)
            else:
                test_guest_tpm(expect_version, session, expect_fail)
            session.close()
            if multi_vms:
                reuse_by_vm2(tpm_dev)
                if backend_type != "passthrough":
                    # emulator backend
                    check_dumpxml(vm2_name)
                    domid = vm2.get_id()
                    domuuid = vm2.get_uuid()
                    check_qemu_cmd_line(vm2, vm2_name, domid)
                    check_swtpm(domid, domuuid, vm2_name)
                    session = vm2.wait_for_login()
                    test_guest_tpm(backend_version, session, expect_fail)
                    session.close()

    finally:
        # Remove renamed domain if it exists
        if new_name:
            virsh.remove_domain(new_name, "--nvram", debug=True)
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % new_name)
        # Remove snapshot if exists
        if vm_operate == "snapshot":
            snapshot_lists = virsh.snapshot_list(vm_name)
            if len(snapshot_lists) > 0:
                libvirt.clean_up_snapshots(vm_name, snapshot_lists)
                for snap in snapshot_lists:
                    virsh.snapshot_delete(vm_name, snap, "--metadata")
                if os.path.exists("/tmp/testvm_sp1"):
                    os.remove("/tmp/testvm_sp1")
        # Clear guest os
        if test_suite:
            session = vm.wait_for_login()
            logging.info("Removing dir /root/linux-*")
            output = session.cmd_output("rm -rf /root/linux-*")
            logging.debug("Command output:\n %s", output)
            session.close()
        if vm_operate == "create":
            vm.define(vm_xml.xml)
        vm_xml_backup.sync(options="--nvram --managed-save")
        # Remove swtpm log file in case of impact on later runs
        if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name):
            os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm.name)
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, ignore_status=True, debug=True)
        if vm2:
            if len(vm_names) > 1:
                vm2_xml_backup.sync(options="--nvram")
            else:
                virsh.remove_domain(vm2_name, "--nvram --remove-all-storage", debug=True)
            if os.path.exists("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name):
                os.remove("/var/log/swtpm/libvirt/qemu/%s-swtpm.log" % vm2.name)
Example #29
def run(test, params, env):
    """
    Test command: virsh pool-define;pool-start;vol-list pool;
    attach-device LUN to guest; mount the device, dd; unmount;
    reboot guest; mount the device, dd again; pool-destroy; pool-undefine;

    Create a libvirt npiv pool from an XML file.

    Prerequisite:
    The host needs to have the wwpn and wwnn of a vHBA that is zoned and
    mapped to a SAN controller.
    """
    pool_xml_f = params.get("pool_create_xml_file", "/PATH/TO/POOL.XML")
    pool_name = params.get("pool_create_name", "virt_test_pool_tmp")
    pre_def_pool = "yes" == params.get("pre_def_pool", "no")
    pool_type = params.get("pool_type", "dir")
    source_format = params.get("pool_src_format", "")
    source_name = params.get("pool_source_name", "")
    source_path = params.get("pool_source_path", "/")
    pool_target = params.get("pool_target", "pool_target")
    pool_adapter_type = params.get("pool_adapter_type", "")
    pool_adapter_parent = params.get("pool_adapter_parent", "")
    target_device = params.get("pool_target_device", "sdc")
    pool_wwnn = params.get("pool_wwnn", "WWNN_EXAMPLE")
    pool_wwpn = params.get("pool_wwpn", "WWPN_EXAMPLE")
    test_unit = None
    mount_disk = None
    # Initialized so the finally block can reference it even on early failure
    disk_xml = None

    if 'EXAMPLE' in pool_wwnn or 'EXAMPLE' in pool_wwpn:
        raise exceptions.TestSkipError("Please provide proper WWPN/WWNN")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if not vm.is_alive():
        vm.start()
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    libvirt_vm = lib_vm.VM(vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

    pool_ins = libvirt_storage.StoragePool()
    if pre_def_pool and pool_ins.pool_exists(pool_name):
        raise exceptions.TestFail("Pool %s already exist" % pool_name)
    online_hbas_list = nodedev.find_hbas("hba")
    logging.debug("The online hbas are: %s", online_hbas_list)

    # If there are no online hba cards on the host, skip the test
    if not online_hbas_list:
        raise exceptions.TestSkipError("Host doesn't have online hba cards")
    if pool_adapter_parent == "":
        pool_adapter_parent = online_hbas_list[0]

    kwargs = {'source_path': source_path,
              'source_name': source_name,
              'source_format': source_format,
              'pool_adapter_type': pool_adapter_type,
              'pool_adapter_parent': pool_adapter_parent,
              'pool_wwnn': pool_wwnn,
              'pool_wwpn': pool_wwpn}

    pvt = utlv.PoolVolumeTest(test, params)
    emulated_image = "emulated-image"
    old_vhbas = nodedev.find_hbas("vhba")
    try:
        pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image,
                     **kwargs)
        utils_misc.wait_for(
            lambda: nodedev.is_vhbas_added(old_vhbas), _DELAY_TIME)
        virsh.pool_dumpxml(pool_name, to_file=pool_xml_f)
        virsh.pool_destroy(pool_name)
    except Exception as e:
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        raise exceptions.TestError(
            "Error occurred when prepare pool xml:\n %s" % e)
    if os.path.exists(pool_xml_f):
        with open(pool_xml_f, 'r') as f:
            logging.debug("Create pool from file:\n %s", f.read())

    try:
        cmd_result = virsh.pool_define(pool_xml_f, ignore_status=True,
                                       debug=True)
        utlv.check_exit_status(cmd_result)

        cmd_result = virsh.pool_start(pool_name)
        utlv.check_exit_status(cmd_result)
        utlv.check_actived_pool(pool_name)
        pool_detail = libvirt_xml.PoolXML.get_pool_details(pool_name)
        logging.debug("Pool detail: %s", pool_detail)

        vol_list = utlv.get_vol_list(pool_name, timeout=10)
        test_unit = list(vol_list.keys())[0]
        logging.info(
            "Using the first LUN unit %s to attach to a guest", test_unit)

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        old_count = vmxml.get_disk_count(vm_name)
        bf_disks = libvirt_vm.get_disks()
        disk_params = {'type_name': 'volume', 'target_dev': target_device,
                       'target_bus': 'virtio', 'source_pool': pool_name,
                       'source_volume': test_unit, 'driver_type': 'raw'}
        disk_xml = os.path.join(data_dir.get_tmp_dir(), 'disk_xml.xml')
        lun_disk_xml = utlv.create_disk_xml(disk_params)
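        # For reference, the generated disk XML is roughly of this form
        # (illustrative only; the exact output depends on utlv.create_disk_xml):
        #   <disk type='volume' device='disk'>
        #     <driver name='qemu' type='raw'/>
        #     <source pool='POOL_NAME' volume='LUN_UNIT'/>
        #     <target dev='sdc' bus='virtio'/>
        #   </disk>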

        copyfile(lun_disk_xml, disk_xml)
        attach_success = virsh.attach_device(
            vm_name, disk_xml, debug=True)

        utlv.check_exit_status(attach_success)

        virsh.reboot(vm_name, debug=True)

        logging.info("Checking disk availability in domain")
        if not vmxml.get_disk_count(vm_name):
            raise exceptions.TestFail("No disk in domain %s." % vm_name)
        new_count = vmxml.get_disk_count(vm_name)

        if new_count <= old_count:
            raise exceptions.TestFail(
                "Failed to attach disk %s" % lun_disk_xml)

        session = vm.wait_for_login()
        output = session.cmd_status_output('lsblk')
        logging.debug("%s", output[1])
        logging.debug("Disks before attach: %s", bf_disks)

        af_disks = libvirt_vm.get_disks()
        logging.debug("Disks after attach: %s", af_disks)

        mount_disk = "".join(list(set(bf_disks) ^ set(af_disks)))
        if not mount_disk:
            raise exceptions.TestFail("Can not get attached device in vm.")
        logging.debug("Attached device in vm:%s", mount_disk)

        logging.debug("Creating file system for %s", mount_disk)
        output = session.cmd_status_output(
            'echo yes | mkfs.ext4 %s' % mount_disk)
        logging.debug("%s", output[1])
        if mount_disk:
            mount_success = mount_and_dd(session, mount_disk)
            if not mount_success:
                raise exceptions.TestFail("Mount failed")
        else:
            raise exceptions.TestFail("Partition not available for disk")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)

        virsh.reboot(vm_name, debug=True)

        session = vm.wait_for_login()
        output = session.cmd_status_output('mount')
        logging.debug("%s", output[1])
        mount_success = mount_and_dd(session, mount_disk)
        if not mount_success:
            raise exceptions.TestFail("Mount failed")

        logging.debug("Unmounting disk")
        session.cmd_status_output('umount %s' % mount_disk)
        session.close()

        detach_status = virsh.detach_device(vm_name, disk_xml,
                                            debug=True)
        utlv.check_exit_status(detach_status)

    finally:
        vm.destroy(gracefully=False)
        vmxml_backup.sync()
        logging.debug('Destroying pool %s', pool_name)
        virsh.pool_destroy(pool_name)
        logging.debug('Undefining pool %s', pool_name)
        virsh.pool_undefine(pool_name)
        pvt.cleanup_pool(pool_name, pool_type, pool_target,
                         emulated_image, **kwargs)
        if os.path.exists(pool_xml_f):
            os.remove(pool_xml_f)
        if disk_xml and os.path.exists(disk_xml):
            logging.debug("Cleanup disk xml")
            data_dir.clean_tmp_files()
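
The example above calls a mount_and_dd() helper that is not shown in this snippet. A minimal sketch of what such a helper could look like follows; the mount point and dd arguments are illustrative assumptions, not the test's actual values.

import logging


def mount_and_dd(session, mount_disk, mount_point="/mnt"):
    """Mount the attached disk inside the guest and write to it with dd."""
    status, output = session.cmd_status_output(
        "mount %s %s" % (mount_disk, mount_point))
    if status:
        logging.debug("Mount failed: %s", output)
        return False
    status, output = session.cmd_status_output(
        "dd if=/dev/zero of=%s/testfile bs=1M count=100 && sync" % mount_point)
    logging.debug("dd output: %s", output)
    return status == 0
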
Example #30
def run(test, params, env):
    """
    Test multi function of vm devices.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Destroy the original vm to avoid a dirty environment after starting the new vm
    if vm.is_alive():
        vm.destroy()
    new_vm_name = "mq_new_%s" % vm_name
    utlv.define_new_vm(vm_name, new_vm_name)
    # Create a new vm object for convenience
    new_vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

    host_session = None
    client_sessions = []
    try:
        # Config new vm for multiqueue
        try:
            prepare_vm_queue(test, new_vm, params)
        except Exception:
            if int(params.get("queue_count")) > 8:
                params["queue_count"] = 8
                prepare_vm_queue(test, new_vm, params)
            else:
                raise

        # Start checking
        vhost_pids = get_vhost_pids(test, new_vm)
        logging.debug("vhosts: %s", vhost_pids)
        if len(vhost_pids) != int(params.get("queue_count")):
            test.fail("Vhost count is not matched with queue.")

        affinity_cpu_number = params.get("affinity_cpu_number", '1')
        # Here, cpu affinity should be in this format:
        # 0001 means CPU0, 0010 means CPU1...
        affinity_format = {'1': 2, '2': 4, '3': 8}
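        # Equivalent to 1 << int(affinity_cpu_number), i.e. a one-hot mask selecting that CPU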
        if params.get("application") == "affinity":
            affinity_cpu = affinity_format[affinity_cpu_number]
            set_cpu_affinity(vm, affinity_cpu)

        # Run iperf
        # Use iperf to make interface busy, otherwise we may not get
        # expected results
        host_session, client_sessions = start_iperf(test, vm, params)
        # Wait for some cpu to be consumed by iperf
        time.sleep(10)

        # Top to get vhost state or get cpu affinity
        if params.get("application") == "iperf":
            top_vhost(test, vhost_pids, params.get("vcpu_count", 1))
        elif params.get("application") == "affinity":
            check_cpu_affinity(test, vm, affinity_cpu_number)
    finally:
        if host_session:
            host_session.close()
        for client_session in client_sessions:
            client_session.close()
        if new_vm.is_alive():
            new_vm.destroy()
        new_vm.undefine()
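
Helpers such as prepare_vm_queue(), get_vhost_pids(), start_iperf() and check_cpu_affinity() are defined elsewhere in the test module and not shown here. As one illustration, a get_vhost_pids()-style lookup could be sketched as below, assuming vhost-net worker threads are kernel threads named "vhost-<qemu_pid>"; this is a sketch, not the test's actual implementation.

from avocado.utils import process


def get_vhost_pids(test, vm):
    """Return the pids of the vhost kernel threads serving the given vm."""
    qemu_pid = vm.get_pid()
    if not qemu_pid:
        test.fail("Could not get the qemu pid of %s" % vm.name)
    # vhost worker threads are named after the owning qemu process pid
    result = process.run("pgrep vhost-%s" % qemu_pid, ignore_status=True)
    return result.stdout_text.split()
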