Example #1
def attach_additional_device(vm_name, disksize, targetdev, params):
    """
    Create a disk of the given size, then attach it to the given vm.

    @param vm_name: Libvirt VM name.
    @param disksize: size of the attached disk
    @param targetdev: target device name of the disk
    @param params: dict of device configuration used by create_disk_xml
    """
    logging.info("Attaching disk...")
    disk_path = os.path.join(data_dir.get_tmp_dir(), targetdev)
    cmd = "qemu-img create %s %s" % (disk_path, disksize)
    status, output = commands.getstatusoutput(cmd)
    if status:
        return (False, output)

    # Update params for source file
    params['source_file'] = disk_path
    params['target_dev'] = targetdev

    # Create an xml file for the device
    xmlfile = create_disk_xml(params)

    # Make sure the target device does not already exist.
    virsh.detach_disk(vm_name, targetdev, extra="--config")

    return virsh.attach_device(domain_opt=vm_name, file_opt=xmlfile,
                               flagstr="--config", debug=True)
    def attach_disk_test():
        """
        Attach-disk testcase.
        1. Attach a disk to the guest.
        2. Perform the domblkinfo operation.
        3. Detach the disk.

        :return: Command status and output.
        """
        try:
            source_file = open(test_disk_source, 'wb')
            source_file.seek((512 * 1024 * 1024) - 1)
            source_file.write(b'0')
            source_file.close()
            virsh.attach_disk(vm_name, test_disk_source, front_dev, debug=True)
            vm_ref = vm_name
            result_source = virsh.domblkinfo(vm_ref, test_disk_source,
                                             ignore_status=True, debug=True)
            status_source = result_source.exit_status
            output_source = result_source.stdout.strip()
            if driver == "qemu":
                result_target = virsh.domblkinfo(vm_ref, front_dev,
                                                 ignore_status=True, debug=True)
                status_target = result_target.exit_status
                output_target = result_target.stdout.strip()
            else:
                status_target = 0
                output_target = "Xen doesn't support domblkinfo target!"
            virsh.detach_disk(vm_name, front_dev, debug=True)
            return status_target, output_target, status_source, output_source
        except (error.CmdError, IOError):
            return 1, "", 1, ""
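
The seek-and-write sequence above allocates a sparse 512 MiB file without
writing 512 MiB of data. A standalone sketch of the same trick, outside the
test framework:

import os

def make_sparse_file(path, size=512 * 1024 * 1024):
    # Seek to the last byte and write a single byte; the gap stays a hole
    # on filesystems with sparse-file support.
    with open(path, 'wb') as f:
        f.seek(size - 1)
        f.write(b'0')

make_sparse_file("/tmp/sparse_test.img")
print(os.path.getsize("/tmp/sparse_test.img"))  # 536870912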
Example #3
def modify_source(vm_name, target, dst_image):
    """
    Modify the domain's configuration to change its disk source.
    """
    try:
        virsh.detach_disk(vm_name, target, extra="--config",
                          ignore_status=False)
        virsh.attach_disk(vm_name, dst_image, target, extra="--config",
                          ignore_status=False)
    except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as detail:
        raise error.TestFail("Modify guest source failed: %s" % detail)
Example #4
def modify_source(vm_name, target, dst_image):
    """
    Modify the domain's configuration to change its disk source.
    """
    try:
        virsh.detach_disk(vm_name, target, extra="--config",
                          ignore_status=False)
        dst_image_format = utils_test.get_image_info(dst_image)['format']
        options = "--config --subdriver %s" % dst_image_format
        virsh.attach_disk(vm_name, dst_image, target, extra=options,
                          ignore_status=False)
    except (remote.LoginError, virt_vm.VMError,
            aexpect.ShellError) as detail:
        raise error.TestFail("Modify guest source failed: %s" % detail)
def attach_additional_disk(vm, disksize, targetdev):
    """
    Create a disk of the given size, then attach it to the given vm.

    @param vm: Libvirt VM object.
    @param disksize: size of attached disk
    @param targetdev: target of disk device
    """
    logging.info("Attaching disk...")
    disk_path = os.path.join(data_dir.get_tmp_dir(), targetdev)
    cmd = "qemu-img create %s %s" % (disk_path, disksize)
    status, output = commands.getstatusoutput(cmd)
    if status:
        return (False, output)

    # Make sure the target device does not already exist.
    virsh.detach_disk(vm.name, targetdev, extra="--config")

    attach_result = virsh.attach_disk(vm.name, disk_path, targetdev,
                                      extra="--config", debug=True)
    if attach_result.exit_status:
        return (False, attach_result)
    return (True, disk_path)
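
The helper returns a (success, payload) pair: on failure the payload is the
qemu-img output or the virsh CmdResult, on success it is the new disk path.
A hedged call-site sketch:

ok, payload = attach_additional_disk(vm, "1G", "vdb")
if ok:
    logging.info("Attached new disk image at %s", payload)
else:
    logging.error("Attach failed: %s", payload)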
Example #6
def attach_additional_device(vm_name, targetdev, disk_path, params):
    """
    Attach an existing disk image to the given vm.

    :param vm_name: Libvirt VM name.
    :param disk_path: path of the disk image to attach
    :param targetdev: target device name of the disk
    :param params: dict of device configuration used by create_disk_xml
    """
    logging.info("Attaching disk...")

    # Update params for source file
    params['source_file'] = disk_path
    params['target_dev'] = targetdev

    # Create an xml file for the device
    xmlfile = create_disk_xml(params)

    # Make sure the target device does not already exist.
    virsh.detach_disk(vm_name, targetdev, extra="--config")

    return virsh.attach_device(domain_opt=vm_name, file_opt=xmlfile,
                               flagstr="--config", debug=True)
def run_virsh_snapshot_disk(test, params, env):
    """
    Test virsh snapshot command with disks of various types.

    (1) Init the variables from params.
    (2) Create an image in the specified format.
    (3) Attach the disk to the vm.
    (4) Create a snapshot.
    (5) Revert the snapshot.
    (6) Clean up.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))

    # Get a tmp_dir.
    tmp_dir = test.tmpdir
    # Create an image.
    params['image_name'] = "snapshot_test"
    params['image_format'] = image_format
    image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
    img_path, _ = image.create(params)
    # Do the attach action.
    virsh.attach_disk(vm_name, source=img_path, target="vdf",
                      extra="--persistent --subdriver %s" % image_format)

    # Init snapshot_name
    snapshot_name = None
    try:
        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = ["<domainsnapshot>\n",
                     "<name>%s</name>\n" % snapshot_name,
                     "<description>Snapshot Test</description>\n",
                     "<memory snapshot=\'internal\'/>\n",
                     "</domainsnapshot>"]
            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            snapshot_result = virsh.snapshot_create(vm_name, ("--xmlfile %s" % snapshot_xml_path))
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
        else:
            snapshot_result = virsh.snapshot_create(vm_name)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(r"\d+",
                                      snapshot_result.stdout.strip()).group(0)

        # Touch a file in VM.
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        status, output = session.cmd_status_output("touch %s" % tmp_file_path)
        if status:
            raise error.TestFail("Touch file in vm failed. %s" % output)

        session.close()

        # Destroy vm for snapshot revert.
        virsh.destroy(vm_name)
        # Revert snapshot.
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name)
        if revert_result.exit_status:
            raise error.TestFail("Revert snapshot failed. %s" % revert_result.stderr.strip())

        if not vm.is_alive():
            raise error.TestFail("Revert snapshot failed.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert: if `cat` succeeds, the file survived,
        # meaning the disk was not rolled back.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        image.remove()
        if snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name)
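
The numeric snapshot name parsed from the snapshot_create output can be
cross-checked against the domain's snapshot list. A small sketch, assuming
virsh.snapshot_list() returns the snapshot names as in avocado-vt:

from virttest import virsh

# Hypothetical post-check: the parsed name should appear in snapshot-list.
names = virsh.snapshot_list(vm_name)
if snapshot_name not in names:
    raise error.TestFail("Snapshot %s not listed." % snapshot_name)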
def run(test, params, env):
    """
    Test command: virsh domblklist.
    1. Prepare the test environment.
    2. Run domblklist and check the output.
    3. Attach a disk and rerun domblklist with the check.
    4. Clean up the test environment.
    """

    def domblklist_test():
        """
        Run domblklist and check result, raise error if check fail.
        """
        disk_info_list = []
        output_disk_info = {}
        output_disk_info_list = []
        result = virsh.domblklist(vm_ref, options,
                                  ignore_status=True, debug=True)
        status = result.exit_status
        output = result.stdout.strip()

        # Check status_error
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command!")
        elif status_error == "no":
            if status == 1:
                test.fail("Run failed with right command")
            # Check disk information.
            disk_info = get_disk_info(vm_name, options)
            logging.debug("The disk info dict from xml is: %s" % disk_info)

            output_list = output.split('\n')
            for i in range(2, len(output_list)):
                output_disk_info[i - 2] = output_list[i].split()
            logging.debug("The disk info dict from command output is: %s"
                          % output_disk_info)

            for (k, v) in list(iteritems(output_disk_info)):
                output_disk_info_list.append(v)

            if "--details" in options:
                for (k, v) in list(iteritems(disk_info)):
                    disk_info_list.append(v)
            else:
                for (k, v) in list(iteritems(disk_info)):
                    disk_info_list.append(v[2:])

            disk_info_list.sort()
            logging.debug("The disk info list from xml is: %s" % disk_info_list)
            output_disk_info_list.sort()
            logging.debug("The disk info list from command output is: %s"
                          % output_disk_info_list)

            if disk_info_list != output_disk_info_list:
                test.fail("The output did not match with disk"
                          " info from xml")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Get all parameters from configuration.
    vm_ref = params.get("domblklist_vm_ref")
    options = params.get("domblklist_options", "")
    info_options = params.get("info_options", "")
    status_error = params.get("status_error", "no")
    front_dev = params.get("domblkinfo_front_dev", "vdd")
    test_attach_disk = os.path.join(test.virtdir, "tmp.img")
    domblkinfo = params.get("domblkinfo", "no")
    extra = ""

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vm_state = vm.state()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # run domblklist and check
    domblklist_test()

    # Test domblkinfo function as well
    if domblkinfo == "yes":
        ret = virsh.domblklist(vm_ref, options,
                               ignore_status=True, debug=True)
        target_disks = re.findall(r"[vs]d[a-z]", ret.stdout)
        if info_options == "":
            check_list = ["Capacity", "Allocation", "Physical"]
            ret2 = virsh.domblkinfo(vm_ref, target_disks[0])
        elif info_options == "--human":
            check_list = ["Capacity", "Allocation", "Physical", "GiB"]
            cmd = "virsh domblkinfo %s %s %s" % (vm_ref, target_disks[0], info_options)
            ret2 = process.run(cmd, shell=True, ignore_status=True)
        for check in check_list:
            if not re.search(check, results_stdout_52lts(ret2)):
                test.fail("Cmd domblkinfo run failed")

    if status_error == "no":
        try:
            # attach disk and check
            with open(test_attach_disk, 'wb') as source_file:
                source_file.seek((512 * 1024 * 1024) - 1)
                source_file.write(str(0).encode())
            # Since bug 1049529, --config also works with detach while the
            # domain is running, so set it back using --config here.
            if "--inactive" in options or vm_state == "shut off":
                extra = "--config"
            virsh.attach_disk(vm_name, test_attach_disk, front_dev, extra,
                              debug=True)
            domblklist_test()
        finally:
            virsh.detach_disk(vm_name, front_dev, extra, debug=True)
            if os.path.exists(test_attach_disk):
                os.remove(test_attach_disk)
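
domblklist_test() skips the first two lines of the command output (the column
header and the separator) before splitting each row into fields. The parsing
step, reduced to plain Python with illustrative output:

output = """Target   Source
------------------------------------------------
vda      /var/lib/libvirt/images/avocado-vt-vm1.qcow2
vdd      /var/test/tmp.img"""

rows = [line.split() for line in output.splitlines()[2:] if line.strip()]
print(rows)
# [['vda', '/var/lib/libvirt/images/avocado-vt-vm1.qcow2'],
#  ['vdd', '/var/test/tmp.img']]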
def run(test, params, env):
    """
    Test migration of multi vms.
    """
    vm_names = params.get("migrate_vms").split()
    if len(vm_names) < 2:
        raise exceptions.TestSkipError("Test requires at least two VMs.")

    # Prepare parameters
    method = params.get("virsh_migrate_method")
    jobabort = "yes" == params.get("virsh_migrate_jobabort", "no")
    options = params.get("virsh_migrate_options", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM")
    local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM")
    host_user = params.get("host_user", "root")
    host_passwd = params.get("host_password", "PASSWORD")
    nfs_shared_disk = params.get("nfs_shared_disk", True)
    migration_type = params.get("virsh_migration_type", "simultaneous")
    migrate_timeout = int(params.get("virsh_migrate_thread_timeout", 900))
    migration_time = int(params.get("virsh_migrate_timeout", 60))

    # Params for NFS and SSH setup
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_user"] = "******"
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_user"] = "******"
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")
    desturi = libvirt_vm.get_uri_with_transport(transport="ssh",
                                                dest_ip=remote_host)
    srcuri = libvirt_vm.get_uri_with_transport(transport="ssh",
                                               dest_ip=local_host)

    # Don't allow the defaults.
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise exceptions.TestSkipError("The srcuri '%s' is invalid" % srcuri)
    if desturi.count('///') or desturi.count('EXAMPLE'):
        raise exceptions.TestSkipError("The desturi '%s' is invalid" % desturi)

    # Config ssh autologin for remote host
    ssh_key.setup_ssh_key(remote_host, host_user, host_passwd, port=22)

    # Prepare local session and remote session
    localrunner = remote.RemoteRunner(host=remote_host, username=host_user,
                                      password=host_passwd)
    remoterunner = remote.RemoteRunner(host=remote_host, username=host_user,
                                       password=host_passwd)
    # Configure NFS in remote host
    if nfs_shared_disk:
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

    # Prepare MigrationHelper instance
    vms = []
    for vm_name in vm_names:
        vm = env.get_vm(vm_name)
        vms.append(vm)

    try:
        option = make_migration_options(method, options, migration_time)

        # make sure cache=none
        if "unsafe" not in options:
            device_target = params.get("virsh_device_target", "sda")
            for vm in vms:
                if vm.is_alive():
                    vm.destroy()
            for each_vm in vm_names:
                logging.info("configure cache=none")
                vmxml = vm_xml.VMXML.new_from_dumpxml(each_vm)
                device_source = str(vmxml.get_disk_attr(each_vm, device_target,
                                                        'source', 'file'))
                ret_detach = virsh.detach_disk(each_vm, device_target,
                                               "--config")
                status = ret_detach.exit_status
                output = ret_detach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                if status:
                    raise exceptions.TestError("Failed to detach disk.")

                subdriver = utils_test.get_image_info(device_source)['format']
                ret_attach = virsh.attach_disk(each_vm, device_source,
                                               device_target, "--driver qemu "
                                               "--config --cache none "
                                               "--subdriver %s" % subdriver)
                status = ret_attach.exit_status
                output = ret_attach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                if status:
                    raise exceptions.TestError("Failed to attach disk.")

        for vm in vms:
            if vm.is_dead():
                vm.start()
                vm.wait_for_login()
        multi_migration(vms, srcuri, desturi, option, migration_type,
                        migrate_timeout, jobabort, lrunner=localrunner,
                        rrunner=remoterunner)
    except Exception as info:
        logging.error("Test failed: %s" % info)
        flag_migration = False
Example #10
    # Get expected cache state for test
    attach_scsi_disk = "yes" == params.get("attach_scsi_disk", "no")
    disk_cache = params.get("virsh_migrate_disk_cache", "none")
    unsafe_test = False
    if options.count("unsafe") and disk_cache != "none":
        unsafe_test = True

    exception = False
    try:
        # Change the disk of the vm to shared disk
        if vm.is_alive():
            vm.destroy(gracefully=False)

        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm_name, device, "--config",
                                         debug=True)
            if s_detach.exit_status != 0:
                logging.error("Detach %s failed before test.", device)

        subdriver = utils_test.get_image_info(shared_storage)['format']
        extra_attach = ("--config --driver qemu --subdriver %s --cache %s"
                        % (subdriver, disk_cache))
        s_attach = virsh.attach_disk(vm_name, shared_storage, "vda",
                                     extra_attach, debug=True)
        if s_attach.exit_status != 0:
            logging.error("Attach vda failed before test.")

        # Attach a scsi device for special testcases
        if attach_scsi_disk:
            shared_dir = os.path.dirname(shared_storage)
            scsi_disk = "%s/scsi_test.img" % shared_dir
def run(test, params, env):
    """
    Test storage pool and volumes with applications such as:
    installing vms, attaching volumes to vms, etc.
    """
    pool_type = params.get("pool_type")
    pool_name = "test_%s_app" % pool_type
    pool_target = params.get("pool_target")
    emulated_img = params.get("emulated_image", "emulated-image")
    volume_count = int(params.get("volume_count", 1))
    volume_size = params.get("volume_size", "1G")
    emulated_size = "%sG" % (volume_count * int(volume_size[:-1]) + 1)
    application = params.get("application", "install")
    disk_target = params.get("disk_target", "vdb")
    test_message = params.get("test_message", "")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    block_device = params.get("block_device", "/DEV/EXAMPLE")
    if application == "install":
        cdrom_path = os.path.join(data_dir.get_data_dir(),
                                  params.get("cdrom_cd1"))
        if not os.path.exists(cdrom_path):
            raise error.TestNAError("Can't find installation cdrom:%s"
                                    % cdrom_path)
        # Get a nonexistent domain name
        vm_name = "vol_install_test"

    try:
        pvtest = utlv.PoolVolumeTest(test, params)
        pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                        image_size=emulated_size, pre_disk_vol=[volume_size],
                        device_name=block_device)

        logging.debug("Current pools:\n%s",
                      libvirt_storage.StoragePool().list_pools())

        new_pool = libvirt_storage.PoolVolume(pool_name)
        if pool_type == "disk":
            volumes = new_pool.list_volumes()
            logging.debug("Current volumes:%s", volumes)
        else:
            volumes = create_volumes(new_pool, volume_count, volume_size)
        if application == "attach":
            vm = env.get_vm(vm_name)
            session = vm.wait_for_login()
            virsh.attach_disk(vm_name,
                              list(volumes.values())[volume_count - 1],
                              disk_target)
            vm_attach_device = "/dev/%s" % disk_target
            if session.cmd_status("which parted"):
                # No parted command, check device only
                if session.cmd_status("ls %s" % vm_attach_device):
                    raise error.TestFail("Didn't find attached device:%s"
                                         % vm_attach_device)
                return
            # Test if attached disk can be used normally
            utlv.mk_part(vm_attach_device, session=session)
            session.cmd("mkfs.ext4 %s1" % vm_attach_device)
            session.cmd("mount %s1 /mnt" % vm_attach_device)
            session.cmd("echo %s > /mnt/test" % test_message)
            output = session.cmd_output("cat /mnt/test").strip()
            if output != test_message:
                raise error.TestFail("%s cannot be used normally!"
                                     % vm_attach_device)
        elif application == "install":
            # Get a nonexistent domain name anyway
            while virsh.domain_exists(vm_name):
                vm_name += "_test"
            # Prepare installation parameters
            params["main_vm"] = vm_name
            vm = env.create_vm("libvirt", None, vm_name, params,
                               test.bindir)
            env.register_vm(vm_name, vm)
            params["image_name"] = volumes.values()[volume_count - 1]
            params["image_format"] = "raw"
            params['force_create_image'] = "yes"
            params['remove_image'] = "yes"
            params['shutdown_cleanly'] = "yes"
            params['shutdown_cleanly_timeout'] = 120
            params['guest_port_unattended_install'] = 12323
            params['inactivity_watcher'] = "error"
            params['inactivity_treshold'] = 1800
            params['image_verify_bootable'] = "no"
            params['unattended_delivery_method'] = "cdrom"
            params['drive_index_unattended'] = 1
            params['drive_index_cd1'] = 2
            params['boot_once'] = "d"
            params['medium'] = "cdrom"
            params['wait_no_ack'] = "yes"
            params['image_raw_device'] = "yes"
            params['backup_image_before_testing'] = "no"
            params['kernel_params'] = ("ks=cdrom nicdelay=60 "
                                       "console=ttyS0,115200 console=tty0")
            params['cdroms'] = "unattended cd1"
            params['redirs'] += " unattended_install"
            selinux_mode = None
            try:
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")
                try:
                    unattended_install.run(test, params, env)
                except process.CmdError as detail:
                    raise error.TestFail("Guest install failed:%s" % detail)
            finally:
                if selinux_mode is not None:
                    utils_selinux.set_status(selinux_mode)
                env.unregister_vm(vm_name)
    finally:
        try:
            if application == "install":
                if virsh.domain_exists(vm_name):
                    virsh.remove_domain(vm_name)
            elif application == "attach":
                virsh.detach_disk(vm_name, disk_target)
        finally:
            pvtest.cleanup_pool(pool_name, pool_type,
                                pool_target, emulated_img,
                                device_name=block_device)
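
create_volumes() is referenced above but not shown. A minimal sketch of what
it might do, assuming PoolVolume exposes create/path helpers (the method names
below are assumptions, not a documented API):

def create_volumes(new_pool, volume_count, volume_size):
    # Hypothetical: create N volumes in the pool and return a {name: path}
    # dict, matching what the 'attach' and 'install' branches index into.
    volumes = {}
    for i in range(volume_count):
        vol_name = "volume_%s" % i
        if new_pool.create_volume(vol_name, volume_size):  # assumed helper
            volumes[vol_name] = new_pool.volume_path(vol_name)  # assumed helper
    return volumes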
Example #12
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm objects corresponding to the domain
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        tmpdir = data_dir.get_tmp_dir()
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)
        print(dom.name)
        try:
            for event in events_list:
                if event in ['start', 'restore']:
                    if dom.is_alive():
                        dom.destroy()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                if event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "detach-disk":
                    if not os.path.exists(new_disk):
                        open(new_disk, 'a').close()
                    # Attach the disk first; this event will not be caught
                    virsh.attach_disk(dom.name, new_disk, 'vdb', **virsh_dargs)
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                else:
                    raise error.TestError("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
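
A hedged sketch of driving trigger_events(): collect events with a background
`virsh event --loop --all` session, fire the events, then substitute the
domain name into each expected pattern before matching:

# Hypothetical driver; event collection is assumed to run in the background.
expected = trigger_events(vm, ['start', 'save', 'restore', 'destroy'])
for name, pattern in expected:
    print(pattern % name if '%s' in pattern else pattern)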
Example #13
def run(test, params, env):
    """
    Test virsh snapshot command with disks of various types.

    (1) Init the variables from params.
    (2) Create an image in the specified format.
    (3) Attach the disk to the vm.
    (4) Create a snapshot.
    (5) Revert the snapshot.
    (6) Clean up.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")

    # Set the volume xml attribute dictionary: extract all params starting
    # with 'vol_' (used to build the volume xml), except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = [
        "dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster"
    ]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            raise error.TestNAError("%s not in support list %s" %
                                    (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Get a tmp dir
    tmp_dir = data_dir.get_tmp_dir()
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if snapshot_with_pool:
            # Create a dst pool in which to create the volume to attach
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes via
                # libvirt. A logical pool can create volumes via libvirt, but
                # the volume format is not configurable and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                if vols:
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s", pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = utils.run(cmd, ignore_status=True)
                    if cmd_result.exit_status:
                        raise error.TestNAError("Failed to format volume, %s" %
                                                cmd_result.stdout.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create an image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        # Do the attach action.
        out = utils.run("qemu-img info %s" % img_path)
        logging.debug("The img info is:\n%s" % out.stdout.strip())
        result = virsh.attach_disk(vm_name,
                                   source=img_path,
                                   target="vdf",
                                   extra=extra,
                                   debug=True)
        if result.exit_status:
            raise error.TestNAError("Failed to attach disk %s to VM."
                                    "Detail: %s." % (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = [
                "<domainsnapshot>\n",
                "<name>%s</name>\n" % snapshot_name,
                "<description>Snapshot Test</description>\n"
            ]
            if snapshot_memory == "external":
                memory_external = os.path.join(tmp_dir, "snapshot_memory")
                snapshot_external_disk.append(memory_external)
                lines.append("<memory snapshot=\'%s\' file='%s'/>\n" %
                             (snapshot_memory, memory_external))
            else:
                lines.append("<memory snapshot='%s'/>\n" % snapshot_memory)

            # Add all disks into xml file.
            disks = vm.get_disk_devices().values()
            lines.append("<disks>\n")
            for disk in disks:
                lines.append("<disk name='%s' snapshot='%s'>\n" %
                             (disk['source'], snapshot_disk))
                if snapshot_disk == "external":
                    snap_path = "%s.snap" % os.path.basename(disk['source'])
                    disk_external = os.path.join(tmp_dir, snap_path)
                    snapshot_external_disk.append(disk_external)
                    lines.append("<source file='%s'/>\n" % disk_external)
                lines.append("</disk>\n")
            lines.append("</disks>\n")
            lines.append("</domainsnapshot>")

            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            logging.debug("The xml content for snapshot create is:")
            with open(snapshot_xml_path, 'r') as fin:
                logging.debug(fin.read())

            options += " --xmlfile %s " % snapshot_xml_path
            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search(
                            "live disk snapshot not supported with this QEMU binary",
                            out_err):
                        raise error.TestNAError(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As of commit d2e668e in 1.2.5, an internal active
                        # snapshot without memory state is rejected. Handle it
                        # as SKIP for now. This could be supported in the
                        # future via bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search(
                                "internal snapshot of a running VM" +
                                " must include the memory state", out_err):
                            raise error.TestNAError("Check Bug #1083345, %s" %
                                                    out_err)
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name, options)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." %
                        snapshot_result.stderr.strip())
            snapshot_name = re.search(r"\d+",
                                      snapshot_result.stdout.strip()).group(0)
            if snapshot_current:
                lines = [
                    "<domainsnapshot>\n",
                    "<description>Snapshot Test</description>\n",
                    "<state>running</state>\n",
                    "<creationTime>%s</creationTime>" % snapshot_name,
                    "</domainsnapshot>"
                ]
                snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
                snapshot_xml_file = open(snapshot_xml_path, "w")
                snapshot_xml_file.writelines(lines)
                snapshot_xml_file.close()
                logging.debug("The xml content for snapshot create is:")
                with open(snapshot_xml_path, 'r') as fin:
                    logging.debug(fin.read())
                options += "--redefine %s --current" % snapshot_xml_path
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                raise error.TestFail("Success to create snapshot in negative"
                                     " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            raise error.TestFail("'%s' run failed with '%s'" %
                                 (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # Destroy vm for snapshot revert.
        if not libvirt_version.version_compare(1, 2, 3):
            virsh.destroy(vm_name)
        # Revert snapshot.
        revert_options = ""
        if snapshot_revert_paused:
            revert_options += " --paused"
        revert_result = virsh.snapshot_revert(vm_name,
                                              snapshot_name,
                                              revert_options,
                                              debug=True)
        if revert_result.exit_status:
            # As commit d410e6f for libvirt 1.2.3, attempts to revert external
            # snapshots will FAIL with an error "revert to external snapshot
            # not supported yet". Thus, let's check for that and handle as a
            # SKIP for now. Check bug:
            # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
            if libvirt_version.version_compare(1, 2, 3):
                if re.search("revert to external snapshot not supported yet",
                             revert_result.stderr):
                    raise error.TestNAError(revert_result.stderr.strip())
            else:
                raise error.TestFail("Revert snapshot failed. %s" %
                                     revert_result.stderr.strip())

        if vm.is_dead():
            raise error.TestFail("Revert snapshot failed.")

        if snapshot_revert_paused:
            if vm.is_paused():
                vm.resume()
            else:
                raise error.TestFail("Revert command successed, but VM is not "
                                     "paused after reverting with --paused"
                                     "  option.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("After revert cat file output='%s'", output)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

        # Test deleting the snapshot without "--metadata"; deleting an
        # external disk snapshot will fail for now. Only do this when the
        # snapshot create succeeded, which is filtered in the cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name,
                                                   snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        raise error.TestFail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            raise error.TestFail(
                                "Snapshot xml file %s missing" % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        raise error.TestFail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s still "
                                                 "exists" % snap_xml_path)

    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")
        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except error.TestFail as detail:
                logging.error(str(detail))
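
For orientation, with snapshot_memory and snapshot_disk both set to "external"
the snapshot XML assembled above takes roughly this shape (paths illustrative):

<domainsnapshot>
  <name>snapshot_test</name>
  <description>Snapshot Test</description>
  <memory snapshot='external' file='/tmp/snapshot_memory'/>
  <disks>
    <disk name='/var/lib/libvirt/images/vm1.img' snapshot='external'>
      <source file='/tmp/vm1.img.snap'/>
    </disk>
  </disks>
</domainsnapshot>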
Example #14
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm objects corresponding to the domain
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        print(dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                if event in [
                        'start', 'restore', 'create', 'define', 'undefine'
                ]:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName(
                        "description")
                    if find_desc == []:
                        # If it does not exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config",
                                   "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished since qemu 2.9.0-9
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(
                            dom.name, "''", target_device,
                            ("--type cdrom --sourcetype file --driver qemu " +
                             "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")

                else:
                    test.error("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
Example #15
    # Create virtual device file.
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            raise error.TestNAError("Can not get iscsi device name in host")
    else:
        create_device_file(device_source)

    if vm.is_alive():
        vm.destroy(gracefully=False)

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        virsh.detach_disk(vm_name,
                          device_target,
                          "--config",
                          ignore_status=True)

    device_xml = create_device_xml(params, test.virtdir, device_source)
    if not no_attach:
        s_attach = virsh.attach_device(vm_name, device_xml,
                                       flagstr="--config").exit_status
        if s_attach != 0:
            logging.error("Attach device failed before testing detach-device")

    vm.start()
    vm.wait_for_login()

    # Add acpiphp module before testing if VM's os type is rhel5.*
    if device in ['disk', 'cdrom']:
        if not acpiphp_module_modprobe(vm, os_type):
Example #16
    def trigger_events(dom, events_list=[]):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :return: the expected output that virsh event command prints out
        """
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        print(dom.name)
        xmlfile = dom.backup_xml()

        try:
            for event in events_list:
                if event in [
                        'start', 'restore', 'create', 'edit', 'define',
                        'undefine', 'crash'
                ]:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    # Check whether the 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName(
                        "description")
                    if find_desc == []:
                        # If it does not exist, add one.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config",
                                   "Added desc for testvm", **virsh_dargs)
                    # The edit operation deletes the 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
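                    # ':g/<pattern>/d' is a vi/ex global-delete command:
                    # virsh edit opens the domain XML in the editor and this
                    # removes every line matching the <description> element.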
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished since qemu 2.9.0-9
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=60)
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    ifaces = vmxml.devices.by_device_tag('interface')
                    if ifaces:
                        iface_xml_obj = ifaces[0]
                        iface_xml_obj.del_address()
                        logging.debug(iface_xml_obj)
                    else:
                        test.error('No interface in vm to be detached.')

                    virsh.detach_device(dom.name,
                                        iface_xml_obj.xml,
                                        wait_remove_event=True,
                                        event_timeout=60,
                                        **virsh_dargs)
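                    # wait_remove_event/event_timeout ask the wrapper to wait
                    # (up to 60s here) for the device-removed event before
                    # returning, so the next attach does not race the detach.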
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    virsh.attach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(
                            dom.name, "''", target_device,
                            ("--type cdrom --sourcetype file --driver qemu " +
                             "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
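                    # Note: '%' binds tighter than '+', so only the middle
                    # literal is formatted with device_target_bus; the leading
                    # '%s' is left for the domain name filled in by the caller.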
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")

                else:
                    test.error("Unsupported event: %s" % event)
                # Event may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
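The %s placeholders left in every expected string are deliberate:
trigger_events() returns (domain_name, expected_string) pairs so that the
caller can substitute the name and match against "virsh event --loop" output.
A minimal sketch of such a checker (check_events and its argument names are
hypothetical, not part of the examples above):

import re

def check_events(virsh_event_output, expected_pairs):
    """Verify each expected event appears in the virsh event output."""
    for dom_name, expected in expected_pairs:
        # Fill the domain name into the '%s' placeholder; some entries
        # (e.g. the tray-change ones) also carry regex fragments, so use
        # re.search rather than a plain substring test.
        pattern = expected % dom_name
        if not re.search(pattern, virsh_event_output):
            raise AssertionError("Event not found: %s" % pattern)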
                disk_format=device_source_format)
        else:
            device_source = device_source_name

    # If we are testing audit, we need to start the auditd service first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        s_detach = virsh.detach_disk(vm_name, device_target,
                                     "--config").exit_status
        if s_detach != 0:
            logging.error("Detach hdc failed before test.")

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_attach = virsh.attach_disk(vm_name, device_source, device_target,
                                     "--driver qemu --config").exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")

        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            device_source = libvirt.create_local_disk(
                "file",
Example #18
def run(test, params, env):
    """
    Test storage pool and volumes with applications such as:
    installing VMs, attaching volumes to VMs, ...
    """
    pool_type = params.get("pool_type")
    pool_name = "test_%s_app" % pool_type
    pool_target = params.get("pool_target")
    emulated_img = params.get("emulated_image", "emulated-image")
    volume_count = int(params.get("volume_count", 1))
    volume_size = params.get("volume_size", "1G")
    emulated_size = "%sG" % (volume_count * int(volume_size[:-1]) + 1)
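    # e.g. volume_count=2 and volume_size="1G" give emulated_size="3G":
    # room for every volume plus one extra GiB of headroom.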
    application = params.get("application", "install")
    disk_target = params.get("disk_target", "vdb")
    test_message = params.get("test_message", "")
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    block_device = params.get("block_device", "/DEV/EXAMPLE")
    if application == "install":
        cdrom_path = os.path.join(data_dir.get_data_dir(),
                                  params.get("cdrom_cd1"))
        if not os.path.exists(cdrom_path):
            raise error.TestNAError("Can't find installation cdrom:%s" %
                                    cdrom_path)
        # Get a nonexistent domain name
        vm_name = "vol_install_test"

    try:
        pvtest = utlv.PoolVolumeTest(test, params)
        pvtest.pre_pool(pool_name,
                        pool_type,
                        pool_target,
                        emulated_img,
                        image_size=emulated_size,
                        pre_disk_vol=[volume_size],
                        device_name=block_device)

        logging.debug("Current pools:\n%s",
                      libvirt_storage.StoragePool().list_pools())

        new_pool = libvirt_storage.PoolVolume(pool_name)
        if pool_type == "disk":
            volumes = new_pool.list_volumes()
            logging.debug("Current volumes:%s", volumes)
        else:
            volumes = create_volumes(new_pool, volume_count, volume_size)
        if application == "attach":
            vm = env.get_vm(vm_name)
            session = vm.wait_for_login()
            virsh.attach_disk(vm_name,
                              volumes.values()[volume_count - 1],
                              disk_target,
                              extra="--subdriver raw")
            vm_attach_device = "/dev/%s" % disk_target
            if session.cmd_status("which parted"):
                # No parted command, check device only
                if session.cmd_status("ls %s" % vm_attach_device):
                    raise error.TestFail("Didn't find attached device:%s" %
                                         vm_attach_device)
                return
            # Test if attached disk can be used normally
            time.sleep(10)  # Need seconds for the new disk to be recognized
            utlv.mk_part(vm_attach_device, session=session)
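            # The commands below address the first partition created by
            # mk_part ("%s1", e.g. /dev/vdb1).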
            session.cmd("mkfs.ext4 %s1" % vm_attach_device)
            session.cmd("mount %s1 /mnt" % vm_attach_device)
            session.cmd("echo %s > /mnt/test" % test_message)
            output = session.cmd_output("cat /mnt/test").strip()
            if output != test_message:
                raise error.TestFail("%s cannot be used normally!" %
                                     vm_attach_device)
        elif application == "install":
            # Get a nonexistent domain name anyway
            while virsh.domain_exists(vm_name):
                vm_name += "_test"
            # Prepare installation parameters
            params["main_vm"] = vm_name
            vm = env.create_vm("libvirt", None, vm_name, params, test.bindir)
            env.register_vm(vm_name, vm)
            params["image_name"] = volumes.values()[volume_count - 1]
            params["image_format"] = "raw"
            params['force_create_image'] = "yes"
            params['remove_image'] = "yes"
            params['shutdown_cleanly'] = "yes"
            params['shutdown_cleanly_timeout'] = 120
            params['guest_port_unattended_install'] = 12323
            params['inactivity_watcher'] = "error"
            params['inactivity_treshold'] = 1800
            params['image_verify_bootable'] = "no"
            params['unattended_delivery_method'] = "cdrom"
            params['drive_index_unattended'] = 1
            params['drive_index_cd1'] = 2
            params['boot_once'] = "d"
            params['medium'] = "cdrom"
            params['wait_no_ack'] = "yes"
            params['image_raw_device'] = "yes"
            params['backup_image_before_testing'] = "no"
            params['kernel_params'] = ("ks=cdrom nicdelay=60 "
                                       "console=ttyS0,115200 console=tty0")
            params['cdroms'] = "unattended cd1"
            params['redirs'] += " unattended_install"
            selinux_mode = None
            try:
                selinux_mode = utils_selinux.get_status()
                utils_selinux.set_status("permissive")
                try:
                    unattended_install.run(test, params, env)
                except process.CmdError, detail:
                    raise error.TestFail("Guest install failed:%s" % detail)
            finally:
                if selinux_mode is not None:
                    utils_selinux.set_status(selinux_mode)
                env.unregister_vm(vm_name)
    finally:
        try:
            if application == "install":
                if virsh.domain_exists(vm_name):
                    virsh.remove_domain(vm_name)
            elif application == "attach":
                virsh.detach_disk(vm_name, disk_target)
        finally:
            pvtest.cleanup_pool(pool_name,
                                pool_type,
                                pool_target,
                                emulated_img,
                                device_name=block_device)
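The create_volumes() helper called above is defined elsewhere in the test
module. A rough sketch of what its call sites imply (the body below is an
assumption, not the actual helper; create_volume/list_volumes are the
PoolVolume wrappers already used above):

def create_volumes(new_pool, volume_count, volume_size):
    """Create volume_count volumes and return a {name: path} dict."""
    created = {}
    for count in range(1, volume_count + 1):
        vol_name = "volume%s" % count
        if not new_pool.create_volume(vol_name, volume_size):
            raise error.TestFail("Create volume %s failed." % vol_name)
        # list_volumes() is assumed to map volume names to their paths.
        created[vol_name] = new_pool.list_volumes()[vol_name]
    return created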
Example #19
def run(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1).Init variables for test.
    (2).Create a image to attached to VM.
    (3).Attach disk.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {
        'type': sec_type,
        'model': sec_model,
        'label': sec_label,
        'relabel': sec_relabel
    }
    # Get variables about pool vol
    with_pool_vol = 'yes' == params.get("with_pool_vol", "no")
    check_cap_rawio = "yes" == params.get("check_cap_rawio", "no")
    virt_use_nfs = params.get("virt_use_nfs", "off")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about the image.
    img_label = params.get('svirt_attach_disk_disk_label')
    enable_namespace = 'yes' == params.get('enable_namespace', 'no')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in list(disks.values()):
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)

    pvt = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # set qemu conf
        if check_cap_rawio:
            qemu_conf.user = '******'
            qemu_conf.group = 'root'
            logging.debug("the qemu.conf content is: %s" % qemu_conf)
            libvirtd.restart()

        # Set the context of the VM.
        vmxml.set_seclabel([sec_dict])
        vmxml.sync()
        logging.debug("the domain xml is: %s" % vmxml.xmltreefile)

        if with_pool_vol:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            logging.debug("pool_type %s" % pool_type)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes
                # through libvirt. A logical pool can, but the volume
                # format is not supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                vol_arg = {
                    'name': vol_name,
                    'format': vol_format,
                    'capacity': 1073741824,
                    'allocation': 1048576,
                }
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["iscsi", "disk"]:
                extra = "--driver qemu --targetbus scsi --persistent"
                if pool_type == "iscsi":
                    extra = extra + " --type lun --rawio"
                else:
                    if not enable_namespace:
                        qemu_conf.namespaces = ''
                        logging.debug("the qemu.conf content is: %s" %
                                      qemu_conf)
                        libvirtd.restart()
                    extra = extra + " --type disk"
            else:
                extra = "--persistent --subdriver qcow2"

            # Set host_sestatus again, as an nfs pool will reset it
            utils_selinux.set_status(host_sestatus)
            # set virt_use_nfs
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                                 shell=True)
            if result.exit_status:
                test.cancel("Failed to set virt_use_nfs value")
        else:
            # Init a QemuImg instance.
            params['image_name'] = img_name
            tmp_dir = data_dir.get_tmp_dir()
            image = qemu_storage.QemuImg(params, tmp_dir, img_name)
            # Create an image.
            img_path, result = image.create(params)
            # Set the context of the image.
            utils_selinux.set_context_of_file(filename=img_path,
                                              context=img_label)
            extra = "--persistent"

        # Do the attach action.
        result = virsh.attach_disk(vm_name,
                                   source=img_path,
                                   target="vdf",
                                   extra=extra,
                                   debug=True)
        logging.debug(VMXML.new_from_inactive_dumpxml(vm_name))
        if result.exit_status:
            test.fail("Failed to attach disk %s to VM."
                      "Detail: %s." % (img_path, result.stderr))

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                test.fail('Test succeeded in negative case.')

            if check_cap_rawio:
                cap_list = ['CapPrm', 'CapEff', 'CapBnd']
                cap_dict = {}
                pid = vm.get_pid()
                pid_status_path = "/proc/%s/status" % pid
                with open(pid_status_path) as f:
                    for line in f:
                        val_list = line.split(":")
                        if val_list[0] in cap_list:
                            cap_dict[val_list[0]] = int(
                                val_list[1].strip(), 16)

                # Bitwise AND with the rawio capability value to check
                # that cap_sys_rawio is set
                cap_rawio_val = 0x0000000000020000
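                # CAP_SYS_RAWIO is capability number 17, so its bitmask
                # is 1 << 17 == 0x20000.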
                for i in cap_list:
                    if not cap_rawio_val & cap_dict[i]:
                        err_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        err_msg += " lack cap_sys_rawio capabilities"
                        test.fail(err_msg)
                    else:
                        inf_msg = "vm process with %s: 0x%x" % (i, cap_dict[i])
                        inf_msg += " have cap_sys_rawio capabilities"
                        logging.debug(inf_msg)
            if pool_type == "disk":
                if libvirt_version.version_compare(3, 1,
                                                   0) and enable_namespace:
                    vm_pid = vm.get_pid()
                    output = process.system_output(
                        "nsenter -t %d -m -- ls -lZ %s" % (vm_pid, img_path))
                else:
                    output = process.system_output('ls -lZ %s' % img_path)
                logging.debug("The default label is %s", default_label)
                logging.debug("The label after guest started is %s",
                              to_text(output.strip().split()[-2]))
                if default_label not in to_text(output.strip().split()[-2]):
                    test.fail("The label is wrong after guest started\n")
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                test.fail("Test failed in positive case." "error: %s" % e)

        try:
            virsh.detach_disk(vm_name,
                              target="vdf",
                              extra="--persistent",
                              debug=True)
        except process.CmdError:
            test.fail("Detach disk 'vdf' from VM %s failed." % vm.name)
    finally:
        # clean up
        vm.destroy()
        if not with_pool_vol:
            image.remove()
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except exceptions.TestFail as detail:
                logging.error(str(detail))
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
        if check_cap_rawio:
            qemu_conf.restore()
            libvirtd.restart()
def run(test, params, env):
    """
    Test virsh snapshot command with disks of various types.

    (1). Init the variables from params.
    (2). Create an image in a specific format.
    (3). Attach disk to vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). Cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no")

    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image", "emulated-image")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")
    export_options = params.get("export_options", "rw,no_root_squash")

    # Set volume xml attribute dictionary: extract all params starting
    # with 'vol_', which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in list(params.keys()):
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = ["dir", "fs", "netfs", "logical", "iscsi",
                           "disk", "gluster"]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            test.cancel("%s not in support list %s" %
                        (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")
    no_memory_snap = "yes" == params.get("no_memory_snap", "no")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed" or image_format == "qed":
            test.cancel("QED support changed, check bug: "
                        "https://bugzilla.redhat.com/show_bug.cgi"
                        "?id=731570")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 was closed as WONTFIX, the support only
        # exists on 1.2.7 and higher
        if disk_source_protocol == 'gluster':
            test.cancel("Snapshot on glusterfs not support in "
                        "current version. Check more info with "
                        "https://bugzilla.redhat.com/buglist.cgi?"
                        "bug_id=1017289,1032370")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Path where libvirt keeps the domain's snapshot xml files
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if replace_vm_disk:
            utlv.set_vm_disk(vm, params, tmp_dir)
            if multi_gluster_disks:
                new_params = params.copy()
                new_params["pool_name"] = "gluster-pool2"
                new_params["vol_name"] = "gluster-vol2"
                new_params["disk_target"] = "vdf"
                new_params["image_convert"] = 'no'
                utlv.set_vm_disk(vm, new_params, tmp_dir)

        if snapshot_with_pool:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, image_size="1G",
                         pre_disk_vol=["20M"],
                         source_name=vol_name,
                         export_options=export_options)

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes
                # through libvirt. A logical pool can, but the volume
                # format is not supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = list(pv.list_volumes().keys())
                if vols:
                    vol_name = vols[0]
                else:
                    test.cancel("No volume in pool: %s" % pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" % newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    test.cancel("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                test.cancel("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = process.run(cmd, ignore_status=True, shell=True)
                    if cmd_result.exit_status:
                        test.cancel("Failed to format volume, %s" %
                                    cmd_result.stdout_text.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create an image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        if not multi_gluster_disks:
            # Do the attach action.
            out = process.run("qemu-img info %s" % img_path, shell=True)
            logging.debug("The img info is:\n%s" % out.stdout.strip())
            result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                       extra=extra, debug=True)
            if result.exit_status:
                test.cancel("Failed to attach disk %s to VM."
                            "Detail: %s." % (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snap_xml = libvirt_xml.SnapshotXML()
            snapshot_name = "snapshot_test"
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test"
            if not no_memory_snap:
                if "--disk-only" not in options:
                    if snapshot_memory == "external":
                        memory_external = os.path.join(tmp_dir,
                                                       "snapshot_memory")
                        snap_xml.mem_snap_type = snapshot_memory
                        snap_xml.mem_file = memory_external
                        snapshot_external_disk.append(memory_external)
                    else:
                        snap_xml.mem_snap_type = snapshot_memory

            # Add all disks into xml file.
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            # Remove non-storage disks such as 'cdrom'; iterate over a
            # copy so removing entries does not skip elements.
            for disk in disks[:]:
                if disk.device != 'disk':
                    disks.remove(disk)
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = snapshot_disk
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as external snapshot file format, update it
                # here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                if snapshot_disk == 'external':
                    new_attrs = disk_xml.source.attrs
                    if 'file' in disk_xml.source.attrs:
                        new_file = "%s.snap" % disk_xml.source.attrs['file']
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None
                    elif 'name' in disk_xml.source.attrs:
                        new_name = "%s.snap" % disk_xml.source.attrs['name']
                        new_attrs.update({'name': new_name})
                        hosts = disk_xml.source.hosts
                    elif ('dev' in disk_xml.source.attrs and
                          disk_xml.type_name == 'block'):
                        # Use a local file as external snapshot target for
                        # block type. A block device is treated as raw
                        # format by default, which does not suit an external
                        # disk snapshot target. A workaround is to run
                        # qemu-img again on the target file.
                        disk_xml.type_name = 'file'
                        del new_attrs['dev']
                        new_file = "%s/blk_src_file.snap" % tmp_dir
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                    new_src_dict = {"attrs": new_attrs}
                    if hosts:
                        new_src_dict.update({"hosts": hosts})
                    disk_xml.source = disk_xml.new_disk_source(**new_src_dict)
                else:
                    del disk_xml.source

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options += " --xmlfile %s " % snapshot_xml_path

            if vm_state == "shut off":
                vm.destroy(gracefully=False)

            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search("live disk snapshot not supported with this "
                                 "QEMU binary", out_err):
                        test.cancel(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As commit d2e668e in 1.2.5, internal active snapshot
                        # without memory state is rejected. Handle it as SKIP
                        # for now. This could be supported in future by bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search("internal snapshot of a running VM" +
                                     " must include the memory state",
                                     out_err):
                            test.cancel("Check Bug #1083345, %s" %
                                        out_err)

                    test.fail("Failed to create snapshot. Error:%s."
                              % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    test.fail("Failed to create snapshot. Error:%s."
                              % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                snap_xml = libvirt_xml.SnapshotXML()
                new_snap = snap_xml.new_from_snapshot_dumpxml(vm_name,
                                                              snapshot_name)
                # update an element
                new_snap.creation_time = snapshot_name
                snapshot_xml_path = new_snap.xml
                options += "--redefine %s --current" % snapshot_xml_path
                snapshot_result = virsh.snapshot_create(vm_name,
                                                        options, debug=True)
                if snapshot_result.exit_status:
                    test.fail("Failed to create snapshot --current."
                              "Error:%s." %
                              snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                test.fail("Success to create snapshot in negative"
                          " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            test.fail("'%s' run failed with '%s'" %
                      (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # As only internal snapshot revert works now, only do revert with
        # internal snapshots, and move all the skipped external cases back
        # to pass. Once external revert is supported, move the following
        # code back.
        if snapshot_disk == 'internal':
            # Destroy vm for snapshot revert.
            if not libvirt_version.version_compare(1, 2, 3):
                virsh.destroy(vm_name)
            # Revert snapshot.
            revert_options = ""
            if snapshot_revert_paused:
                revert_options += " --paused"
            revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                                  revert_options,
                                                  debug=True)
            if revert_result.exit_status:
                # Attempts to revert external snapshots will FAIL with an error
                # "revert to external disk snapshot not supported yet" or "revert
                # to external snapshot not supported yet" since d410e6f. Thus,
                # let's check for that and handle as a SKIP for now. Check bug:
                # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
                if re.search("revert to external \w* ?snapshot not supported yet",
                             revert_result.stderr):
                    test.cancel(revert_result.stderr.strip())
                else:
                    test.fail("Revert snapshot failed. %s" %
                              revert_result.stderr.strip())

            if vm.is_dead():
                test.fail("Revert snapshot failed.")

            if snapshot_revert_paused:
                if vm.is_paused():
                    vm.resume()
                else:
                    test.fail("Revert command successed, but VM is not "
                              "paused after reverting with --paused"
                              "  option.")
            # login vm.
            session = vm.wait_for_login()
            # Check the result of revert.
            status, output = session.cmd_status_output("cat %s" % tmp_file_path)
            logging.debug("After revert cat file output='%s'", output)
            if not status:
                test.fail("Tmp file exists, revert failed.")

            # Close the session.
            session.close()

        # Test deleting the snapshot without "--metadata"; deleting an
        # external disk snapshot will fail for now.
        # Only do this when snapshot creation succeeds, which is
        # filtered in the cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name, snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        test.fail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            test.fail("Snapshot xml file %s missing"
                                      % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        test.fail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            test.fail("Snapshot xml file %s still"
                                      % snap_xml_path + " exist")

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_source_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            if multi_gluster_disks:
                brick_path = os.path.join(tmp_dir, "gluster-pool2")
                utlv.setup_or_cleanup_gluster(False, "gluster-vol2", brick_path)
            libvirtd.restart()

        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image, source_name=vol_name)
            except exceptions.TestFail as detail:
                libvirtd.restart()
                logging.error(str(detail))
def run(test, params, env):
    """
    Test per-image DAC disk hotplug to VM.

    (1).Init variables for test.
    (2).Create disk xml with per-image DAC
    (3).Start VM
    (4).Attach the disk to VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get per-image DAC setting
    vol_name = params.get('vol_name')
    target_dev = params.get('target_dev')
    disk_type_name = params.get("disk_type_name")
    img_user = params.get("img_user")
    img_group = params.get("img_group")
    relabel = 'yes' == params.get('relabel', 'yes')

    if not libvirt_version.version_compare(1, 2, 7):
        raise error.TestNAError("per-image DAC only supported on version 1.2.7"
                                " and after.")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)

    img_path = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        owner_str = format_user_group_str(qemu_user, qemu_group)
        src_usr, src_grp = owner_str.split(':')
        os.chown(blk_source, int(src_usr), int(src_grp))
        vm.start()

        # Init a QemuImg instance and create an image.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        image = qemu_storage.QemuImg(params, tmp_dir, vol_name)
        # Create an image.
        img_path, result = image.create(params)

        # Create disk xml for attach.
        params['source_file'] = img_path
        sec_label = "%s:%s" % (img_user, img_group)
        params['sec_label'] = sec_label
        params['type_name'] = disk_type_name
        sec_label_id = format_user_group_str(img_user, img_group)

        disk_xml = utlv.create_disk_xml(params)

        # Change img file to qemu:qemu and 660 mode
        os.chown(img_path, 107, 107)
        os.chmod(img_path, 432)
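        # 432 decimal == 0o660, matching the "660 mode" noted above; 107 is
        # the conventional qemu uid/gid on RHEL-like hosts.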

        img_label_before = check_ownership(img_path)
        if img_label_before:
            logging.debug("the image ownership before "
                          "attach: %s" % img_label_before)

        # Do the attach action.
        option = "--persistent"
        result = virsh.attach_device(vm_name, filearg=disk_xml,
                                     flagstr=option, debug=True)
        utlv.check_exit_status(result, status_error)

        if not result.exit_status:
            img_label_after = check_ownership(img_path)
            if dynamic_ownership and relabel:
                if img_label_after != sec_label_id:
                    raise error.TestFail("The image dac label %s is not "
                                         "expected." % img_label_after)

            ret = virsh.detach_disk(vm_name, target=target_dev,
                                    extra=option,
                                    debug=True)
            utlv.check_exit_status(ret, status_error)
    finally:
        # clean up
        vm.destroy()
        qemu_conf.restore()
        vmxml.sync()
        libvirtd.restart()
        if img_path and os.path.exists(img_path):
            os.unlink(img_path)
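format_user_group_str() and check_ownership(), used above, are helpers defined
elsewhere in the test module. A rough sketch of the behavior their call sites
imply (the bodies below are assumptions, not the actual helpers):

import grp
import os
import pwd

def format_user_group_str(user, group):
    """Return a numeric 'uid:gid' string for the given user and group."""
    uid = int(user) if user.isdigit() else pwd.getpwnam(user).pw_uid
    gid = int(group) if group.isdigit() else grp.getgrnam(group).gr_gid
    return "%s:%s" % (uid, gid)

def check_ownership(path):
    """Return the 'uid:gid' owner string of path, or None if it is missing."""
    if not os.path.exists(path):
        return None
    stat_info = os.stat(path)
    return "%s:%s" % (stat_info.st_uid, stat_info.st_gid)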
            params['inactivity_treshold'] = 1800
            params['image_verify_bootable'] = "no"
            params['unattended_delivery_method'] = "cdrom"
            params['drive_index_unattended'] = 1
            params['drive_index_cd1'] = 2
            params['boot_once'] = "d"
            params['medium'] = "cdrom"
            params['wait_no_ack'] = "yes"
            params['image_raw_device'] = "yes"
            params['backup_image_before_testing'] = "no"
            params['kernel_params'] = ("ks=cdrom nicdelay=60 "
                                       "console=ttyS0,115200 console=tty0")
            params['cdroms'] = "unattended cd1"
            params['redirs'] += " unattended_install"
            try:
                unattended_install.run(test, params, env)
            except error.CmdError, detail:
                raise error.TestFail("Guest install failed:%s" % detail)
            finally:
                env.unregister_vm(vm_name)
    finally:
        try:
            if application == "install":
                if virsh.domain_exists(vm_name):
                    virsh.remove_domain(vm_name)
            elif application == "attach":
                virsh.detach_disk(vm_name, disk_target)
        finally:
            pvtest.cleanup_pool(pool_name, pool_type, pool_target,
                                emulated_img)
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi network disk XML
    3. Attach the disk with the XML file and check the disk inside the VM
    4. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocal", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in"
                                    + " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s", open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
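            # Note: libvirt expects the secret value to be base64-encoded;
            # it is decoded again when libvirt authenticates to the target.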
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                      is_login=False,
                                                      chap_user=chap_user,
                                                      chap_passwd=chap_passwd)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.hostname = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))[1][0]
            except IndexError:
                raise error.TestError("Fail to get volume name")

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/1",
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'source_mode': disk_src_mode}
        else:
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
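        # For the 'network' disk type, the generated XML resembles
        # (illustrative only; exact attributes depend on create_disk_xml):
        # <disk type='network' device='disk'>
        #   <source protocol='iscsi' name='<target-iqn>/1'>
        #     <host name='127.0.0.1' port='3260'/>
        #   </source>
        #   <auth username='<chap-user>'>
        #     <secret type='iscsi' usage='<secret-usage-target>'/>
        #   </auth>
        #   <target dev='vdb' bus='virtio'/>
        # </disk>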

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        # Attach the iscsi network disk to domain
        logging.debug("Attach disk by XML: %s", open(disk_xml).read())
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         **virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_info(vm_name, snapshot_name1,
                                             **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            sn_create_op = "%s --disk-only %s" % (snapshot_name2, disk_target)
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError), e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
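
# A minimal, self-contained sketch of the CHAP secret workflow exercised in
# the example above, assuming the avocado-vt `virsh` module; the XML path
# and password below are placeholders, not values from the test.
import base64

from virttest import virsh


def define_chap_secret(secret_xml_path, chap_passwd):
    """
    Define an iscsi CHAP secret, set its value and return the secret uuid.
    """
    result = virsh.secret_define(secret_xml_path, debug=True)
    if result.exit_status:
        raise RuntimeError("secret-define failed: %s" % result.stderr)
    # virsh prints "Secret <uuid> created"; the uuid is the second token.
    secret_uuid = result.stdout.strip().split()[1]
    # The value handed to secret-set-value must be base64-encoded.
    virsh.secret_set_value(secret_uuid, base64.b64encode(chap_passwd),
                           debug=True, ignore_status=False)
    return secret_uuid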
Beispiel #24
0
def run(test, params, env):
    """
    Integration test of backup and backing_chain.

    Steps:
    1. create a vm with extra disk vdb
    2. create some data on vdb
    3. start a pull mode full backup on vdb
    4. create some data on vdb
    5. start a pull mode incremental backup
    6. repeat steps 4 and 5
    7. before the last round of backup job, do a blockcommit/pull/copy
    8. check the full/incremental backup file data
    """
    def run_blk_cmd():
        """
        Run blockcommit/blockpull/blockcopy command.
        """
        def run_blockpull():
            """
            Run blockpull command.
            """
            if from_to == "mid_to_top":
                cmd_option = ("--base {0}[{1}] --wait").format(
                    original_disk_target, middle_layer1_index)
            elif from_to == "base_to_top":
                cmd_option = ("--base {0}[{1}] --wait").format(
                    original_disk_target, base_layer_index)
            virsh.blockpull(vm_name,
                            original_disk_target,
                            cmd_option,
                            debug=True,
                            ignore_status=False)

        def run_blockcommit():
            """
            Run blockcommit command.
            """
            if from_to == "top_to_base":
                # Do blockcommit from top layer to base layer
                cmd_option = (
                    "--top {0}[{1}] --base {0}[{2}] --active --pivot "
                    "--wait".format(original_disk_target, top_layer_index,
                                    base_layer_index))

            elif from_to == "mid_to_mid":
                # Do blockcommit from middle layer to another middle layer
                if len(indice) < 4:
                    test.fail(
                        "At lease 4 layers required for the test 'mid_to_mid'")
                cmd_option = ("--top {0}[{1}] --base {0}[{2}] "
                              "--wait".format(original_disk_target,
                                              middle_layer1_index,
                                              middle_layer2_index))
            elif from_to == "top_to_mid":
                # Do blockcommit from top layer to middle layer
                cmd_option = (
                    "--top {0}[{1}] --base {0}[{2}] --active --pivot "
                    "--wait".format(original_disk_target, top_layer_index,
                                    middle_layer1_index))
            elif from_to == "mid_to_base":
                # Do blockcommit from middle layer to base layer
                cmd_option = ("--top {0}[{1}] --base {0}[{2}] "
                              "--wait".format(original_disk_target,
                                              middle_layer1_index,
                                              base_layer_index))
            virsh.blockcommit(vm_name,
                              original_disk_target,
                              cmd_option,
                              debug=True,
                              ignore_status=False)

        def run_blockcopy():
            """
            Run blockcopy command.
            """
            copy_dest = os.path.join(tmp_dir, "copy_dest.qcow2")
            cmd_option = "--wait --verbose --transient-job --pivot"
            if blockcopy_method == "shallow_copy":
                cmd_option += " --shallow"
            if blockcopy_reuse == "reuse_external":
                cmd_option += " --reuse-external"
                if blockcopy_method == "shallow_copy":
                    create_img_cmd = "qemu-img create -f qcow2 -F qcow2 -b %s %s"
                    create_img_cmd %= (backend_img, copy_dest)
                else:
                    create_img_cmd = "qemu-img create -f qcow2 %s %s"
                    create_img_cmd %= (copy_dest, original_disk_size)
                process.run(create_img_cmd, shell=True, ignore_status=False)
            virsh.blockcopy(vm_name,
                            original_disk_target,
                            copy_dest,
                            cmd_option,
                            debug=True,
                            ignore_status=False)

        # Get disk backing store indice info in vm disk xml
        cur_vm_xml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        cur_disk_xmls = cur_vm_xml.get_devices(device_type="disk")
        cur_test_disk_xml = ''
        for disk_xml in cur_disk_xmls:
            if disk_xml.target['dev'] == original_disk_target:
                cur_test_disk_xml = disk_xml
                logging.debug("Current disk xml for %s is:\n %s",
                              original_disk_target, cur_test_disk_xml)
                break
        indice = re.findall(r"index=['\"](\d+)['\"]",
                            str(cur_test_disk_xml))
        logging.debug("backing store indice for %s is: %s",
                      original_disk_target, indice)
        if len(indice) < 3:
            test.fail("At least 3 layers required for the test.")
        top_layer_index = indice[0]
        middle_layer1_index = indice[1]
        middle_layer2_index = indice[-2]
        base_layer_index = indice[-1]
        logging.debug(
            "Following backing store will be used: %s",
            "top:%s; middle_1: %s, middle_2:%s, base: %s" %
            (top_layer_index, middle_layer1_index, middle_layer2_index,
             base_layer_index))
        # Start the block command
        if blockcommand == "blockpull":
            run_blockpull()
        if blockcommand == "blockcommit":
            run_blockcommit()
        if blockcommand == "blockcopy":
            run_blockcopy()

    def create_shutoff_snapshot(original_img, snapshot_img):
        """
        Create a shutoff snapshot: the disk snapshot is not controlled by
        libvirt, but created directly with the qemu-img command while the
        VM is shut off.

        :param original_img: The image we will take shutoff snapshot for.
        :param snapshot_img: The newly created shutoff snapshot image.
        """
        cmd = "qemu-img info --output=json -f qcow2 {}".format(original_img)
        img_info = process.run(cmd, shell=True,
                               ignore_status=False).stdout_text
        json_data = json.loads(img_info)
        cmd = "qemu-img create -f qcow2 -F qcow2 -b {0} {1}".format(
            original_img, snapshot_img)
        process.run(cmd, shell=True, ignore_status=False)
        try:
            bitmaps = json_data['format-specific']['data']['bitmaps']
            for bitmap in bitmaps:
                bitmap_flags = bitmap['flags']
                bitmap_name = bitmap['name']
                if 'auto' in bitmap_flags and 'in-use' not in bitmap_flags:
                    cmd = "qemu-img bitmap -f qcow2 {0} --add {1}".format(
                        snapshot_img, bitmap_name)
                    process.run(cmd, shell=True, ignore_status=False)
        except Exception as bitmap_error:
            logging.debug("Cannot add bitmap to new image, skip it: %s",
                          bitmap_error)
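    # Usage (illustrative): create_shutoff_snapshot("/tmp/base.qcow2",
    # "/tmp/snap0.qcow2") leaves base.qcow2 untouched; snap0.qcow2 gets
    # base.qcow2 as its backing file, plus freshly added (empty) persistent
    # bitmaps with the same names as every 'auto' bitmap in the base image,
    # so incremental backups keep working across the manual snapshot.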

    # Cancel the test if libvirt version is too low
    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel("Current libvirt version doesn't support "
                    "incremental backup.")

    # vm's original disk config
    original_disk_size = params.get("original_disk_size", "100M")
    original_disk_type = params.get("original_disk_type", "local")
    original_disk_target = params.get("original_disk_target", "vdb")

    # pull mode backup config
    scratch_type = params.get("scratch_type", "file")
    nbd_protocol = params.get("nbd_protocol", "tcp")
    nbd_tcp_port = params.get("nbd_tcp_port", "10809")

    # test config
    backup_rounds = int(params.get("backup_rounds", 4))
    shutoff_snapshot = "yes" == params.get("shutoff_snapshot")
    blockcommand = params.get("blockcommand")
    from_to = params.get("from_to")
    blockcopy_method = params.get("blockcopy_method")
    blockcopy_reuse = params.get("blockcopy_reuse")
    backup_error = "yes" == params.get("backup_error")
    tmp_dir = data_dir.get_tmp_dir()

    try:
        vm_name = params.get("main_vm")
        vm = env.get_vm(vm_name)

        # Make sure there is no checkpoint metadata before test
        utils_backup.clean_checkpoints(vm_name)

        # Backup vm xml
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        vmxml_backup = vmxml.copy()
        disks_not_tested = list(vmxml.get_disk_all().keys())
        logging.debug("Not tested disks are: %s", disks_not_tested)
        utils_backup.enable_inc_backup_for_vm(vm)

        # Destroy vm before test
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Prepare the disk to be backed up.
        disk_params = {}
        disk_path = ""
        if original_disk_type == "local":
            image_name = "%s_image.qcow2" % original_disk_target
            disk_path = os.path.join(tmp_dir, image_name)
            libvirt.create_local_disk("file", disk_path, original_disk_size,
                                      "qcow2")
            disk_params = {
                "device_type": "disk",
                "type_name": "file",
                "driver_type": "qcow2",
                "target_dev": original_disk_target,
                "source_file": disk_path
            }
        else:
            test.cancel("The disk type '%s' is not supported in this script."
                        % original_disk_type)
        disk_xml = libvirt.create_disk_xml(disk_params)
        virsh.attach_device(vm.name, disk_xml, flagstr="--config", debug=True)
        vm.start()
        session = vm.wait_for_login()
        new_disks_in_vm = list(utils_disk.get_linux_disks(session).keys())
        session.close()
        if len(new_disks_in_vm) != 1:
            test.fail("Test disk not prepared in vm")

        # Use the newly added disk as the test disk
        test_disk_in_vm = "/dev/" + new_disks_in_vm[0]

        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        vm_disks = list(vmxml.get_disk_all().keys())

        checkpoint_list = []
        is_incremental = False
        backup_file_list = []
        snapshot_list = []
        cur_disk_xml = disk_xml
        cur_disk_path = disk_path
        cur_disk_params = disk_params
        backend_img = ""
        for backup_index in range(backup_rounds):
            # Do external snapshot
            if shutoff_snapshot:
                virsh.detach_disk(vm.name,
                                  original_disk_target,
                                  extra="--persistent",
                                  ignore_status=False,
                                  debug=True)
                if vm.is_alive():
                    vm.destroy(gracefully=False)
                shutoff_snapshot_name = "shutoff_snap_%s" % str(backup_index)
                shutoff_snapshot_path = os.path.join(tmp_dir,
                                                     shutoff_snapshot_name)

                create_shutoff_snapshot(cur_disk_path, shutoff_snapshot_path)
                cur_disk_params["source_file"] = shutoff_snapshot_path
                cur_disk_xml = libvirt.create_disk_xml(cur_disk_params)
                virsh.attach_device(vm.name,
                                    cur_disk_xml,
                                    flagstr="--config",
                                    ignore_status=False,
                                    debug=True)
                vm.start()
                vm.wait_for_login().close()
                cur_disk_path = shutoff_snapshot_path
            else:
                snapshot_name = "snap_%s" % str(backup_index)
                snapshot_option = ""
                snapshot_file_name = os.path.join(tmp_dir, snapshot_name)
                for disk_name in disks_not_tested:
                    snapshot_option += "--diskspec %s,snapshot=no " % disk_name
                snapshot_option += "--diskspec %s,file=%s" % (
                    original_disk_target, snapshot_file_name)
                virsh.snapshot_create_as(vm_name,
                                         "%s --disk-only %s" %
                                         (snapshot_name, snapshot_option),
                                         debug=True)
                snapshot_list.append(snapshot_name)

            # Prepare backup xml
            backup_params = {"backup_mode": "pull"}
            if backup_index > 0:
                is_incremental = True
                backup_params["backup_incremental"] = "checkpoint_" + str(
                    backup_index - 1)

            # Set libvirt default nbd export name and bitmap name
            nbd_export_name = original_disk_target
            nbd_bitmap_name = "backup-" + original_disk_target

            backup_server_dict = {"name": "localhost", "port": nbd_tcp_port}
            backup_params["backup_server"] = backup_server_dict
            backup_disk_xmls = []
            for vm_disk in vm_disks:
                backup_disk_params = {"disk_name": vm_disk}
                if vm_disk != original_disk_target:
                    backup_disk_params["enable_backup"] = "no"
                else:
                    backup_disk_params["enable_backup"] = "yes"
                    backup_disk_params["disk_type"] = scratch_type

                    # Prepare nbd scratch file/dev params
                    scratch_params = {"attrs": {}}
                    scratch_file_name = "scratch_file_%s" % backup_index
                    scratch_file_path = os.path.join(tmp_dir,
                                                     scratch_file_name)
                    scratch_params["attrs"]["file"] = scratch_file_path
                    logging.debug("scratch_params: %s", scratch_params)
                    backup_disk_params["backup_scratch"] = scratch_params
                backup_disk_xml = utils_backup.create_backup_disk_xml(
                    backup_disk_params)
                backup_disk_xmls.append(backup_disk_xml)
            logging.debug("disk list %s", backup_disk_xmls)
            backup_xml = utils_backup.create_backup_xml(
                backup_params, backup_disk_xmls)
            logging.debug("ROUND_%s Backup Xml: %s", backup_index, backup_xml)

            # Prepare checkpoint xml
            checkpoint_name = "checkpoint_%s" % backup_index
            checkpoint_list.append(checkpoint_name)
            cp_params = {"checkpoint_name": checkpoint_name}
            cp_params["checkpoint_desc"] = params.get(
                "checkpoint_desc", "desc of cp_%s" % backup_index)
            disk_param_list = []
            for vm_disk in vm_disks:
                cp_disk_param = {"name": vm_disk}
                if vm_disk != original_disk_target:
                    cp_disk_param["checkpoint"] = "no"
                else:
                    cp_disk_param["checkpoint"] = "bitmap"
                    cp_disk_bitmap = params.get("cp_disk_bitmap")
                    if cp_disk_bitmap:
                        cp_disk_param["bitmap"] = cp_disk_bitmap + str(
                            backup_index)
                disk_param_list.append(cp_disk_param)
            checkpoint_xml = utils_backup.create_checkpoint_xml(
                cp_params, disk_param_list)
            logging.debug("ROUND_%s Checkpoint Xml: %s", backup_index,
                          checkpoint_xml)

            # Start backup
            backup_options = backup_xml.xml + " " + checkpoint_xml.xml

            # Create some data in vdb
            dd_count = "1"
            dd_seek = str(backup_index * 10 + 10)
            dd_bs = "1M"
            session = vm.wait_for_login()
            utils_disk.dd_data_to_vm_disk(session, test_disk_in_vm, dd_bs,
                                          dd_seek, dd_count)
            session.close()

            backup_result = virsh.backup_begin(vm_name,
                                               backup_options,
                                               debug=True)
            if backup_result.exit_status:
                raise utils_backup.BackupBeginError(
                    backup_result.stderr.strip())

            backup_file_path = os.path.join(
                tmp_dir, "backup_file_%s.qcow2" % str(backup_index))
            backup_file_list.append(backup_file_path)
            nbd_params = {
                "nbd_protocol": nbd_protocol,
                "nbd_hostname": "localhost",
                "nbd_export": nbd_export_name,
                "nbd_tcp_port": nbd_tcp_port
            }
            if not is_incremental:
                # Do full backup
                utils_backup.pull_full_backup_to_file(nbd_params,
                                                      backup_file_path)
                logging.debug("Full backup to: %s", backup_file_path)
            else:
                # Do incremental backup
                utils_backup.pull_incremental_backup_to_file(
                    nbd_params, backup_file_path, nbd_bitmap_name,
                    original_disk_size)
            virsh.domjobabort(vm_name, debug=True)
            # Run the blockcommit/blockpull/blockcopy command before the last
            # round of the backup job, to verify that the block command
            # preserves the dirty bitmap data.
            if backup_index == backup_rounds - 2:
                run_blk_cmd()
                cur_disk_path = (
                    vm.get_blk_devices()[original_disk_target]['source'])

            if backup_index == backup_rounds - 3:
                backend_img = (
                    vm.get_blk_devices()[original_disk_target]['source'])

        # Get current active image for the test disk
        vm_disks = vm.get_blk_devices()
        current_active_image = vm_disks[original_disk_target]['source']
        logging.debug("The current active image for '%s' is '%s'",
                      original_disk_target, current_active_image)

        for checkpoint_name in checkpoint_list:
            virsh.checkpoint_delete(vm_name,
                                    checkpoint_name,
                                    debug=True,
                                    ignore_status=False)
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Compare the backup data and original data

        original_data_file = os.path.join(tmp_dir, "original_data.qcow2")
        cmd = "qemu-img convert -f qcow2 %s -O qcow2 %s" % (
            current_active_image, original_data_file)
        process.run(cmd, shell=True, verbose=True)
        for backup_file in backup_file_list:
            if not utils_backup.cmp_backup_data(original_data_file,
                                                backup_file):
                test.fail("Backup and original data are not identical for"
                          "'%s' and '%s'" %
                          (current_active_image, backup_file))
            else:
                logging.debug("'%s' contains correct backup data", backup_file)
    except utils_backup.BackupBeginError as details:
        if backup_error:
            logging.debug("Backup failed as expected.")
        else:
            test.fail(details)
    finally:
        # Remove checkpoints' metadata again to make sure vm has no checkpoints
        if "checkpoint_list" in locals():
            for checkpoint_name in checkpoint_list:
                virsh.checkpoint_delete(vm_name,
                                        checkpoint_name,
                                        options="--metadata")
        # Remove snapshots
        if "snapshot_list" in locals():
            for snapshot_name in snapshot_list:
                virsh.snapshot_delete(vm_name,
                                      "%s --metadata" % snapshot_name,
                                      debug=True)

        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Restoring vm
        vmxml_backup.sync()

        for file_name in os.listdir(tmp_dir):
            file_path = os.path.join(tmp_dir, file_name)
            if 'env' not in file_path:
                if os.path.isfile(file_path):
                    os.remove(file_path)
                elif os.path.isdir(file_path):
                    shutil.rmtree(file_path)
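
# Hedged sketch of what pulling the full backup over NBD roughly amounts to,
# assuming qemu-img is installed and a backup job is exporting "vdb" on
# localhost:10809 as configured above; the utils_backup helpers used in the
# test additionally handle bitmap-aware incremental reads.
from avocado.utils import process


def pull_full_backup(export_name, dest_path, port="10809"):
    # Read the whole NBD export and store it as a qcow2 image.
    cmd = ("qemu-img convert -f raw nbd://localhost:%s/%s -O qcow2 %s"
           % (port, export_name, dest_path))
    process.run(cmd, shell=True, ignore_status=False)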
def run(test, params, env):
    """
    Test rbd disk device.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare disk image.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def config_ceph():
        """
        Write the configs to the file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')
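        # The resulting file contains a single line such as:
        # mon_host = 10.0.0.1:6789,10.0.0.2:6789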

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(disk_src_host.split(),
                                              disk_src_port.split()):
                source_host.append({'name': host_name, 'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = (
                    "--auth-type %s --auth-username %s --secret-usage '%s'" %
                    (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(
                    pool_name, mon_host, disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        :return: True if created successfully.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name,
                               vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name,
                                 "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = r'\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)
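        # Illustrative only: with cephx auth the matched drive spec looks
        # roughly like
        #   file=rbd:<pool>/<image>:id=<user>:auth_supported=cephx...:mon_host=<host>\:6789
        # which the incremental greps above match piece by piece.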

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(), "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = [
            "disk name='%s' snapshot='external' type='file'" % target_dev
        ]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")

        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm_obj: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Partition list collected before attaching the disk.
        :param read_only: Whether the disk is expected to be read-only.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)".format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Readonly fs, check the error messages.
            # The command may return True, read-only
            # messges can be found from the command output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the rbd_vol.img volume, unprotect them, and
        then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True,
                                shell=True).stdout_text
        snap_names = []
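        # The parsing below assumes `rbd snap list` prints a header line
        # starting with SNAPID, followed by rows of four whitespace-separated
        # columns with the snapshot name in the second column.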
        if snaps_out:
            for line in snaps_out.rsplit("\n"):
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.rsplit()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect snapshots first, otherwise purging the volume will fail
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt,
                                 os.path.join(disk_src_pool, vol_name),
                                 snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt,
                         os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
        snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
        snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
        snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
        snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
        snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {
            "s1":
            "s1 --disk-only --no-metadata",
            "s2":
            "s2 --memspec %s --no-metadata" % snapshot2_file,
            "s3":
            "s3 --memspec %s --no-metadata --live" % snapshot3_file,
            "s4":
            "s4 --memspec %s --diskspec vda,file=%s --no-metadata" %
            (snapshot4_file, snapshot4_disk_file),
            "s5":
            "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" %
            (snapshot5_file, snapshot5_disk_file)
        }
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name,
                                           snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(
                    first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = results_stdout_52lts(
            secret_list_result).strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # Parse entries only if the secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace; the first column is the UUID
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume",
                                           "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get(
        "test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol",
                                                    "no")
    disk_snapshot_with_sanlock = "yes" == params.get(
        "disk_internal_with_sanlock", "no")

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name,
                                        guest_name,
                                        True,
                                        timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(), "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)
    # Construct an unsupported-error message list to skip these kinds of tests
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        unsupported_err.append('Could not create file: Permission denied')
        unsupported_err.append('Permission denied')
    if test_disk_internal_snapshot:
        unsupported_err.append(
            'unsupported configuration: internal snapshot for disk ' +
            'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up dirty secrets in the test environment, if there are any.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()
        san_lock_config = LibvirtSanLockConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("fail to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("fail to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config.user = 'sanlock'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict[
                "sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict[
                "chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict[
                "restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict[
                "truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict[
                "sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict[
                "chown sanlock:sanlock " +
                "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict[
                "sanlock client add_lockspace -s TEST_LS:1:" +
                "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])
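            # After this loop, TEST_LS is an initialized sanlock lockspace,
            # test-disk-resource-lock is an initialized resource lease inside
            # it, and add_lockspace has registered this host (host_id 1) in
            # the lockspace.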

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {
                'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'
            }
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()
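            # For reference, the lease device added above serializes into the
            # domain XML roughly as follows (a sketch based on libvirt's
            # documented lease element):
            #   <lease>
            #     <lockspace>TEST_LS</lockspace>
            #     <key>test-disk-resource-lock</key>
            #     <target path='/var/lib/libvirt/sanlock/test-disk-resource-lock'/>
            #   </lease>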

        # Install the ceph-common package, which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" % (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                uuid_match = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                        ret.stdout.strip())
                if not uuid_match:
                    test.error("Failed to get secret uuid")
                secret_uuid = uuid_match[0].lstrip()
                logging.debug("Secret uuid %s", secret_uuid)

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
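            # The "rbd info X && rbd rm X" chaining makes the removal
            # conditional: rm only runs when the image already exists, so this
            # cleanup is a harmless no-op on a fresh cluster.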
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" % (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" % (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port
        })
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s || qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt, disk_src_name,
                                          disk_format, blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
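            # Mirror image of the "&&" guard used above for removal: the
            # "rbd info X || qemu-img convert" chain only uploads the local
            # image when it does not yet exist on the remote cluster.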

        elif create_volume:
            vol_params = {
                "name": vol_name,
                "capacity": int(vol_cap),
                "capacity_unit": vol_cap_unit,
                "format": disk_format
            }

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a filesystem on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s || qemu-img convert -O"
                        " %s %s %s" % (mon_host, key_opt, disk_src_name,
                                       disk_format, img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}' %
                            (disk_src_name, mon_host))
                # pass different json string according to the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' %
                                (json_str[:-2], auth_user, auth_key))
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
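                # The frontend qcow2 now references its rbd backing file via a
                # JSON-encoded filename, which exercises libvirt's handling of
                # qemu's "json:" pseudo protocol when the backing chain is
                # probed.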
        # If hot plug, start VM first, otherwise stop VM if running.
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # Auth options are not needed for a volume-type disk
                for key in ("auth_user", "auth_type", "secret_type",
                            "secret_uuid", "secret_usage"):
                    params.pop(key, None)
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                ret = virsh.attach_device(guest_name, xml_file, "", debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path, targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name,
                                      xml_file,
                                      flagstr=opts,
                                      debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait until the VM is running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(
                    vm, targetdev, old_parts, read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm" " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.skip(details)
        test.fail("VM failed to start. Error: %s" % str(details))
    finally:
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt,
                             os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(
                       mon_host, key_opt,
                       os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
Beispiel #26
0
def run(test, params, env):
    """
    Test DAC in adding an NFS pool disk to a VM.

    (1).Init variables for test.
    (2).Create nfs pool and vol.
    (3).Attach the nfs pool vol to VM.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options",
                                "rw,async,no_root_squash,fsid=0")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool vol variables
    img_tup = ("img_user", "img_group", "img_mode")
    img_val = []
    for i in img_tup:
        try:
            img_val.append(int(params.get(i)))
        except ValueError:
            test.cancel("%s value '%s' is not a number." %
                        (i, params.get(i)))
    # False positive - img_val was filled in the for loop above.
    # pylint: disable=E0632
    img_user, img_group, img_mode = img_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        os.close(f)
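    # The uid:gid pairs recorded above are kept so the original disk
    # ownership can be restored during cleanup after the DAC test has
    # chown'ed the sources.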

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    snapshot_name = None
    disk_snap_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown domain disk to qemu:qemu to avoid fail on local disk
        for disk in disks.values():
            disk_path = disk['source']
            if qemu_user == "root":
                os.chown(disk_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        if backup_sestatus == "disabled":
            test.cancel("SELinux is in Disabled mode."
                        "It must be Enabled to"
                        "run this test")
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        # Create dst pool for create attach vol img
        logging.debug("export_options is: %s" % export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target,
                     emulated_image, image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # set virt_use_nfs
        result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs,
                             shell=True)
        if result.exit_status:
            test.cancel("Failed to set virt_use_nfs value")

        # Init a QemuImg instance and create img on nfs server dir.
        params['image_name'] = vol_name
        tmp_dir = test.tmpdir
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        image = qemu_storage.QemuImg(params, nfs_path, vol_name)
        # Create an image.
        server_img_path, result = image.create(params)

        if params.get("image_name_backing_file"):
            params['image_name'] = bk_file_name
            params['has_backing_file'] = "yes"
            image = qemu_storage.QemuImg(params, nfs_path, bk_file_name)
            server_img_path, result = image.create(params)

        # Get vol img path
        vol_name = server_img_path.split('/')[-1]
        virsh.pool_refresh(pool_name, debug=True)
        cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
        if cmd_result.exit_status:
            test.cancel("Failed to get volume path from pool.")
        img_path = cmd_result.stdout.strip()

        # Do the attach action.
        extra = "--persistent --subdriver qcow2"
        result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                   extra=extra, debug=True)
        if result.exit_status:
            test.fail("Failed to attach disk %s to VM."
                      "Detail: %s." % (img_path, result.stderr))

        # Change img ownership and mode on nfs server dir
        os.chown(server_img_path, img_user, img_group)
        os.chmod(server_img_path, img_mode)
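        # With the image now owned by img_user:img_group and restricted to
        # img_mode, starting the VM below probes whether the qemu process
        # (running as qemu_user) can still access it, i.e. whether libvirt's
        # dynamic_ownership relabeling takes effect on the NFS share.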

        img_label_before = check_ownership(server_img_path)
        if img_label_before:
            logging.debug("attached image ownership on nfs server before "
                          "start: %s" % img_label_before)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.

            img_label_after = check_ownership(server_img_path)
            if img_label_after:
                logging.debug("attached image ownership on nfs server after"
                              " start: %s" % img_label_after)

            if status_error:
                test.fail('Test succeeded in negative case.')
        except virt_vm.VMStartError as e:
            # Starting VM failed.
            if not status_error:
                test.fail("Test failed in positive case."
                          "error: %s" % e)

        if params.get("image_name_backing_file"):
            options = "--disk-only"
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if not status_error:
                    test.fail("Failed to create snapshot. Error:%s."
                              % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)

        if snapshot_name:
            disks_snap = vm.get_disk_devices()
            for disk in disks_snap.values():
                disk_snap_path.append(disk['source'])
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata",
                                  debug=True)

        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              debug=True)
        except process.CmdError:
            test.fail("Detach disk 'vdf' from VM %s failed."
                      % vm.name)
Beispiel #27
0
 def vm_stress_events(self, event, vm):
     """
     Stress events
     :param event: event name
     :param vm: vm object
     """
     dargs = {'ignore_status': True, 'debug': True}
     for itr in range(self.iterations):
         if "vcpupin" in event:
             for vcpu in range(int(self.current_vcpu)):
                 result = virsh.vcpupin(vm.name, vcpu,
                                        random.choice(self.host_cpu_list),
                                        **dargs)
                 if not self.ignore_status:
                     libvirt.check_exit_status(result)
         elif "emulatorpin" in event:
             for vcpu in range(int(self.current_vcpu)):
                 result = virsh.emulatorpin(vm.name,
                                            random.choice(
                                                self.host_cpu_list),
                                            **dargs)
                 if not self.ignore_status:
                     libvirt.check_exit_status(result)
         elif "suspend" in event:
             result = virsh.suspend(vm.name, **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
             time.sleep(self.event_sleep_time)
             result = virsh.resume(vm.name, **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
         elif "cpuhotplug" in event:
             result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                     **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
                 exp_vcpu = {'max_config': self.max_vcpu,
                             'max_live': self.max_vcpu,
                             'cur_config': self.current_vcpu,
                             'cur_live': self.max_vcpu,
                             'guest_live': self.max_vcpu}
                 utils_hotplug.check_vcpu_value(
                     vm, exp_vcpu, option="--live")
             time.sleep(self.event_sleep_time)
             result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                     **dargs)
             if not self.ignore_status:
                 libvirt.check_exit_status(result)
                 exp_vcpu = {'max_config': self.max_vcpu,
                             'max_live': self.max_vcpu,
                             'cur_config': self.current_vcpu,
                             'cur_live': self.current_vcpu,
                             'guest_live': self.current_vcpu}
                 utils_hotplug.check_vcpu_value(
                     vm, exp_vcpu, option="--live")
         elif "reboot" in event:
             vm.reboot()
         elif "nethotplug" in event:
             for iface_num in range(int(self.iface_num)):
                 logging.debug("Try to attach interface %d" % iface_num)
                 mac = utils_net.generate_mac_address_simple()
                 options = ("%s %s --model %s --mac %s %s" %
                            (self.iface_type, self.iface_source['network'],
                             self.iface_model, mac, self.attach_option))
                 logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options)
                 ret = virsh.attach_interface(vm.name, options,
                                              ignore_status=True)
                 time.sleep(self.event_sleep_time)
                 if not self.ignore_status:
                     libvirt.check_exit_status(ret)
                 if self.detach_option:
                     options = ("--type %s --mac %s %s" %
                                (self.iface_type, mac, self.detach_option))
                     logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options)
                     ret = virsh.detach_interface(vm.name, options,
                                                  ignore_status=True)
                     if not self.ignore_status:
                         libvirt.check_exit_status(ret)
         elif "diskhotplug" in event:
             for disk_num in range(len(self.device_source_names)):
                 disk = {}
                 disk_attach_error = False
                 disk_name = os.path.join(self.path, vm.name, self.device_source_names[disk_num])
                 device_source = libvirt.create_local_disk(
                     self.disk_type, disk_name, self.disk_size, disk_format=self.disk_format)
                 disk.update({"format": self.disk_format,
                              "source": device_source})
                 disk_xml = Disk(self.disk_type)
                 disk_xml.device = self.disk_device
                 disk_xml.driver = {"name": self.disk_driver, "type": self.disk_format}
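                 # Note: the Disk XML object assembled above is never passed
                 # to virsh; attach_disk below works from the raw source path
                 # and target name, so the XML only documents the intended
                 # driver/format pairing.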
                 ret = virsh.attach_disk(vm.name, disk["source"], self.device_target[disk_num], self.attach_option, debug=True)
                 if not self.ignore_status:
                     libvirt.check_exit_status(ret, disk_attach_error)
                 if self.detach_option:
                     ret = virsh.detach_disk(vm.name, self.device_target[disk_num], extra=self.detach_option)
                     if not self.ignore_status:
                         libvirt.check_exit_status(ret)
                     libvirt.delete_local_disk(self.disk_type, disk_name)
         else:
             raise NotImplementedError
Beispiel #28
0
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi network disk XML
    3. Attach disk with the XML file and check the disk inside the VM
    4. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    vg_name = params.get("virt_disk_vg_name", "vg_test_0")
    lv_name = params.get("virt_disk_lv_name", "lv_test_0")
    driver_packed = params.get("driver_packed", "on")
    disk_packed = "yes" == params.get("disk_packed", "no")
    scsi_packed = "yes" == params.get("scsi_packed", "no")

    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk is not supported in the"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk is not supported in the"
                        " current libvirt version.")
    if pool_type == "iscsi-direct":
        if not libvirt_version.version_compare(4, 7, 0):
            test.cancel("iscsi-direct pool is not supported in the"
                        " current libvirt version.")
    if ((disk_packed or scsi_packed)
            and not libvirt_version.version_compare(6, 3, 0)):
        test.cancel("The virtio packed attribute is not supported in the"
                    " current libvirt version.")
    # Back up the VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Fix no more PCI slots issue in certain cases.
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    machine_type = params.get("machine_type", "pc")
    if machine_type == 'q35':
        vm_dump_xml.remove_all_device_by_type('controller')
        machine_list = vm_dump_xml.os.machine.split("-")
        vm_dump_xml.set_os_attrs(
            **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
        q35_pcie_dict0 = {
            'controller_model': 'pcie-root',
            'controller_type': 'pci',
            'controller_index': 0
        }
        q35_pcie_dict1 = {
            'controller_model': 'pcie-root-port',
            'controller_type': 'pci'
        }
        vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
        # Add enough controllers to match multiple times disk attaching requirements
        for i in list(range(1, 12)):
            q35_pcie_dict1.update({'controller_index': "%d" % i})
            vm_dump_xml.add_device(
                libvirt.create_controller_xml(q35_pcie_dict1))
        vm_dump_xml.sync()

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
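            # virsh secret-set-value expects the value base64-encoded, hence
            # the encode/decode round trip above; libvirt decodes it again
            # before handing the CHAP password to the iSCSI initiator.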
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        if disk_type == "block":
            iscsi_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=True,
                image_size="1G",
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        else:
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size='1G',
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = pool_target
            if chap_auth:
                pool_src_xml.auth_type = "chap"
                pool_src_xml.auth_username = chap_user
                pool_src_xml.secret_usage = secret_usage_target
                poolxml.set_source(pool_src_xml)
            if pool_type == "iscsi-direct":
                iscsi_initiator = params.get('iscsi_initiator')
                pool_src_xml.iqn_name = iscsi_initiator
                poolxml.set_source(pool_src_xml)
            # Create iscsi/iscsi-direct pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            xml = virsh.pool_dumpxml(disk_src_pool)
            logging.debug("Pool '%s' XML:\n%s", disk_src_pool, xml)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            if pool_type == "iscsi":
                process.run('qemu-img create -f qcow2 %s %s' %
                            (vol_path, '100M'),
                            shell=True,
                            verbose=True)
            else:
                # Get iscsi URL to create a qcow2 volume disk
                disk_path = ("iscsi://[%s]/%s/%s" %
                             (disk_src_host, iscsi_target, lun_num))
                blk_source = "/mnt/test.qcow2"
                process.run('qemu-img create -f qcow2 %s %s' %
                            (blk_source, '100M'),
                            shell=True,
                            verbose=True)
                process.run('qemu-img convert -O qcow2 %s %s' %
                            (blk_source, disk_path),
                            shell=True,
                            verbose=True)
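                # disk_path follows qemu's iscsi URL form
                # iscsi://<host>/<target-iqn>/<lun>, so qemu-img convert can
                # write the qcow2 payload straight onto the raw LUN.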

        # Create block device
        if disk_type == "block":
            logging.debug("iscsi dev name: %s", iscsi_target)
            lv_utils.vg_create(vg_name, iscsi_target)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            if pool_type == "iscsi":
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2',
                    'source_mode': disk_src_mode
                }
            # iscsi-direct pools don't include the source_mode option
            else:
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2'
                }
        elif disk_type == "block":
            disk_params_src = {
                'source_file': device_source,
                'driver_type': 'raw'
            }
            # Start guest with packed attribute in disk
            if disk_packed:
                disk_params_src['driver_packed'] = driver_packed
            # Start guest with packed attribute in scsi controller
            if scsi_packed:
                scsi_controller = Controller("controller")
                scsi_controller.type = "scsi"
                scsi_controller.model = "virtio-scsi"
                scsi_controller.driver = {'packed': driver_packed}
                vm_dump_xml.add_device(scsi_controller)
                vm_dump_xml.sync()
        else:
            test.cancel("Unsupported disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth and disk_type != "volume":
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            # virsh snapshot-revert is not supported on combined internal and
            # external snapshots; see more details at
            # https://bugzilla.redhat.com/show_bug.cgi?id=1733173
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            virsh.snapshot_create_as(vm_name,
                                     snapshot_name2,
                                     ignore_status=False,
                                     debug=True)

            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "start_with_packed":
            expect_xml_line = "packed=\"%s\"" % driver_packed
            libvirt.check_dumpxml(vm, expect_xml_line)
            expect_qemu_line = "packed=%s" % driver_packed
            libvirt.check_qemu_cmd_line(expect_qemu_line)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupported operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name,
                                       disk_target,
                                       wait_remove_event=True)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if disk_type == "block":
                clean_up_lvm(iscsi_target, vg_name, lv_name)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
Beispiel #29
0
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi network disk XML
    3. Attach disk with the XML file and check the disk inside the VM
    4. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk is not supported in the"
                                    " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk is not supported in"
                                    " the current libvirt version.")
    # Back up the VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s", open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setup
            # CHAP authentication when export iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # refresh the pool
            cmd_result = virsh.pool_refresh(disk_src_pool)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                vol_name, vol_path = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                                str(cmd_result.stdout))[1]
                # Snapshot doesn't support raw disk format, create a qcow2 volume
                # disk for snapshot operation.
                process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                            shell=True)
            except IndexError:
                raise error.TestError("Fail to get volume name")

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        with open(disk_xml) as disk_xml_f:
            disk_xml_content = disk_xml_f.read()
        logging.debug("Attach disk by XML: %s", disk_xml_content)
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for domain is stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                error.TestFail("Failed getting snapshots list for %s", vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                error.TestFail("Failed getting snapshots info for %s", vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(test.tmpdir, snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                raise error.TestError("Snapshot %s not found" % snapshot_name2)

        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
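
Note: the save/restore branch above is a small round-trip that is easy to reuse. A minimal sketch, assuming an avocado-vt environment where virttest's virsh module and libvirt test utilities are importable and vm_name names a running domain (an illustration, not the test's actual helper):

import os

from virttest import virsh
from virttest.utils_test import libvirt


def save_restore_cycle(vm_name, save_file):
    """Save a running domain to a file, restore it, then remove the file."""
    try:
        ret = virsh.save(vm_name, save_file, debug=True)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, debug=True)
        libvirt.check_exit_status(ret)
    finally:
        # Clean up the save file whether or not the restore succeeded.
        if os.path.exists(save_file):
            os.remove(save_file)
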
Example #30
def run(test, params, env):
    """
    Test per-image DAC disk hotplug to VM.

    (1).Init variables for test.
    (2).Create disk xml with per-image DAC
    (3).Start VM
    (4).Attach the disk to VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")

    # Get per-image DAC setting
    vol_name = params.get('vol_name')
    target_dev = params.get('target_dev')
    disk_type_name = params.get("disk_type_name")
    img_user = params.get("img_user")
    img_group = params.get("img_group")
    relabel = 'yes' == params.get('relabel', 'yes')

    if not libvirt_version.version_compare(1, 2, 7):
        test.cancel("per-image DAC is only supported on libvirt 1.2.7"
                    " and later.")

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)

    img_path = None
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        first_disk = vm.get_first_disk_devices()
        blk_source = first_disk['source']
        owner_str = format_user_group_str(qemu_user, qemu_group)
        src_usr, src_grp = owner_str.split(':')
        os.chown(blk_source, int(src_usr), int(src_grp))
        vm.start()

        # Init a QemuImg instance and create an image.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        image = qemu_storage.QemuImg(params, tmp_dir, vol_name)
        # Create an image.
        img_path, result = image.create(params)

        # Create disk xml for attach.
        params['source_file'] = img_path
        sec_label = "%s:%s" % (img_user, img_group)
        params['sec_label'] = sec_label
        params['type_name'] = disk_type_name
        sec_label_id = format_user_group_str(img_user, img_group)

        disk_xml = utlv.create_disk_xml(params)

        # Change img file ownership to qemu:qemu and mode to 660
        os.chown(img_path, 107, 107)
        os.chmod(img_path, 0o660)

        img_label_before = check_ownership(img_path)
        if img_label_before:
            logging.debug("the image ownership before "
                          "attach: %s", img_label_before)

        # Do the attach action.
        option = "--persistent"
        result = virsh.attach_device(vm_name,
                                     filearg=disk_xml,
                                     flagstr=option,
                                     debug=True)
        utlv.check_exit_status(result, status_error)

        if not result.exit_status:
            img_label_after = check_ownership(img_path)
            if dynamic_ownership and relabel:
                if img_label_after != sec_label_id:
                    test.fail("The image dac label %s is not "
                              "expected." % img_label_after)

            ret = virsh.detach_disk(vm_name,
                                    target=target_dev,
                                    extra=option,
                                    debug=True)
            utlv.check_exit_status(ret, status_error)
    finally:
        # clean up
        vm.destroy()
        qemu_conf.restore()
        vmxml.sync()
        libvirtd.restart()
        if img_path and os.path.exists(img_path):
            os.unlink(img_path)
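
Note: the example above relies on format_user_group_str() and check_ownership(), which are not shown. The following is a hypothetical reconstruction of such helpers using only the standard library; the names (with a _sketch suffix) and exact behavior are assumptions, not the module's real code:

import grp
import os
import pwd


def format_user_group_str_sketch(user, group):
    """Resolve user/group names (or numeric strings) to a 'uid:gid' string."""
    uid = int(user) if str(user).isdigit() else pwd.getpwnam(user).pw_uid
    gid = int(group) if str(group).isdigit() else grp.getgrnam(group).gr_gid
    return "%s:%s" % (uid, gid)


def check_ownership_sketch(path):
    """Return the 'uid:gid' owner of path, or None if the path is missing."""
    if not os.path.exists(path):
        return None
    st = os.stat(path)
    return "%s:%s" % (st.st_uid, st.st_gid)
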
Example #31
    def vm_stress_events(self, event, vm):
        """
        Stress events

        :param event: event name
        :param vm: vm object
        """
        dargs = {'ignore_status': True, 'debug': True}
        for itr in range(self.iterations):
            if "vcpupin" in event:
                for vcpu in range(int(self.current_vcpu)):
                    result = virsh.vcpupin(vm.name, vcpu,
                                           random.choice(self.host_cpu_list),
                                           **dargs)
                    if not self.ignore_status:
                        libvirt.check_exit_status(result)
            elif "emulatorpin" in event:
                for vcpu in range(int(self.current_vcpu)):
                    result = virsh.emulatorpin(vm.name,
                                               random.choice(
                                                   self.host_cpu_list),
                                               **dargs)
                    if not self.ignore_status:
                        libvirt.check_exit_status(result)
            elif "suspend" in event:
                result = virsh.suspend(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                time.sleep(self.event_sleep_time)
                result = virsh.resume(vm.name, **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
            elif "cpuhotplug" in event:
                result = virsh.setvcpus(vm.name, self.max_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {'max_config': self.max_vcpu,
                                'max_live': self.max_vcpu,
                                'cur_config': self.current_vcpu,
                                'cur_live': self.max_vcpu,
                                'guest_live': self.max_vcpu}
                    utils_hotplug.check_vcpu_value(
                        vm, exp_vcpu, option="--live")
                time.sleep(self.event_sleep_time)
                result = virsh.setvcpus(vm.name, self.current_vcpu, "--live",
                                        **dargs)
                if not self.ignore_status:
                    libvirt.check_exit_status(result)
                    exp_vcpu = {'max_config': self.max_vcpu,
                                'max_live': self.max_vcpu,
                                'cur_config': self.current_vcpu,
                                'cur_live': self.current_vcpu,
                                'guest_live': self.current_vcpu}
                    utils_hotplug.check_vcpu_value(
                        vm, exp_vcpu, option="--live")
            elif "reboot" in event:
                vm.reboot()
            elif "nethotplug" in event:
                for iface_num in range(int(self.iface_num)):
                    logging.debug("Try to attach interface %d" % iface_num)
                    mac = utils_net.generate_mac_address_simple()
                    options = ("%s %s --model %s --mac %s %s" %
                               (self.iface_type, self.iface_source['network'],
                                self.iface_model, mac, self.attach_option))
                    logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options)
                    ret = virsh.attach_interface(vm.name, options,
                                                 ignore_status=True)
                    time.sleep(self.event_sleep_time)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret)
                    if self.detach_option:
                        options = ("--type %s --mac %s %s" %
                                   (self.iface_type, mac, self.detach_option))
                        logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options)
                        ret = virsh.detach_interface(vm.name, options,
                                                     ignore_status=True)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
            elif "diskhotplug" in event:
                for disk_num in range(len(self.device_source_names)):
                    disk = {}
                    disk_attach_error = False
                    disk_name = os.path.join(self.path, vm.name, self.device_source_names[disk_num])
                    device_source = libvirt.create_local_disk(
                        self.disk_type, disk_name, self.disk_size, disk_format=self.disk_format)
                    disk.update({"format": self.disk_format,
                                 "source": device_source})
                    disk_xml = Disk(self.disk_type)
                    disk_xml.device = self.disk_device
                    disk_xml.driver = {"name": self.disk_driver, "type": self.disk_format}
                    ret = virsh.attach_disk(vm.name, disk["source"], self.device_target[disk_num], self.attach_option, debug=True)
                    if not self.ignore_status:
                        libvirt.check_exit_status(ret, disk_attach_error)
                    if self.detach_option:
                        ret = virsh.detach_disk(vm.name, self.device_target[disk_num], extra=self.detach_option)
                        if not self.ignore_status:
                            libvirt.check_exit_status(ret)
                        libvirt.delete_local_disk(self.disk_type, disk_name)
            else:
                raise NotImplementedError
            time.sleep(self.itr_sleep_time)
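
Note: every branch of vm_stress_events() follows the same pattern: issue a virsh call with ignore_status=True, then validate the result only when the caller asked for it. A minimal sketch of that pattern for the vcpupin branch, assuming virttest.virsh is importable and host_cpu_list holds valid host CPU ids:

import random

from virttest import virsh


def pin_vcpus_randomly(vm_name, vcpu_count, host_cpu_list):
    """Pin each vCPU of vm_name to a randomly chosen host CPU."""
    for vcpu in range(vcpu_count):
        result = virsh.vcpupin(vm_name, vcpu,
                               random.choice(host_cpu_list),
                               ignore_status=True, debug=True)
        if result.exit_status:
            raise RuntimeError("vcpupin of vCPU %d failed: %s"
                               % (vcpu, result.stderr))
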
Example #32
def run(test, params, env):
    """
    Test guestinfo command, make sure that all supported options work well
    """
    def hotplug_disk(disk_name):
        """
        hotplug a disk to guest

        :param disk_name: the name of the disk be hotplugged
        """
        device_source = os.path.join(data_dir.get_tmp_dir(), disk_name)
        libvirt.create_local_disk("file", device_source, size='1')
        try:
            res = virsh.attach_disk(vm_name,
                                    device_source,
                                    disk_target_name,
                                    debug=True)
            utils_misc.wait_for(
                lambda: (res.stdout.strip() == "Disk attached successfully"),
                10)
        except Exception:
            test.error("Can not attach %s to the guest" % disk_target_name)

    def check_attached_disk_info(disk_info, target_name):
        """
        Check the info of the attached disk

        :param disk_info: the disk info returned from virsh guestinfo --disk
        :param target_name: the target name for the attached disk
        :return: the attached disk info returned from virsh guestinfo --disk
        """
        attached_disk_info_reported = False
        disk_logical_name = '/dev/%s' % target_name
        for i in range(int(disk_info['disk.count'])):
            prefix = 'disk.' + str(i) + '.'
            try:
                if disk_info[prefix + 'alias'] == target_name:
                    if (disk_info[prefix + 'name'] == disk_logical_name
                            and disk_info[prefix + 'partition'] == 'no'):
                        attached_disk_info_reported = True
                        break
            except KeyError:
                logging.error("Num %i is not the attached disk", i)
        return attached_disk_info_reported

    def check_guest_os_info():
        """
        Check the info of guest os from guest side

        :return: the guest os info from guest side
        """
        os_info = {}
        session = vm.wait_for_login()
        try:
            output = session.cmd_output(
                'cat /etc/os-release').strip().splitlines()
            os_info_dict = dict(item.split("=") for item in output if item)
            os_info["os.id"] = os_info_dict["ID"].strip('"')
            os_info["os.name"] = os_info_dict["NAME"].strip('"')
            os_info["os.pretty-name"] = os_info_dict["PRETTY_NAME"].strip('"')
            os_info["os.version"] = os_info_dict["VERSION"].strip('"')
            os_info["os.version-id"] = os_info_dict["VERSION_ID"].strip('"')
            os_info["os.machine"] = session.cmd_output('uname -m').strip()
            os_info["os.kernel-release"] = session.cmd_output(
                'uname -r').strip()
            os_info["os.kernel-version"] = session.cmd_output(
                'uname -v').strip()
        finally:
            session.close()
        return os_info

    def parse_timezone_info():
        """
        Parse the info returned from timedatectl cmd

        :return: the guest timezone name and offset to UTC time
        """
        session = vm.wait_for_login()
        try:
            output = session.cmd_output('timedatectl').strip().splitlines()
            out_dict = dict(item.split(": ") for item in output if item)
            tz_dict = dict((x.strip(), y.strip()) for x, y in out_dict.items())
            tz_info = re.search(r"\((.+)\)", tz_dict["Time zone"]).group(1)
            name, offset = tz_info.split(', ')
        finally:
            session.close()
        return name, offset

    def check_guest_timezone_info():
        """
        Check the info of guest timezone from guest side

        :return: the guest timezone info from guest side
        """
        timezone_info = {}
        timezone_name, hour_offset = parse_timezone_info()
        timezone_info["timezone.name"] = timezone_name
        sign = 1 if int(hour_offset) > 0 else -1
        second_offset = (int(hour_offset[-4:-2]) * 3600
                         + int(hour_offset[-2:]) * 60)
        timezone_info["timezone.offset"] = str(sign * second_offset)
        return timezone_info

    def check_guest_hostname_info():
        """
        Check the info of guest hostname from guest side

        :return: the guest hostname info from guest side
        """
        hostname_info = {}
        session = vm.wait_for_login()
        try:
            output = session.cmd_output('hostnamectl --static').strip()
            if not output:
                output = session.cmd_output('hostnamectl --transient').strip()
        finally:
            session.close()
        hostname_info['hostname'] = output
        return hostname_info

    def add_user(name, passwd):
        """
        Added a user account

        :param name: user name
        :param passwd: password of user account
        """
        session = vm.wait_for_login()
        try:
            session.cmd_output('useradd %s' % name)
            logging.debug('now system users are %s',
                          session.cmd_output('users'))
        finally:
            session.close()
        virsh.set_user_password(vm_name, name, passwd, debug=True)

    def convert_to_timestamp(t_str):
        dt = dateutil.parser.parse(t_str)
        timestamp = datetime.datetime.timestamp(dt)
        return timestamp

    def check_guest_user_info():
        """
        check the info of guest user from guest side

        :return: the guest user info from guest side
        """
        user_info = {}
        session = vm.wait_for_login()
        try:
            output = session.cmd_output(
                'last --time-format iso').strip().splitlines()
            users_login = [
                item for item in output if re.search(r'still logged in', item)
            ]
            users_login_list = [
                re.split(r"\s{2,}", item) for item in users_login
            ]
            users_login_info = [[item[0],
                                 convert_to_timestamp(item[-2])]
                                for item in users_login_list]
            sorted_user_info = sorted(users_login_info,
                                      key=lambda item: item[1])
            count = -1
            users_list = []
            for user, login_time in sorted_user_info:
                if user not in users_list:
                    users_list.append(user)
                    count += 1
                    user_key = "user." + str(count) + ".name"
                    login_time_key = "user." + str(count) + ".login-time"
                    user_info[user_key] = user
                    user_info[login_time_key] = login_time
            user_info["user.count"] = str(len(users_list))
        finally:
            session.close()
        return user_info

    def check_disk_size(ses, disk):
        """
        check the disk size from guest side

        :return: total size and used size of the disk
        """
        disk_size = ses.cmd_output('df %s' % disk).strip().splitlines()[-1]
        total_size = disk_size.split()[1]
        used_size = disk_size.split()[2]
        return total_size, used_size

    def check_guest_filesystem_info():
        """
        check the info of filesystem from guest side

        :return: the filesystem info from guest side
        """
        fs_info = {}
        count = -1
        session = vm.wait_for_login()
        try:
            lsblk_cmd = 'lsblk -Jp -o KNAME,FSTYPE,TYPE,MOUNTPOINT,PKNAME,SERIAL'
            output = json.loads(session.cmd_output(lsblk_cmd).strip())

            fs_unsorted = [
                item for item in dict(output)['blockdevices']
                if item['mountpoint'] not in [None, '[SWAP]']
            ]
            fs = sorted(fs_unsorted, key=lambda item: item['kname'])

            fs_info['fs.count'] = str(len(fs))
            for item in fs:
                total_size, used_size = check_disk_size(session, item['kname'])
                count += 1
                key_prefix = 'fs.' + str(count) + '.'
                fs_info[key_prefix + 'name'] = os.path.basename(item['kname'])
                fs_info[key_prefix + 'mountpoint'] = item['mountpoint']
                fs_info[key_prefix + 'fstype'] = item['fstype']
                fs_info[key_prefix + 'total-bytes'] = str(
                    int(total_size) * 1024)
                fs_info[key_prefix + 'used-bytes'] = str(int(used_size) * 1024)
                disks_count = item['pkname'].count('/dev')
                fs_info[key_prefix + 'disk.count'] = str(disks_count)
                for i in range(disks_count):
                    fs_info[key_prefix + "disk." + str(i) +
                            ".alias"] = re.search(
                                r"(\D+)",
                                os.path.basename(item['pkname'])).group(0)
                    if item['serial']:
                        fs_info[key_prefix + "disk." + str(i) +
                                ".serial"] = item['serial']
                if item['type'] == "lvm":
                    fs_info[key_prefix + "disk." + str(i) +
                            ".device"] = item['pkname']
                else:
                    fs_info[key_prefix + "disk." + str(i) +
                            ".device"] = item['kname']
        finally:
            session.close()
        return fs_info

    vm_name = params.get("main_vm")
    option = params.get("option", " ")
    added_user_name = params.get("added_user_name")
    added_user_passwd = params.get("added_user_passwd")
    status_error = ("yes" == params.get("status_error", "no"))
    start_ga = ("yes" == params.get("start_ga", "yes"))
    prepare_channel = ("yes" == params.get("prepare_channel", "yes"))
    disk_target_name = params.get("disk_target_name")
    disk_name = params.get("disk_name")
    readonly_mode = ("yes" == params.get("readonly_mode"))

    if not libvirt_version.version_compare(6, 0, 0):
        test.cancel(
            "Guestinfo command is not supported before libvirt-6.0.0")
    import dateutil.parser

    added_user_session = None
    root_session = None

    try:
        vm = env.get_vm(vm_name)
        virsh_dargs = {}
        if readonly_mode:
            virsh_dargs["readonly"] = True

        if start_ga and prepare_channel:
            vm.prepare_guest_agent(start=True, channel=True)

        if "user" in option:
            add_user(added_user_name, added_user_passwd)
            added_user_session = vm.wait_for_login(username=added_user_name,
                                                   password=added_user_passwd)
            root_session = vm.wait_for_login()

        if "disk" in option:
            hotplug_disk(disk_name)

        result = virsh.guestinfo(vm_name,
                                 option,
                                 **virsh_dargs,
                                 ignore_status=True,
                                 debug=True)
        error_msg = []
        if not prepare_channel:
            error_msg.append("QEMU guest agent is not configured")
        if readonly_mode:
            error_msg.append("read only access prevents virDomainGetGuestInfo")
        libvirt.check_result(result,
                             expected_fails=error_msg,
                             any_error=status_error)

        if status_error:
            return

        out = result.stdout.strip().splitlines()
        out_dict = dict(item.split(": ") for item in out)
        info_from_agent_cmd = dict(
            (x.strip(), y.strip()) for x, y in out_dict.items())
        logging.debug("info from the guest is %s", info_from_agent_cmd)

        if "disk" in option:
            if not check_attached_disk_info(info_from_agent_cmd,
                                            disk_target_name):
                test.fail(
                    "The disk info reported by agent cmd is not correct. "
                    "result: %s" % info_from_agent_cmd)
            return
        else:
            func_name = "check_guest_%s_info" % option[2:]
            info_from_guest = locals()[func_name]()
            logging.debug('%s_info_from_guest is %s', option[2:],
                          info_from_guest)

        if ("user" not in option) and ("filesystem" not in option):
            if info_from_guest != info_from_agent_cmd:
                test.fail(
                    "The %s info got from the guestinfo cmd is not correct." %
                    option[2:])
        else:
            for key, value in info_from_guest.items():
                if "used-bytes" in key:
                    # The guest block size may change by about 16000Kib after
                    # getting info via guestinfo cmd. We just need make sure
                    # the size difference will not exceed then 17000KiB.
                    if abs(int(value) -
                           int(info_from_agent_cmd[key])) > 17408000:
                        test.fail("The block size returned from guest agent "
                                  "is not correct.")
                elif "login-time" in key:
                    # login time returned from guestinfo cmd is with milliseconds,
                    # so it may cause at most 1 second deviation
                    if abs(
                            float(value) -
                            int(info_from_agent_cmd[key]) / 1000) > 1.0:
                        test.fail(
                            "The login time of active users got from guestinfo "
                            "is not correct.")
                else:
                    if value != info_from_agent_cmd[key]:
                        test.fail("The %s info get from guestinfo cmd"
                                  "is not correct." % option[2:])
    finally:
        if "user" in option:
            if added_user_session:
                added_user_session.close()
            if root_session:
                root_session.cmd('userdel -f %s' % added_user_name)
                root_session.close()
        if "disk" in option:
            virsh.detach_disk(vm_name,
                              disk_target_name,
                              ignore_status=False,
                              debug=True)
        vm.destroy()
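
Note: check_guest_timezone_info() above converts a '+HHMM'/'-HHMM' UTC offset string (as printed by timedatectl) into signed seconds. A self-contained sketch of that conversion with a couple of sanity checks:

def utc_offset_to_seconds(hour_offset):
    """Convert a '+HHMM' or '-HHMM' UTC offset string to signed seconds."""
    sign = 1 if int(hour_offset) > 0 else -1
    seconds = int(hour_offset[-4:-2]) * 3600 + int(hour_offset[-2:]) * 60
    return sign * seconds


assert utc_offset_to_seconds("+0530") == 19800
assert utc_offset_to_seconds("-0400") == -14400
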
Example #33
def run(test, params, env):
    """
    Test virsh migrate when disks are virtio-scsi.
    """
    def check_vm_state(vm, state):
        """
        Return True if vm is in the correct state.
        """
        try:
            actual_state = vm.state()
        except process.CmdError:
            return False
        return actual_state == state

    def check_disks_in_vm(vm, vm_ip, disks_list=None, runner=None):
        """
        Check disks attached to vm.
        """
        if disks_list is None:
            disks_list = []
        fail_list = []
        while len(disks_list):
            disk = disks_list.pop()
            if runner:
                check_cmd = ("ssh %s \"dd if=/dev/urandom of=%s bs=1 "
                             "count=1024\"" % (vm_ip, disk))
                try:
                    logging.debug(runner.run(check_cmd))
                    continue
                except process.CmdError as detail:
                    logging.debug("Remote checking failed:%s", detail)
                    fail_list.append(disk)
            else:
                check_cmd = "dd if=/dev/urandom of=%s bs=1 count=1024"
                session = vm.wait_for_login()
                cs = session.cmd_status(check_cmd)
                if cs:
                    fail_list.append(disk)
                session.close()
        if len(fail_list):
            test.fail("Checking attached devices failed:%s" % fail_list)

    def get_disk_id(device):
        """
        Show disk by id.
        """
        output = process.run("ls /dev/disk/by-id/", shell=True).stdout_text
        for line in output.splitlines():
            disk_ids = line.split()
            for disk_id in disk_ids:
                disk = os.path.basename(
                    process.run("readlink %s" % disk_id,
                                shell=True).stdout_text)
                if disk == os.path.basename(device):
                    return disk_id
        return None

    def cleanup_ssh_config(vm):
        session = vm.wait_for_login()
        session.cmd("rm -f ~/.ssh/authorized_keys")
        session.cmd("rm -f ~/.ssh/id_rsa*")
        session.close()

    vm = env.get_vm(params.get("migrate_main_vm"))
    source_type = params.get("disk_source_type", "file")
    device_type = params.get("disk_device_type", "disk")
    disk_format = params.get("disk_format_type", "raw")
    if source_type == "file":
        params['added_disk_type'] = "file"
    else:
        params['added_disk_type'] = "block"
        block_device = params.get("disk_block_device", "/dev/EXAMPLE")
        if block_device.count("EXAMPLE"):
            # Prepare host parameters
            local_host = params.get("migrate_source_host", "LOCAL.EXAMPLE")
            remote_host = params.get("migrate_dest_host", "REMOTE.EXAMPLE")
            remote_user = params.get("migrate_dest_user", "root")
            remote_passwd = params.get("migrate_dest_pwd")
            if remote_host.count("EXAMPLE") or local_host.count("EXAMPLE"):
                test.cancel("Config remote or local host first.")
            rdm_params = {
                'remote_ip': remote_host,
                'remote_user': remote_user,
                'remote_pwd': remote_passwd
            }
            rdm = utils_test.RemoteDiskManager(rdm_params)
            # Try to build an iscsi device
            # For local, target is a device name
            target = utlv.setup_or_cleanup_iscsi(
                is_setup=True, is_login=True, emulated_image="emulated-iscsi")
            logging.debug("Created target: %s", target)
            try:
                # Attach this iscsi device both local and remote
                remote_device = rdm.iscsi_login_setup(local_host, target)
            except Exception as detail:
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Attach iscsi device on remote failed:%s" % detail)

            # Use id to get same path on local and remote
            block_device = get_disk_id(target)
            if block_device is None:
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False)
                test.error("Set iscsi device couldn't find id?")

    srcuri = params.get("virsh_migrate_srcuri")
    dsturi = params.get("virsh_migrate_dsturi")
    remote_ip = params.get("remote_ip")
    username = params.get("remote_user", "root")
    host_pwd = params.get("remote_pwd")
    # Connection to remote, init here for cleanup
    runner = None
    # Identify easy config. mistakes early
    warning_text = ("Migration VM %s URI %s appears problematic; "
                    "this may lead to migration problems. "
                    "Consider specifying vm.connect_uri using a "
                    "fully-qualified network-based style.")

    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        test.cancel(warning_text % ('source', srcuri))

    if dsturi.count('///') or dsturi.count('EXAMPLE'):
        test.cancel(warning_text % ('destination', dsturi))

    # Config auto-login to remote host for migration
    ssh_key.setup_ssh_key(remote_ip, username, host_pwd)

    sys_image = vm.get_first_disk_devices()
    sys_image_source = sys_image["source"]
    sys_image_info = utils_misc.get_image_info(sys_image_source)
    logging.debug("System image information:\n%s", sys_image_info)
    sys_image_fmt = sys_image_info["format"]
    created_img_path = os.path.join(os.path.dirname(sys_image_source),
                                    "vsmimages")

    migrate_in_advance = "yes" == params.get("migrate_in_advance", "no")

    status_error = "yes" == params.get("status_error", "no")
    if source_type == "file" and device_type == "lun":
        status_error = True

    try:
        # For safety and simplicity, define a new VM for the test
        new_vm_name = "%s_vsmtest" % vm.name
        mig = migration.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

        # Change the disk of the vm to shared disk
        # Detach exist devices
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name,
                                         device,
                                         "--config",
                                         debug=True)
            if s_detach.exit_status:
                test.error("Detach %s failed before test." % device)

        # Attach system image as vda
        # Then added scsi disks will be sda,sdb...
        attach_args = "--subdriver %s --config" % sys_image_fmt
        virsh.attach_disk(vm.name,
                          sys_image_source,
                          "vda",
                          attach_args,
                          debug=True)

        vms = [vm]

        def start_check_vm(vm):
            try:
                vm.start()
            except virt_vm.VMStartError as detail:
                if status_error:
                    logging.debug("Expected failure:%s", detail)
                    return None, None
                else:
                    raise
            vm.wait_for_login()

            # Confirm VM can be accessed through network.
            # And this ip will be used on remote after migration
            vm_ip = vm.get_address()
            vm_pwd = params.get("password")
            s_ping, o_ping = utils_test.ping(vm_ip, count=2, timeout=60)
            logging.info(o_ping)
            if s_ping != 0:
                test.fail("%s did not respond after several "
                          "seconds with attaching new devices." % vm.name)
            return vm_ip, vm_pwd

        options = "--live --unsafe"
        # Do migration before attaching new devices
        if migrate_in_advance:
            vm_ip, vm_pwd = start_check_vm(vm)
            cleanup_ssh_config(vm)
            mig_thread = threading.Thread(target=mig.thread_func_migration,
                                          args=(vm, dsturi, options))
            mig_thread.start()
            # Make sure migration is running
            time.sleep(2)

        # Attach other disks
        params['added_disk_target'] = "scsi"
        params['target_bus'] = "scsi"
        params['device_type'] = device_type
        params['type_name'] = source_type
        params['added_disk_format'] = disk_format
        if migrate_in_advance:
            params["attach_disk_config"] = "no"
            attach_disk_config = False
        else:
            params["attach_disk_config"] = "yes"
            attach_disk_config = True
        try:
            if source_type == "file":
                utlv.attach_disks(vm, "%s/image" % created_img_path, None,
                                  params)
            else:
                ret = utlv.attach_additional_device(vm.name,
                                                    "sda",
                                                    block_device,
                                                    params,
                                                    config=attach_disk_config)
                if ret.exit_status:
                    test.fail(ret)
        except (exceptions.TestFail, process.CmdError) as detail:
            if status_error:
                logging.debug("Expected failure:%s", detail)
                return
            else:
                raise

        if migrate_in_advance:
            mig_thread.join(60)
            if mig_thread.is_alive():
                mig.RET_LOCK.acquire()
                mig.MIGRATION = False
                mig.RET_LOCK.release()
        else:
            vm_ip, vm_pwd = start_check_vm(vm)

        # Have got expected failures when starting vm, end the test
        if vm_ip is None and status_error:
            return

        # Start checking before migration and go on checking after migration
        disks = []
        for target in list(vm.get_disk_devices().keys()):
            if target != "vda":
                disks.append("/dev/%s" % target)

        checked_count = int(params.get("checked_count", 0))
        disks_before = disks[:(checked_count // 2)]
        disks_after = disks[(checked_count // 2):checked_count]
        logging.debug(
            "Disks to be checked:\nBefore migration:%s\n"
            "After migration:%s", disks_before, disks_after)

        options = "--live --unsafe"
        if not migrate_in_advance:
            cleanup_ssh_config(vm)
            mig.do_migration(vms, None, dsturi, "orderly", options, 120)

        if mig.RET_MIGRATION:
            utils_test.check_dest_vm_network(vm, vm_ip, remote_ip, username,
                                             host_pwd)
            runner = remote.RemoteRunner(host=remote_ip,
                                         username=username,
                                         password=host_pwd)
            # After migration, config autologin to vm
            ssh_key.setup_remote_ssh_key(vm_ip, "root", vm_pwd)
            check_disks_in_vm(vm, vm_ip, disks_after, runner)

            if migrate_in_advance:
                test.fail("Migration before attaching successfully, "
                          "but not expected.")

    finally:
        # Cleanup remote vm
        if srcuri != dsturi:
            mig.cleanup_dest_vm(vm, srcuri, dsturi)
        # Cleanup created vm anyway
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.undefine(new_vm_name)

        # Cleanup iscsi device for block if it is necessary
        if source_type == "block":
            if params.get("disk_block_device",
                          "/dev/EXAMPLE").count("EXAMPLE"):
                rdm.iscsi_login_setup(local_host, target, is_login=False)
                utlv.setup_or_cleanup_iscsi(is_setup=False,
                                            emulated_image="emulated-iscsi")

        if runner:
            runner.session.close()
        process.run("rm -f %s/*vsmtest" % created_img_path, shell=True)
Example #34
def run(test, params, env):
    """
    Test the PCIe controllers' options
    1. Backup guest xml before the tests
    2. Modify guest xml and define the guest
    3. Start guest
    4. Hotplug if needed
    5. Do checking
    6. Destroy guest and restore guest
    """

    def get_disk_bus(disk_dev=None):
        """
        Get the bus list of guest disks

        :param disk_dev: The specified disk device
        :return: list for disks' buses
        """
        disk_bus_list = []

        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        disk_dev_list = cur_vm_xml.get_disk_blk(vm_name)
        if disk_dev and disk_dev not in disk_dev_list:
            return disk_bus_list
        for disk_index in range(0, len(disk_dev_list)):
            disk_target = disk_dev if disk_dev else disk_dev_list[disk_index]
            disk_bus = cur_vm_xml.get_disk_attr(vm_name, disk_target, 'address', 'bus')
            disk_bus_list.append(disk_bus)
            if disk_dev:
                break
        return disk_bus_list

    def check_guest_disks(ishotplug):
        """
        Check guest disks in different ways

        :param ishotplug: True for hotplug, False for hotunplug
        :raise: test.fail if some errors happen
        """
        def _find_disk_by_cmd():
            """
            Check disk using virsh command

            :return: True if the disk is found, otherwise False
            """
            ret = virsh.domblklist(vm_name, **virsh_options)
            target_disks = re.findall(r"[vs]d[a-z]", ret.stdout.strip())
            logging.debug(target_disks)

            for one_disk in target_disks:
                if target_dev in one_disk:
                    logging.debug("Found the disk '{}'".format(target_dev))
                    return True
            logging.debug("Can't find the disk '{}'".format(target_dev))
            return False

        def _find_disk_in_xml():
            """
            Check disk in guest xml

            :return: True if the disk is found with right bus
                     False if the disk is not found
            :raise: test.fail if the disk's bus is incorrect
            """
            bus_list = get_disk_bus(target_dev)
            if len(bus_list) == 0:
                return False
            if bus_list[0] != '0x%02x' % int(contr_index):
                test.fail("The found disk's bus is expected to be {}, "
                          "but {} found".format('0x%02x' % int(contr_index),
                                                bus_list[0]))
            return True

        virsh_options.update({'ignore_status': False})
        # Firstly check virsh.domblklist
        found_by_cmd = _find_disk_by_cmd()
        found_in_xml = _find_disk_in_xml()
        msg1 = "Can't find the device with target_dev '{}' by cmd".format(target_dev)
        msg2 = "Found the device with target_dev '{}' unexpectedly by cmd".format(target_dev)
        msg3 = "The device with target_dev '{}' was not detached successfully in xml".format(target_dev)
        msg4 = "The device with target_dev '{}' was detached unexpectedly in xml".format(target_dev)
        if ((ishotplug and not status_error and not found_by_cmd) or
           (not ishotplug and status_error and not found_by_cmd)):
            test.fail(msg1)
        if ((ishotplug and status_error and found_by_cmd) or
           (not ishotplug and not status_error and found_by_cmd)):
            test.fail(msg2)
        if ((ishotplug and not status_error and not found_in_xml) or
           (not ishotplug and not status_error and found_in_xml)):
            test.fail(msg3)
        if ((ishotplug and status_error and found_in_xml) or
           (not ishotplug and status_error and not found_in_xml)):
            test.fail(msg4)

    def check_inside_guest(ishotplug):
        """
        Check devices within the guest

        :param ishotplug: True for hotplug, False for hotunplug
        :raise: test.fail if the result is not expected
        """
        def _check_disk_in_guest():
            """
            Compare the disk numbers within the guest

            :return: True if new disk is found, otherwise False
            """
            new_disk_num = len(vm.get_disks())
            if new_disk_num > ori_disk_num:
                logging.debug("New disk is found in vm")
                return True
            logging.debug("New disk is not found in vm")
            return False

        vm_session = vm.wait_for_login()
        status = _check_disk_in_guest()
        vm_session.close()
        msg1 = "Can't find the device in the guest"
        msg2 = "Found the device in the guest unexpectedly"
        if ((ishotplug and not status_error and not status) or
                (not ishotplug and status_error and not status)):
            test.fail(msg1)
        if ((ishotplug and status_error and status) or
                (not ishotplug and not status_error and status)):
            test.fail(msg2)

    def check_guest_contr():
        """
        Check the controller in guest xml

        :raise: test.fail if the controller does not meet the expectation
        """
        cur_vm_xml = VMXML.new_from_dumpxml(vm_name)
        found_cntl = None
        for cntl in cur_vm_xml.devices.by_device_tag('controller'):
            if (cntl.type == 'pci' and
               cntl.model == contr_model and
               cntl.index == contr_index):
                found_cntl = cntl
                logging.debug(cntl.target)
                cntl_hotplug = cntl.target.get('hotplug')
                logging.debug("Got controller's hotplug:%s", cntl_hotplug)
                if cntl_hotplug != hotplug_option:
                    test.fail("The controller's hotplug option is {}, "
                              "but expect {}".format(cntl_hotplug,
                                                     hotplug_option))
                break
        if not found_cntl:
            test.fail("The controller with index {} is not found".format(contr_index))

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    setup_controller = params.get("setup_controller", 'yes') == 'yes'
    check_within_guest = params.get("check_within_guest", 'yes') == 'yes'
    check_disk_xml = params.get("check_disk_xml", 'no') == 'yes'
    check_cntl_xml = params.get("check_cntl_xml", 'no') == 'yes'
    contr_model = params.get("controller_model", 'pcie-root-port')
    contr_target = params.get("controller_target")
    contr_index = params.get("contr_index")
    hotplug_option = params.get("hotplug_option")
    hotplug = params.get("hotplug", 'yes') == 'yes'
    define_option = params.get("define_option")
    attach_extra = params.get("attach_extra")
    target_dev = params.get("target_dev")
    err_msg = params.get("err_msg")
    status_error = params.get("status_error", "no") == 'yes'
    restart_daemon = params.get("restart_daemon", "no") == 'yes'
    save_restore = params.get("save_restore", "no") == 'yes'
    hotplug_counts = params.get("hotplug_counts")

    virsh_options = {'debug': True, 'ignore_status': False}

    image_path_list = []
    vm = env.get_vm(vm_name)
    vm_xml_obj = VMXML.new_from_inactive_dumpxml(vm_name)
    vm_xml_backup = vm_xml_obj.copy()
    try:
        if check_within_guest:
            if not vm.is_alive():
                virsh.start(vm_name, **virsh_options)
            ori_disk_num = len(vm.get_disks())
            logging.debug("The original disk number in vm is %d", ori_disk_num)
            virsh.destroy(vm_name)

        vm_xml_obj.remove_all_device_by_type('controller')
        if setup_controller:
            contr_dict = {'controller_type': 'pci',
                          'controller_model': contr_model,
                          'controller_index': contr_index,
                          'controller_target': contr_target}
            contr_obj = libvirt.create_controller_xml(contr_dict)
            vm_xml_obj.add_device(contr_obj)
            logging.debug("Add a controller: %s" % contr_obj)

        virsh.define(vm_xml_obj.xml, options=define_option, **virsh_options)
        if not save_restore:
            disk_max = int(hotplug_counts) if hotplug_counts else 1
            for disk_inx in range(0, disk_max):
                image_path = os.path.join(data_dir.get_tmp_dir(),
                                          'disk{}.qcow2'.format(disk_inx))
                image_path_list.append(image_path)
                libvirt.create_local_disk("file", image_path, '10M',
                                          disk_format='qcow2')
        if not hotplug and not save_restore:
            # Coldplug the disk before hot-unplug so there is a device to detach
            virsh.attach_disk(vm_name, image_path, target_dev,
                              extra=attach_extra,
                              **virsh_options)
        virsh.start(vm_name, **virsh_options)

        logging.debug("Test VM XML after starting:"
                      "\n%s", VMXML.new_from_dumpxml(vm_name))
        vm.wait_for_login().close()

        if restart_daemon:
            daemon_obj = Libvirtd()
            daemon_obj.restart()

        if save_restore:
            save_path = os.path.join(data_dir.get_tmp_dir(), 'rhel.save')
            virsh.save(vm_name, save_path, **virsh_options)
            time.sleep(10)
            virsh.restore(save_path, **virsh_options)
        # Hotplug the disk device(s)
        if hotplug:
            virsh_options.update({'ignore_status': True})
            attach_times = 1 if not hotplug_counts else int(hotplug_counts)

            if attach_times == 1:
                ret = virsh.attach_disk(vm_name, image_path_list[0], target_dev,
                                        extra=attach_extra,
                                        **virsh_options)
                libvirt.check_result(ret, expected_fails=err_msg)
            else:
                for attach_inx in range(0, attach_times):
                    disk_dev = 'vd{}'.format(chr(98 + attach_inx))
                    ret = virsh.attach_disk(vm_name, image_path_list[attach_inx], disk_dev,
                                            extra=attach_extra,
                                            **virsh_options)
                    if ret.exit_status:
                        break
                libvirt.check_result(ret, expected_fails=err_msg)
        if not hotplug and check_within_guest:
            virsh_options.update({'ignore_status': True})
            ret = virsh.detach_disk(vm_name, target_dev, **virsh_options)
            libvirt.check_result(ret, expected_fails=err_msg)
        if check_disk_xml:
            time.sleep(5)
            check_guest_disks(hotplug)
        if check_cntl_xml:
            check_guest_contr()
        if hotplug_counts:
            bus_list = get_disk_bus()
            for one_bus in bus_list:
                if one_bus == '0x%02x' % int(contr_index):
                    test.fail("The disk should not be attached "
                              "to the controller with "
                              "index '{}'".format(contr_index))
            logging.debug("No disk is found to attach to the "
                          "controller with index '{}'".format(contr_index))
        if check_within_guest:
            check_inside_guest(hotplug)

    finally:
        vm_xml_backup.sync()
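
Note: the test above matches a disk's <address bus='...'/> attribute against a controller index by formatting the index as a two-digit hex string. A tiny sketch of that comparison:

def bus_matches_controller(disk_bus_attr, contr_index):
    """True if a disk's address bus attribute points at contr_index."""
    return disk_bus_attr == '0x%02x' % int(contr_index)


assert bus_matches_controller('0x05', '5')
assert not bus_matches_controller('0x05', '6')
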
Example #35
def run(test, params, env):
    """
    Test virsh blockresize command for block device of domain.

    1) Init the variables from params.
    2) Create an image with specified format.
    3) Attach a disk image to vm.
    4) Test blockresize for the disk
    5) Detach the disk
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm", "virt-tests-vm1")
    image_format = params.get("disk_image_format", "qcow2")
    initial_disk_size = params.get("initial_disk_size", "500K")
    status_error = "yes" == params.get("status_error", "yes")
    resize_value = params.get("resize_value")
    virsh_dargs = {'debug': True}

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if image_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    # Create an image.
    tmp_dir = data_dir.get_tmp_dir()
    image_path = os.path.join(tmp_dir, "blockresize_test")
    logging.info("Create image: %s, "
                 "size %s, "
                 "format %s", image_path, initial_disk_size, image_format)

    cmd = "qemu-img create -f %s %s %s" % (image_format, image_path,
                                           initial_disk_size)
    status, output = commands.getstatusoutput(cmd)
    if status:
        raise error.TestError("Creating image file %s failed: %s"
                              % (image_path, output))

    # Hotplug the image as disk device
    result = virsh.attach_disk(vm_name, source=image_path, target="vdd",
                               extra=" --subdriver %s" % image_format)
    if result.exit_status:
        raise error.TestError("Failed to attach disk %s to VM: %s."
                              % (image_path, result.stderr))

    if resize_value == "over_size":
        # Use byte unit for over_size test
        resize_value = "%s" % OVER_SIZE + "b"

    # Run the test
    try:
        result = virsh.blockresize(vm_name, image_path,
                                   resize_value, **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()

        # Check status_error
        if status_error:
            if status == 0 or err == "":
                raise error.TestFail("Expect failure, but run successfully!")
            # No need to do more test
            return
        else:
            if status != 0 or err != "":
                # bz 1002813 will result in an error on this
                err_str = "unable to execute QEMU command 'block_resize': Could not resize: Invalid argument"
                if resize_value[-2] in "kb" and re.search(err_str, err):
                    raise error.TestNAError("BZ 1002813 not yet applied")
                else:
                    raise error.TestFail("Run failed with right "
                                         "virsh blockresize command")

        # Although kb should not be used, libvirt/virsh will accept it and
        # consider it as a 1000 bytes, which caused issues for qed & qcow2
        # since they expect a value evenly divisible by 512 (hence bz 1002813).
        if "kb" in resize_value:
            value = int(resize_value[:-2])
            if image_format in ["qed", "qcow2"]:
                # qcow2 and qed want a VIR_ROUND_UP value based on 512 byte
                # sectors - hence this less than visually appealing formula
                expected_size = (((value * 1000) + 512 - 1) // 512) * 512
            else:
                # Raw images...
                # Ugh - there's some rather ugly looking math when kb
                # (or mb, gb, tb, etc.) are used as the scale for the
                # value to create an image. The blockresize for the
                # running VM uses a qemu json call which differs from
                # what qemu-img would do - resulting in (to say the
                # least) awkward sizes. We'll just have to make sure we
                # don't deviate by more than a sector.
                expected_size = value * 1000
        elif "kib" in resize_value:
            value = int(resize_value[:-3])
            expected_size = value * 1024
        elif resize_value[-1] in "b":
            expected_size = int(resize_value[:-1])
        elif resize_value[-1] in "k":
            value = int(resize_value[:-1])
            expected_size = value * 1024
        elif resize_value[-1] == "m":
            value = int(resize_value[:-1])
            expected_size = value * 1024 * 1024
        elif resize_value[-1] == "g":
            value = int(resize_value[:-1])
            expected_size = value * 1024 * 1024 * 1024
        else:
            raise error.TestError("Unknown scale value")

        image_info = utils_misc.get_image_info(image_path)
        actual_size = int(image_info['vsize'])

        logging.info("The expected block size is %s bytes, "
                     "the actual block size is %s bytes",
                     expected_size, actual_size)

        # See comment above regarding Raw images
        if image_format == "raw" and resize_value[-2] in "kb":
            if abs(int(actual_size) - int(expected_size)) > 512:
                raise error.TestFail("New raw blocksize set by blockresize do "
                                     "not match the expected value")
        else:
            if int(actual_size) != int(expected_size):
                raise error.TestFail("New blocksize set by blockresize is "
                                     "different from actual size from "
                                     "'qemu-img info'")
    finally:
        virsh.detach_disk(vm_name, target="vdd")

        if os.path.exists(image_path):
            os.remove(image_path)
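
Note: for qcow2 and qed images the test expects blockresize to round the requested size up to whole 512-byte sectors (VIR_ROUND_UP). A sketch of that rounding, using the 10kb case discussed in the comments above:

def round_up_to_sector(size_bytes, sector=512):
    """Round size_bytes up to the next multiple of sector."""
    return ((size_bytes + sector - 1) // sector) * sector


# 10 "kb" == 10000 bytes rounds up to 10240 bytes (20 sectors).
assert round_up_to_sector(10 * 1000) == 10240
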
def run(test, params, env):
    """
    Test rbd disk device.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare disk image.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    additional_xml_file = os.path.join(data_dir.get_tmp_dir(), "additional_disk.xml")

    def config_ceph():
        """
        Write the configs to the file.
        """
        src_host = disk_src_host.split()
        src_port = disk_src_port.split()
        conf_str = "mon_host = "
        hosts = []
        for host, port in zip(src_host, src_port):
            hosts.append("%s:%s" % (host, port))
        with open(disk_src_config, 'w') as f:
            f.write(conf_str + ','.join(hosts) + '\n')

    def create_pool():
        """
        Define and start a pool.
        """
        sp = libvirt_storage.StoragePool()
        if create_by_xml:
            p_xml = pool_xml.PoolXML(pool_type=pool_type)
            p_xml.name = pool_name
            s_xml = pool_xml.SourceXML()
            s_xml.vg_name = disk_src_pool
            source_host = []
            for (host_name, host_port) in zip(
                    disk_src_host.split(), disk_src_port.split()):
                source_host.append({'name': host_name,
                                    'port': host_port})

            s_xml.hosts = source_host
            if auth_type:
                s_xml.auth_type = auth_type
            if auth_user:
                s_xml.auth_username = auth_user
            if auth_usage:
                s_xml.secret_usage = auth_usage
            p_xml.source = s_xml
            logging.debug("Pool xml: %s", p_xml)
            p_xml.xmltreefile.write()
            ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_build(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
            ret = virsh.pool_start(pool_name, **virsh_dargs)
            libvirt.check_exit_status(ret)
        else:
            auth_opt = ""
            if client_name and client_key:
                auth_opt = ("--auth-type %s --auth-username %s --secret-usage '%s'"
                            % (auth_type, auth_user, auth_usage))
            if not sp.define_rbd_pool(pool_name, mon_host,
                                      disk_src_pool, extra=auth_opt):
                test.fail("Failed to define storage pool")
            if not sp.build_pool(pool_name):
                test.fail("Failed to build storage pool")
            if not sp.start_pool(pool_name):
                test.fail("Failed to start storage pool")

        # Check pool operation
        ret = virsh.pool_refresh(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_uuid(pool_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        # pool-info
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'no':
            test.fail("Failed to check pool information")
        # pool-autostart
        if not sp.set_pool_autostart(pool_name):
            test.fail("Failed to set pool autostart")
        pool_info = sp.pool_info(pool_name)
        if pool_info["Autostart"] != 'yes':
            test.fail("Failed to check pool information")
        # pool-autostart --disable
        if not sp.set_pool_autostart(pool_name, "--disable"):
            test.fail("Failed to set pool autostart")
        # If port is not pre-configured, port value should not be hardcoded in pool information.
        if "yes" == params.get("rbd_port", "no"):
            if 'port' in virsh.pool_dumpxml(pool_name):
                test.fail("port attribute should not be in pool information")
        # find-storage-pool-sources-as
        if "yes" == params.get("find_storage_pool_sources_as", "no"):
            ret = virsh.find_storage_pool_sources_as("rbd", mon_host)
            libvirt.check_result(ret, skip_if=unsupported_err)
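
    # For reference, the rbd pool XML defined above has roughly this shape
    # (upper-case names stand for the test parameters; illustration only):
    #   <pool type='rbd'>
    #     <name>POOL_NAME</name>
    #     <source>
    #       <host name='HOST' port='PORT'/>
    #       <name>DISK_SRC_POOL</name>
    #       <auth type='ceph' username='AUTH_USER'>
    #         <secret usage='AUTH_USAGE'/>
    #       </auth>
    #     </source>
    #   </pool>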

    def create_vol(vol_params):
        """
        Create volume.

        :param vol_params: Volume parameters dict.
        """
        pvt = libvirt.PoolVolumeTest(test, params)
        if create_by_xml:
            pvt.pre_vol_by_xml(pool_name, **vol_params)
        else:
            pvt.pre_vol(vol_name, None, '2G', None, pool_name)

    def check_vol(vol_params):
        """
        Check volume information.
        """
        pv = libvirt_storage.PoolVolume(pool_name)
        # Supported operation
        if vol_name not in pv.list_volumes():
            test.fail("Volume %s doesn't exist" % vol_name)
        ret = virsh.vol_dumpxml(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        # vol-info
        if not pv.volume_info(vol_name):
            test.fail("Can't see volume info")
        # vol-key
        ret = virsh.vol_key(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume key isn't correct")
        # vol-path
        ret = virsh.vol_path(vol_name, pool_name)
        libvirt.check_exit_status(ret)
        if "%s/%s" % (disk_src_pool, vol_name) not in ret.stdout.strip():
            test.fail("Volume path isn't correct")
        # vol-pool
        ret = virsh.vol_pool("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if pool_name not in ret.stdout.strip():
            test.fail("Volume pool isn't correct")
        # vol-name
        ret = virsh.vol_name("%s/%s" % (disk_src_pool, vol_name))
        libvirt.check_exit_status(ret)
        if vol_name not in ret.stdout.strip():
            test.fail("Volume name isn't correct")
        # vol-resize
        ret = virsh.vol_resize(vol_name, "2G", pool_name)
        libvirt.check_exit_status(ret)

        # Not supported operation
        # vol-clone
        ret = virsh.vol_clone(vol_name, cloned_vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-create-from
        volxml = vol_xml.VolXML()
        vol_params.update({"name": "%s" % create_from_cloned_volume})
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()
        ret = virsh.vol_create_from(pool_name, v_xml.xml, vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # vol-wipe
        ret = virsh.vol_wipe(vol_name, pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-upload
        ret = virsh.vol_upload(vol_name, vm.get_first_disk_devices()['source'],
                               "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)
        # vol-download
        ret = virsh.vol_download(vol_name, cloned_vol_name, "--pool %s" % pool_name)
        libvirt.check_result(ret, skip_if=unsupported_err)

    def check_qemu_cmd():
        """
        Check qemu command line options.
        """
        cmd = ("ps -ef | grep %s | grep -v grep " % vm_name)
        process.run(cmd, shell=True)
        if disk_src_name:
            cmd += " | grep file=rbd:%s:" % disk_src_name
            if auth_user and auth_key:
                cmd += ('id=%s:auth_supported=cephx' % auth_user)
        if disk_src_config:
            cmd += " | grep 'conf=%s'" % disk_src_config
        elif mon_host:
            hosts = r'\:6789\;'.join(mon_host.split())
            cmd += " | grep 'mon_host=%s'" % hosts
        if driver_iothread:
            cmd += " | grep iothread%s" % driver_iothread
        # Run the command
        process.run(cmd, shell=True)
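
    # A matching qemu process command line contains a -drive argument of
    # roughly this shape (values illustrative):
    #   file=rbd:pool/image:id=user:auth_supported=cephx:mon_host=10.0.0.1\:6789,...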

    def check_save_restore():
        """
        Test save and restore operation
        """
        save_file = os.path.join(data_dir.get_tmp_dir(),
                                 "%s.save" % vm_name)
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)
        if os.path.exists(save_file):
            os.remove(save_file)
        # Login to check vm status
        vm.wait_for_login().close()

    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")

    def check_blockcopy(target):
        """
        Block copy operation test.
        """
        blk_file = os.path.join(data_dir.get_tmp_dir(), "blk.rbd")
        if os.path.exists(blk_file):
            os.remove(blk_file)
        blk_mirror = ("mirror type='file' file='%s' "
                      "format='raw' job='copy'" % blk_file)

        # Do blockcopy
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_result(ret, skip_if=unsupported_err)

        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count(blk_mirror):
            test.fail("Can't see block job in domain xml")

        # Abort
        ret = virsh.blockjob(vm_name, target, "--abort")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if dom_xml.count(blk_mirror):
            test.fail("Failed to abort block job")
        if os.path.exists(blk_file):
            os.remove(blk_file)

        # Sleep for a while after abort operation.
        time.sleep(5)
        # Do blockcopy again
        ret = virsh.blockcopy(vm_name, target, blk_file)
        libvirt.check_exit_status(ret)

        # Wait for complete
        def wait_func():
            ret = virsh.blockjob(vm_name, target, "--info")
            return ret.stderr.count("Block Copy: [100 %]")
        timeout = params.get("blockjob_timeout", 600)
        utils_misc.wait_for(wait_func, int(timeout))

        # Pivot
        ret = virsh.blockjob(vm_name, target, "--pivot")
        libvirt.check_exit_status(ret)
        dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
        if not dom_xml.count("source file='%s'" % blk_file):
            test.fail("Failed to pivot block job")
        # Remove the disk file.
        if os.path.exists(blk_file):
            os.remove(blk_file)
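
    # While the copy runs, 'virsh blockjob --info' reports progress lines
    # such as:
    #   Block Copy: [ 52 %]
    # wait_func above polls until the 100 % line appears.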

    def check_in_vm(vm_obj, target, old_parts, read_only=False):
        """
        Check mount/read/write disk in VM.
        :param vm_obj: VM guest object.
        :param target: Disk dev in VM.
        :param old_parts: Partition list collected before attaching the disk.
        :param read_only: Whether the disk is expected to be read-only.
        :return: True if the check succeeds.
        """
        try:
            session = vm_obj.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            cmd = ("mount /dev/{0} /mnt && ls /mnt && (sleep 15;"
                   " touch /mnt/testfile; umount /mnt)"
                   .format(added_part))
            s, o = session.cmd_status_output(cmd, timeout=60)
            session.close()
            logging.info("Check disk operation in VM:\n, %s, %s", s, o)
            # Readonly fs, check the error messages.
            # The command may return True, read-only
            # messges can be found from the command output
            if read_only:
                if "Read-only file system" not in o:
                    return False
                else:
                    return True

            # Other errors
            if s != 0:
                return False
            return True

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def clean_up_volume_snapshots():
        """
        Get all snapshots of the rbd_vol.img volume, unprotect them and then clean them up.
        """
        cmd = ("rbd -m {0} {1} info {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        if process.run(cmd, ignore_status=True, shell=True).exit_status:
            return
        # Get snapshot list.
        cmd = ("rbd -m {0} {1} snap"
               " list {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        snaps_out = process.run(cmd, ignore_status=True, shell=True).stdout_text
        snap_names = []
        if snaps_out:
            for line in snaps_out.splitlines():
                if line.startswith("SNAPID") or line == "":
                    continue
                snap_line = line.split()
                if len(snap_line) == 4:
                    snap_names.append(snap_line[1])
            logging.debug("Find snapshots: %s", snap_names)
            # Unprotect the snapshots first, otherwise purging the volume will fail
            for snap_name in snap_names:
                cmd = ("rbd -m {0} {1} snap"
                       " unprotect {2}@{3}"
                       "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name), snap_name))
                process.run(cmd, ignore_status=True, shell=True)
        # Purge the volume, and then delete it.
        cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
               " purge {2} && rbd -m {0} {1} rm {2}"
               "".format(mon_host, key_opt, os.path.join(disk_src_pool, vol_name)))
        process.run(cmd, ignore_status=True, shell=True)
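
    # Each data row of 'rbd snap list' splits into four whitespace-separated
    # fields, e.g. (illustrative):
    #   SNAPID NAME  SIZE
    #        4 snap1 1024 MB
    # which is why snap_line[1] above is taken as the snapshot name.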

    def make_snapshot():
        """
        Make external snapshots.

        :return: external snapshot path list
        """
        logging.info("Making snapshot...")
        first_disk_source = vm.get_first_disk_devices()['source']
        snapshot_path_list = []
        snapshot2_file = os.path.join(data_dir.get_tmp_dir(), "mem.s2")
        snapshot3_file = os.path.join(data_dir.get_tmp_dir(), "mem.s3")
        snapshot4_file = os.path.join(data_dir.get_tmp_dir(), "mem.s4")
        snapshot4_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s4")
        snapshot5_file = os.path.join(data_dir.get_tmp_dir(), "mem.s5")
        snapshot5_disk_file = os.path.join(data_dir.get_tmp_dir(), "disk.s5")

        # Attempt to take different types of snapshots.
        snapshots_param_dict = {"s1": "s1 --disk-only --no-metadata",
                                "s2": "s2 --memspec %s --no-metadata" % snapshot2_file,
                                "s3": "s3 --memspec %s --no-metadata --live" % snapshot3_file,
                                "s4": "s4 --memspec %s --diskspec vda,file=%s --no-metadata" % (snapshot4_file, snapshot4_disk_file),
                                "s5": "s5 --memspec %s --diskspec vda,file=%s --live --no-metadata" % (snapshot5_file, snapshot5_disk_file)}
        for snapshot_name in sorted(snapshots_param_dict.keys()):
            ret = virsh.snapshot_create_as(vm_name, snapshots_param_dict[snapshot_name],
                                           **virsh_dargs)
            libvirt.check_exit_status(ret)
            if snapshot_name != 's4' and snapshot_name != 's5':
                snapshot_path_list.append(first_disk_source.replace('qcow2', snapshot_name))
        return snapshot_path_list
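
    # Note: s4 and s5 pass explicit --diskspec files, so only s1-s3 produce
    # snapshot images whose paths derive from the first disk source; that is
    # why they are the only names appended to snapshot_path_list above.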

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = results_stdout_52lts(secret_list_result).strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # If secret list is empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace; the first field is the secret UUID
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    mon_host = params.get("mon_host")
    disk_src_name = params.get("disk_source_name")
    disk_src_config = params.get("disk_source_config")
    disk_src_host = params.get("disk_source_host")
    disk_src_port = params.get("disk_source_port")
    disk_src_pool = params.get("disk_source_pool")
    disk_format = params.get("disk_format", "raw")
    driver_iothread = params.get("driver_iothread")
    snap_name = params.get("disk_snap_name")
    attach_device = "yes" == params.get("attach_device", "no")
    attach_disk = "yes" == params.get("attach_disk", "no")
    test_save_restore = "yes" == params.get("test_save_restore", "no")
    test_snapshot = "yes" == params.get("test_snapshot", "no")
    test_blockcopy = "yes" == params.get("test_blockcopy", "no")
    test_qemu_cmd = "yes" == params.get("test_qemu_cmd", "no")
    test_vm_parts = "yes" == params.get("test_vm_parts", "no")
    additional_guest = "yes" == params.get("additional_guest", "no")
    create_snapshot = "yes" == params.get("create_snapshot", "no")
    convert_image = "yes" == params.get("convert_image", "no")
    create_volume = "yes" == params.get("create_volume", "no")
    create_by_xml = "yes" == params.get("create_by_xml", "no")
    client_key = params.get("client_key")
    client_name = params.get("client_name")
    auth_key = params.get("auth_key")
    auth_user = params.get("auth_user")
    auth_type = params.get("auth_type")
    auth_usage = params.get("secret_usage")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    vol_name = params.get("vol_name")
    cloned_vol_name = params.get("cloned_volume", "cloned_test_volume")
    create_from_cloned_volume = params.get("create_from_cloned_volume", "create_from_cloned_test_volume")
    vol_cap = params.get("vol_cap")
    vol_cap_unit = params.get("vol_cap_unit")
    start_vm = "yes" == params.get("start_vm", "no")
    test_disk_readonly = "yes" == params.get("test_disk_readonly", "no")
    test_disk_internal_snapshot = "yes" == params.get("test_disk_internal_snapshot", "no")
    test_json_pseudo_protocol = "yes" == params.get("json_pseudo_protocol", "no")
    disk_snapshot_with_sanlock = "yes" == params.get("disk_internal_with_sanlock", "no")

    # Create /etc/ceph/ceph.conf file to suppress false warning messages.
    process.run("mkdir -p /etc/ceph", ignore_status=True, shell=True)
    cmd = ("echo 'mon_host = {0}' >/etc/ceph/ceph.conf"
           .format(mon_host))
    process.run(cmd, ignore_status=True, shell=True)

    # Start vm and get all partitions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)
    if additional_guest:
        guest_name = "%s_%s" % (vm_name, '1')
        timeout = params.get("clone_timeout", 360)
        utils_libguestfs.virt_clone_cmd(vm_name, guest_name,
                                        True, timeout=timeout,
                                        ignore_status=False)
        additional_vm = vm.clone(guest_name)
        if start_vm:
            virsh.start(guest_name)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    key_opt = ""
    secret_uuid = None
    snapshot_path = None
    key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    front_end_img_file = os.path.join(data_dir.get_tmp_dir(),
                                      "%s_frontend_test.img" % vm_name)
    # Construct an unsupported error message list to skip these kinds of tests
    unsupported_err = []
    if driver_iothread:
        unsupported_err.append('IOThreads not supported')
    if test_snapshot:
        unsupported_err.append('live disk snapshot not supported')
    if test_disk_readonly:
        if not libvirt_version.version_compare(5, 0, 0):
            unsupported_err.append('Could not create file: Permission denied')
            unsupported_err.append('Permission denied')
        else:
            unsupported_err.append('unsupported configuration: external snapshot ' +
                                   'for readonly disk vdb is not supported')
    if test_disk_internal_snapshot:
        unsupported_err.append('unsupported configuration: internal snapshot for disk ' +
                               'vdb unsupported for storage type raw')
    if test_blockcopy:
        unsupported_err.append('block copy is not supported')
    if attach_disk:
        unsupported_err.append('No such file or directory')
    if create_volume:
        unsupported_err.append("backing 'volume' disks isn't yet supported")
        unsupported_err.append('this function is not supported')

    try:
        # Clean up dirty secrets in the test environment if any exist.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Prepare test environment.
        qemu_config = LibvirtQemuConfig()

        if disk_snapshot_with_sanlock:
            # Install necessary packages: sanlock, libvirt-lock-sanlock
            if not utils_package.package_install(["sanlock"]):
                test.error("fail to install sanlock")
            if not utils_package.package_install(["libvirt-lock-sanlock"]):
                test.error("fail to install libvirt-lock-sanlock")

            # Set virt_use_sanlock
            result = process.run("setsebool -P virt_use_sanlock 1", shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_sanlock value")

            # Update lock_manager in qemu.conf
            qemu_config.lock_manager = 'sanlock'

            # Update qemu-sanlock.conf.
            san_lock_config = LibvirtSanLockConfig()
            san_lock_config.user = '******'
            san_lock_config.group = 'sanlock'
            san_lock_config.host_id = 1
            san_lock_config.auto_disk_leases = True
            process.run("mkdir -p /var/lib/libvirt/sanlock", shell=True)
            san_lock_config.disk_lease_dir = "/var/lib/libvirt/sanlock"
            san_lock_config.require_lease_for_disks = False

            # Start sanlock service and restart libvirtd to enforce changes.
            result = process.run("systemctl start wdmd", shell=True)
            if result.exit_status:
                test.error("Failed to start wdmd service")
            result = process.run("systemctl start sanlock", shell=True)
            if result.exit_status:
                test.error("Failed to start sanlock service")
            utils_libvirtd.Libvirtd().restart()

            # Prepare lockspace and lease file for sanlock in order.
            sanlock_cmd_dict = OrderedDict()
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to truncate TEST_LS"
            sanlock_cmd_dict["sanlock direct init -s TEST_LS:0:/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to sanlock direct init TEST_LS:0"
            sanlock_cmd_dict["chown sanlock:sanlock /var/lib/libvirt/sanlock/TEST_LS"] = "Failed to chown sanlock TEST_LS"
            sanlock_cmd_dict["restorecon -R -v /var/lib/libvirt/sanlock"] = "Failed to restorecon sanlock"
            sanlock_cmd_dict["truncate -s 1M /var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to truncate test-disk-resource-lock"
            sanlock_cmd_dict["sanlock direct init -r TEST_LS:test-disk-resource-lock:" +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock:0"] = "Failed to sanlock direct init test-disk-resource-lock"
            sanlock_cmd_dict["chown sanlock:sanlock " +
                             "/var/lib/libvirt/sanlock/test-disk-resource-lock"] = "Failed to chown test-disk-resource-loc"
            sanlock_cmd_dict["sanlock client add_lockspace -s TEST_LS:1:" +
                             "/var/lib/libvirt/sanlock/TEST_LS:0"] = "Failed to client add_lockspace -s TEST_LS:0"
            for sanlock_cmd in sanlock_cmd_dict.keys():
                result = process.run(sanlock_cmd, shell=True)
                if result.exit_status:
                    test.error(sanlock_cmd_dict[sanlock_cmd])

            # Create one lease device and add it to VM.
            san_lock_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
            lease_device = Lease()
            lease_device.lockspace = 'TEST_LS'
            lease_device.key = 'test-disk-resource-lock'
            lease_device.target = {'path': '/var/lib/libvirt/sanlock/test-disk-resource-lock'}
            san_lock_vmxml.add_device(lease_device)
            san_lock_vmxml.sync()

        # Install the ceph-common package, which includes the rbd command
        if utils_package.package_install(["ceph-common"]):
            if client_name and client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (client_name, client_key))
                key_opt = "--keyring %s" % key_file

                # Create secret xml
                sec_xml = secret_xml.SecretXML("no", "no")
                sec_xml.usage = auth_type
                sec_xml.usage_name = auth_usage
                sec_xml.xmltreefile.write()

                logging.debug("Secret xml: %s", sec_xml)
                ret = virsh.secret_define(sec_xml.xml)
                libvirt.check_exit_status(ret)

                secret_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
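                # 'virsh secret-define' prints a line such as:
                #   Secret b4e8f6d3-100c-4e71-9f91-069f89742273 created
                # The regexp above pulls out the UUID column from that output.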
                logging.debug("Secret uuid %s", secret_uuid)
                if secret_uuid is None:
                    test.error("Failed to get secret uuid")

                # Set secret value
                auth_key = params.get("auth_key")
                ret = virsh.secret_set_value(secret_uuid, auth_key,
                                             **virsh_dargs)
                libvirt.check_exit_status(ret)

            # Delete the disk if it exists
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        else:
            test.error("Failed to install ceph-common")

        if disk_src_config:
            config_ceph()
        disk_path = ("rbd:%s:mon_host=%s" %
                     (disk_src_name, mon_host))
        if auth_user and auth_key:
            disk_path += (":id=%s:key=%s" %
                          (auth_user, auth_key))
        targetdev = params.get("disk_target", "vdb")
        # To be compatible with create_disk_xml function,
        # some parameters need to be updated.
        params.update({
            "type_name": params.get("disk_type", "network"),
            "target_bus": params.get("disk_target_bus"),
            "target_dev": targetdev,
            "secret_uuid": secret_uuid,
            "source_protocol": params.get("disk_source_protocol"),
            "source_name": disk_src_name,
            "source_host_name": disk_src_host,
            "source_host_port": disk_src_port})
        # Prepare disk image
        if convert_image:
            first_disk = vm.get_first_disk_devices()
            blk_source = first_disk['source']
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert"
                        " -O %s %s %s" % (mon_host, key_opt,
                                          disk_src_name, disk_format,
                                          blk_source, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)

        elif create_volume:
            vol_params = {"name": vol_name, "capacity": int(vol_cap),
                          "capacity_unit": vol_cap_unit, "format": disk_format}

            create_pool()
            create_vol(vol_params)
            check_vol(vol_params)
        else:
            # Create a local image and make a filesystem on it.
            disk_cmd = ("qemu-img create -f %s %s 10M && mkfs.ext4 -F %s" %
                        (disk_format, img_file, img_file))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                        " %s %s %s" % (mon_host, key_opt, disk_src_name,
                                       disk_format, img_file, disk_path))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Create disk snapshot if needed.
            if create_snapshot:
                snap_cmd = ("rbd -m %s %s snap create %s@%s" %
                            (mon_host, key_opt, disk_src_name, snap_name))
                process.run(snap_cmd, ignore_status=False, shell=True)
            if test_json_pseudo_protocol:
                # Create one frontend image with the rbd backing file.
                json_str = ('json:{"file.driver":"rbd",'
                            '"file.filename":"rbd:%s:mon_host=%s"}'
                            % (disk_src_name, mon_host))
                # Pass a different json string depending on the auth config
                if auth_user and auth_key:
                    json_str = ('%s:id=%s:key=%s"}' % (json_str[:-2], auth_user, auth_key))
                disk_cmd = ("qemu-img create -f qcow2 -b '%s' %s" %
                            (json_str, front_end_img_file))
                disk_path = front_end_img_file
                process.run(disk_cmd, ignore_status=False, shell=True)
        # If hot plugging, start the VM first and wait for the OS to boot.
        # Otherwise stop the VM if it is running.
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login().close()
        else:
            if not vm.is_dead():
                vm.destroy()
        if attach_device:
            if create_volume:
                params.update({"source_pool": pool_name})
                params.update({"type_name": "volume"})
                # Auth options are not needed for volume type disks
                if "auth_user" in params:
                    params.pop("auth_user")
                if "auth_type" in params:
                    params.pop("auth_type")
                if "secret_type" in params:
                    params.pop("secret_type")
                if "secret_uuid" in params:
                    params.pop("secret_uuid")
                if "secret_usage" in params:
                    params.pop("secret_usage")
            xml_file = libvirt.create_disk_xml(params)
            if additional_guest:
                # Copy xml_file for additional guest VM.
                shutil.copyfile(xml_file, additional_xml_file)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
            if additional_guest:
                # Make sure the additional VM is running
                if additional_vm.is_dead():
                    additional_vm.start()
                    additional_vm.wait_for_login().close()
                ret = virsh.attach_device(guest_name, additional_xml_file,
                                          "", debug=True)
                libvirt.check_result(ret, skip_if=unsupported_err)
        elif attach_disk:
            opts = params.get("attach_option", "")
            ret = virsh.attach_disk(vm_name, disk_path,
                                    targetdev, opts)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_readonly:
            params.update({'readonly': "yes"})
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif test_disk_internal_snapshot:
            xml_file = libvirt.create_disk_xml(params)
            opts = params.get("attach_option", "")
            ret = virsh.attach_device(vm_name, xml_file,
                                      flagstr=opts, debug=True)
            libvirt.check_result(ret, skip_if=unsupported_err)
        elif disk_snapshot_with_sanlock:
            if vm.is_dead():
                vm.start()
            snapshot_path = make_snapshot()
            if vm.is_alive():
                vm.destroy()
        elif not create_volume:
            libvirt.set_vm_disk(vm, params)
        if test_blockcopy:
            logging.info("Creating %s...", vm_name)
            vmxml_for_test = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            vm.undefine()
            if virsh.create(vmxml_for_test.xml, **virsh_dargs).exit_status:
                vmxml_backup.define()
                test.fail("Can't create the domain")
        elif vm.is_dead():
            vm.start()
        # Wait for the vm to be running
        vm.wait_for_login(timeout=600).close()
        if additional_guest:
            if additional_vm.is_dead():
                additional_vm.start()
        # Check qemu command line
        if test_qemu_cmd:
            check_qemu_cmd()
        # Check partitions in vm
        if test_vm_parts:
            if not check_in_vm(vm, targetdev, old_parts,
                               read_only=create_snapshot):
                test.fail("Failed to check vm partitions")
            if additional_guest:
                if not check_in_vm(additional_vm, targetdev, old_parts):
                    test.fail("Failed to check vm partitions")
        # Save and restore operation
        if test_save_restore:
            check_save_restore()
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)
        if test_disk_readonly:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, 'vdb')
        if test_disk_internal_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option, targetdev)
        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                test.fail("Disk still exists in vm"
                          " after detachment")
            session.close()

    except virt_vm.VMStartError as details:
        for msg in unsupported_err:
            if msg in str(details):
                test.cancel(str(details))
        test.fail("VM failed to start. Error: %s" % str(details))
    finally:
        # Remove /etc/ceph/ceph.conf file if exists.
        if os.path.exists('/etc/ceph/ceph.conf'):
            os.remove('/etc/ceph/ceph.conf')
        # Delete snapshots.
        snapshot_lists = virsh.snapshot_list(vm_name)
        if len(snapshot_lists) > 0:
            libvirt.clean_up_snapshots(vm_name, snapshot_lists)
            for snap in snapshot_lists:
                virsh.snapshot_delete(vm_name, snap, "--metadata")

        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        if additional_guest:
            virsh.remove_domain(guest_name,
                                "--remove-all-storage",
                                ignore_status=True)
        # Remove the snapshot.
        if create_snapshot:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} snap"
                   " purge {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)
        elif create_volume:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, cloned_vol_name)))
            process.run(cmd, ignore_status=True, shell=True)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, os.path.join(disk_src_pool, create_from_cloned_volume)))
            process.run(cmd, ignore_status=True, shell=True)
            clean_up_volume_snapshots()
        else:
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm {2}"
                   "".format(mon_host, key_opt, disk_src_name))
            process.run(cmd, ignore_status=True, shell=True)

        # Delete tmp files.
        if os.path.exists(key_file):
            os.remove(key_file)
        if os.path.exists(img_file):
            os.remove(img_file)
        # Clean up volume, pool
        if vol_name and vol_name in str(virsh.vol_list(pool_name).stdout):
            virsh.vol_delete(vol_name, pool_name)
        if pool_name and pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)

        # Clean up secret
        secret_list = get_secret_list()
        if secret_list:
            for secret_uuid in secret_list:
                virsh.secret_undefine(secret_uuid)

        logging.info("Restoring vm...")
        vmxml_backup.sync()

        if disk_snapshot_with_sanlock:
            # Restore virt_use_sanlock setting.
            process.run("setsebool -P virt_use_sanlock 0", shell=True)
            # Restore qemu config
            qemu_config.restore()
            utils_libvirtd.Libvirtd().restart()
            # Force shutdown sanlock service.
            process.run("sanlock client shutdown -f 1", shell=True)
            # Clean up lockspace folder
            process.run("rm -rf  /var/lib/libvirt/sanlock/*", shell=True)
            if snapshot_path is not None:
                for snapshot in snapshot_path:
                    if os.path.exists(snapshot):
                        os.remove(snapshot)
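
# A minimal standalone sketch (not from the original test) of the rbd disk
# path syntax composed above; all arguments are placeholders.
def build_rbd_path(image, mon_host, auth_user=None, auth_key=None):
    """Build a qemu rbd URI such as rbd:pool/image:mon_host=HOST[:id=USER:key=KEY]."""
    path = "rbd:%s:mon_host=%s" % (image, mon_host)
    if auth_user and auth_key:
        # Append the cephx credentials the same way the test above does.
        path += ":id=%s:key=%s" % (auth_user, auth_key)
    return path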

def run(test, params, env):
    """
    Test virsh snapshot command with disks of all kinds of types.

    (1). Init the variables from params.
    (2). Create an image in the specified format.
    (3). Attach the disk to the vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). Clean up.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")

    # Set the volume xml attribute dictionary: extract all params that start
    # with 'vol_' (they are for setting volume xml), except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = ["dir", "fs", "netfs", "logical", "iscsi",
                           "disk", "gluster"]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            raise error.TestNAError("%s not in support list %s" %
                                    (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Get a tmp dir
    tmp_dir = data_dir.get_tmp_dir()
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if snapshot_with_pool:
            # Create a dst pool for creating the volume image to attach
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target,
                         emulated_image, image_size="1G",
                         pre_disk_vol=["20M"])

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes through
                # libvirt; a logical pool can, but the volume format is not
                # supported and defaults to 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = pv.list_volumes().keys()
                if vols:
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s", pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" % newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name, vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = utils.run(cmd, ignore_status=True)
                    if cmd_result.exit_status:
                        raise error.TestNAError("Failed to format volume, %s" %
                                                cmd_result.stdout.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create an image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        # Do the attach action.
        out = utils.run("qemu-img info %s" % img_path)
        logging.debug("The img info is:\n%s" % out.stdout.strip())
        result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                   extra=extra, debug=True)
        if result.exit_status:
            raise error.TestNAError("Failed to attach disk %s to VM."
                                    "Detail: %s." % (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = ["<domainsnapshot>\n",
                     "<name>%s</name>\n" % snapshot_name,
                     "<description>Snapshot Test</description>\n"]
            if snapshot_memory == "external":
                memory_external = os.path.join(tmp_dir, "snapshot_memory")
                snapshot_external_disk.append(memory_external)
                lines.append("<memory snapshot=\'%s\' file='%s'/>\n" %
                             (snapshot_memory, memory_external))
            else:
                lines.append("<memory snapshot='%s'/>\n" % snapshot_memory)

            # Add all disks into xml file.
            disks = vm.get_disk_devices().values()
            lines.append("<disks>\n")
            for disk in disks:
                lines.append("<disk name='%s' snapshot='%s'>\n" %
                             (disk['source'], snapshot_disk))
                if snapshot_disk == "external":
                    snap_path = "%s.snap" % os.path.basename(disk['source'])
                    disk_external = os.path.join(tmp_dir, snap_path)
                    snapshot_external_disk.append(disk_external)
                    lines.append("<source file='%s'/>\n" % disk_external)
                lines.append("</disk>\n")
            lines.append("</disks>\n")
            lines.append("</domainsnapshot>")

            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            with open(snapshot_xml_path, "w") as snapshot_xml_file:
                snapshot_xml_file.writelines(lines)
            logging.debug("The xml content for snapshot create is:")
            with open(snapshot_xml_path, 'r') as fin:
                logging.debug(fin.read())

            options += " --xmlfile %s " % snapshot_xml_path
            snapshot_result = virsh.snapshot_create(
                vm_name, options, debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search("live disk snapshot not supported with this QEMU binary", out_err):
                        raise error.TestNAError(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # As commit d2e668e in 1.2.5, an internal active
                        # snapshot without memory state is rejected. Handle it
                        # as SKIP for now. This could be supported in the
                        # future by bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search("internal snapshot of a running VM" +
                                     " must include the memory state",
                                     out_err):
                            raise error.TestNAError("Check Bug #1083345, %s" %
                                                    out_err)
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name, options)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)
            if snapshot_current:
                lines = ["<domainsnapshot>\n",
                         "<description>Snapshot Test</description>\n",
                         "<state>running</state>\n",
                         "<creationTime>%s</creationTime>" % snapshot_name,
                         "</domainsnapshot>"]
                snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
                snapshot_xml_file = open(snapshot_xml_path, "w")
                snapshot_xml_file.writelines(lines)
                snapshot_xml_file.close()
                logging.debug("The xml content for snapshot create is:")
                with open(snapshot_xml_path, 'r') as fin:
                    logging.debug(fin.read())
                options += "--redefine %s --current" % snapshot_xml_path
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                raise error.TestFail("Success to create snapshot in negative"
                                     " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            raise error.TestFail("'%s' run failed with '%s'" %
                                 (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # Destroy vm for snapshot revert.
        if not libvirt_version.version_compare(1, 2, 3):
            virsh.destroy(vm_name)
        # Revert snapshot.
        revert_options = ""
        if snapshot_revert_paused:
            revert_options += " --paused"
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                              revert_options,
                                              debug=True)
        if revert_result.exit_status:
            # As commit d410e6f for libvirt 1.2.3, attempts to revert external
            # snapshots will FAIL with an error "revert to external snapshot
            # not supported yet". Thus, let's check for that and handle as a
            # SKIP for now. Check bug:
            # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
            if libvirt_version.version_compare(1, 2, 3):
                if re.search("revert to external snapshot not supported yet",
                             revert_result.stderr):
                    raise error.TestNAError(revert_result.stderr.strip())
            else:
                raise error.TestFail("Revert snapshot failed. %s" %
                                     revert_result.stderr.strip())

        if vm.is_dead():
            raise error.TestFail("Revert snapshot failed.")

        if snapshot_revert_paused:
            if vm.is_paused():
                vm.resume()
            else:
                raise error.TestFail("Revert command successed, but VM is not "
                                     "paused after reverting with --paused"
                                     "  option.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("After revert cat file output='%s'", output)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

        # Test deleting the snapshot without "--metadata"; deleting an
        # external disk snapshot will fail for now.
        # Only do this when snapshot creation succeeded, which is filtered
        # in the cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name, snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        raise error.TestFail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s missing"
                                                 % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        raise error.TestFail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s still"
                                                 % snap_xml_path + " exist")

    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")
        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name, pool_type, pool_target,
                                 emulated_image)
            except error.TestFail as detail:
                logging.error(str(detail))

def run(test, params, env):
    """
    Test command: virsh domblklist.
    1. Prepare the test environment.
    2. Run domblklist and check the result.
    3. Attach a disk and rerun domblklist with the check.
    4. Clean up the test environment.
    """
    def domblklist_test():
        """
        Run domblklist and check the result; raise an error if the check fails.
        """
        disk_info_list = []
        output_disk_info = {}
        output_disk_info_list = []
        result = virsh.domblklist(vm_ref,
                                  options,
                                  ignore_status=True,
                                  debug=True)
        status = result.exit_status
        output = result.stdout.strip()

        # Check status_error
        if status_error == "yes":
            if status == 0:
                test.fail("Run successfully with wrong command!")
        elif status_error == "no":
            if status == 1:
                test.fail("Run failed with right command")
            # Check disk information.
            disk_info = get_disk_info(vm_name, options)
            logging.debug("The disk info dict from xml is: %s" % disk_info)

            output_list = output.split('\n')
            for i in range(2, len(output_list)):
                output_disk_info[i - 2] = output_list[i].split()
            logging.debug("The disk info dict from command output is: %s" %
                          output_disk_info)
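
            # Typical 'virsh domblklist' output consumed above; the first two
            # lines are the table header (values illustrative):
            #   Target     Source
            #   ------------------------------------------
            #   vda        /var/lib/libvirt/images/guest.qcow2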

            for (k, v) in list(iteritems(output_disk_info)):
                output_disk_info_list.append(v)

            if "--details" in options:
                for (k, v) in list(iteritems(disk_info)):
                    disk_info_list.append(v)
            else:
                for (k, v) in list(iteritems(disk_info)):
                    disk_info_list.append(v[2:])

            disk_info_list.sort()
            logging.debug("The disk info list from xml is: %s" %
                          disk_info_list)
            output_disk_info_list.sort()
            logging.debug("The disk info list from command output is: %s" %
                          output_disk_info_list)

            if disk_info_list != output_disk_info_list:
                test.fail("The output did not match with disk"
                          " info from xml")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Get all parameters from configuration.
    vm_ref = params.get("domblklist_vm_ref")
    options = params.get("domblklist_options", "")
    info_options = params.get("info_options", "")
    status_error = params.get("status_error", "no")
    front_dev = params.get("domblkinfo_front_dev", "vdd")
    test_attach_disk = os.path.join(test.virtdir, "tmp.img")
    domblkinfo = params.get("domblkinfo", "no")
    extra = ""

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vm_state = vm.state()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # run domblklist and check
    domblklist_test()

    # Test domblkinfo function as well
    if domblkinfo == "yes":
        ret = virsh.domblklist(vm_ref, options, ignore_status=True, debug=True)
        target_disks = re.findall(r"[vs]d[a-z]", ret.stdout)
        if info_options == "":
            check_list = ["Capacity", "Allocation", "Physical"]
            ret2 = virsh.domblkinfo(vm_ref, target_disks[0])
        elif info_options == "--human":
            check_list = ["Capacity", "Allocation", "Physical", "GiB"]
            cmd = "virsh domblkinfo %s %s %s" % (vm_ref, target_disks[0],
                                                 info_options)
            ret2 = process.run(cmd, shell=True, ignore_status=True)
        for check in check_list:
            if not re.search(check, ret2.stdout_text):
                test.fail("Expected '%s' not found in domblkinfo output"
                          % check)

    if status_error == "no":
        try:
            # attach disk and check
            with open(test_attach_disk, 'wb') as source_file:
                source_file.seek((512 * 1024 * 1024) - 1)
                source_file.write(str(0).encode())
            # Since bug 1049529, --config also works with detach while the
            # domain is running, so the same flag is reused for detach below
            if "--inactive" in options or vm_state == "shut off":
                extra = "--config"
            virsh.attach_disk(vm_name,
                              test_attach_disk,
                              front_dev,
                              extra,
                              debug=True)
            domblklist_test()
        finally:
            virsh.detach_disk(vm_name, front_dev, extra, debug=True)
            if os.path.exists(test_attach_disk):
                os.remove(test_attach_disk)
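
Both domblklist tests call a get_disk_info() helper that this listing does not show. A minimal reconstruction, assuming it walks the domain XML and returns {index: [type, device, target, source]} so that the [2:] slice above yields the default two-column [target, source] listing; the real helper may differ:

import xml.etree.ElementTree as ET

from virttest import virsh

def get_disk_info(vm_name, options=""):
    """Collect [type, device, target, source] per disk from the domain XML."""
    extra = "--inactive" if "--inactive" in options else ""
    domxml = virsh.dumpxml(vm_name, extra=extra).stdout.strip()
    disk_info = {}
    for i, disk in enumerate(ET.fromstring(domxml).findall("devices/disk")):
        source = disk.find("source")
        # file/block/network disks keep the path in different attributes
        src = "-" if source is None else (source.get("file")
                                          or source.get("dev")
                                          or source.get("name") or "-")
        disk_info[i] = [disk.get("type", ""), disk.get("device", ""),
                        disk.find("target").get("dev", ""), src]
    return disk_info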
def run(test, params, env):
    """
    Test command: virsh domblklist.
    1.Prepare test environment.
    2.Run domblklist and check
    3.Do attach disk and rerun domblklist with check
    4.Clean test environment.
    """

    def domblklist_test():
        """
        Run domblklist and check the result; raise an error if the check fails.
        """
        output_disk_info = {}
        result = virsh.domblklist(vm_ref, options,
                                  ignore_status=True, debug=True)
        status = result.exit_status
        output = result.stdout.strip()

        # Check status_error
        if status_error == "yes":
            if status == 0:
                raise error.TestFail("Run successfully with wrong command!")
        elif status_error == "no":
            if status == 1:
                raise error.TestFail("Run failed with right command")
            # Check disk information.
            disk_info = get_disk_info(vm_name, options)
            logging.debug("The disk info dict from xml is: %s" % disk_info)

            output_list = output.split('\n')
            for i in range(2, len(output_list)):
                output_disk_info[i-2] = output_list[i].split()
            logging.debug("The disk info dict from command output is: %s"
                          % output_disk_info)

            if "--details" in options:
                if disk_info != output_disk_info:
                    raise error.TestFail("The output did not match the disk"
                                         " info from the xml")
            else:
                for i in range(len(disk_info.keys())):
                    disk_info[i] = disk_info[i][2:]
                if disk_info != output_disk_info:
                    raise error.TestFail("The output did not match the disk"
                                         " info from the xml")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Get all parameters from configuration.
    vm_ref = params.get("domblklist_vm_ref")
    options = params.get("domblklist_options", "")
    status_error = params.get("status_error", "no")
    front_dev = params.get("domblkinfo_front_dev", "vdd")
    test_attach_disk = os.path.join(test.virtdir, "tmp.img")
    extra = ""

    domid = vm.get_id()
    domuuid = vm.get_uuid()
    vm_state = vm.state()

    if vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref == "uuid":
        vm_ref = domuuid

    # run domblklist and check
    domblklist_test()

    if status_error == "no":
        try:
            # attach disk and check
            source_file = open(test_attach_disk, 'wb')
            source_file.seek((512 * 1024 * 1024) - 1)
            source_file.write(str(0))
            source_file.close()
            # Since bug 1049529, --config also works with detach while the
            # domain is running, so the same flag is reused for detach below
            if "--inactive" in options or vm_state == "shut off":
                extra = "--config"
            virsh.attach_disk(vm_name, test_attach_disk, front_dev, extra,
                              debug=True)
            domblklist_test()
        finally:
            virsh.detach_disk(vm_name, front_dev, extra, debug=True)
            if os.path.exists(test_attach_disk):
                os.remove(test_attach_disk)
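
The seek-and-write pattern above creates a 512 MB sparse file: only the last byte is written, so almost no blocks are allocated on the host. An equivalent sketch using os.truncate (Python 3.3+; a stylistic alternative, not what the test uses):

import os

def make_sparse_image(path, size=512 * 1024 * 1024):
    # Create an empty file, then extend it with a hole; blocks are only
    # allocated once the guest actually writes to them.
    with open(path, 'wb'):
        pass
    os.truncate(path, size)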
def run(test, params, env):
    """
    Test virsh blockresize command for block device of domain.

    1) Init the variables from params.
    2) Create an image with specified format.
    3) Attach a disk image to vm.
    4) Test blockresize for the disk
    5) Detach the disk
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm", "avocado-vt-vm1")
    image_format = params.get("disk_image_format", "qcow2")
    initial_disk_size = params.get("initial_disk_size", "500K")
    status_error = "yes" == params.get("status_error", "yes")
    resize_value = params.get("resize_value")
    virsh_dargs = {'debug': True}

    # Skip 'qed' cases for libvirt version 1.1.0 and later
    if libvirt_version.version_compare(1, 1, 0):
        if image_format == "qed":
            test.cancel("QED support changed, check bug: "
                        "https://bugzilla.redhat.com/show_bug.cgi"
                        "?id=731570")

    # Create an image.
    tmp_dir = data_dir.get_tmp_dir()
    image_path = os.path.join(tmp_dir, "blockresize_test")
    logging.info("Create image: %s, "
                 "size %s, "
                 "format %s", image_path, initial_disk_size, image_format)

    cmd = "qemu-img create -f %s %s %s" % (image_format, image_path,
                                           initial_disk_size)
    ret = process.run(cmd, allow_output_check='combined', shell=True)
    status, output = (ret.exit_status, ret.stdout_text.strip())
    if status:
        test.error("Creating image file %s failed: %s"
                   % (image_path, output))

    # Hotplug the image as disk device
    result = virsh.attach_disk(vm_name, source=image_path, target="vdd",
                               extra=" --subdriver %s" % image_format)
    if result.exit_status:
        test.error("Failed to attach disk %s to VM: %s."
                   % (image_path, result.stderr.strip()))

    if resize_value == "over_size":
        # Use byte unit for over_size test
        resize_value = "%s" % OVER_SIZE + "b"

    # Run the test
    try:
        result = virsh.blockresize(vm_name, image_path,
                                   resize_value, **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()

        # Check status_error
        if status_error:
            if status == 0 or err == "":
                test.fail("Expect failure, but run successfully!")
            # No need for further checks
            return
        else:
            if status != 0 or err != "":
                # bz 1002813 will result in an error on this
                err_str = "unable to execute QEMU command 'block_resize': Could not resize: Invalid argument"
                if resize_value[-2] in "kb" and re.search(err_str, err):
                    test.cancel("BZ 1002813 not yet applied")
                else:
                    test.fail("Run failed with right "
                              "virsh blockresize command")

        # Although kb should not be used, libvirt/virsh will accept it and
        # treat it as 1000 bytes, which caused issues for qed & qcow2
        # since they expect a value evenly divisible by 512 (hence bz 1002813).
        if "kb" in resize_value:
            value = int(resize_value[:-2])
            if image_format in ["qed", "qcow2"]:
                # qcow2 and qed want a VIR_ROUND_UP value based on 512 byte
                # sectors - hence this less than visually appealing formula
                expected_size = (((value * 1000) + 512 - 1) // 512) * 512
            else:
                # Raw images...
                # Ugh - there's some rather ugly looking math when kb
                # (or mb, gb, tb, etc.) are used as the scale for the
                # value to create an image. The blockresize for the
                # running VM uses a qemu json call which differs from
                # what qemu-img would do - resulting in (to say the
                # least) awkward sizes. We'll just have to make sure
                # we don't deviate by more than a sector.
                expected_size = value * 1000
        elif "kib" in resize_value:
            value = int(resize_value[:-3])
            expected_size = value * 1024
        elif resize_value[-1] in "b":
            expected_size = int(resize_value[:-1])
        elif resize_value[-1] in "k":
            value = int(resize_value[:-1])
            expected_size = value * 1024
        elif resize_value[-1] == "m":
            value = int(resize_value[:-1])
            expected_size = value * 1024 * 1024
        elif resize_value[-1] == "g":
            value = int(resize_value[:-1])
            expected_size = value * 1024 * 1024 * 1024
        else:
            test.error("Unknown scale value")

        image_info = utils_misc.get_image_info(image_path)
        actual_size = int(image_info['vsize'])

        logging.info("The expected block size is %s bytes, "
                     "the actual block size is %s bytes",
                     expected_size, actual_size)

        # See comment above regarding Raw images
        if image_format == "raw" and resize_value[-2] in "kb":
            if abs(int(actual_size) - int(expected_size)) > 512:
                test.fail("New raw blocksize set by blockresize do "
                          "not match the expected value")
        else:
            if int(actual_size) != int(expected_size):
                test.fail("New blocksize set by blockresize is "
                          "different from actual size from "
                          "'qemu-img info'")
    finally:
        virsh.detach_disk(vm_name, target="vdd")

        if os.path.exists(image_path):
            os.remove(image_path)
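
A worked instance of the 512-byte round-up applied above for qed/qcow2: virsh treats a "kb" suffix as 1000 bytes, and qemu then rounds the requested size up to the next 512-byte sector boundary.

value = 10                                     # from a resize_value of "10kb"
requested = value * 1000                       # 10000 bytes
rounded = ((requested + 512 - 1) // 512) * 512
assert rounded == 10240                        # 20 sectors of 512 bytes each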
Beispiel #41
0
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Set up an iscsi target (and an iscsi pool for volume-type disks)
    3. Create an iscsi network disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back up the VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshots don't support the raw disk format, so create a qcow2
            # volume disk for the snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         **virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to be stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(data_dir.get_tmp_dir(), snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result.stdout.strip():
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
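
The utils_misc.wait_for(get_vol, 10) call above polls the helper until it returns something truthy or the timeout expires. A minimal sketch of the same pattern, assuming a fixed polling step; the real utility accepts more tuning parameters:

import time

def wait_for(func, timeout, step=1.0):
    # Poll func() until it returns a truthy value or the timeout expires;
    # return None on timeout, mirroring the utils_misc.wait_for call above.
    end = time.time() + timeout
    while time.time() < end:
        result = func()
        if result:
            return result
        time.sleep(step)
    return None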
def run_virsh_snapshot_disk(test, params, env):
    """
    Test virsh snapshot command when disk in all kinds of type.

    (1). Init the variables from params.
    (2). Create an image with the specified format.
    (3). Attach the disk to the vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). Cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))

    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")

    # Get a tmp_dir.
    tmp_dir = data_dir.get_tmp_dir()
    # Create a image.
    params['image_name'] = "snapshot_test"
    params['image_format'] = image_format
    params['image_size'] = "1M"
    image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
    img_path, _ = image.create(params)
    # Do the attach action.
    result = virsh.attach_disk(vm_name,
                               source=img_path,
                               target="vdf",
                               extra="--persistent --subdriver %s" %
                               image_format)
    if result.exit_status:
        raise error.TestNAError("Failed to attach disk %s to VM."
                                "Detail: %s." % (img_path, result.stderr))

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    try:
        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = [
                "<domainsnapshot>\n",
                "<name>%s</name>\n" % snapshot_name,
                "<description>Snapshot Test</description>\n"
            ]
            if snapshot_memory == "external":
                memory_external = os.path.join(tmp_dir, "snapshot_memory")
                snapshot_external_disk.append(memory_external)
                lines.append("<memory snapshot=\'%s\' file='%s'/>\n" %
                             (snapshot_memory, memory_external))
            else:
                lines.append("<memory snapshot='%s'/>\n" % snapshot_memory)

            # Add all disks into xml file.
            disks = vm.get_disk_devices().values()
            lines.append("<disks>\n")
            for disk in disks:
                lines.append("<disk name='%s' snapshot='%s'>\n" %
                             (disk['source'], snapshot_disk))
                if snapshot_disk == "external":
                    disk_external = os.path.join(
                        tmp_dir, "%s.snap" % os.path.basename(disk['source']))
                    snapshot_external_disk.append(disk_external)
                    lines.append("<source file='%s'/>\n" % disk_external)
                lines.append("</disk>\n")
            lines.append("</disks>\n")
            lines.append("</domainsnapshot>")

            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            snapshot_result = virsh.snapshot_create(
                vm_name, ("--xmlfile %s" % snapshot_xml_path))
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." %
                        snapshot_result.stderr.strip())
        else:
            options = ""
            snapshot_result = virsh.snapshot_create(vm_name, options)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." %
                        snapshot_result.stderr.strip())
            snapshot_name = re.search("\d+",
                                      snapshot_result.stdout.strip()).group(0)
            if snapshot_current:
                lines = [
                    "<domainsnapshot>\n",
                    "<description>Snapshot Test</description>\n",
                    "<state>running</state>\n",
                    "<creationTime>%s</creationTime>" % snapshot_name,
                    "</domainsnapshot>"
                ]
                snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
                snapshot_xml_file = open(snapshot_xml_path, "w")
                snapshot_xml_file.writelines(lines)
                snapshot_xml_file.close()
                options += "--redefine %s --current" % snapshot_xml_path
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            raise error.TestFail(
                "Snapshot creation succeeded in negative case\n"
                "Detail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        status, output = session.cmd_status_output("touch %s" % tmp_file_path)
        if status:
            raise error.TestFail("Touch file in vm failed. %s" % output)

        session.close()

        # Destroy vm for snapshot revert.
        virsh.destroy(vm_name)
        # Revert snapshot.
        revert_options = ""
        if snapshot_revert_paused:
            revert_options += " --paused"
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                              revert_options)
        if revert_result.exit_status:
            raise error.TestFail("Revert snapshot failed. %s" %
                                 revert_result.stderr.strip())

        if vm.is_dead():
            raise error.TestFail("VM is dead after snapshot revert.")

        if snapshot_revert_paused:
            if vm.is_paused():
                vm.resume()
            else:
                raise error.TestFail(
                    "Revert command succeeded, but VM is not "
                    "paused after reverting with --paused option.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        image.remove()
        if snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
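
The snapshot XML above is assembled by string concatenation. For comparison, a sketch of the same document built with ElementTree (an alternative shown for clarity, not what the test does); disks is assumed to be a list of (source_path, snapshot_mode, external_file_or_None) tuples:

import xml.etree.ElementTree as ET

def build_snapshot_xml(name, memory_snapshot, disks):
    root = ET.Element("domainsnapshot")
    ET.SubElement(root, "name").text = name
    ET.SubElement(root, "description").text = "Snapshot Test"
    ET.SubElement(root, "memory", snapshot=memory_snapshot)
    disks_elem = ET.SubElement(root, "disks")
    for source, mode, external in disks:
        disk = ET.SubElement(disks_elem, "disk", name=source, snapshot=mode)
        if external:
            ET.SubElement(disk, "source", file=external)
    return ET.tostring(root, encoding="unicode")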
def run(test, params, env):
    """
    Test virsh blockresize command for block device of domain.

    1) Init the variables from params.
    2) Create an image with specified format.
    3) Attach a disk image to vm.
    4) Test blockresize for the disk
    5) Detach the disk
    """

    # MAIN TEST CODE ###
    # Process cartesian parameters
    vm_name = params.get("main_vm", "virt-tests-vm1")
    image_format = params.get("disk_image_format", "qcow2")
    initial_disk_size = params.get("initial_disk_size", "1M")
    status_error = "yes" == params.get("status_error", "yes")
    resize_value = params.get("resize_value")
    virsh_dargs = {'debug': True}

    # Create an image.
    tmp_dir = data_dir.get_tmp_dir()
    image_path = os.path.join(tmp_dir, "blockresize_test")
    logging.info("Create image: %s, "
                 "size %s, "
                 "format %s", image_path, initial_disk_size, image_format)

    cmd = "qemu-img create -f %s %s %s" % (image_format, image_path,
                                           initial_disk_size)
    status, output = commands.getstatusoutput(cmd)
    if status:
        raise error.TestError("Creating image file %s failed: %s" % \
                                (image_path, output))

    # Hotplug the image as disk device
    result = virsh.attach_disk(vm_name, source=image_path, target="vdd",
                               extra=" --subdriver %s" % image_format)
    if result.exit_status:
        raise error.TestError("Failed to attach disk %s to VM: %s." %
                                (image_path, result.stderr))

    if resize_value == "over_size":
        # Use byte unit for over_size test
        resize_value = "%s" % OVER_SIZE + "b"

    # Run the test
    try:
        result = virsh.blockresize(vm_name, image_path,
                                   resize_value, **virsh_dargs)
        status = result.exit_status
        err = result.stderr.strip()

        # Check status_error
        if status_error:
            if status == 0 or err == "":
                raise error.TestFail("Expect failure, but run successfully!")
            # No need for further checks
            return
        else:
            if status != 0 or err != "":
                raise error.TestFail("Run failed with right "
                                     "virsh blockresize command")

        if resize_value[-1] in "bkm":
            expected_size = 1024*1024
        elif resize_value[-1] == "g":
            expected_size = 1024*1024*1024
        else:
            raise  error.TestError("Unknown infomation of unit")

        image_info = utils_misc.get_image_info(image_path)
        actual_size = int(image_info['vsize'])

        logging.info("The expected block size is %s bytes,"
                     "the actual block size is %s bytes",
                     expected_size, actual_size)

        if int(actual_size) != int(expected_size):
            raise error.TestFail("New blocksize set by blockresize is "
                                 "different from actual size from "
                                 "'qemu-img info'")
    finally:
        virsh.detach_disk(vm_name, target="vdd")

        if os.path.exists(image_path):
            os.remove(image_path)
Beispiel #44
0
    def trigger_events(dom, events_list=None):
        """
        Trigger various events in events_list

        :param dom: the vm object corresponding to the domain
        :param events_list: the list of events to trigger
        :return: the expected output that virsh event command prints out
        """
        events_list = events_list or []
        expected_events_list = []
        save_path = os.path.join(tmpdir, "%s_event.save" % dom.name)
        logging.debug("Domain name: %s", dom.name)
        xmlfile = dom.backup_xml()
        new_disk = os.path.join(tmpdir, "%s_new_disk.img" % dom.name)

        try:
            for event in events_list:
                logging.debug("Current event is: %s", event)
                if event in [
                        'start', 'restore', 'create', 'edit', 'define',
                        'undefine', 'crash', 'device-removal-failed',
                        'watchdog', 'io-error'
                ]:
                    if dom.is_alive():
                        dom.destroy()
                        if event in ['create', 'define']:
                            dom.undefine()
                else:
                    if not dom.is_alive():
                        dom.start()
                        dom.wait_for_login().close()
                        if event == "resume":
                            dom.pause()

                if event == "undefine":
                    virsh.undefine(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Undefined Removed")
                elif event == "create":
                    virsh.create(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                elif event == "destroy":
                    virsh.destroy(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Destroyed")
                elif event == "define":
                    virsh.define(xmlfile, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Added")
                elif event == "start":
                    virsh.start(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Started Booted")
                    dom.wait_for_login().close()
                elif event == "suspend":
                    virsh.suspend(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    if not libvirt_version.version_compare(5, 3, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Suspended Paused")
                elif event == "resume":
                    virsh.resume(dom.name, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "save":
                    virsh.save(dom.name, save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Suspended Paused")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Saved")
                elif event == "restore":
                    if not os.path.exists(save_path):
                        logging.error("%s not exist", save_path)
                    else:
                        virsh.restore(save_path, **virsh_dargs)
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Started Restored")
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Resumed Snapshot")
                elif event == "edit":
                    #Check whether 'description' element exists.
                    domxml = virsh.dumpxml(dom.name).stdout.strip()
                    find_desc = parseString(domxml).getElementsByTagName(
                        "description")
                    if find_desc == []:
                        #If not exists, add one for it.
                        logging.info("Adding <description> to guest")
                        virsh.desc(dom.name, "--config",
                                   "Added desc for testvm", **virsh_dargs)
                    #The edit operation is to delete 'description' element.
                    edit_cmd = [r":g/<description.*<\/description>/d"]
                    utlv.exec_virsh_edit(dom.name, edit_cmd)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Defined Updated")
                elif event == "shutdown":
                    if signal_name is None:
                        virsh.shutdown(dom.name, **virsh_dargs)
                        # Wait a few seconds for shutdown to finish
                        time.sleep(3)
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            # Shutdown reason is distinguished since qemu 2.9.0-9
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after guest request")
                    else:
                        os.kill(dom.get_pid(), getattr(signal, signal_name))
                        if utils_misc.compare_qemu_version(2, 9, 0):
                            expected_events_list.append(
                                "'lifecycle' for %s:"
                                " Shutdown Finished after host request")
                    if not utils_misc.compare_qemu_version(2, 9, 0):
                        expected_events_list.append("'lifecycle' for %s:"
                                                    " Shutdown Finished")
                    wait_for_shutoff(dom)
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Stopped Shutdown")
                elif event == "crash":
                    if not vmxml.xmltreefile.find('devices').findall('panic'):
                        # Set panic device
                        panic_dev = Panic()
                        panic_dev.model = panic_model
                        panic_dev.addr_type = addr_type
                        panic_dev.addr_iobase = addr_iobase
                        vmxml.add_device(panic_dev)
                    vmxml.on_crash = "coredump-restart"
                    vmxml.sync()
                    logging.info("Guest xml now is: %s", vmxml)
                    dom.start()
                    session = dom.wait_for_login()
                    # Stop kdump in the guest
                    session.cmd("systemctl stop kdump", ignore_all_errors=True)
                    # Enable sysRq
                    session.cmd("echo 1 > /proc/sys/kernel/sysrq")
                    try:
                        # Crash the guest
                        session.cmd("echo c > /proc/sysrq-trigger", timeout=90)
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Crashed Panicked")
                    expected_events_list.append("'lifecycle' for %s:"
                                                " Resumed Unpaused")
                elif event == "reset":
                    virsh.reset(dom.name, **virsh_dargs)
                    expected_events_list.append("'reboot' for %s")
                elif event == "vcpupin":
                    virsh.vcpupin(dom.name, '0', '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.vcpupin0: 0")
                elif event == "emulatorpin":
                    virsh.emulatorpin(dom.name, '0', **virsh_dargs)
                    expected_events_list.append("'tunable' for %s:"
                                                "\n\tcputune.emulatorpin: 0")
                elif event == "setmem":
                    mem_size = int(params.get("mem_size", 512000))
                    virsh.setmem(dom.name, mem_size, **virsh_dargs)
                    expected_events_list.append("'balloon-change' for %s:")
                elif event == "device-added-removed":
                    add_disk(dom.name, new_disk, 'vdb', '')
                    expected_events_list.append("'device-added' for %s:"
                                                " virtio-disk1")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " virtio-disk1")
                    iface_xml_obj = create_iface_xml()
                    iface_xml_obj.xmltreefile.write()
                    virsh.detach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-removed' for %s:"
                                                " net0")
                    time.sleep(2)
                    virsh.attach_device(dom.name, iface_xml_obj.xml,
                                        **virsh_dargs)
                    expected_events_list.append("'device-added' for %s:"
                                                " net0")
                elif event == "block-threshold":
                    add_disk(dom.name, new_disk, 'vdb', '', format=disk_format)
                    logging.debug(process.run('qemu-img info %s -U' %
                                              new_disk))
                    virsh.domblkthreshold(vm_name, 'vdb', '100M')
                    session = dom.wait_for_login()
                    session.cmd(
                        "mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                        "dd if=/dev/urandom of=/mnt/bigfile bs=1M count=300 && sync"
                    )
                    time.sleep(5)
                    session.close()
                    expected_events_list.append(
                        "'block-threshold' for %s:"
                        " dev: vdb(%s)  104857600 29368320")
                    virsh.detach_disk(dom.name, 'vdb', **virsh_dargs)
                elif event == "change-media":
                    target_device = "hdc"
                    device_target_bus = params.get("device_target_bus", "ide")
                    disk_blk = vm_xml.VMXML.get_disk_blk(dom.name)
                    logging.info("disk_blk %s", disk_blk)
                    if target_device not in disk_blk:
                        logging.info("Adding cdrom to guest")
                        if dom.is_alive():
                            dom.destroy()
                        add_disk(
                            dom.name, new_disk, target_device,
                            ("--type cdrom --sourcetype file --driver qemu " +
                             "--config --targetbus %s" % device_target_bus))
                        dom.start()
                    all_options = new_disk + " --insert"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " closed")
                    all_options = new_disk + " --eject"
                    virsh.change_media(dom.name, target_device, all_options,
                                       **virsh_dargs)
                    expected_events_list.append("'tray-change' for %s disk" +
                                                " .*%s.*:" %
                                                device_target_bus + " opened")
                elif event == "hwclock":
                    session = dom.wait_for_login()
                    try:
                        session.cmd("hwclock --systohc", timeout=60)
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        logging.info(details)
                    session.close()
                    expected_events_list.append("'rtc-change' for %s:")
                elif event == "metadata_set":
                    metadata_uri = params.get("metadata_uri")
                    metadata_key = params.get("metadata_key")
                    metadata_value = params.get("metadata_value")
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   new_metadata=metadata_value,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_edit":
                    metadata_uri = "http://herp.derp/"
                    metadata_key = "herp"
                    metadata_value = "<derp xmlns:foobar='http://foo.bar/'>foo<bar></bar></derp>"
                    virsh_cmd = r"virsh metadata %s --uri %s --key %s %s"
                    virsh_cmd = virsh_cmd % (dom.name, metadata_uri,
                                             metadata_key, "--edit")
                    session = aexpect.ShellSession("sudo -s")
                    logging.info("Running command: %s", virsh_cmd)
                    try:
                        session.sendline(virsh_cmd)
                        session.sendline(r":insert")
                        session.sendline(metadata_value)
                        session.sendline(".")
                        session.send('ZZ')
                        remote.handle_prompts(session,
                                              None,
                                              None,
                                              r"[\#\$]\s*$",
                                              debug=True,
                                              timeout=60)
                    except Exception as e:
                        test.error("Error occured: %s" % e)
                    session.close()
                    # Check metadata after edit
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "metadata_remove":
                    virsh.metadata(dom.name,
                                   metadata_uri,
                                   options="--remove",
                                   key=metadata_key,
                                   **virsh_dargs)
                    expected_events_list.append("'metadata-change' for %s: "
                                                "element http://app.org/")
                elif event == "detach-dimm":
                    prepare_vmxml_mem(vmxml)
                    tg_size = params.get("dimm_size")
                    tg_sizeunit = params.get("dimm_unit")
                    dimm_xml = utils_hotplug.create_mem_xml(
                        tg_size, None, None, tg_sizeunit)
                    virsh.attach_device(dom.name,
                                        dimm_xml.xml,
                                        flagstr="--config",
                                        **virsh_dargs)
                    vmxml_dimm = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug(
                        "Current vmxml with plugged dimm dev is %s\n" %
                        vmxml_dimm)
                    virsh.start(dom.name, **virsh_dargs)
                    dom.wait_for_login().close()
                    result = virsh.detach_device(dom.name,
                                                 dimm_xml.xml,
                                                 debug=True,
                                                 ignore_status=True)
                    expected_fails = params.get("expected_fails")
                    utlv.check_result(result, expected_fails)
                    vmxml_live = vm_xml.VMXML.new_from_dumpxml(dom.name)
                    logging.debug(
                        "Current vmxml after hot-unplug dimm is %s\n" %
                        vmxml_live)
                    expected_events_list.append(
                        "'device-removal-failed' for %s: dimm0")
                elif event == "watchdog":
                    vmxml.remove_all_device_by_type('watchdog')
                    watchdog_dev = Watchdog()
                    watchdog_dev.model_type = params.get("watchdog_model")
                    action = params.get("action")
                    watchdog_dev.action = action
                    vmxml.add_device(watchdog_dev)
                    vmxml.sync()
                    logging.debug("Current vmxml with watchdog dev is %s\n" %
                                  vmxml)
                    virsh.start(dom.name, **virsh_dargs)
                    session = dom.wait_for_login()
                    try:
                        session.cmd("echo 0 > /dev/watchdog")
                    except (ShellTimeoutError,
                            ShellProcessTerminatedError) as details:
                        test.fail("Failed to trigger watchdog: %s" % details)
                    session.close()
                    # The watchdog acts slowly; wait for it.
                    time.sleep(30)
                    expected_events_list.append("'watchdog' for %s: " +
                                                "%s" % action)
                    if action == 'pause':
                        expected_events_list.append(
                            "'lifecycle' for %s: Suspended Watchdog")
                        virsh.resume(dom.name, **virsh_dargs)
                    else:
                        # action == 'reset'
                        expected_events_list.append("'reboot' for %s")
                elif event == "io-error":
                    part_size = params.get("part_size")
                    resume_event = params.get("resume_event")
                    suspend_event = params.get("suspend_event")
                    process.run("truncate -s %s %s" % (part_size, small_part),
                                shell=True)
                    utlv.mkfs(small_part, part_format)
                    utils_misc.mount(small_part, mount_point, None)
                    add_disk(dom.name, new_disk, 'vdb',
                             '--subdriver qcow2 --config', 'qcow2')
                    dom.start()
                    session = dom.wait_for_login()
                    session.cmd(
                        "mkfs.ext4 /dev/vdb && mount /dev/vdb /mnt && ls /mnt && "
                        "dd if=/dev/zero of=/mnt/test.img bs=1M count=50",
                        ignore_all_errors=True)
                    time.sleep(5)
                    session.close()
                    expected_events_list.append("'io-error' for %s: " +
                                                "%s" % new_disk +
                                                r" \(virtio-disk1\) pause")
                    expected_events_list.append(
                        "'io-error-reason' for %s: " + "%s" % new_disk +
                        r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    process.run("df -hT")
                    virsh.resume(dom.name, **virsh_dargs)
                    time.sleep(5)
                    expected_events_list.append(resume_event)
                    expected_events_list.append("'io-error' for %s: " +
                                                "%s" % new_disk +
                                                r" \(virtio-disk1\) pause")
                    expected_events_list.append(
                        "'io-error-reason' for %s: " + "%s" % new_disk +
                        r" \(virtio-disk1\) pause due to enospc")
                    expected_events_list.append(suspend_event)
                    ret = virsh.domstate(dom.name, "--reason", **virsh_dargs)
                    if ret.stdout.strip() != "paused (I/O error)":
                        test.fail(
                            "Domain state should still be paused due to I/O error!"
                        )
                else:
                    test.error("Unsupported event: %s" % event)
                # Events may not be received immediately
                time.sleep(3)
        finally:
            if os.path.exists(save_path):
                os.unlink(save_path)
            if os.path.exists(new_disk):
                os.unlink(new_disk)
        return [(dom.name, event) for event in expected_events_list]
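
The event templates above keep a %s slot for the domain name; a minimal
sketch (helper name invented for illustration) of expanding the returned
(domain, template) pairs:

def render_expected_events(pairs):
    """Expand (domain_name, template) pairs; each template is expected to
    carry one %s slot for the domain name."""
    return [template % dom_name for dom_name, template in pairs]
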
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command for LXC.

    The command can attach a new disk or detach a disk.
    1. Prepare the test environment: destroy or suspend a VM.
    2. Perform the virsh attach/detach-disk operation.
    3. Recover the test environment.
    4. Confirm the test result.
    """

    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = params.get("at_dt_disk_no_attach", 'no')

    # Get test command.
    test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk")

    # Disk specific attributes.
    device_source = params.get("at_dt_disk_device_source", "/dev/sdc1")
    device_target = params.get("at_dt_disk_device_target", "vdd")
    test_twice = "yes" == params.get("at_dt_disk_test_twice", "no")
    test_audit = "yes" == params.get("at_dt_disk_check_audit", "no")
    serial = params.get("at_dt_disk_serial", "")
    address = params.get("at_dt_disk_address", "")
    address2 = params.get("at_dt_disk_address2", "")
    if serial:
        at_options += (" --serial %s" % serial)
    if address2:
        at_options_twice = at_options + (" --address %s" % address2)
    if address:
        at_options += (" --address %s" % address)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.destroy(gracefully=False)
    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Create a virtual device file if the user doesn't prepare a partition.
    test_block_dev = False
    if device_source.count("ENTER"):
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        test_block_dev = True
        if not device_source:
            # We should skip this case
            raise error.TestNAError("Cannot get an iscsi device name on the host")

    if vm.is_alive():
        vm.destroy(gracefully=False)

    # If we are testing audit, we need to start the audit service first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_attach = virsh.attach_disk(vm_name, device_source, device_target,
                                     "--config").exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")

        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            s_attach = virsh.attach_disk(vm_name, device_source,
                                         device_target2,
                                         "--config").exit_status
            if s_attach != 0:
                logging.error("Attaching device failed before testing "
                              "detach-disk test_twice")

    vm.start()

    # Turn VM into certain state.
    if pre_vm_state == "paused":
        logging.info("Suspending %s..." % vm_name)
        if vm.is_alive():
            vm.pause()
    elif pre_vm_state == "shut off":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)

    # Get disk count before test.
    disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    if test_cmd == "attach-disk":
        status = virsh.attach_disk(vm_ref, device_source, device_target,
                                   at_options, debug=True).exit_status
    elif test_cmd == "detach-disk":
        status = virsh.detach_disk(vm_ref, device_target, dt_options,
                                   debug=True).exit_status
    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2", device_target)
        if test_cmd == "attach-disk":
            if address2:
                at_options = at_options_twice
            status = virsh.attach_disk(vm_ref, device_source,
                                       device_target2, at_options,
                                       debug=True).exit_status
        elif test_cmd == "detach-disk":
            status = virsh.detach_disk(vm_ref, device_target2, dt_options,
                                       debug=True).exit_status

    # Resume the guest after the command. On newer libvirt this has been
    # fixed (it used to be a bug): the change in the xml file is done after
    # the guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()

    # Check audit log
    check_audit_after_cmd = True
    if test_audit:
        grep_audit = ('grep "%s" /var/log/audit/audit.log'
                      % test_cmd.split("-")[0])
        cmd = (grep_audit + ' | ' + 'grep "%s" | tail -n1 | grep "res=success"'
               % device_source)
        if utils.run(cmd).exit_status:
            logging.error("Audit check failed")
            check_audit_after_cmd = False

    # Check disk count after command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False

    # Recover VM state.
    if pre_vm_state == "shut off":
        vm.start()

    # Check disk type after attach.
    check_disk_type = True
    try:
        check_disk_type = vm_xml.VMXML.check_disk_type(vm_name,
                                                       device_source,
                                                       "block")
    except xcepts.LibvirtXMLError:
        # No disk found
        check_disk_type = False

    # Check disk serial after attach.
    check_disk_serial = True
    if serial:
        disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target)
        if serial != disk_serial:
            check_disk_serial = False

    # Check disk address after attach.
    check_disk_address = True
    if address:
        disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target)
        if utils_test.canonicalize_disk_address(address) !=\
           utils_test.canonicalize_disk_address(disk_address):
            check_disk_address = False

    # Check multifunction address after attach.
    check_disk_address2 = True
    if address2:
        disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2)
        if utils_test.canonicalize_disk_address(address2) !=\
           utils_test.canonicalize_disk_address(disk_address2):
            check_disk_address2 = False

    # Destroy VM.
    vm.destroy(gracefully=False)

    # Check disk count after VM shutdown (with --config).
    check_count_after_shutdown = True
    disk_count_after_shutdown = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_shutdown == disk_count_before_cmd:
            check_count_after_shutdown = False
    elif test_cmd == "detach-disk":
        if disk_count_after_shutdown < disk_count_before_cmd:
            check_count_after_shutdown = False

    # Recover VM.
    if vm.is_alive():
        vm.destroy(gracefully=False)
    backup_xml.sync()
    if test_block_dev:
        libvirt.setup_or_cleanup_iscsi(False)

    # Check results.
    if status_error:
        if not status:
            raise error.TestFail("virsh %s exit with unexpected value."
                                 % test_cmd)
    else:
        if status:
            raise error.TestFail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
            if at_options.count("config"):
                if not check_count_after_shutdown:
                    raise error.TestFail("Cannot see config attached device "
                                         "in xml file after VM shutdown.")
                if not check_disk_serial:
                    raise error.TestFail("Serial set failed after attach")
                if not check_disk_address:
                    raise error.TestFail("Address set failed after attach")
                if not check_disk_address2:
                    raise error.TestFail("Address(multifunction) set failed"
                                         " after attach")
            else:
                if not check_count_after_cmd:
                    raise error.TestFail("Cannot see device in xml file"
                                         " after attach.")
                if not check_disk_type:
                    raise error.TestFail("Check disk type failed after"
                                         " attach.")
                if not check_audit_after_cmd:
                    raise error.TestFail("Audit hotplug failure after attach")
                if at_options.count("persistent"):
                    if not check_count_after_shutdown:
                        raise error.TestFail("Cannot see device attached "
                                             "with persistent after "
                                             "VM shutdown.")
                else:
                    if check_count_after_shutdown:
                        raise error.TestFail("See non-config attached device "
                                             "in xml file after VM shutdown.")
        elif test_cmd == "detach-disk":
            if dt_options.count("config"):
                if check_count_after_shutdown:
                    raise error.TestFail("See config detached device in "
                                         "xml file after VM shutdown.")
            else:
                if check_count_after_cmd:
                    raise error.TestFail("See device in xml file "
                                         "after detach.")
                if not check_audit_after_cmd:
                    raise error.TestFail("Audit hotunplug failure "
                                         "after detach")

                if dt_options.count("persistent"):
                    if check_count_after_shutdown:
                        raise error.TestFail("See device deattached "
                                             "with persistent after "
                                             "VM shutdown.")
                else:
                    if not check_count_after_shutdown:
                        raise error.TestFail("See non-config detached "
                                             "device in xml file after "
                                             "VM shutdown.")

        else:
            raise error.TestError("Unknown command %s." % test_cmd)
def run(test, params, env):
    """
    Test virsh detach-device command.

    The command can detach a disk.
    1. Prepare the test environment: destroy or suspend a VM.
    2. Perform the virsh detach-device operation.
    3. Recover the test environment.
    4. Confirm the test result.
    """
    def create_device_file(device_source="/tmp/attach.img"):
        """
        Create a device source file.

        :param device_source: Device source file.
        """
        try:
            with open(device_source, 'wb') as device_file:
                device_file.seek((512 * 1024 * 1024) - 1)
                device_file.write(str(0).encode())
        except IOError:
            logging.error("Image file %s created failed.", device_source)

    def check_vm_partition(vm, device, os_type, target_name):
        """
        Check VM disk's partition.

        :param vm: VM guest.
        :param device: Device type.
        :param os_type: VM's operating system type.
        :param target_name: Device target name.
        :return: True if checked successfully.
        """
        logging.info("Checking VM partittion...")
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                if device == "disk":
                    s, o = session.cmd_status_output(
                        "grep %s /proc/partitions" % target_name)
                    logging.info("Virtio devices in VM:\n%s", o)
                elif device == "cdrom":
                    s, o = session.cmd_status_output("ls /dev/cdrom")
                    logging.info("CDROM in VM:\n%s", o)
                elif device == "iface":
                    s, o = session.cmd_status_output("ls /")
                session.close()
                if s != 0:
                    return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add acpiphp module if VM's os type is rhel5.*

        :param vm: VM guest.
        :param os_type: VM's operating system type.
        :return: True if operated successfully.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output("rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output("rpm -qa | grep"
                                                    " redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, _ = session.cmd_status_output("modprobe acpiphp")
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def create_device_xml(params, xml_path, device_source):
        """
        Create an xml file for the device.
        """
        device_xml_name = params.get("dt_device_xml", "device.xml")
        device_xml_file = os.path.join(xml_path, device_xml_name)
        device_type = params.get("dt_device_device", "disk")
        if device_type in ["disk", 'cdrom']:
            disk_class = vm_xml.VMXML.get_device_class('disk')
            if test_block_dev:
                disk = disk_class(type_name='block')
                stype = 'dev'
            else:
                disk = disk_class(type_name='file')
                stype = 'file'
            disk.device = device_type
            disk.driver = dict(name='qemu', type='raw')
            disk.source = disk.new_disk_source(attrs={stype: device_source})
            disk.target = dict(bus=device_bus, dev=device_target)
            disk.xmltreefile.write()
            shutil.copyfile(disk.xml, device_xml_file)
        else:
            iface_class = vm_xml.VMXML.get_device_class('interface')
            iface = iface_class(type_name='network')
            iface.mac_address = iface_mac_address
            iface.source = dict(network=iface_network)
            iface.model = iface_model_type
            iface.xmltreefile.write()
            shutil.copyfile(iface.xml, device_xml_file)
        return device_xml_file
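    # A sketch of the disk XML create_device_xml() emits for the file-backed
    # case (the block-backed case uses type='block' and <source dev=.../>;
    # the dev/bus values come from params):
    #   <disk type='file' device='disk'>
    #     <driver name='qemu' type='raw'/>
    #     <source file='/path/to/attach.img'/>
    #     <target dev='vdd' bus='...'/>
    #   </disk>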

    vm_ref = params.get("dt_device_vm_ref", "name")
    dt_options = params.get("dt_device_options", "")
    pre_vm_state = params.get("dt_device_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = "yes" == params.get("dt_device_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    device = params.get("dt_device_device", "disk")
    readonly = "yes" == params.get("detach_readonly", "no")
    test_cmd = "detach-device"
    if not virsh.has_command_help_match(test_cmd, dt_options) and\
       not status_error:
        test.cancel("Current libvirt version doesn't support '%s'"
                    " for %s" % (dt_options, test_cmd))

    # Disk specific attributes.
    device_source_name = params.get("dt_device_device_source", "attach.img")
    device_target = params.get("dt_device_device_target", "vdd")
    device_bus = params.get("dt_device_bus_type")
    test_block_dev = "yes" == params.get("dt_device_iscsi_device", "no")

    # interface specific attributes.
    iface_network = params.get("dt_device_iface_network")
    iface_model_type = params.get("dt_device_iface_model_type")
    iface_mac_address = params.get("dt_device_iface_mac_address")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    if readonly:
        device_source = os.path.join(test.tmpdir, device_source_name)
    else:
        device_source = os.path.join(test.virtdir, device_source_name)

    # Create virtual device file.
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Can not get iscsi device name in host")
    else:
        create_device_file(device_source)

    try:
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # If we are testing cdrom device, we need to detach hdc in VM first.
        if device == "cdrom":
            virsh.detach_disk(vm_name,
                              device_target,
                              "--config",
                              ignore_status=True)

        device_xml = create_device_xml(params, test.virtdir, device_source)
        if not no_attach:
            s_attach = virsh.attach_device(vm_name,
                                           device_xml,
                                           flagstr="--config").exit_status
            if s_attach != 0:
                logging.error("Attach device failed before testing "
                              "detach-device")

        vm.start()
        vm.wait_for_serial_login()

        # Add acpiphp module before testing if VM's os type is rhel5.*
        if device in ['disk', 'cdrom']:
            if not acpiphp_module_modprobe(vm, os_type):
                test.error("Add acpiphp module failed before test.")

        # Turn VM into certain state.
        if pre_vm_state == "paused":
            logging.info("Suspending %s...", vm_name)
            if vm.is_alive():
                vm.pause()
        elif pre_vm_state == "shut off":
            logging.info("Shutting down %s...", vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)

        # Get disk count before test.
        if device in ['disk', 'cdrom']:
            device_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_before_cmd = len(vm_cls.devices)

        # Test.
        domid = vm.get_id()
        domuuid = vm.get_uuid()

        # Confirm how to reference a VM.
        if vm_ref == "name":
            vm_ref = vm_name
        elif vm_ref.find("invalid") != -1:
            vm_ref = params.get(vm_ref)
        elif vm_ref == "id":
            vm_ref = domid
        elif vm_ref == "hex_id":
            vm_ref = hex(int(domid))
        elif vm_ref == "uuid":
            vm_ref = domuuid
        else:
            vm_ref = ""

        status = virsh.detach_device(vm_ref,
                                     device_xml,
                                     readonly=readonly,
                                     flagstr=dt_options,
                                     debug=True).exit_status

        # Resume the guest after the command. On newer libvirt this has been
        # fixed (it used to be a bug): the change in the xml file is done
        # after the guest is resumed.
        if pre_vm_state == "paused":
            vm.resume()

        # Check disk count after command.
        check_count_after_cmd = True
        if device in ['disk', 'cdrom']:
            device_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_after_cmd = len(vm_cls.devices)
        if device_count_after_cmd < device_count_before_cmd:
            check_count_after_cmd = False

        # Recover VM state.
        if pre_vm_state == "shut off" and device in ['disk', 'cdrom']:
            vm.start()

        # Check in VM after command.
        check_vm_after_cmd = True
        if device in ['disk', 'cdrom']:
            check_vm_after_cmd = check_vm_partition(vm, device, os_type,
                                                    device_target)

        # Destroy VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        if device in ['disk', 'cdrom']:
            device_count_after_shutdown = vm_xml.VMXML.get_disk_count(vm_name)
        else:
            vm_cls = vm_xml.VMXML.new_from_dumpxml(vm_name)
            device_count_after_shutdown = len(vm_cls.devices)
        if device_count_after_shutdown < device_count_before_cmd:
            check_count_after_shutdown = False
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if test_block_dev:
            libvirt.setup_or_cleanup_iscsi(False)
        elif os.path.exists(device_source):
            os.remove(device_source)

    # Check results.
    if status_error:
        if not status:
            test.fail("detach-device exit with unexpected value.")
    else:
        if status:
            test.fail("virsh detach-device failed.")
        if dt_options.count("config"):
            if check_count_after_shutdown:
                test.fail("See config detached device in "
                          "xml file after VM shutdown.")
            if pre_vm_state == "shut off":
                if check_count_after_cmd:
                    test.fail("See device in xml after detach with"
                              " --config option")
            elif pre_vm_state == "running":
                if not check_vm_after_cmd and device in ['disk', 'cdrom']:
                    test.fail("Cannot see device in VM after"
                              " detach with '--config' option"
                              " when VM is running.")

        elif dt_options.count("live"):
            if check_count_after_cmd:
                test.fail("See device in xml after detach with"
                          "--live option")
            if not check_count_after_shutdown:
                test.fail("Cannot see config detached device in"
                          " xml file after VM shutdown with"
                          " '--live' option.")
            if check_vm_after_cmd and device in ['disk', 'cdrom']:
                test.fail("See device in VM with '--live' option"
                          " when VM is running")
        elif dt_options.count("current"):
            if check_count_after_cmd:
                test.fail("See device in xml after detach with"
                          " --current option")
            if pre_vm_state == "running":
                if not check_count_after_shutdown:
                    test.fail("Cannot see config detached device in"
                              " xml file after VM shutdown with"
                              " '--current' option.")
                if check_vm_after_cmd and device in ['disk', 'cdrom']:
                    test.fail("See device in VM with '--live'"
                              " option when VM is running")
        elif dt_options.count("persistent"):
            if check_count_after_shutdown:
                test.fail("See device deattached with "
                          "'--persistent' option after "
                          "VM shutdown.")
        # Check disk save and restore.
        if test_disk_save_restore:
            save_file = "/tmp/%s.save" % vm_name
            check_disk_save_restore(save_file, device_targets,
                                    startup_policy)
            if os.path.exists(save_file):
                os.remove(save_file)

        # If we are testing hotplug, detach the disk at last.
        if device_at_dt_disk:
            for i in range(len(disks)):
                dt_options = ""
                if devices[i] == "cdrom":
                    dt_options = "--config"
                ret = virsh.detach_disk(vm_name, device_targets[i],
                                        dt_options)
                libvirt.check_exit_status(ret)
            # Check disks in VM after hotunplug.
            if check_patitions_hotunplug:
                if not check_vm_partitions(devices,
                                           device_targets, False):
                    raise error.TestFail("See device in VM after hotunplug")

        elif hotplug:
            for i in range(len(disks_xml)):
                if len(device_attach_error) > i:
                    if device_attach_error[i] == "yes":
                        continue
                ret = virsh.detach_device(vm_name, disks_xml[i].xml,
                                          flagstr=attach_option)
                os.remove(disks_xml[i].xml)
def run(test, params, env):
    """
    Test virsh snapshot command with disks of all kinds of types.

    (1). Init the variables from params.
    (2). Create an image with a specific format.
    (3). Attach the disk to the vm.
    (4). Create a snapshot.
    (5). Revert the snapshot.
    (6). Cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    image_format = params.get("snapshot_image_format", "qcow2")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")

    # Get a tmp_dir.
    tmp_dir = data_dir.get_tmp_dir()
    # Create an image.
    params['image_name'] = "snapshot_test"
    params['image_format'] = image_format
    params['image_size'] = "1M"
    image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
    img_path, _ = image.create(params)
    # Do the attach action.
    result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                               extra="--persistent --subdriver %s" % image_format)
    if result.exit_status:
        raise error.TestNAError("Failed to attach disk %s to VM."
                                "Detail: %s." % (img_path, result.stderr))

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    try:
        # Create snapshot.
        if snapshot_from_xml:
            snapshot_name = "snapshot_test"
            lines = ["<domainsnapshot>\n",
                     "<name>%s</name>\n" % snapshot_name,
                     "<description>Snapshot Test</description>\n"]
            if snapshot_memory == "external":
                memory_external = os.path.join(tmp_dir, "snapshot_memory")
                snapshot_external_disk.append(memory_external)
                lines.append("<memory snapshot=\'%s\' file='%s'/>\n" %
                             (snapshot_memory, memory_external))
            else:
                lines.append("<memory snapshot='%s'/>\n" % snapshot_memory)

            # Add all disks into xml file.
            disks = vm.get_disk_devices().values()
            lines.append("<disks>\n")
            for disk in disks:
                lines.append("<disk name='%s' snapshot='%s'>\n" %
                             (disk['source'], snapshot_disk))
                if snapshot_disk == "external":
                    disk_external = os.path.join(tmp_dir,
                                                 "%s.snap" % os.path.basename(disk['source']))
                    snapshot_external_disk.append(disk_external)
                    lines.append("<source file='%s'/>\n" % disk_external)
                lines.append("</disk>\n")
            lines.append("</disks>\n")
            lines.append("</domainsnapshot>")

            snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
            snapshot_xml_file = open(snapshot_xml_path, "w")
            snapshot_xml_file.writelines(lines)
            snapshot_xml_file.close()
            snapshot_result = virsh.snapshot_create(
                vm_name, ("--xmlfile %s" % snapshot_xml_path))
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
        else:
            options = ""
            snapshot_result = virsh.snapshot_create(vm_name, options)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                r"\d+", snapshot_result.stdout.strip()).group(0)
            if snapshot_current:
                lines = ["<domainsnapshot>\n",
                         "<description>Snapshot Test</description>\n",
                         "<state>running</state>\n",
                         "<creationTime>%s</creationTime>" % snapshot_name,
                         "</domainsnapshot>"]
                snapshot_xml_path = "%s/snapshot_xml" % tmp_dir
                snapshot_xml_file = open(snapshot_xml_path, "w")
                snapshot_xml_file.writelines(lines)
                snapshot_xml_file.close()
                options += "--redefine %s --current" % snapshot_xml_path
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            raise error.TestFail("Success to create snapshot in negative case\n"
                                 "Detail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        status, output = session.cmd_status_output("touch %s" % tmp_file_path)
        if status:
            raise error.TestFail("Touch file in vm failed. %s" % output)

        session.close()

        # Destroy vm for snapshot revert.
        virsh.destroy(vm_name)
        # Revert snapshot.
        revert_options = ""
        if snapshot_revert_paused:
            revert_options += " --paused"
        revert_result = virsh.snapshot_revert(vm_name, snapshot_name,
                                              revert_options)
        if revert_result.exit_status:
            raise error.TestFail(
                "Revert snapshot failed. %s" % revert_result.stderr.strip())

        if vm.is_dead():
            raise error.TestFail("Revert snapshot failed.")

        if snapshot_revert_paused:
            if vm.is_paused():
                vm.resume()
            else:
                raise error.TestFail("Revert command successed, but VM is not "
                                     "paused after reverting with --paused option.")
        # login vm.
        session = vm.wait_for_login()
        # Check the result of revert.
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        if not status:
            raise error.TestFail("Tmp file exists, revert failed.")

        # Close the session.
        session.close()

    finally:
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        image.remove()
        if snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")
    vm.start()
    vm.wait_for_login()

    # Create virtual device file.
    create_device_file(device_source)

    # Add acpiphp module before testing if VM's os type is rhel5.*
    if not acpiphp_module_modprobe(vm, os_type):
        raise error.TestError("Add acpiphp module failed before test.")

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        s_detach = virsh.detach_disk(vm_name, device_target,
                                     "--config").exit_status
        if s_detach != 0:
            logging.error("Detach hdc failed before test.")
        vm.start()

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        if bus_type == "ide" and vm.is_alive():
            vm.destroy(gracefully=False)
        s_attach = virsh.attach_disk(vm_name, device_source, device_target,
                                     "--driver qemu --config").exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")
        if vm.is_dead():
            vm.start()
Beispiel #50
0
def run_svirt_attach_disk(test, params, env):
    """
    Test svirt in adding disk to VM.

    (1). Init variables for the test.
    (2). Create an image to be attached to the VM.
    (3). Attach the disk.
    (4). Start the VM and check the result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("svirt_attach_disk_host_selinux", "enforcing")
    # Get variables about seclabel for VM.
    sec_type = params.get("svirt_attach_disk_vm_sec_type", "dynamic")
    sec_model = params.get("svirt_attach_disk_vm_sec_model", "selinux")
    sec_label = params.get("svirt_attach_disk_vm_sec_label", None)
    sec_relabel = params.get("svirt_attach_disk_vm_sec_relabel", "yes")
    sec_dict = {'type': sec_type, 'model': sec_model, 'label': sec_label,
                'relabel': sec_relabel}
    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    # Get variables about the image.
    img_label = params.get('svirt_attach_disk_disk_label')
    img_name = "svirt_disk"
    # Default label for the other disks.
    # To ensure VM is able to access other disks.
    default_label = params.get('svirt_attach_disk_disk_default_label', None)

    # Set selinux of host.
    backup_sestatus = utils_selinux.get_status()
    utils_selinux.set_status(host_sestatus)
    # Set the default label to other disks of vm.
    disks = vm.get_disk_devices()
    for disk in disks.values():
        utils_selinux.set_context_of_file(filename=disk['source'],
                                          context=default_label)
    # Init a QemuImg instance.
    params['image_name'] = img_name
    tmp_dir = data_dir.get_tmp_dir()
    image = qemu_storage.QemuImg(params, tmp_dir, img_name)
    # Create an image.
    img_path, result = image.create(params)
    # Set the context of the image.
    utils_selinux.set_context_of_file(filename=img_path, context=img_label)
    # Set the context of the VM.
    vmxml.set_seclabel(sec_dict)
    vmxml.sync()

    # Do the attach action.
    try:
        virsh.attach_disk(vm_name, source=img_path, target="vdf",
                          extra="--persistent", ignore_status=False)
    except error.CmdError:
        raise error.TestFail("Attach disk %s to vdf on VM %s failed."
                             % (img_path, vm.name))

    # Check result.
    try:
        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.
            # VM with set seclabel can access the image with the
            # set context.
            if status_error:
                raise error.TestFail('Test succeeded in negative case.')
        except virt_vm.VMStartError, e:
            # Starting VM failed.
            # VM with set seclabel can not access the image with the
            # set context.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)
    finally:
        # clean up
        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              ignore_status=False)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed."
                                 % vm.name)
        image.remove()
        backup_xml.sync()
        utils_selinux.set_status(backup_sestatus)
    # Create virtual device file.
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            raise error.TestNAError("Can not get iscsi device name in host")
    else:
        create_device_file(device_source)

    if vm.is_alive():
        vm.destroy(gracefully=False)

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        virsh.detach_disk(vm_name, device_target, "--config",
                          ignore_status=True)
   
    device_xml = create_device_xml(params, test.virtdir, device_source)
    if not no_attach:
        s_attach = virsh.attach_device(vm_name, device_xml,
                                       flagstr="--config").exit_status
        if s_attach != 0:
            logging.error("Attach device failed before testing detach-device")

    vm.start()
    vm.wait_for_login()

    # Add acpiphp module before testing if VM's os type is rhel5.*
    if device in ['disk', 'cdrom']:
        if not acpiphp_module_modprobe(vm, os_type):
            raise error.TestError("Add acpiphp module failed before test.")
Beispiel #52
0
        # Check disk save and restore.
        if test_disk_save_restore:
            save_file = "/tmp/%s.save" % vm_name
            check_disk_save_restore(save_file, device_targets,
                                    startup_policy)
            if os.path.exists(save_file):
                os.remove(save_file)

        # If we are testing hotplug, detach the disk at last.
        if device_at_dt_disk:
            for i in range(len(disks)):
                dt_options = ""
                if devices[i] == "cdrom":
                    dt_options = "--config"
                ret = virsh.detach_disk(vm_name, device_targets[i],
                                        dt_options, **virsh_dargs)
                libvirt.check_exit_status(ret)
            # Check disks in VM after hotunplug.
            if check_patitions_hotunplug:
                if not check_vm_partitions(devices,
                                           device_targets, False):
                    test.fail("See device in VM after hotunplug")

        elif hotplug:
            for i in range(len(disks_xml)):
                if len(device_attach_error) > i:
                    if device_attach_error[i] == "yes":
                        continue
                ret = virsh.detach_device(vm_name, disks_xml[i].xml,
                                          flagstr=attach_option, **virsh_dargs)
                os.remove(disks_xml[i].xml)
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command.

    The command can attach a new disk or detach a disk.
    1. Prepare the test environment: destroy or suspend a VM.
    2. Perform the virsh attach/detach-disk operation.
    3. Recover the test environment.
    4. Confirm the test result.
    """
    def check_vm_partition(vm, device, os_type, target_name, old_parts):
        """
        Check VM disk's partition.

        :param vm: VM guest.
        :param device: Device type.
        :param os_type: VM's operating system type.
        :param target_name: Device target name.
        :param old_parts: Partitions in the guest before the operation.
        :return: True if checked successfully.
        """
        logging.info("Checking VM partittion...")
        if vm.is_dead():
            vm.start()
        try:
            attached = False
            if os_type == "linux":
                session = vm.wait_for_login()
                new_parts = libvirt.get_parts_list(session)
                added_parts = list(set(new_parts).difference(set(old_parts)))
                logging.debug("Added parts: %s" % added_parts)
                for i in range(len(added_parts)):
                    if device == "disk":
                        if target_name.startswith("vd"):
                            if added_parts[i].startswith("vd"):
                                attached = True
                        elif target_name.startswith(
                                "hd") or target_name.startswith("sd"):
                            if added_parts[i].startswith("sd"):
                                attached = True
                    elif device == "cdrom":
                        if added_parts[i].startswith("sr"):
                            attached = True
                session.close()
            return attached
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False
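
    # Note the guest-side naming checked above: virtio targets (vdX) show up
    # as vdX, hd*/sd* targets as sdX, and cdrom devices as srX.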

    def acpiphp_module_modprobe(vm, os_type):
        """
        Add acpiphp module if VM's os type is rhel5.*

        :param vm: VM guest.
        :param os_type: VM's operating system type.
        :return: True if operated successfully.
        """
        if vm.is_dead():
            vm.start()
        try:
            if os_type == "linux":
                session = vm.wait_for_login()
                s_rpm, _ = session.cmd_status_output("rpm --version")
                # If status is different from 0, this
                # guest OS doesn't support the rpm package
                # manager
                if s_rpm:
                    session.close()
                    return True
                _, o_vd = session.cmd_status_output(
                    "rpm -qa | grep redhat-release")
                if o_vd.find("5Server") != -1:
                    s_mod, o_mod = session.cmd_status_output(
                        "modprobe acpiphp")
                    del o_mod
                    if s_mod != 0:
                        session.close()
                        return False
                session.close()
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Get test command.
    test_cmd = params.get("at_dt_disk_test_cmd", "attach-disk")

    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    at_with_shareable = "yes" == params.get("at_with_shareable", 'no')
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    status_error = "yes" == params.get("status_error", 'no')
    no_attach = params.get("at_dt_disk_no_attach", 'no')
    os_type = params.get("os_type", "linux")
    qemu_file_lock = params.get("qemu_file_lock", "")
    if qemu_file_lock:
        if utils_misc.compare_qemu_version(2, 9, 0):
            logging.info('From qemu-kvm-rhev 2.9.0 on, QEMU image locking '
                         'prevents multiple runs of QEMU or qemu-img '
                         'while a VM is running.')
            if test_cmd == "detach-disk" or pre_vm_state == "shut off":
                test.cancel('This case is not supported.')
            else:
                logging.info(
                    'The expected result is failure as opposed to success')
                status_error = True

    # Disk specific attributes.
    device = params.get("at_dt_disk_device", "disk")
    device_source_name = params.get("at_dt_disk_device_source", "attach.img")
    device_source_format = params.get("at_dt_disk_device_source_format", "raw")
    device_target = params.get("at_dt_disk_device_target", "vdd")
    device_disk_bus = params.get("at_dt_disk_bus_type", "virtio")
    source_path = "yes" == params.get("at_dt_disk_device_source_path", "yes")
    create_img = "yes" == params.get("at_dt_disk_create_image", "yes")
    test_twice = "yes" == params.get("at_dt_disk_test_twice", "no")
    test_type = "yes" == params.get("at_dt_disk_check_type", "no")
    test_audit = "yes" == params.get("at_dt_disk_check_audit", "no")
    test_block_dev = "yes" == params.get("at_dt_disk_iscsi_device", "no")
    test_logcial_dev = "yes" == params.get("at_dt_disk_logical_device", "no")
    restart_libvirtd = "yes" == params.get("at_dt_disk_restart_libvirtd", "no")
    vg_name = params.get("at_dt_disk_vg", "vg_test_0")
    lv_name = params.get("at_dt_disk_lv", "lv_test_0")
    serial = params.get("at_dt_disk_serial", "")
    address = params.get("at_dt_disk_address", "")
    address2 = params.get("at_dt_disk_address2", "")
    cache_options = params.get("cache_options", "")
    time_sleep = params.get("time_sleep", 3)
    if at_with_shareable:
        at_options += " --mode shareable"
    if serial:
        at_options += (" --serial %s" % serial)
    if address2:
        at_options_twice = at_options + (" --address %s" % address2)
    if address:
        at_options += (" --address %s" % address)
    if cache_options:
        if cache_options.count("directsync"):
            if not libvirt_version.version_compare(1, 0, 0):
                test.cancel("'directsync' cache option doesn't "
                            "support in current libvirt version.")
        at_options += (" --cache %s" % cache_options)

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start vm and get all partions in vm.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Create virtual device file.
    device_source_path = os.path.join(data_dir.get_tmp_dir(),
                                      device_source_name)
    if test_block_dev:
        device_source = libvirt.setup_or_cleanup_iscsi(True)
        if not device_source:
            # We should skip this case
            test.cancel("Can not get iscsi device name in host")
        if test_logcial_dev:
            lv_utils.vg_create(vg_name, device_source)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)
    else:
        if source_path and create_img:
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1G",
                disk_format=device_source_format)
        else:
            device_source = device_source_name

    # If we are testing audit, we need to start the audit service first.
    if test_audit:
        auditd_service = Factory.create_service("auditd")
        if not auditd_service.status():
            auditd_service.start()
        logging.info("Auditd service status: %s" % auditd_service.status())

    # If we are testing cdrom device, we need to detach hdc in VM first.
    if device == "cdrom":
        if vm.is_alive():
            vm.destroy(gracefully=False)
        s_detach = virsh.detach_disk(vm_name, device_target,
                                     "--config").exit_status
        if s_detach != 0:
            logging.error("Detach hdc failed before test.")

    # If we are testing detach-disk, we need to attach certain device first.
    if test_cmd == "detach-disk" and no_attach != "yes":
        s_at_options = "--driver qemu --config"
        # Since the lock feature was introduced in libvirt 3.9.0, the disk
        # shareable option must be set if the disk needs to be attached
        # multiple times.
        if at_with_shareable or (test_twice
                                 and libvirt_version.version_compare(3, 9, 0)):
            s_at_options += ' --mode shareable'
        s_attach = virsh.attach_disk(vm_name, device_source, device_target,
                                     s_at_options).exit_status
        if s_attach != 0:
            logging.error("Attaching device failed before testing detach-disk")

        if test_twice:
            device_target2 = params.get("at_dt_disk_device_target2",
                                        device_target)
            device_source = libvirt.create_local_disk(
                "file",
                path=device_source_path,
                size="1",
                disk_format=device_source_format)
            s_attach = virsh.attach_disk(vm_name, device_source,
                                         device_target2,
                                         s_at_options).exit_status
            if s_attach != 0:
                logging.error("Attaching device failed before testing "
                              "detach-disk test_twice")

    vm.start()
    vm.wait_for_login()

    # Add acpiphp module before testing if VM's os type is rhle5.*
    if not acpiphp_module_modprobe(vm, os_type):
        test.error("Add acpiphp module failed before test.")

    # Turn VM into certain state.
    if pre_vm_state == "paused":
        logging.info("Suspending %s..." % vm_name)
        if vm.is_alive():
            vm.pause()
    elif pre_vm_state == "shut off":
        logging.info("Shuting down %s..." % vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)

    # Get disk count before test.
    disk_count_before_cmd = vm_xml.VMXML.get_disk_count(vm_name)

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    if test_cmd == "attach-disk":

        # Since the lock feature was introduced in libvirt 3.9.0, the disk
        # shareable option must be set if the disk needs to be attached
        # multiple times.
        if test_twice and libvirt_version.version_compare(3, 9, 0):
            if not at_with_shareable:
                at_options += " --mode shareable"
        status = virsh.attach_disk(vm_ref,
                                   device_source,
                                   device_target,
                                   at_options,
                                   debug=True).exit_status
    elif test_cmd == "detach-disk":
        status = virsh.detach_disk(vm_ref,
                                   device_target,
                                   dt_options,
                                   debug=True).exit_status

    if restart_libvirtd:
        libvirtd_serv = utils_libvirtd.Libvirtd()
        libvirtd_serv.restart()

    if test_twice:
        device_target2 = params.get("at_dt_disk_device_target2", device_target)
        device_source = libvirt.create_local_disk(
            "file",
            path=device_source_path,
            size="1G",
            disk_format=device_source_format)
        if test_cmd == "attach-disk":
            if address2:
                at_options = at_options_twice
            status = virsh.attach_disk(vm_ref,
                                       device_source,
                                       device_target2,
                                       at_options,
                                       debug=True).exit_status
        elif test_cmd == "detach-disk":
            status = virsh.detach_disk(vm_ref,
                                       device_target2,
                                       dt_options,
                                       debug=True).exit_status

    # Resume the guest after the command. On newer libvirt this has been
    # fixed (it used to be a bug): the change in the xml file is done after
    # the guest is resumed.
    if pre_vm_state == "paused":
        vm.resume()

    # Check audit log
    check_audit_after_cmd = True
    if test_audit:
        grep_audit = ('grep "%s" /var/log/audit/audit.log' %
                      test_cmd.split("-")[0])
        cmd = (grep_audit + ' | ' +
               'grep "%s" | tail -n1 | grep "res=success"' % device_source)
        if process.run(cmd, shell=True).exit_status:
            logging.error("Audit check failed")
            check_audit_after_cmd = False

    # Need to wait a while for the XML to sync.
    time.sleep(float(time_sleep))
    # Check disk count after command.
    check_count_after_cmd = True
    disk_count_after_cmd = vm_xml.VMXML.get_disk_count(vm_name)
    if test_cmd == "attach-disk":
        if disk_count_after_cmd == disk_count_before_cmd:
            check_count_after_cmd = False
    elif test_cmd == "detach-disk":
        if disk_count_after_cmd < disk_count_before_cmd:
            check_count_after_cmd = False

    # Recover VM state.
    if pre_vm_state == "shut off":
        vm.start()

    # Check in VM after command.
    check_vm_after_cmd = True
    check_vm_after_cmd = check_vm_partition(vm, device, os_type, device_target,
                                            old_parts)

    # Check disk type after attach.
    check_disk_type = True
    if test_type:
        if test_block_dev:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "block")
        else:
            check_disk_type = vm_xml.VMXML.check_disk_type(
                vm_name, device_source, "file")
    # Check disk serial after attach.
    check_disk_serial = True
    if serial:
        disk_serial = vm_xml.VMXML.get_disk_serial(vm_name, device_target)
        if serial != disk_serial:
            check_disk_serial = False

    # Check disk address after attach.
    check_disk_address = True
    if address:
        disk_address = vm_xml.VMXML.get_disk_address(vm_name, device_target)
        if address != disk_address:
            check_disk_address = False

    # Check multifunction address after attach.
    check_disk_address2 = True
    if address2:
        disk_address2 = vm_xml.VMXML.get_disk_address(vm_name, device_target2)
        if address2 != disk_address2:
            check_disk_address2 = False

    # Check disk cache option after attach.
    check_cache_after_cmd = True
    if cache_options:
        disk_cache = vm_xml.VMXML.get_disk_attr(vm_name, device_target,
                                                "driver", "cache")
        if cache_options == "default":
            if disk_cache is not None:
                check_cache_after_cmd = False
        elif disk_cache != cache_options:
            check_cache_after_cmd = False

    # Eject cdrom test
    eject_cdrom = "yes" == params.get("at_dt_disk_eject_cdrom", "no")
    save_vm = "yes" == params.get("at_dt_disk_save_vm", "no")
    save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
    try:
        if eject_cdrom:
            eject_params = {
                'type_name': "file",
                'device_type': "cdrom",
                'target_dev': device_target,
                'target_bus': device_disk_bus
            }
            eject_xml = libvirt.create_disk_xml(eject_params)
            with open(eject_xml) as eject_file:
                logging.debug("Eject CDROM by XML: %s", eject_file.read())
            # Run the command twice to make sure the cdrom tray opens first
            # (BZ892289)
            # Open tray
            virsh.attach_device(domainarg=vm_name,
                                filearg=eject_xml,
                                debug=True)
            # Add time sleep between two attach commands.
            if time_sleep:
                time.sleep(float(time_sleep))
            # Eject cdrom
            result = virsh.attach_device(domainarg=vm_name,
                                         filearg=eject_xml,
                                         debug=True)
            if result.exit_status != 0:
                test.fail("Eject CDROM failed")
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do eject" % device_source)
        # Save and restore VM
        if save_vm:
            result = virsh.save(vm_name, save_file, debug=True)
            libvirt.check_exit_status(result)
            result = virsh.restore(save_file, debug=True)
            libvirt.check_exit_status(result)
            if vm_xml.VMXML.check_disk_exist(vm_name, device_source):
                test.fail("Find %s after do restore" % device_source)

        # Destroy VM.
        vm.destroy(gracefully=False)

        # Check disk count after VM shutdown (with --config).
        check_count_after_shutdown = True
        inactive_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        disk_count_after_shutdown = len(inactive_vmxml.get_disk_all())
        if test_cmd == "attach-disk":
            if disk_count_after_shutdown == disk_count_before_cmd:
                check_count_after_shutdown = False
        elif test_cmd == "detach-disk":
            if disk_count_after_shutdown < disk_count_before_cmd:
                check_count_after_shutdown = False

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()
        if os.path.exists(save_file):
            os.remove(save_file)
        if test_block_dev:
            if test_logcial_dev:
                libvirt.delete_local_disk("lvm",
                                          vgname=vg_name,
                                          lvname=lv_name)
                lv_utils.vg_remove(vg_name)
                process.run("pvremove %s" % device_source,
                            shell=True,
                            ignore_status=True)
            libvirt.setup_or_cleanup_iscsi(False)
        else:
            libvirt.delete_local_disk("file", device_source)

    # Check results.
    if status_error:
        if not status:
            test.fail("virsh %s exit with unexpected value." % test_cmd)
    else:
        if status:
            test.fail("virsh %s failed." % test_cmd)
        if test_cmd == "attach-disk":
            if at_options.count("config"):
                if not check_count_after_shutdown:
                    test.fail("Cannot see config attached device "
                              "in xml file after VM shutdown.")
                if not check_disk_serial:
                    test.fail("Serial set failed after attach")
                if not check_disk_address:
                    test.fail("Address set failed after attach")
                if not check_disk_address2:
                    test.fail("Address(multifunction) set failed"
                              " after attach")
            else:
                if not check_count_after_cmd:
                    test.fail("Cannot see device in xml file after attach.")
                if not check_vm_after_cmd:
                    test.fail("Cannot see device in VM after attach.")
                if not check_disk_type:
                    test.fail("Check disk type failed after attach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotplug failure after attach")
                if not check_cache_after_cmd:
                    test.fail("Check cache failure after attach")
                if at_options.count("persistent"):
                    if not check_count_after_shutdown:
                        test.fail("Cannot see device attached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if check_count_after_shutdown:
                        test.fail("See non-config attached device "
                                  "in xml file after VM shutdown.")
        elif test_cmd == "detach-disk":
            if dt_options.count("config"):
                if check_count_after_shutdown:
                    test.fail("See config detached device in "
                              "xml file after VM shutdown.")
            else:
                if check_count_after_cmd:
                    test.fail("See device in xml file " "after detach.")
                if check_vm_after_cmd:
                    test.fail("See device in VM after detach.")
                if not check_audit_after_cmd:
                    test.fail("Audit hotunplug failure " "after detach")

                if dt_options.count("persistent"):
                    if check_count_after_shutdown:
                        test.fail("See device deattached "
                                  "with persistent after "
                                  "VM shutdown.")
                else:
                    if not check_count_after_shutdown:
                        test.fail("See non-config detached "
                                  "device in xml file after "
                                  "VM shutdown.")

        else:
            test.error("Unknown command %s." % test_cmd)
Beispiel #54
0
def run(test, params, env):
    """
    Test virsh {at|de}tach-disk command.

    The command can attach a new disk or detach a disk.
    1. Prepare the test environment: destroy or suspend a VM.
    2. Perform the virsh attach-disk/detach-disk operation.
    3. Recover the test environment.
    4. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    pre_vm_state = params.get("at_dt_disk_pre_vm_state", "running")
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def is_attached(vmxml_devices, disk_type, source_file, target_dev):
        """
        Check whether the attached device and disk exist.

        :param vmxml_devices: VMXMLDevices instance
        :param disk_type: disk's device type: cdrom or floppy
        :param source_file: disk's source file to check
        :param target_dev: target device name
        :return: True/False whether the backing file and device are found
        """
        disks = vmxml_devices.by_device_tag('disk')
        for disk in disks:
            if disk.device != disk_type:
                continue
            if disk.target['dev'] != target_dev:
                continue
            if disk.xmltreefile.find('source') is not None:
                if disk.source.attrs['file'] != source_file:
                    continue
            else:
                continue
            # All three conditions met
            logging.debug("Find %s in given disk XML", source_file)
            return True
        logging.debug("Not find %s in gievn disk XML", source_file)
        return False

    def check_result(disk_source, disk_type, disk_target, flags, attach=True):
        """
        Check the test result of attach/detach-device command.
        """
        active_vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if not attach:
            utils_misc.wait_for(
                lambda: not is_attached(active_vmxml.devices, disk_type,
                                        disk_source, disk_target), 20)
        active_attached = is_attached(active_vmxml.devices, disk_type,
                                      disk_source, disk_target)
        vm_state = pre_vm_state
        if vm_state != "transient":
            inactive_vmxml = vm_xml.VMXML.new_from_dumpxml(
                vm_name, options="--inactive")
            inactive_attached = is_attached(inactive_vmxml.devices, disk_type,
                                            disk_source, disk_target)
        if flags.count("config") and not flags.count("live"):
            if vm_state != "transient":
                if attach:
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated"
                            " when --config options used for"
                            " attachment")
                    if vm_state != "shutoff":
                        if active_attached:
                            raise exceptions.TestFail(
                                "Active domain XML updated "
                                "when --config options used "
                                "for attachment")
                else:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated"
                            " when --config options used for"
                            " detachment")
        elif flags.count("live") and not flags.count("config"):
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --live options used for"
                            " attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated "
                            "when --live options used for"
                            " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --live options used for"
                            " detachment")
        elif flags.count("live") and flags.count("config"):
            if attach:
                if vm_state in ["paused", "running"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --live --config options"
                            " used for attachment")
                    if not inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML not updated"
                            " when --live --config options "
                            "used for attachment")
            else:
                if vm_state in ["paused", "running"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated "
                            "when --live --config options "
                            "used for detachment")
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated "
                            "when --live --config options"
                            " used for detachment")
        elif flags.count("current") or flags == "":
            if attach:
                if vm_state in ["paused", "running", "transient"]:
                    if not active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --current options used "
                            "for attachment")
                if vm_state in ["paused", "running"]:
                    if inactive_attached:
                        raise exceptions.TestFail(
                            "Inactive domain XML updated "
                            "when --current options used "
                            "for live attachment")
                if vm_state == "shutoff" and not inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated"
                        " when --current options used for"
                        " attachment")
            else:
                if vm_state in ["paused", "running", "transient"]:
                    if active_attached:
                        raise exceptions.TestFail(
                            "Active domain XML not updated"
                            " when --current options used "
                            "for detachment")
                if vm_state == "shutoff" and inactive_attached:
                    raise exceptions.TestFail(
                        "Inactive domain XML not updated"
                        " when --current options used for"
                        " detachment")

    vm_ref = params.get("at_dt_disk_vm_ref", "name")
    at_status_error = "yes" == params.get("at_status_error", 'no')
    dt_status_error = "yes" == params.get("dt_status_error", 'no')
    # Disk specific attributes.
    at_options = params.get("at_dt_disk_at_options", "")
    dt_options = params.get("at_dt_disk_dt_options", "")
    device = params.get("at_dt_disk_device", "disk")
    device_source_name = params.get("at_dt_disk_device_source", "attach.img")
    device_target = params.get("at_dt_disk_device_target", "vdd")

    if vm.is_alive():
        vm.destroy(gracefully=False)

    # Back up xml file.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Turn VM into certain state.
    if pre_vm_state == "running":
        logging.info("Starting %s...", vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
    elif pre_vm_state == "shutoff":
        logging.info("Shuting down %s...", vm_name)
        if vm.is_alive():
            vm.destroy(gracefully=False)
    elif pre_vm_state == "paused":
        logging.info("Pausing %s...", vm_name)
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()
        if not vm.pause():
            raise exceptions.TestSkipError("Cann't pause the domain")
    elif pre_vm_state == "transient":
        logging.info("Creating %s...", vm_name)
        vm.undefine()
        if virsh.create(backup_xml.xml, **virsh_dargs).exit_status:
            backup_xml.define()
            raise exceptions.TestSkipError("Cann't create the domain")
        vm.wait_for_login().close()

    # Test.
    domid = vm.get_id()
    domuuid = vm.get_uuid()

    # Confirm how to reference a VM.
    if vm_ref == "name":
        vm_ref = vm_name
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)
    elif vm_ref == "id":
        vm_ref = domid
    elif vm_ref == "hex_id":
        vm_ref = hex(int(domid))
    elif vm_ref == "uuid":
        vm_ref = domuuid
    else:
        vm_ref = ""

    try:
        # Create disk image.
        device_source = os.path.join(data_dir.get_tmp_dir(),
                                     device_source_name)
        libvirt.create_local_disk("file", device_source, "1")

        # Attach the disk.
        ret = virsh.attach_disk(vm_ref,
                                device_source,
                                device_target,
                                at_options,
                                debug=True)
        libvirt.check_exit_status(ret, at_status_error)

        # Check if the command takes effect in the config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()

        # Sleep a while for the VM to stabilize
        time.sleep(3)
        if not ret.exit_status:
            check_result(device_source, device, device_target, at_options)

        # Detach the disk.
        if pre_vm_state == "paused":
            if not vm.pause():
                raise exceptions.TestFail("Cann't pause the domain")
        ret = virsh.detach_disk(vm_ref, device_target, dt_options, debug=True)
        libvirt.check_exit_status(ret, dt_status_error)

        # Check if the command takes effect in the config file.
        if vm.is_paused():
            vm.resume()
            vm.wait_for_login().close()

        # Sleep a while for the VM to stabilize
        if not ret.exit_status:
            check_result(device_source, device, device_target, dt_options,
                         False)

        # Try to start vm at last.
        if vm.is_dead():
            vm.start()
            vm.wait_for_login().close()

    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        backup_xml.sync()

        if os.path.exists(device_source):
            os.remove(device_source)
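
# A condensed sketch of the flag semantics that check_result() above
# verifies: which domain definition (active/live xml vs. inactive/persistent
# xml) an attach is expected to modify for a running, persistent domain.
# This helper is illustrative only.
def _sketch_expected_xml_updates(flags):
    """Return (active_updated, inactive_updated) for a running domain."""
    has_config = "config" in flags
    has_live = "live" in flags
    if has_config and not has_live:
        return (False, True)   # --config alone touches the persistent xml
    if has_live and not has_config:
        return (True, False)   # --live alone touches the live xml
    if has_live and has_config:
        return (True, True)    # both xmls are updated
    # --current or no flag affects the live xml while the guest is running
    return (True, False)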
def run(test, params, env):
    """
    Test migration of multi vms.
    """
    vm_names = params.get("migrate_vms").split()
    if len(vm_names) < 2:
        raise exceptions.TestSkipError("At least two VMs are required "
                                       "for multi-VM migration.")

    # Prepare parameters
    method = params.get("virsh_migrate_method")
    jobabort = "yes" == params.get("virsh_migrate_jobabort", "no")
    options = params.get("virsh_migrate_options", "")
    status_error = "yes" == params.get("status_error", "no")
    remote_host = params.get("remote_host", "DEST_HOSTNAME.EXAMPLE.COM")
    local_host = params.get("local_host", "SOURCE_HOSTNAME.EXAMPLE.COM")
    host_user = params.get("host_user", "root")
    host_passwd = params.get("host_password", "PASSWORD")
    nfs_shared_disk = params.get("nfs_shared_disk", True)
    migration_type = params.get("virsh_migration_type", "simultaneous")
    migrate_timeout = int(params.get("virsh_migrate_thread_timeout", 900))
    migration_time = int(params.get("virsh_migrate_timeout", 60))

    # Params for NFS and SSH setup
    params["server_ip"] = params.get("migrate_dest_host")
    params["server_user"] = "******"
    params["server_pwd"] = params.get("migrate_dest_pwd")
    params["client_ip"] = params.get("migrate_source_host")
    params["client_user"] = "******"
    params["client_pwd"] = params.get("migrate_source_pwd")
    params["nfs_client_ip"] = params.get("migrate_dest_host")
    params["nfs_server_ip"] = params.get("migrate_source_host")
    desturi = libvirt_vm.get_uri_with_transport(transport="ssh",
                                                dest_ip=remote_host)
    srcuri = libvirt_vm.get_uri_with_transport(transport="ssh",
                                               dest_ip=local_host)

    # Don't allow the defaults.
    if srcuri.count('///') or srcuri.count('EXAMPLE'):
        raise exceptions.TestSkipError("The srcuri '%s' is invalid" % srcuri)
    if desturi.count('///') or desturi.count('EXAMPLE'):
        raise exceptions.TestSkipError("The desturi '%s' is invalid" % desturi)

    # Config ssh autologin for remote host
    ssh_key.setup_remote_ssh_key(remote_host, host_user, host_passwd, port=22,
                                 public_key="rsa")

    # Prepare local session and remote session
    localrunner = remote.RemoteRunner(host=remote_host, username=host_user,
                                      password=host_passwd)
    remoterunner = remote.RemoteRunner(host=remote_host, username=host_user,
                                       password=host_passwd)
    # Configure NFS in remote host
    if nfs_shared_disk:
        nfs_client = nfs.NFSClient(params)
        nfs_client.setup()

    # Prepare MigrationHelper instance
    vms = []
    for vm_name in vm_names:
        vm = env.get_vm(vm_name)
        vms.append(vm)

    try:
        option = make_migration_options(method, options, migration_time)

        # make sure cache=none
        if "unsafe" not in options:
            device_target = params.get("virsh_device_target", "sda")
            for vm in vms:
                if vm.is_alive():
                    vm.destroy()
            for each_vm in vm_names:
                logging.info("configure cache=none")
                vmxml = vm_xml.VMXML.new_from_dumpxml(each_vm)
                device_source = str(vmxml.get_disk_attr(each_vm, device_target,
                                                        'source', 'file'))
                ret_detach = virsh.detach_disk(each_vm, device_target,
                                               "--config")
                status = ret_detach.exit_status
                output = ret_detach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                if status:
                    raise exceptions.TestError("Failed to detach disk")

                subdriver = utils_test.get_image_info(device_source)['format']
                ret_attach = virsh.attach_disk(each_vm, device_source,
                                               device_target, "--driver qemu "
                                               "--config --cache none "
                                               "--subdriver %s" % subdriver)
                status = ret_attach.exit_status
                output = ret_attach.stdout.strip()
                logging.info("Status:%s", status)
                logging.info("Output:\n%s", output)
                if status:
                    raise exceptions.TestError("Failed to attach disk")

        for vm in vms:
            if vm.is_dead():
                vm.start()
                vm.wait_for_login()
        multi_migration(vms, srcuri, desturi, option, migration_type,
                        migrate_timeout, jobabort, lrunner=localrunner,
                        rrunner=remoterunner)
    except Exception, info:
        logging.error("Test failed: %s" % info)
        flag_migration = False
        if test_snapshot:
            snap_option = params.get("snapshot_option", "")
            check_snapshot(snap_option)
        if test_blockcopy:
            check_blockcopy(targetdev)

        # Detach the device.
        if attach_device:
            xml_file = libvirt.create_disk_xml(params)
            ret = virsh.detach_device(vm_name, xml_file)
            libvirt.check_exit_status(ret)
            if additional_guest:
                ret = virsh.detach_device(guest_name, xml_file)
                libvirt.check_exit_status(ret)
        elif attach_disk:
            ret = virsh.detach_disk(vm_name, targetdev)
            libvirt.check_exit_status(ret)

        # Check disk in vm after detachment.
        if attach_device or attach_disk:
            session = vm.wait_for_login()
            new_parts = libvirt.get_parts_list(session)
            if len(new_parts) != len(old_parts):
                raise exceptions.TestFail("Disk still exists in vm"
                                          " after detachment")
            session.close()

    except virt_vm.VMStartError, details:
        for msg in unsupported_err:
            if msg in str(details):
                raise exceptions.TestSkipError(details)
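
# The migration test above forces cache=none by detaching each disk and
# re-attaching it with "--cache none". A minimal sketch of that step for a
# single disk, assuming the virsh and utils_test modules imported above;
# the helper name is illustrative.
def _sketch_set_cache_none(vm_name, target, source):
    """Persistently re-attach a disk with cache=none."""
    ret = virsh.detach_disk(vm_name, target, "--config", debug=True)
    if ret.exit_status:
        raise RuntimeError("detach failed: %s" % ret.stderr)
    subdriver = utils_test.get_image_info(source)['format']
    options = ("--driver qemu --config --cache none "
               "--subdriver %s" % subdriver)
    ret = virsh.attach_disk(vm_name, source, target, options, debug=True)
    if ret.exit_status:
        raise RuntimeError("attach failed: %s" % ret.stderr)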
Beispiel #57
0
def trigger_events(events_list=[]):
    """
    Trigger various events in events_list
    """
    expected_events_list = []
    tmpdir = data_dir.get_tmp_dir()
    save_path = os.path.join(tmpdir, "vm_event.save")
    new_disk = os.path.join(tmpdir, "new_disk.img")
    try:
        for event in events_list:
            if event in ["start", "restore"]:
                if vm.is_alive():
                    vm.destroy()
            else:
                if not vm.is_alive():
                    vm.start()
                    vm.wait_for_login().close()
            if event == "start":
                virsh.start(vm_name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s: Started Booted")
                vm.wait_for_login().close()
            elif event == "save":
                virsh.save(vm_name, save_path, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s: Stopped Saved")
            elif event == "restore":
                if not os.path.exists(save_path):
                    logging.error("%s does not exist", save_path)
                else:
                    virsh.restore(save_path, **virsh_dargs)
                    expected_events_list.append("'lifecycle' for %s: Started Restored")
            elif event == "destroy":
                virsh.destroy(vm_name, **virsh_dargs)
                expected_events_list.append("'lifecycle' for %s: Stopped Destroyed")
            elif event == "reset":
                virsh.reset(vm_name, **virsh_dargs)
                expected_events_list.append("'reboot' for %s")
            elif event == "vcpupin":
                virsh.vcpupin(vm_name, "0", "0", **virsh_dargs)
                expected_events_list.append("'tunable' for %s:\n\tcputune.vcpupin0: 0")
            elif event == "emulatorpin":
                virsh.emulatorpin(vm_name, "0", **virsh_dargs)
                expected_events_list.append("'tunable' for %s:\n\tcputune.emulatorpin: 0")
            elif event == "setmem":
                virsh.setmem(vm_name, 1048576, **virsh_dargs)
                expected_events_list.append("'balloon-change' for %s:")
            elif event == "detach-disk":
                if not os.path.exists(new_disk):
                    open(new_disk, "a").close()
                # Attach the disk first; this event will not be caught
                virsh.attach_disk(vm_name, new_disk, "vdb", **virsh_dargs)
                virsh.detach_disk(vm_name, "vdb", **virsh_dargs)
                expected_events_list.append("'device-removed' for %s: virtio-disk1")
            else:
                raise error.TestError("Unsupported event: %s" % event)
            # The event may not be received immediately
            time.sleep(3)
    finally:
        if os.path.exists(save_path):
            os.unlink(save_path)
        if os.path.exists(new_disk):
            os.unlink(new_disk)
        # Note: returning from finally suppresses any in-flight exception.
        return expected_events_list
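
# The expected_events_list entries above are format strings with a "%s"
# placeholder for the domain name. A minimal sketch of how a caller might
# match them against captured "virsh event" output; the helper name and the
# exact substitution are assumptions, not part of the original test.
def _sketch_check_events(vm_name, expected_events_list, event_output):
    """Check that every expected event string appears in the output."""
    for event_str in expected_events_list:
        expected = event_str % vm_name if "%s" in event_str else event_str
        if expected not in event_output:
            raise AssertionError("Event not found: %s" % expected)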
    try:
        # For safety and simplicity, define a new vm to test with
        new_vm_name = "%s_vsmtest" % vm.name
        mig = utlv.MigrationTest()
        if vm.is_alive():
            vm.destroy()
        utlv.define_new_vm(vm.name, new_vm_name)
        vm = libvirt_vm.VM(new_vm_name, vm.params, vm.root_dir,
                           vm.address_cache)

        # Change the disk of the vm to shared disk
        # Detach exist devices
        devices = vm.get_blk_devices()
        for device in devices:
            s_detach = virsh.detach_disk(vm.name, device, "--config",
                                         debug=True)
            if s_detach.exit_status:
                raise error.TestError("Detach %s failed before test." % device)

        # Attach the system image as vda
        # Then the added scsi disks will be sda, sdb, ...
        attach_args = "--subdriver %s --config" % sys_image_fmt
        virsh.attach_disk(vm.name, sys_image_source, "vda",
                          attach_args, debug=True)

        vms = [vm]

        def start_check_vm(vm):
            try:
                vm.start()
            except virt_vm.VMStartError, detail:
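
# The snippet above is truncated inside start_check_vm(). A minimal sketch of
# what such a helper might look like, assuming a failed start should simply
# be reported; this completion is illustrative, not the original code.
def _sketch_start_check_vm(vm):
    try:
        vm.start()
        vm.wait_for_login().close()
        return True
    except virt_vm.VMStartError, detail:
        logging.error("VM failed to start: %s", detail)
        return False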
Beispiel #59
0
def run(test, params, env):
    """
    Test DAC in adding nfs pool disk to VM.

    (1).Init variables for test.
    (2).Create nfs pool and vol.
    (3).Attach the nfs pool vol to VM.
    (4).Start VM and check result.
    """
    # Get general variables.
    status_error = ('yes' == params.get("status_error", 'no'))
    host_sestatus = params.get("dac_nfs_disk_host_selinux", "enforcing")
    # Get qemu.conf config variables
    qemu_user = params.get("qemu_user")
    qemu_group = params.get("qemu_group")
    dynamic_ownership = "yes" == params.get("dynamic_ownership", "yes")
    # Get variables about pool vol
    virt_use_nfs = params.get("virt_use_nfs", "off")
    nfs_server_dir = params.get("nfs_server_dir", "nfs-server")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    export_options = params.get("export_options",
                                "rw,async,no_root_squash,fsid=0")
    emulated_image = params.get("emulated_image")
    vol_name = params.get("vol_name")
    vol_format = params.get("vol_format")
    bk_file_name = params.get("bk_file_name")
    # Get pool vol variables
    img_tup = ("img_user", "img_group", "img_mode")
    img_val = []
    for i in img_tup:
        try:
            img_val.append(int(params.get(i)))
        except ValueError:
            raise error.TestNAError("%s value '%s' is not a number." %
                                    (i, params.get(i)))
    img_user, img_group, img_mode = img_val

    # Get variables about VM and get a VM object and VMXML instance.
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()

    # Backup domain disk label
    disks = vm.get_disk_devices()
    backup_labels_of_disks = {}
    for disk in disks.values():
        disk_path = disk['source']
        f = os.open(disk_path, 0)
        stat_re = os.fstat(f)
        backup_labels_of_disks[disk_path] = "%s:%s" % (stat_re.st_uid,
                                                       stat_re.st_gid)
        os.close(f)

    # Backup selinux status of host.
    backup_sestatus = utils_selinux.get_status()

    pvt = None
    snapshot_name = None
    disk_snap_path = []
    qemu_conf = utils_config.LibvirtQemuConfig()
    libvirtd = utils_libvirtd.Libvirtd()
    try:
        # chown domain disks to qemu:qemu to avoid failures on local disks
        for disk in disks.values():
            disk_path = disk['source']
            if qemu_user == "root":
                os.chown(disk_path, 0, 0)
            elif qemu_user == "qemu":
                os.chown(disk_path, 107, 107)

        # Set selinux of host.
        utils_selinux.set_status(host_sestatus)

        # set qemu conf
        qemu_conf.user = qemu_user
        qemu_conf.group = qemu_group
        if dynamic_ownership:
            qemu_conf.dynamic_ownership = 1
        else:
            qemu_conf.dynamic_ownership = 0
        logging.debug("the qemu.conf content is: %s" % qemu_conf)
        libvirtd.restart()

        # Create dst pool for create attach vol img
        logging.debug("export_options is: %s" % export_options)
        pvt = utlv.PoolVolumeTest(test, params)
        pvt.pre_pool(pool_name, pool_type, pool_target,
                     emulated_image, image_size="1G",
                     pre_disk_vol=["20M"],
                     export_options=export_options)

        # set virt_use_nfs
        result = utils.run("setsebool virt_use_nfs %s" % virt_use_nfs)
        if result.exit_status:
            raise error.TestNAError("Failed to set virt_use_nfs value")

        # Init a QemuImg instance and create img on nfs server dir.
        params['image_name'] = vol_name
        tmp_dir = data_dir.get_tmp_dir()
        nfs_path = os.path.join(tmp_dir, nfs_server_dir)
        image = qemu_storage.QemuImg(params, nfs_path, vol_name)
        # Create an image.
        server_img_path, result = image.create(params)

        if params.get("image_name_backing_file"):
            params['image_name'] = bk_file_name
            params['has_backing_file'] = "yes"
            image = qemu_storage.QemuImg(params, nfs_path, bk_file_name)
            server_img_path, result = image.create(params)

        # Get vol img path
        vol_name = server_img_path.split('/')[-1]
        virsh.pool_refresh(pool_name, debug=True)
        cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
        if cmd_result.exit_status:
            raise error.TestNAError("Failed to get volume path from pool.")
        img_path = cmd_result.stdout.strip()

        # Do the attach action.
        extra = "--persistent --subdriver qcow2"
        result = virsh.attach_disk(vm_name, source=img_path, target="vdf",
                                   extra=extra, debug=True)
        if result.exit_status:
            raise error.TestFail("Failed to attach disk %s to VM."
                                 "Detail: %s." % (img_path, result.stderr))

        # Change img ownership and mode on nfs server dir
        os.chown(server_img_path, img_user, img_group)
        os.chmod(server_img_path, img_mode)

        img_label_before = check_ownership(server_img_path)
        if img_label_before:
            logging.debug("attached image ownership on nfs server before "
                          "start: %s" % img_label_before)

        # Start VM to check the VM is able to access the image or not.
        try:
            vm.start()
            # Start VM successfully.

            img_label_after = check_ownership(server_img_path)
            if img_label_after:
                logging.debug("attached image ownership on nfs server after"
                              " start: %s" % img_label_after)

            if status_error:
                raise error.TestFail('Test succeeded in negative case.')
        except virt_vm.VMStartError, e:
            # Starting VM failed.
            if not status_error:
                raise error.TestFail("Test failed in positive case."
                                     "error: %s" % e)

        if params.get("image_name_backing_file"):
            options = "--disk-only"
            snapshot_result = virsh.snapshot_create(vm_name, options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if not status_error:
                    raise error.TestFail("Failed to create snapshot. Error:%s."
                                         % snapshot_result.stderr.strip())
            snapshot_name = re.search(
                "\d+", snapshot_result.stdout.strip()).group(0)

        if snapshot_name:
            disks_snap = vm.get_disk_devices()
            for disk in disks_snap.values():
                disk_snap_path.append(disk['source'])
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata",
                                  debug=True)

        try:
            virsh.detach_disk(vm_name, target="vdf", extra="--persistent",
                              debug=True)
        except error.CmdError:
            raise error.TestFail("Detach disk 'vdf' from VM %s failed."
                                 % vm.name)
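
# check_ownership() is used above but defined outside this snippet. A minimal
# sketch of what it plausibly returns (a "uid:gid" string for the image
# path), assuming local stat() access to the file; illustrative only.
def _sketch_check_ownership(image_path):
    """Return the file's owner as 'uid:gid', or None if unreadable."""
    try:
        stat_re = os.stat(image_path)
        return "%s:%s" % (stat_re.st_uid, stat_re.st_gid)
    except OSError:
        return None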
def run(test, params, env):
    """
    Test virsh snapshot command with disks of various types.

    (1). Init the variables from params.
    (2). Create an image in the specified format.
    (3). Attach the disk to the vm.
    (4). Snapshot create.
    (5). Snapshot revert.
    (6). Cleanup.
    """
    # Init variables.
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    vm_state = params.get("vm_state", "running")
    image_format = params.get("snapshot_image_format", "qcow2")
    snapshot_del_test = "yes" == params.get("snapshot_del_test", "no")
    status_error = ("yes" == params.get("status_error", "no"))
    snapshot_from_xml = ("yes" == params.get("snapshot_from_xml", "no"))
    snapshot_current = ("yes" == params.get("snapshot_current", "no"))
    snapshot_revert_paused = ("yes" == params.get("snapshot_revert_paused",
                                                  "no"))
    replace_vm_disk = "yes" == params.get("replace_vm_disk", "no")
    disk_source_protocol = params.get("disk_source_protocol")
    vol_name = params.get("vol_name")
    tmp_dir = data_dir.get_tmp_dir()
    pool_name = params.get("pool_name", "gluster-pool")
    brick_path = os.path.join(tmp_dir, pool_name)
    multi_gluster_disks = "yes" == params.get("multi_gluster_disks", "no")

    # Pool variables.
    snapshot_with_pool = "yes" == params.get("snapshot_with_pool", "no")
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    emulated_image = params.get("emulated_image", "emulated-image")
    vol_format = params.get("vol_format")
    lazy_refcounts = "yes" == params.get("lazy_refcounts")
    options = params.get("snapshot_options", "")
    export_options = params.get("export_options", "rw,no_root_squash,fsid=0")

    # Set the volume xml attribute dictionary; extract all params starting
    # with 'vol_', which are for setting volume xml, except 'lazy_refcounts'.
    vol_arg = {}
    for key in params.keys():
        if key.startswith('vol_'):
            if key[4:] in ['capacity', 'allocation', 'owner', 'group']:
                vol_arg[key[4:]] = int(params[key])
            else:
                vol_arg[key[4:]] = params[key]
    vol_arg['lazy_refcounts'] = lazy_refcounts

    supported_pool_list = [
        "dir", "fs", "netfs", "logical", "iscsi", "disk", "gluster"
    ]
    if snapshot_with_pool:
        if pool_type not in supported_pool_list:
            raise error.TestNAError("%s not in support list %s" %
                                    (pool_target, supported_pool_list))

    # Do xml backup for final recovery
    vmxml_backup = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    # Some variable for xmlfile of snapshot.
    snapshot_memory = params.get("snapshot_memory", "internal")
    snapshot_disk = params.get("snapshot_disk", "internal")
    no_memory_snap = "yes" == params.get("no_memory_snap", "no")

    # Skip 'qed' cases for libvirt version greater than 1.1.0
    if libvirt_version.version_compare(1, 1, 0):
        if vol_format == "qed" or image_format == "qed":
            raise error.TestNAError("QED support changed, check bug: "
                                    "https://bugzilla.redhat.com/show_bug.cgi"
                                    "?id=731570")

    if not libvirt_version.version_compare(1, 2, 7):
        # As bug 1017289 was closed as WONTFIX, the support only
        # exists in 1.2.7 and higher
        if disk_source_protocol == 'gluster':
            raise error.TestNAError("Snapshot on glusterfs not support in "
                                    "current version. Check more info with "
                                    "https://bugzilla.redhat.com/buglist.cgi?"
                                    "bug_id=1017289,1032370")

    # Init snapshot_name
    snapshot_name = None
    snapshot_external_disk = []
    snapshot_xml_path = None
    del_status = None
    image = None
    pvt = None
    # Path of the snapshot config xml files kept by libvirt
    snap_cfg_path = "/var/lib/libvirt/qemu/snapshot/%s/" % vm_name
    try:
        if replace_vm_disk:
            utlv.set_vm_disk(vm, params, tmp_dir)
            if multi_gluster_disks:
                new_params = params.copy()
                new_params["pool_name"] = "gluster-pool2"
                new_params["vol_name"] = "gluster-vol2"
                new_params["disk_target"] = "vdf"
                new_params["image_convert"] = 'no'
                utlv.set_vm_disk(vm, new_params, tmp_dir)

        if snapshot_with_pool:
            # Create dst pool for create attach vol img
            pvt = utlv.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name,
                         pool_type,
                         pool_target,
                         emulated_image,
                         image_size="1G",
                         pre_disk_vol=["20M"],
                         source_name=vol_name,
                         export_options=export_options)

            if pool_type in ["iscsi", "disk"]:
                # iscsi and disk pools do not support creating volumes via
                # libvirt; a logical pool can create volumes via libvirt,
                # but the volume format is not supported and defaults to
                # 'raw'.
                pv = libvirt_storage.PoolVolume(pool_name)
                vols = pv.list_volumes().keys()
                if vols:
                    vol_name = vols[0]
                else:
                    raise error.TestNAError("No volume in pool: %s" %
                                            pool_name)
            else:
                # Set volume xml file
                volxml = libvirt_xml.VolXML()
                newvol = volxml.new_vol(**vol_arg)
                vol_xml = newvol['xml']

                # Run virsh_vol_create to create vol
                logging.debug("create volume from xml: %s" %
                              newvol.xmltreefile)
                cmd_result = virsh.vol_create(pool_name,
                                              vol_xml,
                                              ignore_status=True,
                                              debug=True)
                if cmd_result.exit_status:
                    raise error.TestNAError("Failed to create attach volume.")

            cmd_result = virsh.vol_path(vol_name, pool_name, debug=True)
            if cmd_result.exit_status:
                raise error.TestNAError("Failed to get volume path from pool.")
            img_path = cmd_result.stdout.strip()

            if pool_type in ["logical", "iscsi", "disk"]:
                # Use qemu-img to format logical, iscsi and disk block device
                if vol_format != "raw":
                    cmd = "qemu-img create -f %s %s 10M" % (vol_format,
                                                            img_path)
                    cmd_result = utils.run(cmd, ignore_status=True)
                    if cmd_result.exit_status:
                        raise error.TestNAError("Failed to format volume, %s" %
                                                cmd_result.stdout.strip())
            extra = "--persistent --subdriver %s" % vol_format
        else:
            # Create an image.
            params['image_name'] = "snapshot_test"
            params['image_format'] = image_format
            params['image_size'] = "1M"
            image = qemu_storage.QemuImg(params, tmp_dir, "snapshot_test")
            img_path, _ = image.create(params)
            extra = "--persistent --subdriver %s" % image_format

        if not multi_gluster_disks:
            # Do the attach action.
            out = utils.run("qemu-img info %s" % img_path)
            logging.debug("The img info is:\n%s" % out.stdout.strip())
            result = virsh.attach_disk(vm_name,
                                       source=img_path,
                                       target="vdf",
                                       extra=extra,
                                       debug=True)
            if result.exit_status:
                raise error.TestNAError("Failed to attach disk %s to VM."
                                        "Detail: %s." %
                                        (img_path, result.stderr))

        # Create snapshot.
        if snapshot_from_xml:
            snap_xml = libvirt_xml.SnapshotXML()
            snapshot_name = "snapshot_test"
            snap_xml.snap_name = snapshot_name
            snap_xml.description = "Snapshot Test"
            if not no_memory_snap:
                if "--disk-only" not in options:
                    if snapshot_memory == "external":
                        memory_external = os.path.join(tmp_dir,
                                                       "snapshot_memory")
                        snap_xml.mem_snap_type = snapshot_memory
                        snap_xml.mem_file = memory_external
                        snapshot_external_disk.append(memory_external)
                    else:
                        snap_xml.mem_snap_type = snapshot_memory

            # Add all disks into xml file.
            vmxml = libvirt_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            disks = vmxml.devices.by_device_tag('disk')
            new_disks = []
            for src_disk_xml in disks:
                disk_xml = snap_xml.SnapDiskXML()
                disk_xml.xmltreefile = src_disk_xml.xmltreefile
                del disk_xml.device
                del disk_xml.address
                disk_xml.snapshot = snapshot_disk
                disk_xml.disk_name = disk_xml.target['dev']

                # Only qcow2 works as the external snapshot file format;
                # update it here
                driver_attr = disk_xml.driver
                driver_attr.update({'type': 'qcow2'})
                disk_xml.driver = driver_attr

                if snapshot_disk == 'external':
                    new_attrs = disk_xml.source.attrs
                    if disk_xml.source.attrs.has_key('file'):
                        new_file = "%s.snap" % disk_xml.source.attrs['file']
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None
                    elif disk_xml.source.attrs.has_key('name'):
                        new_name = "%s.snap" % disk_xml.source.attrs['name']
                        new_attrs.update({'name': new_name})
                        hosts = disk_xml.source.hosts
                    elif (disk_xml.source.attrs.has_key('dev')
                          and disk_xml.type_name == 'block'):
                        # Use a local file as the external snapshot target for
                        # block type, since a block device is treated as raw
                        # format by default and is not suitable as an external
                        # disk snapshot target. A workaround is to use
                        # qemu-img again with the target.
                        disk_xml.type_name = 'file'
                        del new_attrs['dev']
                        new_file = "%s/blk_src_file.snap" % tmp_dir
                        snapshot_external_disk.append(new_file)
                        new_attrs.update({'file': new_file})
                        hosts = None

                    new_src_dict = {"attrs": new_attrs}
                    if hosts:
                        new_src_dict.update({"hosts": hosts})
                    disk_xml.source = disk_xml.new_disk_source(**new_src_dict)
                else:
                    del disk_xml.source

                new_disks.append(disk_xml)

            snap_xml.set_disks(new_disks)
            snapshot_xml_path = snap_xml.xml
            logging.debug("The snapshot xml is: %s" % snap_xml.xmltreefile)

            options += " --xmlfile %s " % snapshot_xml_path

            if vm_state == "shut off":
                vm.destroy(gracefully=False)

            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            out_err = snapshot_result.stderr.strip()
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    if re.search(
                            "live disk snapshot not supported with this "
                            "QEMU binary", out_err):
                        raise error.TestNAError(out_err)

                    if libvirt_version.version_compare(1, 2, 5):
                        # Since commit d2e668e in 1.2.5, an internal active
                        # snapshot without memory state is rejected. Handle it
                        # as SKIP for now. This may be supported in the future by bug:
                        # https://bugzilla.redhat.com/show_bug.cgi?id=1103063
                        if re.search(
                                "internal snapshot of a running VM" +
                                " must include the memory state", out_err):
                            raise error.TestNAError("Check Bug #1083345, %s" %
                                                    out_err)

                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." % out_err)
        else:
            snapshot_result = virsh.snapshot_create(vm_name,
                                                    options,
                                                    debug=True)
            if snapshot_result.exit_status:
                if status_error:
                    return
                else:
                    raise error.TestFail(
                        "Failed to create snapshot. Error:%s." %
                        snapshot_result.stderr.strip())
            snapshot_name = re.search("\d+",
                                      snapshot_result.stdout.strip()).group(0)

            if snapshot_current:
                snap_xml = libvirt_xml.SnapshotXML()
                new_snap = snap_xml.new_from_snapshot_dumpxml(
                    vm_name, snapshot_name)
                # update an element
                new_snap.creation_time = snapshot_name
                snapshot_xml_path = new_snap.xml
                options += "--redefine %s --current" % snapshot_xml_path
                snapshot_result = virsh.snapshot_create(vm_name,
                                                        options,
                                                        debug=True)
                if snapshot_result.exit_status:
                    raise error.TestFail("Failed to create snapshot --current."
                                         "Error:%s." %
                                         snapshot_result.stderr.strip())

        if status_error:
            if not snapshot_del_test:
                raise error.TestFail("Success to create snapshot in negative"
                                     " case\nDetail: %s" % snapshot_result)

        # Touch a file in VM.
        if vm.is_dead():
            vm.start()
        session = vm.wait_for_login()

        # Init a unique name for tmp_file.
        tmp_file = tempfile.NamedTemporaryFile(prefix=("snapshot_test_"),
                                               dir="/tmp")
        tmp_file_path = tmp_file.name
        tmp_file.close()

        echo_cmd = "echo SNAPSHOT_DISK_TEST >> %s" % tmp_file_path
        status, output = session.cmd_status_output(echo_cmd)
        logging.debug("The echo output in domain is: '%s'", output)
        if status:
            raise error.TestFail("'%s' run failed with '%s'" %
                                 (tmp_file_path, output))
        status, output = session.cmd_status_output("cat %s" % tmp_file_path)
        logging.debug("File created with content: '%s'", output)

        session.close()

        # As only internal snapshot revert works now, let's only do revert
        # with internal, and move the all skip external cases back to pass.
        # After external also supported, just move the following code back.
        if snapshot_disk == 'internal':
            # Destroy vm for snapshot revert.
            if not libvirt_version.version_compare(1, 2, 3):
                virsh.destroy(vm_name)
            # Revert snapshot.
            revert_options = ""
            if snapshot_revert_paused:
                revert_options += " --paused"
            revert_result = virsh.snapshot_revert(vm_name,
                                                  snapshot_name,
                                                  revert_options,
                                                  debug=True)
            if revert_result.exit_status:
                # Attempts to revert external snapshots will FAIL with an error
                # "revert to external disk snapshot not supported yet" or "revert
                # to external snapshot not supported yet" since d410e6f. Thus,
                # let's check for that and handle as a SKIP for now. Check bug:
                # https://bugzilla.redhat.com/show_bug.cgi?id=1071264
                if re.search(
                        "revert to external \w* ?snapshot not supported yet",
                        revert_result.stderr):
                    raise error.TestNAError(revert_result.stderr.strip())
                else:
                    raise error.TestFail("Revert snapshot failed. %s" %
                                         revert_result.stderr.strip())

            if vm.is_dead():
                raise error.TestFail("Revert snapshot failed.")

            if snapshot_revert_paused:
                if vm.is_paused():
                    vm.resume()
                else:
                    raise error.TestFail(
                        "Revert command succeeded, but the VM is not "
                        "paused after reverting with the --paused "
                        "option.")
            # login vm.
            session = vm.wait_for_login()
            # Check the result of revert.
            status, output = session.cmd_status_output("cat %s" %
                                                       tmp_file_path)
            logging.debug("After revert cat file output='%s'", output)
            if not status:
                raise error.TestFail("Tmp file exists, revert failed.")

            # Close the session.
            session.close()

        # Test deleting a snapshot without "--metadata"; deleting an external
        # disk snapshot will fail for now.
        # Only do this when snapshot creation succeeded, which is filtered in
        # the cfg file.
        if snapshot_del_test:
            if snapshot_name:
                del_result = virsh.snapshot_delete(vm_name,
                                                   snapshot_name,
                                                   debug=True,
                                                   ignore_status=True)
                del_status = del_result.exit_status
                snap_xml_path = snap_cfg_path + "%s.xml" % snapshot_name
                if del_status:
                    if not status_error:
                        raise error.TestFail("Failed to delete snapshot.")
                    else:
                        if not os.path.exists(snap_xml_path):
                            raise error.TestFail(
                                "Snapshot xml file %s missing" % snap_xml_path)
                else:
                    if status_error:
                        err_msg = "Snapshot delete succeed but expect fail."
                        raise error.TestFail(err_msg)
                    else:
                        if os.path.exists(snap_xml_path):
                            raise error.TestFail("Snapshot xml file %s still "
                                                 "exists" % snap_xml_path)

    finally:
        if vm.is_alive():
            vm.destroy(gracefully=False)
        virsh.detach_disk(vm_name, target="vdf", extra="--persistent")
        if image:
            image.remove()
        if del_status and snapshot_name:
            virsh.snapshot_delete(vm_name, snapshot_name, "--metadata")
        for disk in snapshot_external_disk:
            if os.path.exists(disk):
                os.remove(disk)
        vmxml_backup.sync("--snapshots-metadata")

        libvirtd = utils_libvirtd.Libvirtd()
        if disk_source_protocol == 'gluster':
            utlv.setup_or_cleanup_gluster(False, vol_name, brick_path)
            if multi_gluster_disks:
                brick_path = os.path.join(tmp_dir, "gluster-pool2")
                utlv.setup_or_cleanup_gluster(False, "gluster-vol2",
                                              brick_path)
            libvirtd.restart()

        if snapshot_xml_path:
            if os.path.exists(snapshot_xml_path):
                os.unlink(snapshot_xml_path)
        if pvt:
            try:
                pvt.cleanup_pool(pool_name,
                                 pool_type,
                                 pool_target,
                                 emulated_image,
                                 source_name=vol_name)
            except error.TestFail, detail:
                libvirtd.restart()
                logging.error(str(detail))
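
# The snapshot test above verifies a revert by writing a marker file after
# taking the snapshot and checking that the file is gone once the guest is
# reverted. A minimal sketch of that pattern, assuming an established guest
# session object with cmd_status_output(); names are illustrative.
def _sketch_verify_revert(session, marker_path):
    """Return True if the marker file no longer exists after the revert."""
    status, _ = session.cmd_status_output("cat %s" % marker_path)
    # A non-zero status means the file is gone, i.e. the disk state was
    # rolled back to before the marker was written.
    return status != 0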