Example #1
def clean_up_snapshots(vm_name, snapshot_list=[]):
    """
    Do recovery after snapshot

    :param vm_name: Name of domain
    :param snapshot_list: The list of snapshot names you want to remove
    """
    if not snapshot_list:
        # Get all snapshot names from virsh snapshot-list
        snapshot_list = virsh.snapshot_list(vm_name)

        # Get snapshot disk path
        for snap_name in snapshot_list:
            # Delete useless disk snapshot file if exists
            snap_xml = virsh.snapshot_dumpxml(vm_name,
                                              snap_name).stdout.strip()
            xtf_xml = xml_utils.XMLTreeFile(snap_xml)
            disks_path = xtf_xml.findall('disks/disk/source')
            for disk in disks_path:
                os.system('rm -f %s' % disk.get('file'))
            # Delete snapshots of vm
            virsh.snapshot_delete(vm_name, snap_name)
    else:
        # Get snapshot disk path from domain xml because
        # there is no snapshot info with the name
        dom_xml = vm_xml.VMXML.new_from_dumpxml(vm_name).xmltreefile
        disk_path = dom_xml.find('devices/disk/source').get('file')
        for name in snapshot_list:
            snap_disk_path = disk_path.split(".")[0] + "." + name
            os.system('rm -f %s' % snap_disk_path)
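
The later examples (for instance #4, #7, #15 and #16) invoke this helper from a test's cleanup path. Below is a minimal sketch of that pattern, assuming the avocado-vt/virt-test import locations used throughout these examples (`virttest.virsh` and `virttest.utils_test.libvirt`); the function and snapshot names are illustrative only:

from virttest import virsh
from virttest.utils_test import libvirt


def snapshot_then_cleanup(vm_name):
    """Create a throwaway snapshot, then remove all snapshots of the domain."""
    try:
        result = virsh.snapshot_create_as(vm_name, "tmp-snap", debug=True)
        libvirt.check_exit_status(result)
    finally:
        # Mirrors the finally blocks in Examples #15/#16.
        libvirt.clean_up_snapshots(vm_name)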
Example #3
    def do_snapshot(vm_name, expected_str):
        """
        Run snapshot related commands: snapshot-create-as, snapshot-list,
        snapshot-dumpxml, snapshot-revert

        :param vm_name: vm name
        :param expected_str: expected string in snapshot-dumpxml
        :raise: test.fail if virsh command failed
        """
        snapshot_name = vm_name + "-snap"
        virsh_dargs = {'debug': True}

        cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name,
                                              **virsh_dargs)
        libvirt.check_exit_status(cmd_result)

        try:
            snapshots = virsh.snapshot_list(vm_name, **virsh_dargs)
        except process.CmdError:
            test.fail("Failed to get snapshots list for %s" % vm_name)
        if snapshot_name not in snapshots:
            test.fail("The snapshot '%s' was not in snapshot-list."
                      % snapshot_name)
        cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name,
                                            **virsh_dargs)
        libvirt.check_result(cmd_result, expected_match=expected_str)

        cmd_result = virsh.snapshot_revert(vm_name, "", "--current",
                                           **virsh_dargs)
        libvirt.check_exit_status(cmd_result)
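
`do_snapshot` is an inner helper that relies on `test`, `virsh` and `libvirt` from its enclosing test function, so it cannot be called standalone. A hypothetical invocation from that enclosing body is sketched here; the expected-match string is illustrative only and should be a fragment that actually appears in the snapshot-dumpxml output of the configuration under test:

    # Hypothetical call; "<state>running</state>" stands in for whatever
    # XML fragment the test expects snapshot-dumpxml to contain.
    do_snapshot(vm_name, "<state>running</state>")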
Example #4
    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(TMP_DATA_DIR, "rbd.mem")
        snap_disk = os.path.join(TMP_DATA_DIR, "rbd.disk")
        xml_snap_exp = [
            "disk name='%s' snapshot='external' type='file'" % target_dev
        ]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        elif test_disk_readonly:
            if libvirt_version.version_compare(6, 0, 0):
                libvirt.check_result(ret)
            else:
                libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")
    def check_bootorder_snapshot(disk_name):
        """
        Check VM disk's bootorder option with snapshot.

        :param disk_name: The target disk to be checked.
        """
        logging.info("Checking disk bootorder option with snapshot...")
        snapshot1 = "s1"
        snapshot2 = "s2"
        snapshot2_file = os.path.join(test.tmpdir, "s2")
        ret = virsh.snapshot_create(vm_name, "", **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(vm_name, "%s --disk-only" % snapshot1,
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_dumpxml(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        cmd = "echo \"%s\" | grep %s.%s" % (ret.stdout, disk_name, snapshot1)
        if utils.run(cmd, ignore_status=True).exit_status:
            raise error.TestError("Check snapshot disk failed")

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --memspec file=%s,snapshot=external"
                                       % (snapshot2, snapshot2_file),
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.dumpxml(vm_name)
        libvirt.check_exit_status(ret)

        cmd = ("echo \"%s\" | grep -A 16 %s.%s | grep \"boot order='%s'\""
               % (ret.stdout, disk_name, snapshot2, bootorder))
        if utils.run(cmd, ignore_status=True).exit_status:
            raise error.TestError("Check snapshot disk with bootorder failed")

        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot1 not in snap_lists or snapshot2 not in snap_lists:
            raise error.TestError("Check snapshot list failed")

        # Check virsh save command after snapshot.
        save_file = "/tmp/%s.save" % vm_name
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check virsh restore command after snapshot.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Passed all tests.
        os.remove(save_file)
Example #6
    def check_bootorder_snapshot(disk_name):
        """
        Check VM disk's bootorder option with snapshot.

        :param disk_name: The target disk to be checked.
        """
        logging.info("Checking disk bootorder option with snapshot...")
        snapshot1 = "s1"
        snapshot2 = "s2"
        snapshot2_file = os.path.join(test.tmpdir, "s2")
        ret = virsh.snapshot_create(vm_name, "", **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_create_as(vm_name, "%s --disk-only" % snapshot1,
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.snapshot_dumpxml(vm_name, snapshot1)
        libvirt.check_exit_status(ret)

        cmd = "echo \"%s\" | grep %s.%s" % (ret.stdout, disk_name, snapshot1)
        if process.system(cmd, ignore_status=True, shell=True):
            raise exceptions.TestError("Check snapshot disk failed")

        ret = virsh.snapshot_create_as(vm_name,
                                       "%s --memspec file=%s,snapshot=external"
                                       % (snapshot2, snapshot2_file),
                                       **virsh_dargs)
        libvirt.check_exit_status(ret)

        ret = virsh.dumpxml(vm_name)
        libvirt.check_exit_status(ret)

        cmd = ("echo \"%s\" | grep -A 16 %s.%s | grep \"boot order='%s'\""
               % (ret.stdout, disk_name, snapshot2, bootorder))
        if process.system(cmd, ignore_status=True, shell=True):
            raise exceptions.TestError("Check snapshot disk with bootorder failed")

        snap_lists = virsh.snapshot_list(vm_name)
        if snapshot1 not in snap_lists or snapshot2 not in snap_lists:
            raise exceptions.TestError("Check snapshot list failed")

        # Check virsh save command after snapshot.
        save_file = "/tmp/%s.save" % vm_name
        ret = virsh.save(vm_name, save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Check virsh restore command after snapshot.
        ret = virsh.restore(save_file, **virsh_dargs)
        libvirt.check_exit_status(ret)

        # Passed all tests.
        os.remove(save_file)
Example #7
    def check_snapshot(snap_option, target_dev='vda'):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(data_dir.get_tmp_dir(), "rbd.mem")
        snap_disk = os.path.join(data_dir.get_tmp_dir(), "rbd.disk")
        xml_snap_exp = ["disk name='%s' snapshot='external' type='file'" % target_dev]
        xml_dom_exp = ["source file='%s'" % snap_disk,
                       "backingStore type='network' index='1'",
                       "source protocol='rbd' name='%s'" % disk_src_name]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec %s,file=%s --disk-only" %
                       (snap_name, target_dev, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec %s,file="
                       "%s" % (snap_name, snap_mem, target_dev, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'"
                                % snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        if test_disk_internal_snapshot or test_disk_readonly:
            libvirt.check_result(ret, expected_fails=unsupported_err)
        else:
            libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                test.fail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                test.fail("Failed to check domain xml")
    def check_snapshot(snap_option):
        """
        Test snapshot operation.
        """
        snap_name = "s1"
        snap_mem = os.path.join(test.tmpdir, "rbd.mem")
        snap_disk = os.path.join(test.tmpdir, "rbd.disk")
        xml_snap_exp = ["disk name='vda' snapshot='external' type='file'"]
        xml_dom_exp = [
            "source file='%s'" % snap_disk,
            "backingStore type='network' index='1'",
            "source protocol='rbd' name='%s'" % disk_src_name
        ]
        if snap_option.count("disk-only"):
            options = ("%s --diskspec vda,file=%s --disk-only" %
                       (snap_name, snap_disk))
        elif snap_option.count("disk-mem"):
            options = ("%s --memspec file=%s --diskspec vda,file="
                       "%s" % (snap_name, snap_mem, snap_disk))
            xml_snap_exp.append("memory snapshot='external' file='%s'" %
                                snap_mem)
        else:
            options = snap_name

        ret = virsh.snapshot_create_as(vm_name, options)
        libvirt.check_result(ret, skip_if=unsupported_err)

        # check xml file.
        if not ret.exit_status:
            snap_xml = virsh.snapshot_dumpxml(vm_name, snap_name,
                                              debug=True).stdout.strip()
            dom_xml = virsh.dumpxml(vm_name, debug=True).stdout.strip()
            # Delete snapshots.
            libvirt.clean_up_snapshots(vm_name)
            if os.path.exists(snap_mem):
                os.remove(snap_mem)
            if os.path.exists(snap_disk):
                os.remove(snap_disk)

            if not all([x in snap_xml for x in xml_snap_exp]):
                raise exceptions.TestFail("Failed to check snapshot xml")
            if not all([x in dom_xml for x in xml_dom_exp]):
                raise exceptions.TestFail("Failed to check domain xml")
Example #9
def domainsnapshot_validate(test, vm_name, file=None, **virsh_dargs):
    """
    Test for schema domainsnapshot
    """
    snapshot_name = "snap-%s-%s" % (vm_name, time.time())
    cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name)
    libvirt.check_exit_status(cmd_result)

    def check_info(s1, s2, errorstr="Values differ"):
        if s1 != s2:
            test.fail("%s (%s != %s)" % (errorstr, s1, s2))
    try:
        ss_info = virsh.snapshot_info(vm_name, snapshot_name)
        check_info(ss_info["Name"], snapshot_name, "Incorrect snapshot name")
        check_info(ss_info["Domain"], vm_name, "Incorrect domain name")
    except process.CmdError as e:
        test.fail(str(e))
    except exceptions.TestFail as e:
        test.fail(str(e))

    cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name, to_file=file)
    libvirt.check_exit_status(cmd_result)
Example #10
def domainsnapshot_validate(test, vm_name, file=None, **virsh_dargs):
    """
    Test for schema domainsnapshot
    """
    snapshot_name = "snap-%s-%s" % (vm_name, time.time())
    cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name)
    libvirt.check_exit_status(cmd_result)

    def check_info(s1, s2, errorstr="Values differ"):
        if s1 != s2:
            test.fail("%s (%s != %s)" % (errorstr, s1, s2))

    try:
        ss_info = virsh.snapshot_info(vm_name, snapshot_name)
        check_info(ss_info["Name"], snapshot_name, "Incorrect snapshot name")
        check_info(ss_info["Domain"], vm_name, "Incorrect domain name")
    except process.CmdError as e:
        test.fail(str(e))
    except exceptions.TestFail as e:
        test.fail(str(e))

    cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name, to_file=file)
    libvirt.check_exit_status(cmd_result)
            result = virsh.snapshot_create_as(vm_name, opt)
            if result.exit_status:
                raise error.TestFail("Failed to create snapshot. Error:%s."
                                     % result.stderr.strip())
            time.sleep(1)

        snapshot_oldlist = virsh.snapshot_list(vm_name)

        # Get the snapshot xml before edit
        if len(snap_name) > 0:
            pre_name = check_name = snap_name
        else:
            cmd_result = virsh.snapshot_current(vm_name)
            pre_name = check_name = cmd_result.stdout.strip()

        ret = virsh.snapshot_dumpxml(vm_name, pre_name)
        if ret.exit_status == 0:
            pre_xml = ret.stdout
        else:
            raise error.TestFail("Fail to dumpxml of snapshot %s:%s" %
                                 (pre_name, ret.stderr.strip()))

        edit_cmd = []
        # Delete description element first then add it back with replacement
        edit_cmd.append(":g/<description>.*</d")
        replace_cmd = '%s<\/name>/%s<\/name>' % (pre_name, pre_name)
        replace_cmd += '\\r<description>%s<\/description>' % snap_desc
        replace_cmd = ":%s/" + replace_cmd + "/"
        edit_cmd.append(replace_cmd)
        # if have --clone or --rename, need to change snapshot name in xml
        if len(snap_opt) > 0:
Example #12
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1. Prepare test environment.
    2. Back up the VM's information to an xml file.
    3. When libvirtd == "off", stop the libvirtd service.
    4. Perform the virsh undefine operation.
    5. Recover the test environment (libvirtd service, VM).
    6. Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "password")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"
    nvram_o = None
    if platform.machine() == 'aarch64':
        nvram_o = " --nvram"
        option += nvram_o

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Back up xml file. A Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    elems = backup_xml.xmltreefile.findall('/devices/disk/source')
    existing_images = [elem.get('file') for elem in elems]

    # Backup images since remove-all-storage could remove existing libvirt
    # managed guest images
    if existing_images and option.count("remove-all-storage"):
        for img in existing_images:
            backup_img = img + '.bak'
            logging.info('Backup %s to %s', img, backup_img)
            shutil.copyfile(img, backup_img)

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(data_dir.get_tmp_dir(),
                                                snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name,
                                               snp_item,
                                               to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    test.cancel("This domain has snapshot(s), "
                                "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name,
                            pool_type,
                            pool_target,
                            emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                test.fail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            ret = virsh.attach_disk(vm_name,
                                    volume,
                                    disk_target,
                                    "--config",
                                    debug=True)
            if ret.exit_status != 0:
                test.error("Attach disk failed: %s" % ret.stderr)

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref,
                                       option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True,
                                       debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref, nvram_o,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            try:
                local_user = params.get("username", "root")
                uri = libvirt_vm.complete_uri(local_ip)
                # setup ssh auto login from remote machine to test machine
                # for the command to execute remotely
                ssh_key.setup_remote_ssh_key(remote_ip,
                                             remote_user,
                                             remote_pwd,
                                             hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (process.CmdError, remote.LoginError,
                    aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except process.CmdError as detail:
                logging.error("Detail: %s", detail)

        # After vm.destroy, virsh.domain_exists returns True due to
        # a timing issue and the test fails.
        time.sleep(2)
        # Check if VM exists.
        vm_exist = virsh.domain_exists(vm_name)

        # Check if xml file exists.
        xml_exist = False
        if vm.is_qemu() and os.path.exists(
                "/etc/libvirt/qemu/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_lxc() and os.path.exists("/etc/libvirt/lxc/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_xen() and os.path.exists("/etc/xen/%s" % vm_name):
            xml_exist = True

        # Check if save file exists when using --managedsave
        save_exist = os.path.exists(save_file)

        # Check if volume still exists when using --remove-all-storage
        volume_exist = volume and os.path.exists(volume)

        # Test define with acl control and recover domain.
        if params.get('setup_libvirt_polkit') == 'yes':
            if virsh.domain_exists(vm.name):
                virsh.undefine(vm_ref, nvram_o, ignore_status=True)
            cmd = "chmod 666 %s" % backup_xml.xml
            process.run(cmd, ignore_status=False, shell=True)
            s_define = virsh.define(backup_xml.xml,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri,
                                    ignore_status=True,
                                    debug=True)
            status3 = s_define.exit_status

    finally:
        # Recover main VM.
        try:
            backup_xml.sync()
        except LibvirtXMLError:
            # sync() tries to undefine and re-define the xml to sync,
            # but the virsh undefine test may have undefined the domain
            # already, which can lead to an error
            backup_xml.define()

        # Recover existing guest images
        if existing_images and option.count("remove-all-storage"):
            for img in existing_images:
                backup_img = img + '.bak'
                logging.info('Recover image %s to %s', backup_img, img)
                shutil.move(backup_img, img)

        # Clean up pool
        if pvtest:
            pvtest.cleanup_pool(pool_name, pool_type, pool_target,
                                emulated_img)
        # Recover VM snapshots.
        if option.count("snapshot") and (not vm.is_lxc()):
            logging.debug("Recover snapshots for domain!")
            for file_item in snp_file_list:
                virsh.snapshot_create(vm_name, file_item)

    # Check results.
    if status_error:
        if not status:
            if libvirtd_state == "off" and libvirt_version.version_compare(
                    5, 6, 0):
                logging.info(
                    "From libvirt version 5.6.0 libvirtd is restarted "
                    "and command should succeed")
            else:
                test.fail("virsh undefine return unexpected result.")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3 == 0:
                test.fail("virsh define with false acl permission" +
                          " should failed.")
    else:
        if status:
            test.fail("virsh undefine failed.")
        if undefine_twice:
            if not status2:
                test.fail("Undefine the same VM twice succeeded.")
        if vm_exist:
            test.fail("VM still exists after undefine.")
        if xml_exist:
            test.fail("Xml file still exists after undefine.")
        if option.count("managedsave") and save_exist:
            test.fail("Save file still exists after undefine.")
        if option.count("remove-all-storage") and volume_exist:
            test.fail("Volume file '%s' still exists after"
                      " undefine." % volume)
        if wipe_data and option.count("remove-all-storage"):
            if not output.count("Wiping volume '%s'" % disk_target):
                test.fail("Command didn't wipe volume storage!")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3:
                test.fail("virsh define with right acl permission" +
                          " should succeeded")
def check_snapslist(vm_name, options, option_dict, output,
                    snaps_before, snaps_list):
    no_metadata = options.find("--no-metadata")
    fdisks = "disks"

    # command with print-xml will not really create snapshot
    if options.find("print-xml") >= 0:
        xtf = xml_utils.XMLTreeFile(output)

        # With --print-xml no new snapshot is created
        if len(snaps_before) != len(snaps_list):
            raise error.TestFail("--print-xml create new snapshot")

    else:
        # The following does not check with print-xml
        get_sname = output.split()[2]

        # Check domain/snapshot xml depending on whether metadata exists
        if no_metadata < 0:
            output_dump = virsh.snapshot_dumpxml(vm_name,
                                                 get_sname).stdout.strip()
        else:
            output_dump = virsh.dumpxml(vm_name).stdout.strip()
            fdisks = "devices"

        xtf = xml_utils.XMLTreeFile(output_dump)

        find = 0
        for snap in snaps_list:
            if snap == get_sname:
                find = 1
                break

        # Should find snap in snaplist without --no-metadata
        if (find == 0 and no_metadata < 0):
            raise error.TestFail("Can not find snapshot %s!"
                                 % get_sname)
        # Should not find snap in list without metadata
        elif (find == 1 and no_metadata >= 0):
            raise error.TestFail("Can find snapshot metadata even "
                                 "if have --no-metadata")
        elif (find == 0 and no_metadata >= 0):
            logging.info("Can not find snapshot %s as no-metadata "
                         "is given" % get_sname)

            # Check snapshot only in qemu-img
            if (options.find("--disk-only") < 0 and
                    options.find("--memspec") < 0):
                ret = check_snap_in_image(vm_name, get_sname)

                if ret is False:
                    raise error.TestFail("No snap info in image")

        else:
            logging.info("Find snapshot %s in snapshot list."
                         % get_sname)

        # Check if the disk file exist when disk-only is given
        if options.find("disk-only") >= 0:
            for disk in xtf.find(fdisks).findall('disk'):
                diskpath = disk.find('source').get('file')
                if os.path.isfile(diskpath):
                    logging.info("disk file %s exist" % diskpath)
                    os.remove(diskpath)
                else:
                    # Didn't find <source file="path to disk"/>
                    # in output - this could leave a file around
                    # wherever the main OS image file is found
                    logging.debug("output_dump=%s", output_dump)
                    raise error.TestFail("Can not find disk %s"
                                         % diskpath)

        # Check if the guest is halted when 'halt' is given
        if options.find("halt") >= 0:
            domstate = virsh.domstate(vm_name)
            if re.match("shut off", domstate.stdout):
                logging.info("Domain is halted after create "
                             "snapshot")
            else:
                raise error.TestFail("Domain is not halted after "
                                     "snapshot created")

    # Check the snapshot xml regardless of having print-xml or not
    if (options.find("name") >= 0 and no_metadata < 0):
        if xtf.findtext('name') == option_dict["name"]:
            logging.info("get snapshot name same as set")
        else:
            raise error.TestFail("Get wrong snapshot name %s" %
                                 xtf.findtext('name'))

    if (options.find("description") >= 0 and no_metadata < 0):
        desc = xtf.findtext('description')
        if desc == option_dict["description"]:
            logging.info("get snapshot description same as set")
        else:
            raise error.TestFail("Get wrong description on xml")

    if options.find("diskspec") >= 0:
        if isinstance(option_dict['diskspec'], list):
            index = len(option_dict['diskspec'])
        else:
            index = 1

        disks = xtf.find(fdisks).findall('disk')

        for num in range(index):
            if isinstance(option_dict['diskspec'], list):
                option_disk = option_dict['diskspec'][num]
            else:
                option_disk = option_dict['diskspec']

            option_disk = "name=" + option_disk
            disk_dict = utils_misc.valued_option_dict(option_disk,
                                                      ",", 0, "=")
            logging.debug("disk_dict is %s", disk_dict)

            # For no metadata snapshot do not check name and
            # snapshot
            if no_metadata < 0:
                dname = disks[num].get('name')
                logging.debug("dname is %s", dname)
                if dname == disk_dict['name']:
                    logging.info("get disk%d name same as set in "
                                 "diskspec", num)
                else:
                    raise error.TestFail("Get wrong disk%d name %s"
                                         % num, dname)

                if option_disk.find('snapshot=') >= 0:
                    dsnap = disks[num].get('snapshot')
                    logging.debug("dsnap is %s", dsnap)
                    if dsnap == disk_dict['snapshot']:
                        logging.info("get disk%d snapshot type same"
                                     " as set in diskspec", num)
                    else:
                        raise error.TestFail("Get wrong disk%d "
                                             "snapshot type %s" %
                                             num, dsnap)

            if option_disk.find('driver=') >= 0:
                dtype = disks[num].find('driver').get('type')
                if dtype == disk_dict['driver']:
                    logging.info("get disk%d driver type same as "
                                 "set in diskspec", num)
                else:
                    raise error.TestFail("Get wrong disk%d driver "
                                         "type %s" % num, dtype)

            if option_disk.find('file=') >= 0:
                sfile = disks[num].find('source').get('file')
                if sfile == disk_dict['file']:
                    logging.info("get disk%d source file same as "
                                 "set in diskspec", num)
                    if os.path.exists(sfile):
                        os.unlink(sfile)
                else:
                    raise error.TestFail("Get wrong disk%d source "
                                         "file %s" % num, sfile)

    # For memspec check if the xml is same as setting
    # Also check if the mem file exists
    if options.find("memspec") >= 0:
        memspec = option_dict['memspec']
        if not re.search('file=', option_dict['memspec']):
            memspec = 'file=' + option_dict['memspec']

        mem_dict = utils_misc.valued_option_dict(memspec, ",", 0,
                                                 "=")
        logging.debug("mem_dict is %s", mem_dict)

        if no_metadata < 0:
            if memspec.find('snapshot=') >= 0:
                snap = xtf.find('memory').get('snapshot')
                if snap == mem_dict['snapshot']:
                    logging.info("get memory snapshot type same as"
                                 " set in diskspec")
                else:
                    raise error.TestFail("Get wrong memory snapshot"
                                         " type on print xml")

            memfile = xtf.find('memory').get('file')
            if memfile == mem_dict['file']:
                logging.info("get memory file same as set in "
                             "diskspec")
            else:
                raise error.TestFail("Get wrong memory file on "
                                     "print xml %s", memfile)

        if options.find("print-xml") < 0:
            if os.path.isfile(mem_dict['file']):
                logging.info("memory file generated")
                os.remove(mem_dict['file'])
            else:
                raise error.TestFail("Fail to generate memory file"
                                     " %s", mem_dict['file'])
Example #14
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1. Prepare test environment.
    2. Back up the VM's information to an xml file.
    3. When libvirtd == "off", stop the libvirtd service.
    4. Perform the virsh undefine operation.
    5. Recover the test environment (libvirtd service, VM).
    6. Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Back up xml file. A Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(test.tmpdir, snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    raise error.TestNAError("This domain has snapshot(s), "
                                            "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                raise error.TestFail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref, option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True, debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            try:
                uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (error.CmdError, remote.LoginError, aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except error.CmdError as detail:
                logging.error("Detail: %s", detail)
Example #15
def run_virsh_snapshot_dumpxml(test, params, env):
    """
    Test snapshot-dumpxml command, make sure that the xml you get is correct

    Test scenarios:
    1. live snapshot dump
    2. shutoff snapshot dump
    3. dumpxml with security info
    4. readonly mode
    """

    if not virsh.has_help_command('snapshot-dumpxml'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-dumpxml test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    passwd = params.get("snapshot_passwd")
    secu_opt = params.get("snapshot_secure_option")
    desc_opt = params.get("snapshot_desc_option")
    mem_opt = params.get("snapshot_mem_option")
    disk_opt = params.get("disk_only_snap")
    snap_name = params.get("snapshot_name", "snap_test")
    readonly = params.get("readonly", False)

    try:
        snap_opt = ""
        opt_dict = {}
        # collect all the parameters at one time
        opt_name = locals()
        for opt in ["snap_name", "desc_opt", "mem_opt", "disk_opt"]:
            if opt_name[opt] is not None:
                # Integrate snapshot create options
                snap_opt = snap_opt + " " + opt_name[opt]

        # Do xml backup for final recovery
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Add passwd in guest graphics
        if passwd is not None:
            vm = env.get_vm(vm_name)
            if vm.is_alive():
                vm.destroy()
            vm_xml.VMXML.add_security_info(
                         vm_xml.VMXML.new_from_dumpxml(vm_name), passwd)
            vm.start()
            if secu_opt is not None:
                opt_dict['passwd'] = passwd

        logging.debug("snapshot create options are %s", snap_opt)

        # Get state to do snapshot xml state check
        dom_state = virsh.domstate(vm_name).stdout.strip()

        # Create disk snapshot before all to make the origin image clean
        virsh.snapshot_create_as(vm_name, "--disk-only")

        # Create snapshot with options
        snapshot_result = virsh.snapshot_create_as(vm_name, snap_opt,
                                                   readonly=readonly)
        if snapshot_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to create snapshot. Error:%s."
                                     % snapshot_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Create snapshot failed as expected, Error:%s.",
                             snapshot_result.stderr.strip())
                return

        ctime = get_snap_createtime(vm_name, snap_name)

        # Run virsh command for snapshot-dumpxml
        dumpxml_result = virsh.snapshot_dumpxml(vm_name, snap_name, secu_opt)
        if dumpxml_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to dump snapshot xml. Error:%s."
                                     % dumpxml_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Dumpxml snapshot failed as expected, Error:%s.",
                             dumpxml_result.stderr.strip())
                return

        # Record all the parameters in dict at one time
        check_name = locals()
        for var in ["vm_name", "snap_name", "desc_opt", "dom_state", "ctime",
                    "disk_opt"]:
            if check_name[var] is not None:
                opt_dict[var] = check_name[var]

        logging.debug("opt_dict is %s", opt_dict)
        output = dumpxml_result.stdout.strip()
        snapshot_dumpxml_check(output, opt_dict)

    finally:
        # Recovery
        utils_test.libvirt.clean_up_snapshots(vm_name)
        vmxml_backup.sync("--snapshots-metadata")
Example #16
def run_virsh_snapshot_dumpxml(test, params, env):
    """
    Test snapshot-dumpxml command, make sure that the xml you get is correct

    Test scenarios:
    1. live snapshot dump
    2. shutoff snapshot dump
    3. dumpxml with security info
    4. readonly mode
    """

    if not virsh.has_help_command('snapshot-dumpxml'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-dumpxml test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    passwd = params.get("snapshot_passwd")
    secu_opt = params.get("snapshot_secure_option")
    desc_opt = params.get("snapshot_desc_option")
    mem_opt = params.get("snapshot_mem_option")
    disk_opt = params.get("disk_only_snap")
    snap_name = params.get("snapshot_name", "snap_test")
    readonly = params.get("readonly", False)

    try:
        snap_opt = ""
        opt_dict = {}
        # collect all the parameters at one time
        opt_name = locals()
        for opt in ["snap_name", "desc_opt", "mem_opt", "disk_opt"]:
            if opt_name[opt] is not None:
                # Integrate snapshot create options
                snap_opt = snap_opt + " " + opt_name[opt]

        # Do xml backup for final recovery
        vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
        # Add passwd in guest graphics
        if passwd is not None:
            vm = env.get_vm(vm_name)
            if vm.is_alive():
                vm.destroy()
            vm_xml.VMXML.add_security_info(
                vm_xml.VMXML.new_from_dumpxml(vm_name), passwd)
            vm.start()
            if secu_opt is not None:
                opt_dict['passwd'] = passwd

        logging.debug("snapshot create options are %s", snap_opt)

        # Get state to do snapshot xml state check
        dom_state = virsh.domstate(vm_name).stdout.strip()

        # Create disk snapshot before all to make the origin image clean
        virsh.snapshot_create_as(vm_name, "--disk-only")

        # Create snapshot with options
        snapshot_result = virsh.snapshot_create_as(vm_name,
                                                   snap_opt,
                                                   readonly=readonly)
        if snapshot_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to create snapshot. Error:%s." %
                                     snapshot_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Create snapshot failed as expected, Error:%s.",
                             snapshot_result.stderr.strip())
                return

        ctime = get_snap_createtime(vm_name, snap_name)

        # Run virsh command for snapshot-dumpxml
        dumpxml_result = virsh.snapshot_dumpxml(vm_name, snap_name, secu_opt)
        if dumpxml_result.exit_status:
            if status_error == "no":
                raise error.TestFail("Failed to dump snapshot xml. Error:%s." %
                                     dumpxml_result.stderr.strip())
            elif status_error == "yes":
                logging.info("Dumpxml snapshot failed as expected, Error:%s.",
                             dumpxml_result.stderr.strip())
                return

        # Record all the parameters in dict at one time
        check_name = locals()
        for var in [
                "vm_name", "snap_name", "desc_opt", "dom_state", "ctime",
                "disk_opt"
        ]:
            if check_name[var] is not None:
                opt_dict[var] = check_name[var]

        logging.debug("opt_dict is %s", opt_dict)
        output = dumpxml_result.stdout.strip()
        snapshot_dumpxml_check(output, opt_dict)

    finally:
        # Recovery
        utils_test.libvirt.clean_up_snapshots(vm_name)
        vmxml_backup.sync("--snapshots-metadata")
Example #17
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi target (and an iscsi pool for the volume disk case)
    3. Create an iscsi network/volume disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("'iscsi' disk doesn't support in"
                        " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("'volume' type disk doesn't support in"
                        " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                               is_login=False,
                                                               image_size='1G',
                                                               chap_user=chap_user,
                                                               chap_passwd=chap_passwd,
                                                               portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocol,
                               'source_name': iscsi_target + "/%s" % lun_num,
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'driver_type': 'qcow2',
                               'source_mode': disk_src_mode}
        else:
            test.cancel("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocol,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to be stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(data_dir.get_tmp_dir(), snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s"
                            % (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
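For reference, the CHAP secret setup this example drives through the virttest wrappers can also be reproduced with plain virsh. Below is a minimal, hypothetical standalone sketch (it assumes root privileges, a running libvirtd, the virsh binary on PATH, and Python 3; the usage target name "libvirtiscsi" and the password are placeholders, not values taken from the test above):

import base64
import subprocess
import tempfile

# Minimal iscsi usage secret, roughly what SecretXML produces
SECRET_TEMPLATE = """<secret ephemeral='no' private='yes'>
  <usage type='iscsi'>
    <target>libvirtiscsi</target>
  </usage>
</secret>
"""


def define_chap_secret(chap_password):
    """Define an iscsi usage secret, set its value and return its uuid."""
    with tempfile.NamedTemporaryFile("w", suffix=".xml") as xml_file:
        xml_file.write(SECRET_TEMPLATE)
        xml_file.flush()
        # Output looks like: "Secret <uuid> created"
        out = subprocess.check_output(
            ["virsh", "secret-define", xml_file.name]).decode()
    uuid = out.split()[1]
    value = base64.b64encode(chap_password.encode()).decode()
    subprocess.check_call(["virsh", "secret-set-value", uuid, value])
    return uuid


if __name__ == "__main__":
    print(define_chap_secret("password_placeholder"))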
Example #18
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi pool and volume when the disk type is 'volume'
    3. Create an iscsi network/volume disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk doesn't support in" +
                                    " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in" +
                                    " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s",
                          open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                      is_login=False,
                                                      chap_user=chap_user,
                                                      chap_passwd=chap_passwd)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))[1][0]
            except IndexError:
                raise error.TestError("Fail to get volume name")

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/1",
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            disk_params_src = {
                'source_pool': disk_src_pool,
                'source_volume': vol_name,
                'source_mode': disk_src_mode
            }
        else:
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        disk_xml_f = open(disk_xml)
        disk_xml_content = disk_xml_f.read()
        disk_xml_f.close()
        logging.debug("Attach disk by XML: %s", disk_xml_content)
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_info(vm_name, snapshot_name1,
                                             **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            sn_create_op = "%s --disk_ony %s" % (snapshot_name2, disk_target)
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
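The option string handed to virsh.snapshot_create_as above is passed straight through to the virsh CLI, so the same disk-only external snapshot can also be taken from a shell; the previous example in this listing additionally pins the overlay file with --diskspec. A small sketch, with an illustrative domain name, target device and path:

import subprocess

vm_name = "avocado-vt-vm1"        # illustrative domain name
disk_target = "vdb"               # guest target device to snapshot
snapshot_file = "/tmp/snap2.qcow2"

# Disk-only external snapshot that places the overlay for vdb in an
# explicit file; disks without a --diskspec entry use virsh defaults.
subprocess.check_call([
    "virsh", "snapshot-create-as", vm_name, "snap2",
    "--disk-only", "--diskspec",
    "%s,file=%s" % (disk_target, snapshot_file),
])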
Example #19
def run(test, params, env):
    """
    Test command: snapshot-edit
    Test options: --current, --rename, --clone
    """

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    snap_desc = params.get("snapshot_edit_description")
    snap_cur = params.get("snapshot_edit_current", "")
    snap_opt = params.get("snapshot_edit_option", "")
    snap_name = params.get("snapshot_edit_snapname", "")
    snap_newname = params.get("snapshot_edit_newname", "new-snap")
    snap_create_opt1 = params.get("snapshot_create_option1", "")
    snap_create_opt2 = params.get("snapshot_create_option2", "")

    # Do xml backup for final recovery
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    def edit_snap_xml(dom_name, edit_opts, edit_cmd):
        """
        Edit domain snapshot xml

        :param dom_name: name of domain
        :param snap_name: name of snapshot
        :param edit_opts: snapshot-edit options
        :param edit_cmd: edit command list in interactive mode
        """

        session = aexpect.ShellSession("sudo -s")
        try:
            logging.debug("snapshot-edit options is: %s" % edit_opts)
            logging.debug("edit cmd is: %s" % edit_cmd)
            session.sendline("virsh snapshot-edit %s %s"
                             % (dom_name, edit_opts))
            for i in edit_cmd:
                session.sendline(i)
            # Press ESC
            session.send('\x1b')
            # Save and quit
            session.send('ZZ')
            # Wait for the shell prompt to make sure the edit has completed
            remote.handle_prompts(session, None, None, r"[\#\$]\s*$")
            session.close()
            logging.info("Succeed to do snapshot edit")
        except (aexpect.ShellError, aexpect.ExpectError) as details:
            log = session.get_output()
            session.close()
            test.fail("Failed to do snapshot-edit: %s\n%s"
                      % (details, log))

    def snap_xml_compare(pre_xml, after_xml):
        """
        Compare the xml when snapshot-edit has the --clone or --rename option

        :param pre_xml: snapshot xml before edit
        :param after_xml: snapshot xml after edit
        """

        desc_sec = "<description>%s</description>" % snap_desc
        name_sec = "<name>%s</name>" % snap_newname
        name_pat = "<name>\S+</name>"
        desc_pat = "<description>.*?</description>"
        if re.search(r"%s\s+?%s" % (name_pat, desc_pat), pre_xml):
            pre_xml = re.sub(r"%s\s+?%s" % (name_pat, desc_pat),
                             name_sec + '\n' + desc_sec, pre_xml)
        else:
            pre_xml = re.subn(r"%s" % name_pat, name_sec + '\n' +
                              desc_sec, pre_xml, 1)[0]
        # change to list and remove the description element in list
        pre_xml_list = pre_xml.strip().splitlines()
        after_xml_list = after_xml.strip().splitlines()
        pre_list = [pl.strip() for pl in pre_xml_list]
        after_list = [al.strip() for al in after_xml_list]
        if not snap_desc:
            for i in pre_list:
                if desc_sec in i:
                    pre_list.remove(i)
        if pre_list == after_list:
            logging.info("Succeed to check the xml for description and name")
        else:
            # Print just the differences rather than printing both
            # files and forcing the eyeball comparison between lines
            # map(None, ...) is Python 2 only; zip_longest pads the shorter
            # list with None so unmatched lines still get reported
            from itertools import zip_longest
            elems = zip_longest(pre_xml.splitlines(), after_xml.splitlines())
            for pre_line, aft_line in elems:
                pre_stripped = pre_line.strip() if pre_line is not None else None
                aft_stripped = aft_line.strip() if aft_line is not None else None
                if pre_stripped != aft_stripped:
                    if pre_stripped is not None:
                        logging.debug("diff before='%s'", pre_stripped)
                    if aft_stripped is not None:
                        logging.debug("diff  after='%s'", aft_stripped)
            test.fail("Failed xml before/after comparison")

    snapshot_oldlist = None
    try:
        # Create disk snapshot before all to make the origin image clean
        logging.debug("Create snap-temp --disk-only")
        ret = virsh.snapshot_create_as(vm_name, "snap-temp --disk-only")
        if ret.exit_status != 0:
            test.fail("Fail to create temp snap, Error: %s"
                      % ret.stderr.strip())

        # Create snapshots
        for opt in [snap_create_opt1, snap_create_opt2]:
            logging.debug("...use option %s", opt)
            result = virsh.snapshot_create_as(vm_name, opt)
            if result.exit_status:
                test.fail("Failed to create snapshot. Error:%s."
                          % result.stderr.strip())
            time.sleep(1)

        snapshot_oldlist = virsh.snapshot_list(vm_name)

        # Get the snapshot xml before edit
        if len(snap_name) > 0:
            pre_name = check_name = snap_name
        else:
            cmd_result = virsh.snapshot_current(vm_name)
            pre_name = check_name = cmd_result.stdout.strip()

        ret = virsh.snapshot_dumpxml(vm_name, pre_name)
        if ret.exit_status == 0:
            pre_xml = ret.stdout.strip()
        else:
            test.fail("Fail to dumpxml of snapshot %s:%s" %
                      (pre_name, ret.stderr.strip()))

        edit_cmd = []
        replace_cmd = r'%s<\/name>/%s<\/name>' % (pre_name, pre_name)
        replace_cmd += r'\r<description>%s<\/description>' % snap_desc
        replace_cmd = ":%s/" + replace_cmd + "/"
        edit_cmd.append(replace_cmd)
        # if have --clone or --rename, need to change snapshot name in xml
        if len(snap_opt) > 0:
            edit_cmd.append(":2")
            edit_cmd.append(":s/<name>.*</<name>" + snap_newname + "<")
            check_name = snap_newname
        edit_opts = " " + snap_name + " " + snap_cur + " " + snap_opt

        # Do snapshot edit
        if status_error == "yes":
            output = virsh.snapshot_edit(vm_name, edit_opts)
            if output.exit_status == 0:
                test.fail("Succeed to do the snapshot-edit but"
                          " expect fail")
            else:
                logging.info("Fail to do snapshot-edit as expect: %s",
                             output.stderr.strip())
                return

        edit_snap_xml(vm_name, edit_opts, edit_cmd)

        # Do edit check
        snapshots = virsh.snapshot_list(vm_name)
        after_xml = virsh.snapshot_dumpxml(vm_name, check_name).stdout
        match_str = "<description>" + snap_desc + "</description>"
        if not re.search(match_str, after_xml.strip("\n")):
            if snap_desc:
                logging.debug("Failed to edit snapshot edit_opts=%s, match=%s",
                              edit_opts, match_str)
                # Only print first 15 lines - they are most relevant
                for i in range(15):
                    logging.debug("before xml=%s", pre_xml.split()[i].lstrip())
                    logging.debug(" after xml=%s",
                                  after_xml.split()[i].lstrip())
                test.fail("Failed to edit snapshot description")

        # Check edit options --clone
        if snap_opt == "--clone":
            if pre_name not in snapshots:
                test.fail("After clone, previous snapshot missing")
            snap_xml_compare(pre_xml, after_xml)

        if snap_opt == "--rename":
            if pre_name in snapshots:
                test.fail("After rename, snapshot %s still exist"
                          % pre_name)
            snap_xml_compare(pre_xml, after_xml)

        # Check if --current takes effect
        if len(snap_cur) > 0 and len(snap_name) > 0:
            cmd_result = virsh.snapshot_current(vm_name)
            snap_cur = cmd_result.stdout.strip()
            if snap_cur == check_name:
                logging.info("Check current is same as set %s", check_name)
            else:
                test.fail("Fail to check --current, current is %s "
                          "but set is %s" % (snap_cur, check_name))

    finally:
        utils_test.libvirt.clean_up_snapshots(vm_name, snapshot_oldlist)
        vmxml_backup.sync("--snapshots-metadata")
Example #20
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi pool and volume when the disk type is 'volume'
    3. Create an iscsi network/volume disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            raise error.TestNAError("'iscsi' disk doesn't support in"
                                    " current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in"
                                    " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s",
                          open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
            is_setup=True,
            is_login=False,
            image_size='1G',
            chap_user=chap_user,
            chap_passwd=chap_passwd,
            portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                raise error.TestError("Failed to get volume info")
            # Snapshot doesn't support raw disk format, create a qcow2 volume
            # disk for snapshot operation.
            process.run('qemu-img create -f qcow2 %s %s' % (vol_path, '100M'),
                        shell=True)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            disk_params_src = {
                'source_pool': disk_src_pool,
                'source_volume': vol_name,
                'driver_type': 'qcow2',
                'source_mode': disk_src_mode
            }
        else:
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        disk_xml_f = open(disk_xml)
        disk_xml_content = disk_xml_f.read()
        disk_xml_f.close()
        logging.debug("Attach disk by XML: %s", disk_xml_content)
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to be stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                error.TestFail("Failed getting snapshots list for %s" %
                               vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                error.TestFail("Failed getting snapshots info for %s" %
                               vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            snapshot_file = os.path.join(test.tmpdir, snapshot_name2)
            sn_create_op = ("%s --disk-only --diskspec %s,file=%s" %
                            (snapshot_name2, disk_target, snapshot_file))
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                raise error.TestError("Snapshot %s not found" % snapshot_name2)

        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # Here the script needs to wait for a while for the guest
                    # to recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
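Note that the older variants in this listing call base64.b64encode() directly on a text password, which only works on Python 2; under Python 3 the value has to be encoded to bytes first, as the first example shown here already does with the locale's preferred encoding. A small helper capturing that pattern:

import base64
import locale


def encode_secret_value(chap_passwd):
    """Return a base64 text string suitable for 'virsh secret-set-value'."""
    # base64.b64encode() needs bytes on Python 3, so round-trip through
    # the preferred locale encoding (mirrors the newer test variant).
    encoding = locale.getpreferredencoding()
    return base64.b64encode(chap_passwd.encode(encoding)).decode(encoding)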
Example #21
def check_snapslist(test, vm_name, options, option_dict, output, snaps_before,
                    snaps_list):
    no_metadata = options.find("--no-metadata")
    fdisks = "disks"

    # A command with print-xml will not really create a snapshot
    if options.find("print-xml") >= 0:
        xtf = xml_utils.XMLTreeFile(output)

        # With --print-xml no new snapshot is created
        if len(snaps_before) != len(snaps_list):
            test.fail("--print-xml created a new snapshot")

    else:
        # The following does not check with print-xml
        get_sname = output.split()[2]

        # Check domain or snapshot xml depending on whether there is metadata
        if no_metadata < 0:
            output_dump = virsh.snapshot_dumpxml(vm_name,
                                                 get_sname).stdout.strip()
        else:
            output_dump = virsh.dumpxml(vm_name).stdout.strip()
            fdisks = "devices"

        xtf = xml_utils.XMLTreeFile(output_dump)

        find = 0
        for snap in snaps_list:
            if snap == get_sname:
                find = 1
                break

        # Should find snap in snaplist without --no-metadata
        if (find == 0 and no_metadata < 0):
            test.fail("Can not find snapshot %s!" % get_sname)
        # Should not find snap in list when --no-metadata is given
        elif (find == 1 and no_metadata >= 0):
            test.fail("Found snapshot metadata even though"
                      " --no-metadata was given")
        elif (find == 0 and no_metadata >= 0):
            logging.info("Can not find snapshot %s as no-metadata "
                         "is given" % get_sname)

            # Check snapshot only in qemu-img
            if (options.find("--disk-only") < 0
                    and options.find("--memspec") < 0):
                ret = check_snap_in_image(vm_name, get_sname)

                if ret is False:
                    test.fail("No snap info in image")

        else:
            logging.info("Find snapshot %s in snapshot list." % get_sname)

        # Check if the disk file exist when disk-only is given
        if options.find("disk-only") >= 0:
            for disk in xtf.find(fdisks).findall('disk'):
                if disk.get('snapshot') == 'no':
                    continue
                diskpath = disk.find('source').get('file')
                if os.path.isfile(diskpath):
                    logging.info("disk file %s exist" % diskpath)
                    os.remove(diskpath)
                else:
                    # Didn't find <source file="path to disk"/>
                    # in output - this could leave a file around
                    # wherever the main OS image file is found
                    logging.debug("output_dump=%s", output_dump)
                    test.fail("Can not find disk %s" % diskpath)

        # Check if the guest is halted when 'halt' is given
        if options.find("halt") >= 0:
            domstate = virsh.domstate(vm_name)
            if re.match("shut off", domstate.stdout):
                logging.info("Domain is halted after create " "snapshot")
            else:
                test.fail("Domain is not halted after " "snapshot created")

    # Check the snapshot xml regardless of having print-xml or not
    if (options.find("name") >= 0 and no_metadata < 0):
        if xtf.findtext('name') == option_dict["name"]:
            logging.info("get snapshot name same as set")
        else:
            test.fail("Get wrong snapshot name %s" % xtf.findtext('name'))

    if (options.find("description") >= 0 and no_metadata < 0):
        desc = xtf.findtext('description')
        if desc == option_dict["description"]:
            logging.info("get snapshot description same as set")
        else:
            test.fail("Get wrong description on xml")

    if options.find("diskspec") >= 0:
        if isinstance(option_dict['diskspec'], list):
            index = len(option_dict['diskspec'])
        else:
            index = 1

        disks = xtf.find(fdisks).findall('disk')

        for num in range(index):
            if isinstance(option_dict['diskspec'], list):
                option_disk = option_dict['diskspec'][num]
            else:
                option_disk = option_dict['diskspec']

            option_disk = "name=" + option_disk
            disk_dict = utils_misc.valued_option_dict(option_disk, ",", 0, "=")
            logging.debug("disk_dict is %s", disk_dict)

            # For a no-metadata snapshot do not check the name and
            # snapshot attributes
            if no_metadata < 0:
                dname = disks[num].get('name')
                logging.debug("dname is %s", dname)
                if dname == disk_dict['name']:
                    logging.info("get disk%d name same as set in "
                                 "diskspec", num)
                else:
                    test.fail("Get wrong disk%d name %s" % (num, dname))

                if option_disk.find('snapshot=') >= 0:
                    dsnap = disks[num].get('snapshot')
                    logging.debug("dsnap is %s", dsnap)
                    if dsnap == disk_dict['snapshot']:
                        logging.info(
                            "get disk%d snapshot type same"
                            " as set in diskspec", num)
                    else:
                        test.fail("Get wrong disk%d "
                                  "snapshot type %s" % (num, dsnap))

            if option_disk.find('driver=') >= 0:
                dtype = disks[num].find('driver').get('type')
                if dtype == disk_dict['driver']:
                    logging.info(
                        "get disk%d driver type same as "
                        "set in diskspec", num)
                else:
                    test.fail("Get wrong disk%d driver "
                              "type %s" % (num, dtype))

            if option_disk.find('file=') >= 0:
                sfile = disks[num].find('source').get('file')
                if sfile == disk_dict['file']:
                    logging.info(
                        "get disk%d source file same as "
                        "set in diskspec", num)
                    if os.path.exists(sfile):
                        os.unlink(sfile)
                else:
                    test.fail("Get wrong disk%d source "
                              "file %s" % (num, sfile))

    # For memspec check if the xml is same as setting
    # Also check if the mem file exists
    if options.find("memspec") >= 0:
        memspec = option_dict['memspec']
        if not re.search('file=', option_dict['memspec']):
            memspec = 'file=' + option_dict['memspec']

        mem_dict = utils_misc.valued_option_dict(memspec, ",", 0, "=")
        logging.debug("mem_dict is %s", mem_dict)

        if no_metadata < 0:
            if memspec.find('snapshot=') >= 0:
                snap = xtf.find('memory').get('snapshot')
                if snap == mem_dict['snapshot']:
                    logging.info("get memory snapshot type same as"
                                 " set in diskspec")
                else:
                    test.fail("Get wrong memory snapshot" " type on print xml")

            memfile = xtf.find('memory').get('file')
            if memfile == mem_dict['file']:
                logging.info("get memory file same as set in " "diskspec")
            else:
                test.fail("Get wrong memory file on " "print xml %s", memfile)

        if options.find("print-xml") < 0:
            if os.path.isfile(mem_dict['file']):
                logging.info("memory file generated")
                os.remove(mem_dict['file'])
            else:
                test.fail("Fail to generate memory file"
                          " %s", mem_dict['file'])
Example #22
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Create an iscsi pool and volume when the disk type is 'volume'
    3. Create an iscsi network/volume disk XML
    4. Attach the disk with the XML file and check the disk inside the VM
    5. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocal = params.get("disk_source_protocal", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")

    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            raise error.TestNAError("'volume' type disk doesn't support in"
                                    + " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocal
            secret_xml.target = secret_usage_target
            logging.debug("Define secret by XML: %s", open(secret_xml.xml).read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                raise error.TestError("Fail to get new created secret uuid")

            # Set secret value
            secret_string = base64.b64encode(chap_passwd)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        iscsi_target = libvirt.setup_or_cleanup_iscsi(is_setup=True,
                                                      is_login=False,
                                                      chap_user=chap_user,
                                                      chap_passwd=chap_passwd)
        # Create iscsi pool
        if disk_type == "volume":
            # Create an iscsi pool xml to create it
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = "/dev/disk/by-path"
            # Create iscsi pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get volume name
            cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            try:
                vol_name = re.findall(r"(\S+)\ +(\S+)[\ +\n]",
                                      str(cmd_result.stdout))[1][0]
            except IndexError:
                raise error.TestError("Fail to get volume name")

        # Create iscsi network disk XML
        disk_params = {'device_type': disk_device,
                       'type_name': disk_type,
                       'target_dev': disk_target,
                       'target_bus': disk_target_bus,
                       'readonly': disk_readonly}
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {'source_protocol': disk_src_protocal,
                               'source_name': iscsi_target + "/1",
                               'source_host_name': disk_src_host,
                               'source_host_port': disk_src_port}
        elif disk_type == "volume":
            disk_params_src = {'source_pool': disk_src_pool,
                               'source_volume': vol_name,
                               'source_mode': disk_src_mode}
        else:
            error.TestNAError("Unsupport disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth:
            disk_params_auth = {'auth_user': chap_user,
                                'secret_type': disk_src_protocal,
                                'secret_usage': secret_xml.target}
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)

        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
        else:
            if not vm.is_dead():
                vm.destroy()
        attach_option = params.get("attach_option", "")
        # Attach the iscsi network disk to domain
        logging.debug("Attach disk by XML: %s", open(disk_xml).read())
        cmd_result = virsh.attach_device(domainarg=vm_name, filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(test.tmpdir, "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_info(vm_name, snapshot_name1,
                                             **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            sn_create_op = "%s --disk_ony %s" % (snapshot_name2, disk_target)
            cmd_result = virsh.snapshot_create_as(vm_name, sn_create_op,
                                                  **virsh_dargs)

            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_delete(vm_name, snapshot_name2,
                                               **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            logging.error("Unsupport operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                raise error.TestError("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError), e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                raise error.TestError("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name, disk_target)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)
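
Note: the snapshot operations exercised above through the virsh helper module map onto plain virsh commands. Below is a minimal standalone sketch of that command sequence, assuming a defined and running domain named demo-vm (a hypothetical name) and a working virsh binary; it is an illustration, not part of the test.

import subprocess

def virsh_cmd(*args):
    """Run a virsh command and return (exit_status, combined output)."""
    proc = subprocess.run(["virsh"] + list(args),
                          stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                          universal_newlines=True)
    return proc.returncode, proc.stdout

domain = "demo-vm"  # hypothetical domain name
for args in (("snapshot-create-as", domain, "snap1"),
             ("snapshot-list", domain),
             ("snapshot-info", domain, "snap1"),
             ("snapshot-revert", domain, "snap1"),
             ("snapshot-delete", domain, "snap1")):
    rc, out = virsh_cmd(*args)
    print(rc, out)
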
Example #23
0
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment (libvirtd service, VM).
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # Back up xml file. Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
    if option.count("managedsave") and vm.is_alive():
        virsh.managedsave(vm_name)

    snp_list = virsh.snapshot_list(vm_name)
    if option.count("snapshot"):
        snp_file_list = []
        if not len(snp_list):
            virsh.snapshot_create(vm_name)
            logging.debug("Create a snapshot for test!")
        else:
            # Backup snapshots for domain
            for snp_item in snp_list:
                tmp_file = os.path.join(test.tmpdir, snp_item+".xml")
                virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
                snp_file_list.append(tmp_file)
    else:
        if len(snp_list):
            raise error.TestNAError("This domain has snapshot(s), "
                                    "cannot be undefined!")

    # Turn libvirtd into certain state.
    if libvirtd_state == "off":
        utils_libvirtd.libvirtd_stop()

    # Test virsh undefine command.
    if vm_ref != "remote":
        vm_ref = "%s %s" % (vm_ref, extra)
        cmdresult = virsh.undefine(vm_ref, option,
                                   ignore_status=True, debug=True)
        status = cmdresult.exit_status
        if status:
            logging.debug("Error status, command output: %s", cmdresult.stdout)
        if undefine_twice:
            status2 = virsh.undefine(vm_ref,
                                     ignore_status=True).exit_status
    else:
        if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
            raise error.TestNAError("remote_ip and/or local_ip parameters not"
                                    " changed from default values")
        try:
            uri = libvirt_vm.complete_uri(local_ip)
            session = remote.remote_login("ssh", remote_ip, "22", remote_user,
                                          remote_pwd, remote_prompt)
            cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
            status, output = session.cmd_status_output(cmd_undefine)
            logging.info("Undefine output: %s", output)
        except (error.CmdError, remote.LoginError, aexpect.ShellError), detail:
            logging.error("Detail: %s", detail)
            status = 1
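
Note: the backup/undefine/recover pattern in this test can be sketched outside the framework with plain virsh calls. The snippet below assumes a hypothetical domain name demo-vm and only illustrates the idea of saving the inactive XML before a destructive undefine and re-defining it afterwards.

import os
import subprocess
import tempfile

domain = "demo-vm"  # hypothetical domain name, illustration only

# Save the inactive definition so it can be restored after the undefine.
xml = subprocess.check_output(["virsh", "dumpxml", "--inactive", domain],
                              universal_newlines=True)
fd, backup_path = tempfile.mkstemp(suffix=".xml")
with os.fdopen(fd, "w") as backup:
    backup.write(xml)

# Undefine the persistent configuration (a running domain keeps running
# and becomes transient).
subprocess.run(["virsh", "undefine", domain], check=True)

# Re-create the persistent definition from the backup.
subprocess.run(["virsh", "define", backup_path], check=True)
os.remove(backup_path)
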
Example #24
0
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment (libvirtd service, VM).
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            raise error.TestNAError("API acl test not supported in current"
                                    " libvirt version.")

    # Back up xml file. Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(test.tmpdir, snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    raise error.TestNAError("This domain has snapshot(s), "
                                            "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                raise error.TestFail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref, option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True, debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                raise error.TestNAError("remote_ip and/or local_ip parameters"
                                        " not changed from default values")
            try:
                uri = libvirt_vm.complete_uri(local_ip)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (error.CmdError, remote.LoginError, aexpect.ShellError), de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except error.CmdError, detail:
                logging.error("Detail: %s", detail)
Example #25
0
def run(test, params, env):
    """
    Attach/Detach an iscsi network/volume disk to domain

    1. For secret usage testing:
        1.1. Setup an iscsi target with CHAP authentication.
        1.2. Define a secret for iscsi target usage
        1.3. Set secret value
    2. Set up an iscsi target
    3. Create an iscsi pool or a block device on the target, depending
       on the disk type
    4. Create an iscsi network disk XML
    5. Attach disk with the XML file and check the disk inside the VM
    6. Detach the disk
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    disk_device = params.get("disk_device", "disk")
    disk_type = params.get("disk_type", "network")
    disk_src_protocol = params.get("disk_source_protocol", "iscsi")
    disk_src_host = params.get("disk_source_host", "127.0.0.1")
    disk_src_port = params.get("disk_source_port", "3260")
    disk_src_pool = params.get("disk_source_pool")
    disk_src_mode = params.get("disk_source_mode", "host")
    pool_type = params.get("pool_type", "iscsi")
    pool_src_host = params.get("pool_source_host", "127.0.0.1")
    pool_target = params.get("pool_target", "/dev/disk/by-path")
    disk_target = params.get("disk_target", "vdb")
    disk_target_bus = params.get("disk_target_bus", "virtio")
    disk_readonly = params.get("disk_readonly", "no")
    chap_auth = "yes" == params.get("chap_auth", "no")
    chap_user = params.get("chap_username", "")
    chap_passwd = params.get("chap_password", "")
    secret_usage_target = params.get("secret_usage_target")
    secret_ephemeral = params.get("secret_ephemeral", "no")
    secret_private = params.get("secret_private", "yes")
    status_error = "yes" == params.get("status_error", "no")
    vg_name = params.get("virt_disk_vg_name", "vg_test_0")
    lv_name = params.get("virt_disk_lv_name", "lv_test_0")
    driver_packed = params.get("driver_packed", "on")
    disk_packed = "yes" == params.get("disk_packed", "no")
    scsi_packed = "yes" == params.get("scsi_packed", "no")

    # Indicate the PPC platform
    on_ppc = False
    if platform.platform().count('ppc64'):
        on_ppc = True

    if disk_src_protocol == 'iscsi':
        if not libvirt_version.version_compare(1, 0, 4):
            test.cancel("The 'iscsi' disk is not supported in"
                        " the current libvirt version.")
    if disk_type == "volume":
        if not libvirt_version.version_compare(1, 0, 5):
            test.cancel("The 'volume' type disk is not supported in"
                        " the current libvirt version.")
    if pool_type == "iscsi-direct":
        if not libvirt_version.version_compare(4, 7, 0):
            test.cancel("iscsi-direct pool is not supported in"
                        " current libvirt version.")
    if ((disk_packed or scsi_packed)
            and not libvirt_version.version_compare(6, 3, 0)):
        test.cancel("The virtio packed attribute is not supported in"
                    " current libvirt version.")
    # Back up VM XML
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Fix no more PCI slots issue in certain cases.
    vm_dump_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    machine_type = params.get("machine_type", "pc")
    if machine_type == 'q35':
        vm_dump_xml.remove_all_device_by_type('controller')
        machine_list = vm_dump_xml.os.machine.split("-")
        vm_dump_xml.set_os_attrs(
            **{"machine": machine_list[0] + "-q35-" + machine_list[2]})
        q35_pcie_dict0 = {
            'controller_model': 'pcie-root',
            'controller_type': 'pci',
            'controller_index': 0
        }
        q35_pcie_dict1 = {
            'controller_model': 'pcie-root-port',
            'controller_type': 'pci'
        }
        vm_dump_xml.add_device(libvirt.create_controller_xml(q35_pcie_dict0))
        # Add enough controllers to support attaching disks multiple times
        for i in list(range(1, 12)):
            q35_pcie_dict1.update({'controller_index': "%d" % i})
            vm_dump_xml.add_device(
                libvirt.create_controller_xml(q35_pcie_dict1))
        vm_dump_xml.sync()

    virsh_dargs = {'debug': True, 'ignore_status': True}
    try:
        start_vm = "yes" == params.get("start_vm", "yes")
        if start_vm:
            if vm.is_dead():
                vm.start()
            vm.wait_for_login()
        else:
            if not vm.is_dead():
                vm.destroy()

        if chap_auth:
            # Create a secret xml to define it
            secret_xml = SecretXML(secret_ephemeral, secret_private)
            secret_xml.auth_type = "chap"
            secret_xml.auth_username = chap_user
            secret_xml.usage = disk_src_protocol
            secret_xml.target = secret_usage_target
            with open(secret_xml.xml) as f:
                logging.debug("Define secret by XML: %s", f.read())
            # Define secret
            cmd_result = virsh.secret_define(secret_xml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            # Get secret uuid
            try:
                secret_uuid = cmd_result.stdout.strip().split()[1]
            except IndexError:
                test.error("Fail to get new created secret uuid")

            # Set secret value
            encoding = locale.getpreferredencoding()
            secret_string = base64.b64encode(
                chap_passwd.encode(encoding)).decode(encoding)
            cmd_result = virsh.secret_set_value(secret_uuid, secret_string,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
        else:
            # Set chap_user and chap_passwd to empty to avoid setting up
            # CHAP authentication when exporting the iscsi target
            chap_user = ""
            chap_passwd = ""

        # Setup iscsi target
        if disk_type == "block":
            iscsi_target = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=True,
                image_size="1G",
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        else:
            iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                is_setup=True,
                is_login=False,
                image_size='1G',
                chap_user=chap_user,
                chap_passwd=chap_passwd,
                portal_ip=disk_src_host)
        # Create iscsi pool
        if disk_type == "volume":
            # Build an iscsi pool XML and use it to create the pool
            pool_src_xml = pool_xml.SourceXML()
            pool_src_xml.host_name = pool_src_host
            pool_src_xml.device_path = iscsi_target
            poolxml = pool_xml.PoolXML(pool_type=pool_type)
            poolxml.name = disk_src_pool
            poolxml.set_source(pool_src_xml)
            poolxml.target_path = pool_target
            if chap_auth:
                pool_src_xml.auth_type = "chap"
                pool_src_xml.auth_username = chap_user
                pool_src_xml.secret_usage = secret_usage_target
                poolxml.set_source(pool_src_xml)
            if pool_type == "iscsi-direct":
                iscsi_initiator = params.get('iscsi_initiator')
                pool_src_xml.iqn_name = iscsi_initiator
                poolxml.set_source(pool_src_xml)
            # Create iscsi/iscsi-direct pool
            cmd_result = virsh.pool_create(poolxml.xml, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            xml = virsh.pool_dumpxml(disk_src_pool)
            logging.debug("Pool '%s' XML:\n%s", disk_src_pool, xml)

            def get_vol():
                """Get the volume info"""
                # Refresh the pool
                cmd_result = virsh.pool_refresh(disk_src_pool)
                libvirt.check_exit_status(cmd_result)
                # Get volume name
                cmd_result = virsh.vol_list(disk_src_pool, **virsh_dargs)
                libvirt.check_exit_status(cmd_result)
                vol_list = []
                vol_list = re.findall(r"(\S+)\ +(\S+)",
                                      str(cmd_result.stdout.strip()))
                if len(vol_list) > 1:
                    return vol_list[1]
                else:
                    return None

            # Wait for a while so that we can get the volume info
            vol_info = utils_misc.wait_for(get_vol, 10)
            if vol_info:
                vol_name, vol_path = vol_info
            else:
                test.error("Failed to get volume info")
            # Snapshots don't support the raw disk format, so create a
            # qcow2 volume disk for the snapshot operation.
            if pool_type == "iscsi":
                process.run('qemu-img create -f qcow2 %s %s' %
                            (vol_path, '100M'),
                            shell=True,
                            verbose=True)
            else:
                # Get iscsi URL to create a qcow2 volume disk
                disk_path = ("iscsi://[%s]/%s/%s" %
                             (disk_src_host, iscsi_target, lun_num))
                blk_source = "/mnt/test.qcow2"
                process.run('qemu-img create -f qcow2 %s %s' %
                            (blk_source, '100M'),
                            shell=True,
                            verbose=True)
                process.run('qemu-img convert -O qcow2 %s %s' %
                            (blk_source, disk_path),
                            shell=True,
                            verbose=True)

        # Create block device
        if disk_type == "block":
            logging.debug("iscsi dev name: %s", iscsi_target)
            lv_utils.vg_create(vg_name, iscsi_target)
            device_source = libvirt.create_local_disk("lvm",
                                                      size="10M",
                                                      vgname=vg_name,
                                                      lvname=lv_name)
            logging.debug("New created volume: %s", lv_name)

        # Create iscsi network disk XML
        disk_params = {
            'device_type': disk_device,
            'type_name': disk_type,
            'target_dev': disk_target,
            'target_bus': disk_target_bus,
            'readonly': disk_readonly
        }
        disk_params_src = {}
        if disk_type == "network":
            disk_params_src = {
                'source_protocol': disk_src_protocol,
                'source_name': iscsi_target + "/%s" % lun_num,
                'source_host_name': disk_src_host,
                'source_host_port': disk_src_port
            }
        elif disk_type == "volume":
            if pool_type == "iscsi":
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2',
                    'source_mode': disk_src_mode
                }
            # The iscsi-direct pool doesn't include the source_mode option
            else:
                disk_params_src = {
                    'source_pool': disk_src_pool,
                    'source_volume': vol_name,
                    'driver_type': 'qcow2'
                }
        elif disk_type == "block":
            disk_params_src = {
                'source_file': device_source,
                'driver_type': 'raw'
            }
            # Start guest with packed attribute in disk
            if disk_packed:
                disk_params_src['driver_packed'] = driver_packed
            # Start guest with packed attribute in scsi controller
            if scsi_packed:
                scsi_controller = Controller("controller")
                scsi_controller.type = "scsi"
                scsi_controller.model = "virtio-scsi"
                scsi_controller.driver = {'packed': driver_packed}
                vm_dump_xml.add_device(scsi_controller)
                vm_dump_xml.sync()
        else:
            test.cancel("Unsupported disk type in this test")
        disk_params.update(disk_params_src)
        if chap_auth and disk_type != "volume":
            disk_params_auth = {
                'auth_user': chap_user,
                'secret_type': disk_src_protocol,
                'secret_usage': secret_xml.target
            }
            disk_params.update(disk_params_auth)
        disk_xml = libvirt.create_disk_xml(disk_params)
        attach_option = params.get("attach_option", "")
        cmd_result = virsh.attach_device(domainarg=vm_name,
                                         filearg=disk_xml,
                                         flagstr=attach_option,
                                         dargs=virsh_dargs)
        libvirt.check_exit_status(cmd_result, status_error)

        if vm.is_dead():
            cmd_result = virsh.start(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

        # Wait for the domain to become stable
        vm.wait_for_login().close()
        domain_operation = params.get("domain_operation", "")
        if domain_operation == "save":
            save_file = os.path.join(data_dir.get_tmp_dir(), "vm.save")
            cmd_result = virsh.save(vm_name, save_file, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)
            cmd_result = virsh.restore(save_file)
            libvirt.check_exit_status(cmd_result)
            if os.path.exists(save_file):
                os.remove(save_file)
        elif domain_operation == "snapshot":
            # Run snapshot related commands: snapshot-create-as, snapshot-list
            # snapshot-info, snapshot-dumpxml, snapshot-create
            # virsh snapshot-revert is not supported on combined internal and external snapshots
            # see more details at https://bugzilla.redhat.com/show_bug.cgi?id=1733173
            snapshot_name1 = "snap1"
            snapshot_name2 = "snap2"
            cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name1,
                                                  **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            try:
                virsh.snapshot_list(vm_name, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots list for %s" % vm_name)

            try:
                virsh.snapshot_info(vm_name, snapshot_name1, **virsh_dargs)
            except process.CmdError:
                test.fail("Failed getting snapshots info for %s" % vm_name)

            cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name1,
                                                **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_create(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            cmd_result = virsh.snapshot_current(vm_name, **virsh_dargs)
            libvirt.check_exit_status(cmd_result)

            virsh.snapshot_create_as(vm_name,
                                     snapshot_name2,
                                     ignore_status=False,
                                     debug=True)

            cmd_result = virsh.snapshot_revert(vm_name, snapshot_name1,
                                               **virsh_dargs)

            cmd_result = virsh.snapshot_list(vm_name, **virsh_dargs)
            if snapshot_name2 not in cmd_result:
                test.error("Snapshot %s not found" % snapshot_name2)
        elif domain_operation == "start_with_packed":
            expect_xml_line = "packed=\"%s\"" % driver_packed
            libvirt.check_dumpxml(vm, expect_xml_line)
            expect_qemu_line = "packed=%s" % driver_packed
            libvirt.check_qemu_cmd_line(expect_qemu_line)
        elif domain_operation == "":
            logging.debug("No domain operation provided, so skip it")
        else:
            logging.error("Unsupported operation %s in this case, so skip it",
                          domain_operation)

        def find_attach_disk(expect=True):
            """
            Find attached disk inside the VM
            """
            found_disk = False
            if vm.is_dead():
                test.error("Domain %s is not running" % vm_name)
            else:
                try:
                    session = vm.wait_for_login()
                    # The script needs to wait a while for the guest to
                    # recognize the hotplugged disk on PPC
                    if on_ppc:
                        time.sleep(10)
                    cmd = "grep %s /proc/partitions" % disk_target
                    s, o = session.cmd_status_output(cmd)
                    logging.info("%s output: %s", cmd, o)
                    session.close()
                    if s == 0:
                        found_disk = True
                except (LoginError, VMError, ShellError) as e:
                    logging.error(str(e))
            if found_disk == expect:
                logging.debug("Check disk inside the VM PASS as expected")
            else:
                test.error("Check disk inside the VM FAIL")

        # Check disk inside the VM, expect is False if status_error=True
        find_attach_disk(not status_error)

        # Detach disk
        cmd_result = virsh.detach_disk(vm_name,
                                       disk_target,
                                       wait_remove_event=True)
        libvirt.check_exit_status(cmd_result, status_error)

        # Check disk inside the VM
        find_attach_disk(False)

    finally:
        # Clean up snapshot
        # Shut down before cleaning up snapshots
        if vm.is_alive():
            vm.destroy()
        libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)
        # Restore vm
        vmxml_backup.sync("--snapshots-metadata")
        # Destroy pool and undefine secret, which may not exist
        try:
            if disk_type == "volume":
                virsh.pool_destroy(disk_src_pool)
            if disk_type == "block":
                clean_up_lvm(iscsi_target, vg_name, lv_name)
            if chap_auth:
                virsh.secret_undefine(secret_uuid)
        except Exception:
            pass
        libvirt.setup_or_cleanup_iscsi(is_setup=False)
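
Note: the CHAP secret handling in this test boils down to defining a libvirt secret for iscsi usage and loading the base64-encoded password into it. A minimal standalone sketch follows; the usage target and password below are placeholders, not values from the test.

import base64
import subprocess
import tempfile

# Placeholder secret XML for iscsi usage.
secret_xml = """<secret ephemeral='no' private='yes'>
  <usage type='iscsi'>
    <target>libvirtiscsi</target>
  </usage>
</secret>"""

with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as f:
    f.write(secret_xml)
    xml_path = f.name

# "virsh secret-define" prints "Secret <uuid> created"; grab the uuid.
out = subprocess.check_output(["virsh", "secret-define", xml_path],
                              universal_newlines=True)
secret_uuid = out.split()[1]

# Load the CHAP password into the secret as a base64 string.
password = base64.b64encode(b"example-password").decode()
subprocess.run(["virsh", "secret-set-value", secret_uuid, password],
               check=True)
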
Example #26
0
            result = virsh.snapshot_create_as(vm_name, opt)
            if result.exit_status:
                raise error.TestFail("Failed to create snapshot. Error:%s." %
                                     result.stderr.strip())
            time.sleep(1)

        snapshot_oldlist = virsh.snapshot_list(vm_name)

        # Get the snapshot xml before edit
        if len(snap_name) > 0:
            pre_name = check_name = snap_name
        else:
            cmd_result = virsh.snapshot_current(vm_name)
            pre_name = check_name = cmd_result.stdout.strip()

        ret = virsh.snapshot_dumpxml(vm_name, pre_name)
        if ret.exit_status == 0:
            pre_xml = ret.stdout
        else:
            raise error.TestFail("Fail to dumpxml of snapshot %s:%s" %
                                 (pre_name, ret.stderr.strip()))

        edit_cmd = []
        # Delete description element first then add it back with replacement
        edit_cmd.append(":g/<description>.*</d")
        replace_cmd = '%s<\/name>/%s<\/name>' % (pre_name, pre_name)
        replace_cmd += '\\r<description>%s<\/description>' % snap_desc
        replace_cmd = ":%s/" + replace_cmd + "/"
        edit_cmd.append(replace_cmd)
        # if have --clone or --rename, need to change snapshot name in xml
        if len(snap_opt) > 0:
Example #27
0
def run_virsh_snapshot_create_as(test, params, env):
    """
    Test snapshot-create-as command
    Make sure that a usable package repo is configured in the guest because
    qemu-guest-agent needs to be installed there

    The command creates a snapshot (disk and RAM) from arguments, covering
    the following points:
    * virsh snapshot-create-as --print-xml --diskspec --name --description
    * virsh snapshot-create-as --print-xml with multi --diskspec
    * virsh snapshot-create-as --print-xml --memspec
    * virsh snapshot-create-as --description
    * virsh snapshot-create-as --no-metadata
    * virsh snapshot-create-as --no-metadata --print-xml (negative test)
    * virsh snapshot-create-as --atomic --disk-only
    * virsh snapshot-create-as --quiesce --disk-only (positive and negative)
    * virsh snapshot-create-as --reuse-external
    * virsh snapshot-create-as --disk-only --diskspec
    * virsh snapshot-create-as --memspec --reuse-external --atomic (negative)
    * virsh snapshot-create-as --disk-only and --memspec (negative)
    * Create multi snapshots with snapshot-create-as
    * Create snapshot with name a--a a--a--snap1
    """

    if not virsh.has_help_command('snapshot-create-as'):
        raise error.TestNAError("This version of libvirt does not support "
                                "the snapshot-create-as test")

    vm_name = params.get("main_vm")
    status_error = params.get("status_error", "no")
    options = params.get("snap_createas_opts")
    multi_num = params.get("multi_num", "1")
    diskspec_num = params.get("diskspec_num", "1")
    bad_disk = params.get("bad_disk")
    external_disk = params.get("external_disk")
    start_ga = params.get("start_ga", "yes")
    domain_state = params.get("domain_state")
    memspec_opts = params.get("memspec_opts")
    diskspec_opts = params.get("diskspec_opts")

    opt_names = locals()
    if memspec_opts is not None:
        mem_options = compose_disk_options(test, params, memspec_opts)
        # If the parameter has no "file=" part, we only need to prepend
        # the test directory to it.
        if mem_options is None:
            mem_options = os.path.join(test.virtdir, memspec_opts)
        options += " --memspec " + mem_options

    tag_diskspec = 0
    dnum = int(diskspec_num)
    if diskspec_opts is not None:
        tag_diskspec = 1
        opt_names['diskopts_1'] = diskspec_opts

    # diskspec_opts[n] is used in cfg when more than 1 --diskspec is used
    if dnum > 1:
        tag_diskspec = 1
        for i in range(1, dnum + 1):
            opt_names["diskopts_%s" % i] = params.get("diskspec_opts%s" % i)

    if tag_diskspec == 1:
        for i in range(1, dnum + 1):
            disk_options = compose_disk_options(test, params,
                                                opt_names["diskopts_%s" % i])
            options += " --diskspec " + disk_options

    logging.debug("options are %s", options)

    vm = env.get_vm(vm_name)
    option_dict = {}
    option_dict = utils_misc.valued_option_dict(options, r' --(?!-)')
    logging.debug("option_dict is %s", option_dict)

    # A backup of original vm
    vmxml_backup = vm_xml.VMXML.new_from_dumpxml(vm_name)
    logging.debug("original xml is %s", vmxml_backup)

    # Generate empty image for negative test
    if bad_disk is not None:
        bad_disk = os.path.join(test.virtdir, bad_disk)
        os.open(bad_disk, os.O_RDWR | os.O_CREAT)

    # Generate external disk
    if external_disk is not None:
        external_disk = os.path.join(test.virtdir, external_disk)
        commands.getoutput("qemu-img create -f qcow2 %s 1G" % external_disk)

    # Start qemu-ga in the guest if --quiesce is given
    if options.find("quiesce") >= 0:
        if vm.is_alive():
            vm.destroy()
        virt_xml_obj = libvirt_xml.VMXML(virsh_instance=virsh)
        virt_xml_obj.set_agent_channel(vm_name)
        vm.start()
        if start_ga == "yes":
            session = vm.wait_for_login()

            # Make sure qemu-guest-agent is installed (install it if missing)
            cmd = "rpm -q qemu-guest-agent || yum install -y qemu-guest-agent"
            stat_install = session.cmd_status(cmd, 300)
            if stat_install != 0:
                xml_recover(vmxml_backup)
                raise error.TestFail("Fail to install qemu-guest-agent, make"
                                     "sure that you have usable repo in guest")

            # Check if qemu-ga already started
            stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
            if stat_ps != 0:
                session.cmd("qemu-ga -d")
                # Check if the qemu-ga really started
                stat_ps = session.cmd_status("ps aux |grep [q]emu-ga")
                if stat_ps != 0:
                    xml_recover(vmxml_backup)
                    raise error.TestFail("Fail to run qemu-ga in guest")

        if domain_state == "paused":
            virsh.suspend(vm_name)

    # Record the previous snapshot-list
    snaps_before = virsh.snapshot_list(vm_name)

    # Run virsh command
    # May create several snapshots, according to configuration
    for count in range(int(multi_num)):
        cmd_result = virsh.snapshot_create_as(vm_name, options,
                                              ignore_status=True, debug=True)
        output = cmd_result.stdout.strip()
        status = cmd_result.exit_status

        # check status_error
        if status_error == "yes":
            if status == 0:
                xml_recover(vmxml_backup)
                raise error.TestFail("Run successfully with wrong command!")
            else:
                # Check memspec file should be removed if failed
                if (options.find("memspec") >= 0
                    and options.find("atomic") >= 0):
                    if os.path.isfile(option_dict['memspec']):
                        os.remove(option_dict['memspec'])
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Run failed but file %s exist"
                                             % option_dict['memspec'])
                    else:
                        logging.info("Run failed as expected and memspec file"
                                     " already beed removed")
                else:
                    logging.info("Run failed as expected")

        elif status_error == "no":
            if status != 0:
                xml_recover(vmxml_backup)
                raise error.TestFail("Run failed with right command: %s"
                                     % output)
            else:
                # Check the special options
                snaps_list = virsh.snapshot_list(vm_name)
                logging.debug("snaps_list is %s", snaps_list)

                no_metadata = options.find("--no-metadata")
                fdisks = "disks"

                # command with print-xml will not really create snapshot
                if options.find("print-xml") >= 0:
                    xtf = xml_utils.XMLTreeFile(output)

                    # With --print-xml there isn't new snapshot created
                    if len(snaps_before) != len(snaps_list):
                        xml_recover(vmxml_backup)
                        raise error.TestFail("--print-xml create new snapshot")

                else:
                    # The following does not check with print-xml
                    get_sname = output.split()[2]

                    # check domain/snapshot xml depends on if have metadata
                    if no_metadata < 0:
                        output_dump = virsh.snapshot_dumpxml(vm_name, get_sname)
                    else:
                        output_dump = virsh.dumpxml(vm_name)
                        fdisks = "devices"

                    xtf = xml_utils.XMLTreeFile(output_dump)

                    find = 0
                    for snap in snaps_list:
                        if snap == get_sname:
                            find = 1
                            break

                    # Should find snap in snaplist without --no-metadata
                    if (find == 0 and no_metadata < 0):
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Can not find snapshot %s!"
                                             % get_sname)
                    # Should not find snap in list without metadata
                    elif (find == 1 and no_metadata >= 0):
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Can find snapshot metadata even "
                                             "if have --no-metadata")
                    elif (find == 0 and no_metadata >= 0):
                        logging.info("Can not find snapshot %s as no-metadata "
                                     "is given" % get_sname)

                        # Check snapshot only in qemu-img
                        if (options.find("--disk-only") < 0
                            and options.find("--memspec") < 0):
                            ret = check_snap_in_image(vm_name, get_sname)

                            if ret == False:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("No snap info in image")

                    else:
                        logging.info("Find snapshot %s in snapshot list."
                                     % get_sname)

                    # Check if the disk file exist when disk-only is given
                    if options.find("disk-only") >= 0:
                        for disk in xtf.find(fdisks).findall('disk'):
                            diskpath = disk.find('source').get('file')
                            if os.path.isfile(diskpath):
                                logging.info("disk file %s exist" % diskpath)
                                os.remove(diskpath)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Can not find disk %s"
                                                     % diskpath)

                    # Check if the guest is halted when 'halt' is given
                    if options.find("halt") >= 0:
                        domstate = virsh.domstate(vm_name)
                        if re.match("shut off", domstate.stdout):
                            logging.info("Domain is halted after create "
                                         "snapshot")
                        else:
                            xml_recover(vmxml_backup)
                            raise error.TestFail("Domain is not halted after "
                                                 "snapshot created")



                # Check the snapshot xml regardless of having print-xml or not
                if (options.find("name") >= 0 and no_metadata < 0):
                    if xtf.findtext('name') == option_dict["name"]:
                        logging.info("get snapshot name same as set")
                    else:
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Get wrong snapshot name %s" %
                                             xtf.findtext('name'))

                if (options.find("description") >= 0 and no_metadata < 0):
                    desc = xtf.findtext('description')
                    if desc == option_dict["description"]:
                        logging.info("get snapshot description same as set")
                    else:
                        xml_recover(vmxml_backup)
                        raise error.TestFail("Get wrong description on xml")

                if options.find("diskspec") >= 0:
                    if isinstance(option_dict['diskspec'], list):
                        index = len(option_dict['diskspec'])
                    else:
                        index = 1

                    disks = xtf.find(fdisks).findall('disk')

                    for num in range(index):
                        if isinstance(option_dict['diskspec'], list):
                            option_disk = option_dict['diskspec'][num]
                        else:
                            option_disk = option_dict['diskspec']

                        option_disk = "name=" + option_disk
                        disk_dict = utils_misc.valued_option_dict(option_disk,
                                                                  ",", 0, "=")
                        logging.debug("disk_dict is %s", disk_dict)

                        # For no metadata snapshot do not check name and
                        # snapshot
                        if no_metadata < 0:
                            dname = disks[num].get('name')
                            logging.debug("dname is %s", dname)
                            if dname == disk_dict['name']:
                                logging.info("get disk%d name same as set in "
                                             "diskspec", num)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong disk%d name %s"
                                                      % num, dname)

                            if option_disk.find('snapshot=') >= 0:
                                dsnap = disks[num].get('snapshot')
                                logging.debug("dsnap is %s", dsnap)
                                if dsnap == disk_dict['snapshot']:
                                    logging.info("get disk%d snapshot type same"
                                                 " as set in diskspec", num)
                                else:
                                    xml_recover(vmxml_backup)
                                    raise error.TestFail("Get wrong disk%d "
                                                         "snapshot type %s" %
                                                          num, dsnap)

                        if option_disk.find('driver=') >= 0:
                            dtype = disks[num].find('driver').get('type')
                            if dtype == disk_dict['driver']:
                                logging.info("get disk%d driver type same as "
                                             "set in diskspec", num)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong disk%d driver "
                                                     "type %s" % num, dtype)

                        if option_disk.find('file=') >= 0:
                            sfile = disks[num].find('source').get('file')
                            if sfile == disk_dict['file']:
                                logging.info("get disk%d source file same as "
                                             "set in diskspec", num)
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong disk%d source "
                                                     "file %s" % (num, sfile))


                # For memspec check if the xml is same as setting
                # Also check if the mem file exists
                if options.find("memspec") >= 0:
                    memspec = option_dict['memspec']
                    if not re.search('file=', option_dict['memspec']):
                        memspec = 'file=' + option_dict['memspec']

                    mem_dict = utils_misc.valued_option_dict(memspec, ",", 0,
                                                             "=")
                    logging.debug("mem_dict is %s", mem_dict)

                    if no_metadata < 0:
                        if memspec.find('snapshot=') >= 0:
                            snap = xtf.find('memory').get('snapshot')
                            if snap == mem_dict['snapshot']:
                                logging.info("get memory snapshot type same as"
                                             " set in diskspec")
                            else:
                                xml_recover(vmxml_backup)
                                raise error.TestFail("Get wrong memory snapshot"
                                                     " type on print xml")

                        memfile = xtf.find('memory').get('file')
                        if memfile == mem_dict['file']:
                            logging.info("get memory file same as set in "
                                         "diskspec")
                        else:
                            xml_recover(vmxml_backup)
                            raise error.TestFail("Get wrong memory file on "
                                                 "print xml %s", memfile)

                    if options.find("print-xml") < 0:
                        if os.path.isfile(mem_dict['file']):
                            logging.info("memory file generated")
                            os.remove(mem_dict['file'])
                        else:
                            xml_recover(vmxml_backup)
                            raise error.TestFail("Fail to generate memory file"
                                                 " %s", mem_dict['file'])

    # Environment cleanup
    if options.find("quiesce") >= 0 and start_ga == "yes":
        session.cmd("rpm -e qemu-guest-agent")

    # recover domain xml
    xml_recover(vmxml_backup)
    path = "/var/lib/libvirt/qemu/snapshot/" + vm_name
    if os.path.isfile(path):
        raise error.TestFail("Still can find snapshot metadata")

    # rm bad disks
    if bad_disk is not None:
        os.remove(bad_disk)
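
Note: the --diskspec and --memspec options tested above take comma-separated key=value fields. A minimal sketch of the corresponding plain virsh invocations, assuming a hypothetical domain demo-vm with a disk named vda and placeholder file paths:

import subprocess

domain = "demo-vm"  # hypothetical domain name

# External disk-only snapshot of one disk.
subprocess.run(["virsh", "snapshot-create-as", domain, "snap-disk",
                "--disk-only",
                "--diskspec", "vda,snapshot=external,file=/tmp/vda.snap"],
               check=True)

# Snapshot that also writes guest memory to an external file.
subprocess.run(["virsh", "snapshot-create-as", domain, "snap-mem",
                "--memspec", "file=/tmp/mem.snap,snapshot=external",
                "--diskspec", "vda,snapshot=external,file=/tmp/vda2.snap"],
               check=True)

# --print-xml only shows the snapshot XML; nothing is created.
subprocess.run(["virsh", "snapshot-create-as", domain, "preview",
                "--print-xml",
                "--diskspec", "vda,file=/tmp/preview.snap"], check=True)
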
Example #28
0
    cmd_result = virsh.snapshot_create_as(vm_name, snapshot_name)
    libvirt.check_exit_status(cmd_result)

    def check_info(s1, s2, errorstr="Values differ"):
        if s1 != s2:
            raise error.TestFail("%s (%s != %s)" % (errorstr, s1, s2))
    try:
        ss_info = virsh.snapshot_info(vm_name, snapshot_name)
        check_info(ss_info["Name"], snapshot_name, "Incorrect snapshot name")
        check_info(ss_info["Domain"], vm_name, "Incorrect domain name")
    except error.CmdError, e:
        raise error.TestFail(str(e))
    except error.TestFail, e:
        raise error.TestFail(str(e))

    cmd_result = virsh.snapshot_dumpxml(vm_name, snapshot_name, to_file=file)
    libvirt.check_exit_status(cmd_result)


def network_validate(net_name, file=None, **virsh_dargs):
    """
    Test for schema network
    """
    if net_name is None:
        raise error.TestNAError("None network is specified.")

    # Confirm the network exists.
    output = virsh.net_list("--all").stdout.strip()
    if not re.search(net_name, output):
        raise error.TestNAError("Make sure the network exists!!")
Example #29
0
def run(test, params, env):
    """
    Test virsh undefine command.

    Undefine an inactive domain, or convert persistent to transient.
    1.Prepare test environment.
    2.Backup the VM's information to a xml file.
    3.When the libvirtd == "off", stop the libvirtd service.
    4.Perform virsh undefine operation.
    5.Recover test environment (libvirtd service, VM).
    6.Confirm the test result.
    """

    vm_ref = params.get("undefine_vm_ref", "vm_name")
    extra = params.get("undefine_extra", "")
    option = params.get("undefine_option", "")
    libvirtd_state = params.get("libvirtd", "on")
    status_error = ("yes" == params.get("status_error", "no"))
    undefine_twice = ("yes" == params.get("undefine_twice", 'no'))
    local_ip = params.get("local_ip", "LOCAL.EXAMPLE.COM")
    local_pwd = params.get("local_pwd", "password")
    remote_ip = params.get("remote_ip", "REMOTE.EXAMPLE.COM")
    remote_user = params.get("remote_user", "user")
    remote_pwd = params.get("remote_pwd", "password")
    remote_prompt = params.get("remote_prompt", "#")
    pool_type = params.get("pool_type")
    pool_name = params.get("pool_name", "test")
    pool_target = params.get("pool_target")
    volume_size = params.get("volume_size", "1G")
    vol_name = params.get("vol_name", "test_vol")
    emulated_img = params.get("emulated_img", "emulated_img")
    emulated_size = "%sG" % (int(volume_size[:-1]) + 1)
    disk_target = params.get("disk_target", "vdb")
    wipe_data = "yes" == params.get("wipe_data", "no")
    if wipe_data:
        option += " --wipe-storage"
    nvram_o = None
    if platform.machine() == 'aarch64':
        nvram_o = " --nvram"
        option += nvram_o

    vm_name = params.get("main_vm", "avocado-vt-vm1")
    vm = env.get_vm(vm_name)
    vm_id = vm.get_id()
    vm_uuid = vm.get_uuid()

    # polkit acl related params
    uri = params.get("virsh_uri")
    unprivileged_user = params.get('unprivileged_user')
    if unprivileged_user:
        if unprivileged_user.count('EXAMPLE'):
            unprivileged_user = '******'

    if not libvirt_version.version_compare(1, 1, 1):
        if params.get('setup_libvirt_polkit') == 'yes':
            test.cancel("API acl test not supported in current"
                        " libvirt version.")

    # Back up xml file. Xen host has no guest xml file to define a guest.
    backup_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Confirm how to reference a VM.
    if vm_ref == "vm_name":
        vm_ref = vm_name
    elif vm_ref == "id":
        vm_ref = vm_id
    elif vm_ref == "hex_vm_id":
        vm_ref = hex(int(vm_id))
    elif vm_ref == "uuid":
        vm_ref = vm_uuid
    elif vm_ref.find("invalid") != -1:
        vm_ref = params.get(vm_ref)

    volume = None
    pvtest = None
    status3 = None

    elems = backup_xml.xmltreefile.findall('/devices/disk/source')
    existing_images = [elem.get('file') for elem in elems]

    # Backup images since remove-all-storage could remove existing libvirt
    # managed guest images
    if existing_images and option.count("remove-all-storage"):
        for img in existing_images:
            backup_img = img + '.bak'
            logging.info('Backup %s to %s', img, backup_img)
            shutil.copyfile(img, backup_img)

    try:
        save_file = "/var/lib/libvirt/qemu/save/%s.save" % vm_name
        if option.count("managedsave") and vm.is_alive():
            virsh.managedsave(vm_name)

        if not vm.is_lxc():
            snp_list = virsh.snapshot_list(vm_name)
            if option.count("snapshot"):
                snp_file_list = []
                if not len(snp_list):
                    virsh.snapshot_create(vm_name)
                    logging.debug("Create a snapshot for test!")
                else:
                    # Backup snapshots for domain
                    for snp_item in snp_list:
                        tmp_file = os.path.join(data_dir.get_tmp_dir(), snp_item + ".xml")
                        virsh.snapshot_dumpxml(vm_name, snp_item, to_file=tmp_file)
                        snp_file_list.append(tmp_file)
            else:
                if len(snp_list):
                    test.cancel("This domain has snapshot(s), "
                                "cannot be undefined!")
        if option.count("remove-all-storage"):
            pvtest = utlv.PoolVolumeTest(test, params)
            pvtest.pre_pool(pool_name, pool_type, pool_target, emulated_img,
                            emulated_size=emulated_size)
            new_pool = libvirt_storage.PoolVolume(pool_name)
            if not new_pool.create_volume(vol_name, volume_size):
                test.fail("Creation of volume %s failed." % vol_name)
            volumes = new_pool.list_volumes()
            volume = volumes[vol_name]
            virsh.attach_disk(vm_name, volume, disk_target, "--config")

        # Turn libvirtd into certain state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_stop()

        # Test virsh undefine command.
        output = ""
        if vm_ref != "remote":
            vm_ref = "%s %s" % (vm_ref, extra)
            cmdresult = virsh.undefine(vm_ref, option,
                                       unprivileged_user=unprivileged_user,
                                       uri=uri,
                                       ignore_status=True, debug=True)
            status = cmdresult.exit_status
            output = cmdresult.stdout.strip()
            if status:
                logging.debug("Error status, command output: %s",
                              cmdresult.stderr.strip())
            if undefine_twice:
                status2 = virsh.undefine(vm_ref, nvram_o,
                                         ignore_status=True).exit_status
        else:
            if remote_ip.count("EXAMPLE.COM") or local_ip.count("EXAMPLE.COM"):
                test.cancel("remote_ip and/or local_ip parameters"
                            " not changed from default values")
            try:
                local_user = params.get("username", "root")
                uri = libvirt_vm.complete_uri(local_ip)
                # setup ssh auto login from remote machine to test machine
                # for the command to execute remotely
                ssh_key.setup_remote_ssh_key(remote_ip, remote_user,
                                             remote_pwd, hostname2=local_ip,
                                             user2=local_user,
                                             password2=local_pwd)
                session = remote.remote_login("ssh", remote_ip, "22",
                                              remote_user, remote_pwd,
                                              remote_prompt)
                cmd_undefine = "virsh -c %s undefine %s" % (uri, vm_name)
                status, output = session.cmd_status_output(cmd_undefine)
                logging.info("Undefine output: %s", output)
            except (process.CmdError, remote.LoginError, aexpect.ShellError) as de:
                logging.error("Detail: %s", de)
                status = 1

        # Recover libvirtd state.
        if libvirtd_state == "off":
            utils_libvirtd.libvirtd_start()

        # Shutdown VM.
        if virsh.domain_exists(vm.name):
            try:
                if vm.is_alive():
                    vm.destroy(gracefully=False)
            except process.CmdError as detail:
                logging.error("Detail: %s", detail)

        # Right after vm.destroy, virsh.domain_exists may still return True
        # because of a timing issue, which would make the test fail.
        time.sleep(2)
        # Check if VM exists.
        vm_exist = virsh.domain_exists(vm_name)

        # Check if xml file exists.
        xml_exist = False
        if vm.is_qemu() and os.path.exists("/etc/libvirt/qemu/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_lxc() and os.path.exists("/etc/libvirt/lxc/%s.xml" % vm_name):
            xml_exist = True
        if vm.is_xen() and os.path.exists("/etc/xen/%s" % vm_name):
            xml_exist = True

        # Check if the managed-save file still exists (for --managed-save).
        save_exist = os.path.exists(save_file)

        # Check if the attached volume still exists (for --remove-all-storage).
        volume_exist = volume and os.path.exists(volume)

        # Test define with acl control and recover domain.
        if params.get('setup_libvirt_polkit') == 'yes':
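            # With polkit ACL rules in place, make the backup XML readable and
            # try to define it as the unprivileged user; status3 is checked
            # against the expected ACL outcome below.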
            if virsh.domain_exists(vm.name):
                virsh.undefine(vm_ref, nvram_o, ignore_status=True)
            cmd = "chmod 666 %s" % backup_xml.xml
            process.run(cmd, ignore_status=False, shell=True)
            s_define = virsh.define(backup_xml.xml,
                                    unprivileged_user=unprivileged_user,
                                    uri=uri, ignore_status=True, debug=True)
            status3 = s_define.exit_status

    finally:
        # Recover main VM.
        try:
            backup_xml.sync()
        except LibvirtXMLError:
            # sync() undefines and re-defines the domain from the backup XML,
            # but the undefine test may have removed the domain already, which
            # can make sync() error out; fall back to a plain define().
            backup_xml.define()

        # Recover existing guest images
        if existing_images and option.count("remove-all-storage"):
            for img in existing_images:
                backup_img = img + '.bak'
                logging.info('Recover image %s to %s', backup_img, img)
                shutil.move(backup_img, img)

        # Clean up pool
        if pvtest:
            pvtest.cleanup_pool(pool_name, pool_type,
                                pool_target, emulated_img)
        # Recover VM snapshots.
        if option.count("snapshot") and (not vm.is_lxc()):
            logging.debug("Recover snapshots for domain!")
            for file_item in snp_file_list:
                virsh.snapshot_create(vm_name, file_item)

    # Check results.
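    # Compare the collected exit statuses and side effects against the
    # expected outcome (status_error) and the options used.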
    if status_error:
        if not status:
            test.fail("virsh undefine return unexpected result.")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3 == 0:
                test.fail("virsh define with false acl permission" +
                          " should failed.")
    else:
        if status:
            test.fail("virsh undefine failed.")
        if undefine_twice:
            if not status2:
                test.fail("Undefine the same VM twice succeeded.")
        if vm_exist:
            test.fail("VM still exists after undefine.")
        if xml_exist:
            test.fail("Xml file still exists after undefine.")
        if option.count("managedsave") and save_exist:
            test.fail("Save file still exists after undefine.")
        if option.count("remove-all-storage") and volume_exist:
            test.fail("Volume file '%s' still exists after"
                      " undefine." % volume)
        if wipe_data and option.count("remove-all-storage"):
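            # With "--wipe-storage" virsh is expected to report each wiped
            # volume, so the output should mention the attached disk target.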
            if not output.count("Wiping volume '%s'" % disk_target):
                test.fail("Command didn't wipe volume storage!")
        if params.get('setup_libvirt_polkit') == 'yes':
            if status3:
                test.fail("virsh define with right acl permission" +
                          " should succeeded")