else:
            dev_attrs = "dev"
        disk_xml.source = disk_xml.new_disk_source(
            **{"attrs": {dev_attrs: volume_target_path}})
        disk_xml.driver = {"name": "qemu", "type": volume_target_format,
                           "cache": "none"}
        disk_xml.target = {"dev": device_target, "bus": device_bus}

        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuid.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format, v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            disk_xml.encryption = disk_xml.new_encryption(
                **{"encryption": v_xml.encryption.format, "secret": {
                   "type": v_xml.encryption.secret["type"],
                   "uuid": v_xml.encryption.secret["uuid"]}})

        # Sync VM xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()

        try:
            # Start the VM and check status.
            vm.start()
            if status_error:
                raise error.TestFail("VM started unexpectedly.")

            if not check_in_vm(vm, device_target):
                raise error.TestFail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                logging.debug("VM failed to start as expected. Error: %s",
                              str(e))
            else:
                raise error.TestFail("VM failed to start. Error: %s" % str(e))
def run(test, params, env):
    """
    Test disk encryption option.

    1. Prepare test environment, destroy or suspend a VM.
    2. Prepare pool, volume.
    3. Edit disk xml and start the domain.
    4. Perform test operation.
    5. Recover test environment.
    6. Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name: Pool name.
        :param p_type: Pool type.
        :param p_target: Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param target_encrypt_params: Encryption parameters dict.
        :param vol_params: Volume parameters dict.
        """
        # Clean up any leftover volumes in the pool.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_secret(vol_path):
        """
        Create secret.

        :param vol_path: Volume path.
        :return: Secret uuid if created successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        libvirt.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError as e:
            test.error("Fail to get newly created secret uuid")
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(
            secret_password_no_encoded.encode(encoding)).decode(encoding)
        ret = virsh.secret_set_value(encryption_uuid, secret_string,
                                     **virsh_dargs)
        libvirt.check_exit_status(ret)
        return encryption_uuid

    def get_secret_list():
        """
        Get secret list.

        :return: secret list
        """
        logging.info("Get secret list ...")
        secret_list = virsh.secret_list().stdout.strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # If secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace and take the first column (the UUID)
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param vm: VM guest.
        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status(
                "rpm -q parted || "
                "yum install -y parted", 300)
            if rpm_stat != 0:
                test.fail("Failed to query/install parted, make sure"
                          " that you have usable repo in guest")

            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            device_source = os.path.join(os.sep, 'dev', added_part)
            libvirt.mk_label(device_source, session=session)
            libvirt.mk_part(device_source, size="10M", session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    encryption_in_source = "yes" == params.get("encryption_in_source")
    encryption_out_source = "yes" == params.get("encryption_out_source")
    if encryption_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Cannot put <encryption> inside disk <source> in "
                    "this libvirt version.")
    # Pool/Volume options.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    volume_name = params.get("vol_name")
    volume_alloc = params.get("vol_alloc")
    volume_cap_unit = params.get("vol_cap_unit")
    volume_cap = params.get("vol_cap")
    volume_target_path = params.get("target_path")
    volume_target_format = params.get("target_format")
    volume_target_encypt = params.get("target_encypt", "")
    volume_target_label = params.get("target_label")

    hotplug = "yes" == params.get("virt_disk_device_hotplug")
    status_error = "yes" == params.get("status_error")
    secret_type = params.get("secret_type", "passphrase")
    secret_password_no_encoded = params.get("secret_password_no_encoded",
                                            "redhat")
    virt_disk_qcow2_format = "yes" == params.get("virt_disk_qcow2_format")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    sec_encryption_uuid = None
    try:
        # Prepare the disk.
        sec_uuids = []
        # Clean up leftover secrets in the test environment, if any.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {
            "name": volume_name,
            "capacity": int(volume_cap),
            "allocation": int(volume_alloc),
            "format": volume_target_format,
            "path": volume_target_path,
            "label": volume_target_label,
            "capacity_unit": volume_cap_unit
        }
        vol_encryption_params = {}
        vol_encryption_params.update({"format": volume_target_encypt})
        # For any disk format other than qcow2, a secret needs to be created first.
        if not virt_disk_qcow2_format:
            # create secret.
            sec_encryption_uuid = create_secret(volume_target_path)
            sec_uuids.append(sec_encryption_uuid)
            vol_encryption_params.update(
                {"secret": {
                    "type": secret_type,
                    "uuid": sec_encryption_uuid
                }})
        try:
            # If libvirt version is lower than 2.5.0, creating a luks
            # encryption volume is not supported, so skip it.
            create_vol(pool_name, vol_encryption_params, vol_params)
        except AssertionError as info:
            err_msgs = ("create: invalid option")
            if str(info).count(err_msgs):
                test.error("Creating luks encryption volume "
                           "is not supported on this libvirt version")
            else:
                test.error("Failed to create volume." "Error: %s" % str(info))
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_source = disk_xml.new_disk_source(
            **{"attrs": {
                dev_attrs: volume_target_path
            }})
        disk_xml.driver = {
            "name": "qemu",
            "type": volume_target_format,
            "cache": "none"
        }
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuids.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format,
                          v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            encryption_dict = {
                "encryption": v_xml.encryption.format,
                "secret": {
                    "type": v_xml.encryption.secret["type"],
                    "uuid": v_xml.encryption.secret["uuid"]
                }
            }
            if encryption_in_source:
                disk_source.encryption = disk_xml.new_encryption(
                    **encryption_dict)
            if encryption_out_source:
                disk_xml.encryption = disk_xml.new_encryption(
                    **encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(disk_xml)
            vmxml.sync()

        try:
            # Start the VM and do disk hotplug if required,
            # then check disk status in VM.
            # Note that a LUKS-encrypted virtual disk without <encryption>
            # can still be started or attached normally, since qemu just
            # treats it as RAW, so we don't test LUKS with status_error=TRUE.
            vm.start()
            vm.wait_for_login()
            if status_error:
                if hotplug:
                    logging.debug("attaching disk, expecting error...")
                    result = virsh.attach_device(vm_name, disk_xml.xml)
                    libvirt.check_exit_status(result, status_error)
                else:
                    test.fail("VM started unexpectedly.")
            else:
                if hotplug:
                    result = virsh.attach_device(vm_name,
                                                 disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
                    result = virsh.detach_device(vm_name,
                                                 disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                else:
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                if hotplug:
                    test.fail("In hotplug scenario, VM should have started "
                              "successfully but did not. Error: %s" % str(e))
                else:
                    logging.debug("VM failed to start as expected. "
                                  "Error: %s", str(e))
            else:
                # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images
                # are no longer supported.
                err_msgs = ("AES-CBC encrypted qcow2 images is"
                            " no longer supported in system emulators")
                if str(e).count(err_msgs):
                    test.cancel(err_msgs)
                else:
                    test.fail("VM failed to start." "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up pool, vol
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
            virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)
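
# The helper-heavy flow above ultimately just decides where the <encryption>
# element lands in the disk definition. Below is a minimal standalone sketch of
# that difference, reusing the same virttest Disk API as the test; the import
# path, image path and secret UUID are illustrative assumptions, not values
# taken from the test itself.
from virttest.libvirt_xml.devices.disk import Disk

encryption_dict = {"encryption": "luks",
                   "secret": {"type": "passphrase",
                              "uuid": "11111111-2222-3333-4444-555555555555"}}

demo_disk = Disk(type_name="file")
demo_disk.device = "disk"
demo_disk.target = {"dev": "vdd", "bus": "virtio"}
demo_disk.driver = {"name": "qemu", "type": "qcow2", "cache": "none"}
demo_source = demo_disk.new_disk_source(
    **{"attrs": {"file": "/var/lib/libvirt/images/luks-demo.img"}})

# encryption_out_source: <encryption> sits directly under <disk>.
demo_disk.encryption = demo_disk.new_encryption(**encryption_dict)
# encryption_in_source: <encryption> sits under <source> (libvirt >= 3.9.0).
demo_source.encryption = demo_disk.new_encryption(**encryption_dict)

demo_disk.source = demo_source
print(demo_disk)
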
Example #3
def run(test, params, env):
    """
    Test disk encryption option.

    1. Prepare backend storage (blkdev/iscsi/gluster/ceph)
    2. Use luks format to encrypt the backend storage
    3. Prepare a disk xml pointing to the backend storage with a valid/invalid
       luks password
    4. Start VM with disk hot/cold plugged
    5. Check some disk operations in VM
    6. Check backend storage is still in luks format
    7. Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def encrypt_dev(device, params):
        """
        Encrypt device with luks format

        :param device: Storage device to be encrypted.
        :param params: Dict from which to get the encryption password.
        """
        password = params.get("luks_encrypt_passwd", "password")
        size = params.get("luks_size", "500M")
        cmd = (
            "qemu-img create -f luks "
            "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
            "-o key-secret=sec0 %s %s" % (password, device, size))
        if process.system(cmd, shell=True):
            test.fail("Can't create a luks encrypted img by qemu-img")

    def check_dev_format(device, fmt="luks"):
        """
        Check if device is in luks format

        :param device: Storage device to be checked.
        :param fmt: Expected disk format.
        :return: None; the test fails if the device format does not match fmt.
        """
        cmd_result = process.run("qemu-img" + ' -h',
                                 ignore_status=True,
                                 shell=True,
                                 verbose=False)
        if b'-U' in cmd_result.stdout:
            cmd = ("qemu-img info -U %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        else:
            cmd = ("qemu-img info %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        if cmd_result.exit_status:
            test.fail("device %s is not in %s format. err is: %s" %
                      (device, fmt, cmd_result.stderr))

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if check successfully.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test".format(
                       added_part))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage options.
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")

    # LUKS encryption info: luks_encrypt_passwd is the password used to encrypt
    # the luks image, and luks_secret_passwd is the password set on the luks
    # secret; set a wrong password in luks_secret_passwd for negative tests.
    luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
    luks_secret_passwd = params.get("luks_secret_passwd", "password")
    # Backend storage auth info
    use_auth_usage = "yes" == params.get("use_auth_usage")
    if use_auth_usage:
        use_auth_uuid = False
    else:
        use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")

    status_error = "yes" == params.get("status_error")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    encryption_in_source = "yes" == params.get("encryption_in_source", "no")
    auth_in_source = "yes" == params.get("auth_in_source", "no")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}
    pvt = None

    if ((encryption_in_source or auth_in_source)
            and not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot put <encryption> or <auth> inside disk <source> "
                    "in this libvirt version.")
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Setup backend storage
        if backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                if enable_auth:
                    chap_user = params.get("chap_user", "redhat")
                    chap_passwd = params.get("chap_passwd", "password")
                    auth_sec_usage = params.get("auth_sec_usage",
                                                "libvirtiscsi")
                    auth_sec_dict = {
                        "sec_usage": "iscsi",
                        "sec_target": auth_sec_usage
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    # Set password of auth secret (not luks encryption secret)
                    virsh.secret_set_value(auth_sec_uuid,
                                           chap_passwd,
                                           encode=True,
                                           debug=True)
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=False,
                        image_size=storage_size,
                        chap_user=chap_user,
                        chap_passwd=chap_passwd,
                        portal_ip=iscsi_host)
                    # ISCSI auth attributes for disk xml
                    if use_auth_uuid:
                        disk_auth_dict = {
                            "auth_user": chap_user,
                            "secret_type": auth_sec_usage_type,
                            "secret_uuid": auth_sec_uuid
                        }
                    elif use_auth_usage:
                        disk_auth_dict = {
                            "auth_user": chap_user,
                            "secret_type": auth_sec_usage_type,
                            "secret_usage": auth_sec_usage_target
                        }
                else:
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True,
                        is_login=False,
                        image_size=storage_size,
                        portal_ip=iscsi_host)
                device_source = "iscsi://%s:%s/%s/%s" % (
                    iscsi_host, iscsi_port, iscsi_target, lun_num)
                disk_src_dict = {
                    "attrs": {
                        "protocol": "iscsi",
                        "name": "%s/%s" % (iscsi_target, lun_num)
                    },
                    "hosts": [{
                        "name": iscsi_host,
                        "port": iscsi_port
                    }]
                }
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name",
                                           "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = libvirt.setup_or_cleanup_gluster(
                is_setup=True,
                vol_name=gluster_vol_name,
                pool_name=gluster_pool_name,
                **params)
            device_source = "gluster://%s/%s/%s" % (
                gluster_host_ip, gluster_vol_name, gluster_img_name)
            disk_src_dict = {
                "attrs": {
                    "protocol": "gluster",
                    "name": "%s/%s" % (gluster_vol_name, gluster_img_name)
                },
                "hosts": [{
                    "name": gluster_host_ip,
                    "port": "24007"
                }]
            }
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name",
                                        "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Prepare a blank config path so cleanup knows whether to delete
            # the ceph config file at the end of the test.
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            if enable_auth:
                # If auth is enabled, prepare a local file to save the key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {
                        "sec_usage": auth_sec_usage_type,
                        "sec_name": "ceph_auth_secret"
                    }
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid,
                                           ceph_auth_key,
                                           debug=True)
                    disk_auth_dict = {
                        "auth_user": ceph_auth_user,
                        "secret_type": auth_sec_usage_type,
                        "secret_uuid": auth_sec_uuid
                    }
                    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                           "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
                else:
                    test.error("No ceph client name/key provided.")
                device_source = "rbd:%s:mon_host=%s:keyring=%s" % (
                    ceph_disk_name, ceph_mon_ip, key_file)
            else:
                device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name,
                                                        ceph_mon_ip)
            disk_src_dict = {
                "attrs": {
                    "protocol": "rbd",
                    "name": ceph_disk_name
                },
                "hosts": [{
                    "name": ceph_host_ip,
                    "port": ceph_host_port
                }]
            }
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            disk_src_dict = {
                'attrs': {
                    'file': device_source,
                    'type_name': 'file'
                }
            }
        else:
            test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.")
        logging.debug("device source is: %s", device_source)
        luks_sec_uuid = libvirt.create_secret(params)
        logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
        ret = virsh.secret_set_value(luks_sec_uuid,
                                     luks_secret_passwd,
                                     encode=True,
                                     debug=True)
        encrypt_dev(device_source, params)
        libvirt.check_exit_status(ret)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            if auth_in_source:
                disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
            else:
                disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_encryption_dict = {
            "encryption": "luks",
            "secret": {
                "type": "passphrase",
                "uuid": luks_sec_uuid
            }
        }
        disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
        if encryption_in_source:
            disk_source.encryption = disk_encryption
        else:
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        vmxml.sync()
        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as details:
            # When a wrong password is used in the disk xml for cold plug
            # cases, the VM cannot be started
            if status_error and not hotplug_disk:
                logging.info("VM failed to start as expected: %s" %
                             str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        if hotplug_disk:
            result = virsh.attach_device(vm_name,
                                         disk_xml.xml,
                                         ignore_status=True,
                                         debug=True)
            libvirt.check_exit_status(result, status_error)
        if check_partitions and not status_error:
            if not check_in_vm(device_target, old_parts):
                test.fail("Check disk partitions in VM failed")
        check_dev_format(device_source)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            libvirt.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove ceph configure file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)

        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
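
# The encrypt_dev()/check_dev_format() pair above is essentially two qemu-img
# invocations. Below is a standalone sketch of the same idea; the image path,
# passphrase and size are illustrative assumptions, not values from the test.
from avocado.utils import process

demo_img = "/var/lib/libvirt/images/luks-demo.img"
demo_passwd = "password"
demo_size = "500M"

# Create a LUKS-formatted image, feeding the passphrase in as a base64 secret.
create_cmd = ("qemu-img create -f luks "
              "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
              "-o key-secret=sec0 %s %s" % (demo_passwd, demo_img, demo_size))
process.run(create_cmd, shell=True)

# Confirm the result really is LUKS by reading the format from qemu-img info.
info = process.run("qemu-img info %s" % demo_img, shell=True).stdout_text
assert "luks" in info.lower()
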
Example #4
def run(test, params, env):
    """
    Test virsh domblkthreshold option.

    1. Prepare backend storage (file/luks/iscsi/gluster/ceph/nbd)
    2. Start VM
    3. Set domblkthreshold on target device in VM
    4. Trigger one threshold event
    5. Check threshold event is received as expected
    6. Clean up test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}
    block_threshold_timeout = params.get("block_threshold_timeout", "120")
    event_type = params.get("event_type", "block-threshold")
    block_threshold_option = params.get("block_threshold_option", "--loop")

    def set_vm_block_domblkthreshold(vm_name, target_device, threshold, **dargs):
        """
        Set VM block threshold on specific target device.

        :param vm_name: VM name.
        :param target_device: target device in VM
        :param threshold: threshold value with specific unit such as 100M
        :param dargs: mutable parameter dict
        """
        ret = virsh.domblkthreshold(vm_name, target_device, threshold, **dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_threshold_event(vm_domain, target):
        """
        Trigger block threshold event.

        :param vm_domain: VM name
        :param target: Disk dev in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            time.sleep(10)
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   " mount /dev/{0} /mnt && "
                   " dd if=/dev/urandom of=/mnt/bigfile bs=1M count=101"
                   .format(target))
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to mount and fill data in VM: %s" % output)
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise

    def check_threshold_event(vm_name, event_type, event_timeout, options, **dargs):
        """
        Check threshold event.

        :param vm_name: VM name
        :param event_type: event type.
        :param event_timeout: event timeout value
        :param options: event option
        :param dargs: dynamic parameters.
        """
        ret = virsh.event(vm_name, event_type, event_timeout, options, **dargs)
        logging.debug(ret.stdout_text)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param vol_params: Volume parameters dict.
        """
        # Clean up any leftover volumes in the pool.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def trigger_block_commit(vm_name, target, blockcommit_options, **virsh_dargs):
        """
        Trigger blockcommit.

        :param vm_name: VM name
        :param target: Disk dev in VM.
        :param blockcommit_options: blockcommit option
        :param virsh_dargs: additional parameters
        """
        result = virsh.blockcommit(vm_name, target,
                                   blockcommit_options, ignore_status=False, **virsh_dargs)

    def trigger_block_copy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs):
        """
        Trigger blockcopy

        :param vm_name: string, VM name
        :param target: string, target disk
        :param dest_path: string, the path of copied disk
        :param blockcopy_options: string, some options applied
        :param virsh_dargs: additional options
        """
        result = virsh.blockcopy(vm_name, target, dest_path, blockcopy_options, **virsh_dargs)
        libvirt.check_exit_status(result)

    def trigger_mirror_threshold_event(vm_domain, target):
        """
        Trigger mirror mode block threshold event.

        :param vm_domain: VM name
        :param target: Disk target in VM.
        """
        try:
            session = vm_domain.wait_for_login()
            # Sleep 10 seconds to let the event-waiting thread in the main thread start first
            time.sleep(10)
            cmd = ("dd if=/dev/urandom of=file bs=1G count=3")
            status, output = session.cmd_status_output(cmd)
            if status:
                test.error("Failed to fill data in VM target: %s with %s" % (target, output))
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            raise
        except Exception as ex:
            raise

    def get_mirror_source_index(vm_name, dev_index=0):
        """
        Get mirror source index

        :param vm_name: VM name
        :param dev_index: Disk device index.
        :return: mirror source index as an integer
        """
        disk_list = vm_xml.VMXML.get_disk_source(vm_name)
        disk_mirror = disk_list[dev_index].find('mirror')
        if disk_mirror is None:
            test.fail("Failed to get disk mirror")
        disk_mirror_source = disk_mirror.find('source')
        return int(disk_mirror_source.get('index'))

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage auth info
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")
    use_auth_usage = "yes" == params.get("use_auth_usage")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")

    mirror_mode_blockcommit = "yes" == params.get("mirror_mode_blockcommit", "no")
    mirror_mode_blockcopy = "yes" == params.get("mirror_mode_blockcopy", "no")
    default_snapshot_test = "yes" == params.get("default_snapshot_test", "no")
    block_threshold_value = params.get("block_threshold_value", "100M")
    snapshot_external_disks = []
    tmp_dir = data_dir.get_tmp_dir()
    dest_path = params.get("dest_path", "/var/lib/libvirt/images/newclone")

    pvt = None
    # Initialize one NbdExport object
    nbd = None
    img_file = os.path.join(data_dir.get_tmp_dir(),
                            "%s_test.img" % vm_name)
    if ((backend_storage_type == "luks") and
            not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot support <encryption> inside disk in this libvirt version.")

    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    # Additional disk images.
    disks_img = []
    try:
        # Clean up leftover secrets in the test environment, if any.
        utils_secret.clean_up_secrets()
        # Setup backend storage
        if backend_storage_type == "file":
            image_filename = params.get("image_filename", "raw.img")
            disk_path = os.path.join(data_dir.get_tmp_dir(), image_filename)
            device_source = libvirt.create_local_disk(backend_storage_type, disk_path, storage_size, device_format)
            disks_img.append({"format": device_format,
                              "source": disk_path, "path": disk_path})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Setup backend storage
        elif backend_storage_type == "luks":
            luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
            luks_secret_passwd = params.get("luks_secret_passwd", "password")
            # Create secret
            luks_sec_uuid = libvirt.create_secret(params)
            logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
            virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd,
                                   encode=True, ignore_status=False, debug=True)
            image_filename = params.get("image_filename", "raw.img")
            device_source = os.path.join(data_dir.get_tmp_dir(), image_filename)

            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
            disk_encryption_dict = {"encryption": "luks",
                                    "secret": {"type": "passphrase",
                                               "uuid": luks_sec_uuid}}

            cmd = ("qemu-img create -f luks "
                   "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
                   "-o key-secret=sec0 %s %s" % (luks_encrypt_passwd, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a luks encrypted img by qemu-img")
        elif backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                chap_user = params.get("chap_user", "redhat")
                chap_passwd = params.get("chap_passwd", "password")
                auth_sec_usage = params.get("auth_sec_usage",
                                            "libvirtiscsi")
                auth_sec_dict = {"sec_usage": "iscsi",
                                 "sec_target": auth_sec_usage}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                # Set password of auth secret (not luks encryption secret)
                virsh.secret_set_value(auth_sec_uuid, chap_passwd,
                                       encode=True, ignore_status=False, debug=True)
                iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                    is_setup=True, is_login=False, image_size=storage_size,
                    chap_user=chap_user, chap_passwd=chap_passwd,
                    portal_ip=iscsi_host)
                # ISCSI auth attributes for disk xml
                disk_auth_dict = {"auth_user": chap_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_usage": auth_sec_usage_target}
                device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port,
                                                         iscsi_target, lun_num)
                disk_src_dict = {"attrs": {"protocol": "iscsi",
                                           "name": "%s/%s" % (iscsi_target, lun_num)},
                                 "hosts": [{"name": iscsi_host, "port": iscsi_port}]}
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                    is_setup=True,
                    vol_name=gluster_vol_name,
                    pool_name=gluster_pool_name,
                    **params)

            device_source = "gluster://%s/%s/%s" % (gluster_host_ip,
                                                    gluster_vol_name,
                                                    gluster_img_name)
            cmd = ("qemu-img create -f %s "
                   "%s %s" % (device_format, device_source, storage_size))
            if process.system(cmd, shell=True):
                test.error("Can't create a gluster type img by qemu-img")
            disk_src_dict = {"attrs": {"protocol": "gluster",
                                       "name": "%s/%s" % (gluster_vol_name,
                                                          gluster_img_name)},
                             "hosts":  [{"name": gluster_host_ip,
                                         "port": "24007"}]}
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")

            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Prepare a blank config path so cleanup knows whether to delete
            # the ceph config file at the end of the test.
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            # If auth is enabled, prepare a local file to save the key
            if ceph_client_name and ceph_client_key:
                with open(key_file, 'w') as f:
                    f.write("[%s]\n\tkey = %s\n" %
                            (ceph_client_name, ceph_client_key))
                key_opt = "--keyring %s" % key_file
                auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                                 "sec_name": "ceph_auth_secret"}
                auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                                       debug=True)
                disk_auth_dict = {"auth_user": ceph_auth_user,
                                  "secret_type": auth_sec_usage_type,
                                  "secret_uuid": auth_sec_uuid}
            else:
                test.error("No ceph client name/key provided.")
            device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name,
                                                               ceph_mon_ip,
                                                               key_file)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            # Create a local image.
            disk_cmd = ("qemu-img create -f %s %s %s" %
                        (device_format, img_file, storage_size))
            process.run(disk_cmd, ignore_status=False, shell=True)
            # Convert the image to remote storage
            disk_path = ("rbd:%s:mon_host=%s" %
                         (ceph_disk_name, ceph_mon_ip))
            if ceph_client_name and ceph_client_key:
                disk_path += (":id=%s:key=%s" %
                              (ceph_auth_user, ceph_auth_key))
            rbd_cmd = ("rbd -m %s %s info %s 2> /dev/null|| qemu-img convert -O"
                       " %s %s %s" % (ceph_mon_ip, key_opt, ceph_disk_name,
                                      device_format, img_file, disk_path))
            process.run(rbd_cmd, ignore_status=False, shell=True)
            disk_src_dict = {"attrs": {"protocol": "rbd",
                                       "name": ceph_disk_name},
                             "hosts":  [{"name": ceph_host_ip,
                                         "port": ceph_host_port}]}
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            # Set virt_use_nfs
            virt_use_nfs = params.get("virt_use_nfs", "off")
            result = process.run("setsebool virt_use_nfs %s" % virt_use_nfs, shell=True)
            if result.exit_status:
                test.error("Failed to set virt_use_nfs value")

            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = os.path.join(nfs_mount_dir, image_name)
            # Create one image on nfs server
            libvirt.create_local_disk("file", device_source, '1', "raw")
            disks_img.append({"format": device_format,
                              "source": device_source, "path": device_source})
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Create a dir-based pool, and then create one volume on it.
        elif backend_storage_type == "dir":
            pool_name = params.get("pool_name", "dir_pool")
            pool_target = params.get("pool_target")
            pool_type = params.get("pool_type")
            emulated_image = params.get("emulated_image")
            image_name = params.get("dir_image_name", "luks_1.img")
            # Create and start dir_based pool.
            pvt = libvirt.PoolVolumeTest(test, params)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            sp = libvirt_storage.StoragePool()
            if not sp.is_pool_active(pool_name):
                sp.set_pool_autostart(pool_name)
                sp.start_pool(pool_name)
            # Create one volume on the pool.
            volume_name = params.get("vol_name")
            volume_alloc = params.get("vol_alloc")
            volume_cap_unit = params.get("vol_cap_unit")
            volume_cap = params.get("vol_cap")
            volume_target_path = params.get("sec_volume")
            volume_target_format = params.get("target_format")
            volume_target_encypt = params.get("target_encypt", "")
            volume_target_label = params.get("target_label")
            vol_params = {"name": volume_name, "capacity": int(volume_cap),
                          "allocation": int(volume_alloc), "format":
                          volume_target_format, "path": volume_target_path,
                          "label": volume_target_label,
                          "capacity_unit": volume_cap_unit}
            try:
                # If libvirt version is lower than 2.5.0, creating a luks
                # encryption volume is not supported, so skip it.
                create_vol(pool_name, vol_params)
            except AssertionError as info:
                err_msgs = ("create: invalid option")
                if str(info).count(err_msgs):
                    test.cancel("Creating luks encryption volume "
                                "is not supported on this libvirt version")
                else:
                    test.error("Failed to create volume."
                               "Error: %s" % str(info))
            disk_src_dict = {'attrs': {'file': volume_target_path}}
            device_source = volume_target_path
        elif backend_storage_type == "nbd":
            # Get server hostname.
            hostname = process.run('hostname', ignore_status=False, shell=True, verbose=True).stdout_text.strip()
            # Setup backend storage
            nbd_server_host = hostname
            nbd_server_port = params.get("nbd_server_port")
            image_path = params.get("emulated_image", "/var/lib/libvirt/images/nbdtest.img")
            # Create NbdExport object
            nbd = NbdExport(image_path, image_format=device_format,
                            port=nbd_server_port)
            nbd.start_nbd_server()
            # Prepare disk source xml
            source_attrs_dict = {"protocol": "nbd"}
            disk_src_dict = {}
            disk_src_dict.update({"attrs": source_attrs_dict})
            disk_src_dict.update({"hosts": [{"name": nbd_server_host, "port": nbd_server_port}]})
            device_source = "nbd://%s:%s/%s" % (nbd_server_host,
                                                nbd_server_port,
                                                image_path)

        logging.debug("device source is: %s", device_source)

        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        if disk_encryption_dict:
            disk_encryption_dict = {"encryption": "luks",
                                    "secret": {"type": "passphrase",
                                               "uuid": luks_sec_uuid}}
            disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)

            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml, except for the mirror_mode_blockcommit and mirror_mode_blockcopy cases
        if (not mirror_mode_blockcommit and not mirror_mode_blockcopy):
            vmxml.add_device(disk_xml)
        try:
            vmxml.sync()
            vm.start()
            vm.wait_for_login().close()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s", str(xml_error))
        except virt_vm.VMStartError as details:
            # When a wrong password is used in the disk xml for cold plug cases,
            # the VM cannot be started.
            if status_error:
                logging.info("VM failed to start as expected: %s", str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        func_name = trigger_block_threshold_event
        # Additional operations before set block threshold
        if backend_storage_type == "file":
            logging.info("Create snapshot...")
            snap_opt = " %s --disk-only "
            snap_opt += "%s,snapshot=external,file=%s"
            if default_snapshot_test:
                for index in range(1, 5):
                    snapshot_name = "snapshot_%s" % index
                    snap_path = "%s/%s_%s.snap" % (tmp_dir, vm_name, index)
                    snapshot_external_disks.append(snap_path)
                    snap_option = snap_opt % (snapshot_name, device_target, snap_path)
                    virsh.snapshot_create_as(vm_name, snap_option,
                                             ignore_status=False, debug=True)

            if mirror_mode_blockcommit:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Set threshold for disk mirroring feature is not supported on current version")
                vmxml.del_device(disk_xml)
                virsh.snapshot_create_as(vm_name, "--disk-only --no-metadata",
                                         ignore_status=False, debug=True)
                # Do active blockcommit in background.
                blockcommit_options = "--active"
                mirror_blockcommit_thread = threading.Thread(target=trigger_block_commit,
                                                             args=(vm_name, 'vda', blockcommit_options,),
                                                             kwargs={'debug': True})
                mirror_blockcommit_thread.start()
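                # The bracketed index notation ("vda[1]") addresses an element of
                # the disk's backing chain, so the threshold is registered on the
                # image being committed into rather than on the top image.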
                device_target = "vda[1]"
                func_name = trigger_mirror_threshold_event
            if mirror_mode_blockcopy:
                if not libvirt_version.version_compare(6, 6, 0):
                    test.cancel("Set threshold for disk mirroring feature is not supported on current version")
                # Do transient blockcopy in the background.
                blockcopy_options = "--transient-job "
                # Do cleanup
                if os.path.exists(dest_path):
                    libvirt.delete_local_disk("file", dest_path)
                mirror_blockcopy_thread = threading.Thread(target=trigger_block_copy,
                                                           args=(vm_name, 'vda', dest_path, blockcopy_options,),
                                                           kwargs={'debug': True})
                mirror_blockcopy_thread.start()
                mirror_blockcopy_thread.join(10)
                device_target = "vda[%d]" % get_mirror_source_index(vm_name)
                func_name = trigger_mirror_threshold_event
        set_vm_block_domblkthreshold(vm_name, device_target, block_threshold_value, **{"debug": True})
        cli_thread = threading.Thread(target=func_name,
                                      args=(vm, device_target))
        cli_thread.start()
        check_threshold_event(vm_name, event_type, block_threshold_timeout, block_threshold_option, **{"debug": True})
    finally:
        # Delete snapshots.
        if virsh.domain_exists(vm_name):
            # To delete snapshots, destroy the VM first.
            if vm.is_alive():
                vm.destroy()
            libvirt.clean_up_snapshots(vm_name, domxml=vmxml_backup)

        vmxml_backup.sync("--snapshots-metadata")

        if os.path.exists(img_file):
            libvirt.delete_local_disk("file", img_file)
        for img in disks_img:
            if os.path.exists(img["path"]):
                libvirt.delete_local_disk("file", img["path"])

        for disk in snapshot_external_disks:
            libvirt.delete_local_disk('file', disk)

        if os.path.exists(dest_path):
            libvirt.delete_local_disk("file", dest_path)

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove the ceph config file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)
        elif backend_storage_type == "nfs":
            result = process.run("setsebool virt_use_nfs off",
                                 shell=True)
            if result.exit_status:
                logging.info("Failed to restore virt_use_nfs value")
        elif backend_storage_type == "nbd":
            if nbd:
                try:
                    nbd.cleanup()
                except Exception as nbdEx:
                    logging.info("Clean up nbd failed: %s" % str(nbdEx))
        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
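The test above registers a write threshold on a disk and then waits for libvirt's block-threshold event. As a standalone illustration of that flow, here is a minimal sketch that drives the plain virsh CLI directly; the domain name, device target, byte threshold and timeout are illustrative placeholders, not values taken from the test parameters.

import subprocess


def watch_block_threshold(domain, device="vda", threshold="104857600", timeout=30):
    """Register a byte threshold on a disk and wait for the event (sketch)."""
    # Register the threshold on the given disk target.
    subprocess.run(["virsh", "domblkthreshold", domain, device, str(threshold)],
                   check=True)
    # Block until libvirt emits the block-threshold event or the timeout expires.
    event = subprocess.run(
        ["virsh", "event", domain, "--event", "block-threshold",
         "--timeout", str(timeout)],
        capture_output=True, text=True)
    return "block-threshold" in event.stdout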
Example 5
        else:
            dev_attrs = "dev"
        disk_xml.source = disk_xml.new_disk_source(
            **{"attrs": {dev_attrs: volume_target_path}})
        disk_xml.driver = {"name": "qemu", "type": volume_target_format,
                           "cache": "none"}
        disk_xml.target = {"dev": device_target, "bus": device_bus}

        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
        sec_uuid.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format, v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            disk_xml.encryption = disk_xml.new_encryption(
                **{"encryption": v_xml.encryption.format, "secret": {
                   "type": v_xml.encryption.secret["type"],
                   "uuid": v_xml.encryption.secret["uuid"]}})

        # Sync VM xml.
        vmxml.add_device(disk_xml)
        vmxml.sync()

        try:
            # Start the VM and check status.
            vm.start()
            if status_error:
                raise error.TestFail("VM started unexpectedly.")

            if not check_in_vm(vm, device_target, old_parts):
                raise error.TestFail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
Example 6
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare backend storage (blkdev/iscsi/gluster/ceph)
    2.Use luks format to encrypt the backend storage
    3.Prepare a disk xml indicating to the backend storage with valid/invalid
      luks password
    4.Start VM with disk hot/cold plugged
    5.Check some disk operations in VM
    6.Check backend storage is still in luks format
    7.Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def encrypt_dev(device, params):
        """
        Encrypt device with luks format

        :param device: Storage device to be encrypted.
        :param params: From the dict to get encryption password.
        """
        password = params.get("luks_encrypt_passwd", "password")
        size = params.get("luks_size", "500M")
        preallocation = params.get("preallocation")
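        # The passphrase is handed to qemu-img as an inline --object secret
        # (base64-encoded via printf | base64 on the shell) and referenced by
        # the luks driver through the key-secret option.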
        cmd = ("qemu-img create -f luks "
               "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
               "-o key-secret=sec0 %s %s" % (password, device, size))
        # Add preallocation if it is given in params
        if preallocation:
            cmd = cmd.replace("key-secret=sec0", "key-secret=sec0,preallocation=%s" % preallocation)
        if process.system(cmd, shell=True):
            test.fail("Can't create a luks encrypted img by qemu-img")

    def check_dev_format(device, fmt="luks"):
        """
        Check if device is in luks format

        :param device: Storage device to be checked.
        :param fmt: Expected disk format.
        Fail the test if device's format does not equal fmt.
        """
        cmd_result = process.run("qemu-img" + ' -h', ignore_status=True,
                                 shell=True, verbose=False)
        if b'-U' in cmd_result.stdout:
            cmd = ("qemu-img info -U %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        else:
            cmd = ("qemu-img info %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        if cmd_result.exit_status:
            test.fail("device %s is not in %s format. err is: %s" %
                      (device, fmt, cmd_result.stderr))

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = utils_disk.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
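            # Prove the new disk is usable end to end: list partitions, make a
            # filesystem, mount, write a file and unmount; any failure fails the check.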
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(added_part))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name: Pool name.
        :param target_encrypt_params: encrypt parameters in dict.
        :param vol_params: Volume parameters dict.
        """
        # Clean up dirty volumes if pool has.
        pv = libvirt_storage.PoolVolume(p_name)
        vol_name_list = pv.list_volumes()
        for vol_name in vol_name_list:
            pv.delete_volume(vol_name)

        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def get_secret_list():
        """
        Get secret list.

        :return secret list
        """
        logging.info("Get secret list ...")
        secret_list_result = virsh.secret_list()
        secret_list = secret_list_result.stdout.strip().splitlines()
        # First two lines contain table header followed by entries
        # for each secret, such as:
        #
        # UUID                                  Usage
        # --------------------------------------------------------------------------------
        # b4e8f6d3-100c-4e71-9f91-069f89742273  ceph client.libvirt secret
        secret_list = secret_list[2:]
        result = []
        # Only parse entries if the secret list is not empty.
        if secret_list:
            for line in secret_list:
                # Split on whitespace and keep the first column (the UUID)
                linesplit = line.split(None, 1)
                result.append(linesplit[0])
        return result

    def check_top_image_in_xml(expected_top_image):
        """
        check top image in src file

        :param expected_top_image: the expected top image path
        """
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disks = vmxml.devices.by_device_tag('disk')
        disk_xml = None
        for disk in disks:
            if disk.target['dev'] != device_target:
                continue
            else:
                disk_xml = disk.xmltreefile
                break
        logging.debug("disk xml in top: %s\n", disk_xml)
        src_file = disk_xml.find('source').get('file')
        if src_file is None:
            src_file = disk_xml.find('source').get('name')
        if src_file != expected_top_image:
            test.fail("Current top img %s is not the same with %s"
                      % (src_file, expected_top_image))

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_type = params.get("virt_disk_device_type", "file")
    device_format = params.get("virt_disk_device_format", "raw")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")
    volume_target_format = params.get("target_format", "raw")

    # Backend storage options.
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")

    # Luks encryption info: luks_encrypt_passwd is the password used to encrypt
    # the luks image, and luks_secret_passwd is the password set on the luks
    # secret; set a wrong luks_secret_passwd for negative tests.
    luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
    luks_secret_passwd = params.get("luks_secret_passwd", "password")
    # Backend storage auth info
    use_auth_usage = "yes" == params.get("use_auth_usage")
    if use_auth_usage:
        use_auth_uuid = False
    else:
        use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")

    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    encryption_in_source = "yes" == params.get("encryption_in_source", "no")
    auth_in_source = "yes" == params.get("auth_in_source", "no")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}
    pvt = None
    duplicated_encryption = "yes" == params.get("duplicated_encryption", "no")
    slice_support_enable = "yes" == params.get("slice_support_enable", "no")
    block_copy_test = "yes" == params.get("block_copy_test", "no")

    if ((encryption_in_source or auth_in_source) and
            not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot put <encryption> or <auth> inside disk <source> "
                    "in this libvirt version.")
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = utils_disk.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Clean up dirty secrets in test environments if there are.
        dirty_secret_list = get_secret_list()
        if dirty_secret_list:
            for dirty_secret_uuid in dirty_secret_list:
                virsh.secret_undefine(dirty_secret_uuid)
        # Create secret
        luks_sec_uuid = libvirt.create_secret(params)
        logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
        ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd,
                                     encode=True, debug=True)
        libvirt.check_exit_status(ret)
        # Setup backend storage
        if backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                if enable_auth:
                    chap_user = params.get("chap_user", "redhat")
                    chap_passwd = params.get("chap_passwd", "password")
                    auth_sec_usage = params.get("auth_sec_usage",
                                                "libvirtiscsi")
                    auth_sec_dict = {"sec_usage": "iscsi",
                                     "sec_target": auth_sec_usage}
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    # Set password of auth secret (not luks encryption secret)
                    virsh.secret_set_value(auth_sec_uuid, chap_passwd,
                                           encode=True, debug=True)
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True, is_login=False, image_size=storage_size,
                        chap_user=chap_user, chap_passwd=chap_passwd,
                        portal_ip=iscsi_host)
                    # ISCSI auth attributes for disk xml
                    if use_auth_uuid:
                        disk_auth_dict = {"auth_user": chap_user,
                                          "secret_type": auth_sec_usage_type,
                                          "secret_uuid": auth_sec_uuid}
                    elif use_auth_usage:
                        disk_auth_dict = {"auth_user": chap_user,
                                          "secret_type": auth_sec_usage_type,
                                          "secret_usage": auth_sec_usage_target}
                else:
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True, is_login=False, image_size=storage_size,
                        portal_ip=iscsi_host)
                device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port,
                                                         iscsi_target, lun_num)
                disk_src_dict = {"attrs": {"protocol": "iscsi",
                                           "name": "%s/%s" % (iscsi_target, lun_num)},
                                 "hosts": [{"name": iscsi_host, "port": iscsi_port}]}
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = gluster.setup_or_cleanup_gluster(
                    is_setup=True,
                    vol_name=gluster_vol_name,
                    pool_name=gluster_pool_name,
                    **params)
            device_source = "gluster://%s/%s/%s" % (gluster_host_ip,
                                                    gluster_vol_name,
                                                    gluster_img_name)
            disk_src_dict = {"attrs": {"protocol": "gluster",
                                       "name": "%s/%s" % (gluster_vol_name,
                                                          gluster_img_name)},
                             "hosts":  [{"name": gluster_host_ip,
                                         "port": "24007"}]}
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            key_file = os.path.join(TMP_DATA_DIR, "ceph.key")
            key_opt = ""
            # Start with a blank ceph config path so cleanup knows whether to delete the config file at the end of the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            if enable_auth:
                # If auth is enabled, prepare a local file to save the key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                                     "sec_name": "ceph_auth_secret"}
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                                           debug=True)
                    disk_auth_dict = {"auth_user": ceph_auth_user,
                                      "secret_type": auth_sec_usage_type,
                                      "secret_uuid": auth_sec_uuid}
                else:
                    test.error("No ceph client name/key provided.")
                device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name,
                                                                   ceph_mon_ip,
                                                                   key_file)
            else:
                device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("pre clean up rbd disk if exists: %s", cmd_result)
            disk_src_dict = {"attrs": {"protocol": "rbd",
                                       "name": ceph_disk_name},
                             "hosts":  [{"name": ceph_host_ip,
                                         "port": ceph_host_port}]}
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = TMP_DATA_DIR
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = nfs_mount_dir + image_name
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        # Create a dir-based pool, and then create one volume on it.
        elif backend_storage_type == "dir":
            pool_name = params.get("pool_name", "dir_pool")
            pool_target = params.get("pool_target")
            pool_type = params.get("pool_type")
            emulated_image = params.get("emulated_image")
            image_name = params.get("dir_image_name", "luks_1.img")
            # Create and start dir_based pool.
            pvt = libvirt.PoolVolumeTest(test, params)
            if not os.path.exists(pool_target):
                os.mkdir(pool_target)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            sp = libvirt_storage.StoragePool()
            if not sp.is_pool_active(pool_name):
                sp.set_pool_autostart(pool_name)
                sp.start_pool(pool_name)
            # Create one volume on the pool.
            volume_name = params.get("vol_name")
            volume_alloc = params.get("vol_alloc")
            volume_cap_unit = params.get("vol_cap_unit")
            volume_cap = params.get("vol_cap")
            volume_target_path = params.get("sec_volume")
            volume_target_format = params.get("target_format")
            device_format = volume_target_format
            volume_target_encypt = params.get("target_encypt", "")
            volume_target_label = params.get("target_label")
            vol_params = {"name": volume_name, "capacity": int(volume_cap),
                          "allocation": int(volume_alloc), "format":
                          volume_target_format, "path": volume_target_path,
                          "label": volume_target_label,
                          "capacity_unit": volume_cap_unit}
            vol_encryption_params = {}
            vol_encryption_params.update({"format": "luks"})
            vol_encryption_params.update({"secret": {"type": "passphrase", "uuid": luks_sec_uuid}})
            try:
                # If the target format is qcow2, create the test image with "qemu-img create"
                if volume_target_format == "qcow2":
                    option = params.get("luks_extra_elements")
                    libvirt.create_local_disk("file", path=volume_target_path, extra=option,
                                              disk_format="qcow2", size="1")
                else:
                    # If the libvirt version is lower than 2.5.0,
                    # creating a luks encryption volume is not supported, so skip it.
                    create_vol(pool_name, vol_encryption_params, vol_params)
            except AssertionError as info:
                err_msgs = ("create: invalid option")
                if str(info).count(err_msgs):
                    test.cancel("Creating luks encryption volume "
                                "is not supported on this libvirt version")
                else:
                    test.error("Failed to create volume."
                               "Error: %s" % str(info))
            disk_src_dict = {'attrs': {'file': volume_target_path}}
            device_source = volume_target_path
        elif backend_storage_type == "file":
            tmp_dir = TMP_DATA_DIR
            image_name = params.get("file_image_name", "slice.img")
            device_source = os.path.join(tmp_dir, image_name)
            disk_src_dict = {'attrs': {'file': device_source}}
        else:
            test.cancel("Only iscsi/gluster/rbd/nfs/file can be tested for now.")
        logging.debug("device source is: %s", device_source)
        if backend_storage_type != "dir":
            encrypt_dev(device_source, params)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            if auth_in_source:
                disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
            else:
                disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_encryption_dict = {"encryption": "luks",
                                "secret": {"type": "passphrase",
                                           "uuid": luks_sec_uuid}}
        disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
        if encryption_in_source:
            disk_source.encryption = disk_encryption
        else:
            disk_xml.encryption = disk_encryption
        if duplicated_encryption:
            disk_xml.encryption = disk_encryption
        if slice_support_enable:
            if not libvirt_version.version_compare(6, 0, 0):
                test.cancel("Cannot put <slice> inside disk <source> "
                            "in this libvirt version.")
            else:
                check_du_output = process.run("du -b %s" % device_source, shell=True).stdout_text
                slice_size = re.findall(r'[0-9]+', check_du_output)
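                # <slice type='storage'> limits the guest-visible data to an
                # offset/size window of the source; here the window spans the
                # whole file as reported by "du -b".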
                disk_source.slices = disk_xml.new_slices(
                        **{"slice_type": "storage", "slice_offset": "0", "slice_size": slice_size[0]})
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        try:
            vmxml.sync()
            vm.start()
            vm.wait_for_login()
        except xcepts.LibvirtXMLError as xml_error:
            if not define_error:
                test.fail("Failed to define VM:\n%s" % str(xml_error))
        except virt_vm.VMStartError as details:
            # When a wrong password is used in the disk xml for cold plug cases,
            # the VM cannot be started.
            if status_error and not hotplug_disk:
                logging.info("VM failed to start as expected: %s" % str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        if hotplug_disk:
            result = virsh.attach_device(vm_name, disk_xml.xml,
                                         ignore_status=True, debug=True)
            libvirt.check_exit_status(result, status_error)
        if check_partitions and not status_error:
            if not check_in_vm(device_target, old_parts):
                test.fail("Check disk partitions in VM failed")
        if volume_target_format == "qcow2":
            check_dev_format(device_source, fmt="qcow2")
        else:
            check_dev_format(device_source)
        if block_copy_test:
            # Create a transient VM
            transient_vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
            if vm.is_alive():
                vm.destroy(gracefully=False)
            virsh.undefine(vm_name, debug=True, ignore_status=False)
            virsh.create(transient_vmxml.xml, ignore_status=False, debug=True)
            expected_top_image = vm.get_blk_devices()[device_target].get('source')
            options = params.get("blockcopy_options")
            tmp_dir = TMP_DATA_DIR
            tmp_file = time.strftime("%Y-%m-%d-%H.%M.%S.img")
            dest_path = os.path.join(tmp_dir, tmp_file)

            # Need to cover a few scenarios: single blockcopy, and blockcopy combined with abort
            virsh.blockcopy(vm_name, device_target, dest_path,
                            options, ignore_status=False, debug=True)
            if encryption_in_source:
                virsh.blockjob(vm_name, device_target, " --pivot", ignore_status=False)
                expected_top_image = dest_path
            else:
                virsh.blockjob(vm_name, device_target, " --abort", ignore_status=False)
            check_top_image_in_xml(expected_top_image)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            gluster.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name,
                                             **params)
        elif backend_storage_type == "ceph":
            # Remove the ceph config file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)

        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
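For reference, the disk-XML construction used throughout the test above can be condensed into a small helper. This is only a sketch that mirrors the Disk/new_disk_source/new_encryption calls shown in the test; the image path, secret UUID and target device are placeholders.

from virttest.libvirt_xml.devices.disk import Disk


def build_luks_disk_xml(image_path, secret_uuid, target="vdb"):
    """Build a file-backed <disk> whose luks secret is referenced by UUID (sketch)."""
    disk_xml = Disk(type_name="file")
    disk_xml.device = "disk"
    disk_xml.target = {"dev": target, "bus": "virtio"}
    disk_xml.driver = {"name": "qemu", "type": "raw"}
    disk_xml.source = disk_xml.new_disk_source(**{"attrs": {"file": image_path}})
    disk_xml.encryption = disk_xml.new_encryption(
        **{"encryption": "luks",
           "secret": {"type": "passphrase", "uuid": secret_uuid}})
    return disk_xml

The resulting object can then be attached cold with vmxml.add_device(disk_xml) or hot with virsh.attach_device, exactly as the tests do.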
Example 7
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare backend storage (blkdev/iscsi/gluster/ceph)
    2.Use luks format to encrypt the backend storage
    3.Prepare a disk xml indicating to the backend storage with valid/invalid
      luks password
    4.Start VM with disk hot/cold plugged
    5.Check some disk operations in VM
    6.Check backend storage is still in luks format
    7.Recover test environment
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def encrypt_dev(device, params):
        """
        Encrypt device with luks format

        :param device: Storage device to be encrypted.
        :param params: From the dict to get encryption password.
        """
        password = params.get("luks_encrypt_passwd", "password")
        size = params.get("luks_size", "500M")
        cmd = ("qemu-img create -f luks "
               "--object secret,id=sec0,data=`printf '%s' | base64`,format=base64 "
               "-o key-secret=sec0 %s %s" % (password, device, size))
        if process.system(cmd, shell=True):
            test.fail("Can't create a luks encrypted img by qemu-img")

    def check_dev_format(device, fmt="luks"):
        """
        Check if device is in luks format

        :param device: Storage device to be checked.
        :param fmt: Expected disk format.
        Fail the test if device's format does not equal fmt.
        """
        cmd_result = process.run("qemu-img" + ' -h', ignore_status=True,
                                 shell=True, verbose=False)
        if b'-U' in cmd_result.stdout:
            cmd = ("qemu-img info -U %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        else:
            cmd = ("qemu-img info %s| grep -i 'file format' "
                   "| grep -i %s" % (device, fmt))
        cmd_result = process.run(cmd, ignore_status=True, shell=True)
        if cmd_result.exit_status:
            test.fail("device %s is not in %s format. err is: %s" %
                      (device, fmt, cmd_result.stderr))

    def check_in_vm(target, old_parts):
        """
        Check mount/read/write disk in VM.

        :param target: Disk dev in VM.
        :param old_parts: Original disk partitions in VM.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            if platform.platform().count('ppc64'):
                time.sleep(10)
            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False
            else:
                added_part = added_parts[0]
            cmd = ("fdisk -l /dev/{0} && mkfs.ext4 -F /dev/{0} && "
                   "mkdir -p test && mount /dev/{0} test && echo"
                   " teststring > test/testfile && umount test"
                   .format(added_part))
            status, output = session.cmd_status_output(cmd)
            logging.debug("Disk operation in VM:\nexit code:\n%s\noutput:\n%s",
                          status, output)
            return status == 0

        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_format = params.get("virt_disk_device_format", "raw")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    backend_storage_type = params.get("backend_storage_type", "iscsi")

    # Backend storage options.
    storage_size = params.get("storage_size", "1G")
    enable_auth = "yes" == params.get("enable_auth")

    # Luks encryption info: luks_encrypt_passwd is the password used to encrypt
    # the luks image, and luks_secret_passwd is the password set on the luks
    # secret; set a wrong luks_secret_passwd for negative tests.
    luks_encrypt_passwd = params.get("luks_encrypt_passwd", "password")
    luks_secret_passwd = params.get("luks_secret_passwd", "password")
    # Backend storage auth info
    use_auth_usage = "yes" == params.get("use_auth_usage")
    if use_auth_usage:
        use_auth_uuid = False
    else:
        use_auth_uuid = "yes" == params.get("use_auth_uuid", "yes")
    auth_sec_usage_type = params.get("auth_sec_usage_type", "iscsi")
    auth_sec_usage_target = params.get("auth_sec_usage_target", "libvirtiscsi")

    status_error = "yes" == params.get("status_error")
    check_partitions = "yes" == params.get("virt_disk_check_partitions", "yes")
    hotplug_disk = "yes" == params.get("hotplug_disk", "no")
    encryption_in_source = "yes" == params.get("encryption_in_source", "no")
    auth_in_source = "yes" == params.get("auth_in_source", "no")
    auth_sec_uuid = ""
    luks_sec_uuid = ""
    disk_auth_dict = {}
    disk_encryption_dict = {}
    pvt = None

    if ((encryption_in_source or auth_in_source) and
            not libvirt_version.version_compare(3, 9, 0)):
        test.cancel("Cannot put <encryption> or <auth> inside disk <source> "
                    "in this libvirt version.")
    # Start VM and get all partitions in VM.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)

    try:
        # Setup backend storage
        if backend_storage_type == "iscsi":
            iscsi_host = params.get("iscsi_host")
            iscsi_port = params.get("iscsi_port")
            if device_type == "block":
                device_source = libvirt.setup_or_cleanup_iscsi(is_setup=True)
                disk_src_dict = {'attrs': {'dev': device_source}}
            elif device_type == "network":
                if enable_auth:
                    chap_user = params.get("chap_user", "redhat")
                    chap_passwd = params.get("chap_passwd", "password")
                    auth_sec_usage = params.get("auth_sec_usage",
                                                "libvirtiscsi")
                    auth_sec_dict = {"sec_usage": "iscsi",
                                     "sec_target": auth_sec_usage}
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    # Set password of auth secret (not luks encryption secret)
                    virsh.secret_set_value(auth_sec_uuid, chap_passwd,
                                           encode=True, debug=True)
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True, is_login=False, image_size=storage_size,
                        chap_user=chap_user, chap_passwd=chap_passwd,
                        portal_ip=iscsi_host)
                    # ISCSI auth attributes for disk xml
                    if use_auth_uuid:
                        disk_auth_dict = {"auth_user": chap_user,
                                          "secret_type": auth_sec_usage_type,
                                          "secret_uuid": auth_sec_uuid}
                    elif use_auth_usage:
                        disk_auth_dict = {"auth_user": chap_user,
                                          "secret_type": auth_sec_usage_type,
                                          "secret_usage": auth_sec_usage_target}
                else:
                    iscsi_target, lun_num = libvirt.setup_or_cleanup_iscsi(
                        is_setup=True, is_login=False, image_size=storage_size,
                        portal_ip=iscsi_host)
                device_source = "iscsi://%s:%s/%s/%s" % (iscsi_host, iscsi_port,
                                                         iscsi_target, lun_num)
                disk_src_dict = {"attrs": {"protocol": "iscsi",
                                           "name": "%s/%s" % (iscsi_target, lun_num)},
                                 "hosts": [{"name": iscsi_host, "port": iscsi_port}]}
        elif backend_storage_type == "gluster":
            gluster_vol_name = params.get("gluster_vol_name", "gluster_vol1")
            gluster_pool_name = params.get("gluster_pool_name", "gluster_pool1")
            gluster_img_name = params.get("gluster_img_name", "gluster1.img")
            gluster_host_ip = libvirt.setup_or_cleanup_gluster(
                    is_setup=True,
                    vol_name=gluster_vol_name,
                    pool_name=gluster_pool_name)
            device_source = "gluster://%s/%s/%s" % (gluster_host_ip,
                                                    gluster_vol_name,
                                                    gluster_img_name)
            disk_src_dict = {"attrs": {"protocol": "gluster",
                                       "name": "%s/%s" % (gluster_vol_name,
                                                          gluster_img_name)},
                             "hosts":  [{"name": gluster_host_ip,
                                         "port": "24007"}]}
        elif backend_storage_type == "ceph":
            ceph_host_ip = params.get("ceph_host_ip", "EXAMPLE_HOSTS")
            ceph_mon_ip = params.get("ceph_mon_ip", "EXAMPLE_MON_HOST")
            ceph_host_port = params.get("ceph_host_port", "EXAMPLE_PORTS")
            ceph_disk_name = params.get("ceph_disk_name", "EXAMPLE_SOURCE_NAME")
            ceph_client_name = params.get("ceph_client_name")
            ceph_client_key = params.get("ceph_client_key")
            ceph_auth_user = params.get("ceph_auth_user")
            ceph_auth_key = params.get("ceph_auth_key")
            enable_auth = "yes" == params.get("enable_auth")
            key_file = os.path.join(data_dir.get_tmp_dir(), "ceph.key")
            key_opt = ""
            # Start with a blank ceph config path so cleanup knows whether to delete the config file at the end of the test
            ceph_cfg = ""
            if not utils_package.package_install(["ceph-common"]):
                test.error("Failed to install ceph-common")
            # Create config file if it doesn't exist
            ceph_cfg = ceph.create_config_file(ceph_mon_ip)
            if enable_auth:
                # If auth is enabled, prepare a local file to save the key
                if ceph_client_name and ceph_client_key:
                    with open(key_file, 'w') as f:
                        f.write("[%s]\n\tkey = %s\n" %
                                (ceph_client_name, ceph_client_key))
                    key_opt = "--keyring %s" % key_file
                    auth_sec_dict = {"sec_usage": auth_sec_usage_type,
                                     "sec_name": "ceph_auth_secret"}
                    auth_sec_uuid = libvirt.create_secret(auth_sec_dict)
                    virsh.secret_set_value(auth_sec_uuid, ceph_auth_key,
                                           debug=True)
                    disk_auth_dict = {"auth_user": ceph_auth_user,
                                      "secret_type": auth_sec_usage_type,
                                      "secret_uuid": auth_sec_uuid}
                    cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                           "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
                else:
                    test.error("No ceph client name/key provided.")
                device_source = "rbd:%s:mon_host=%s:keyring=%s" % (ceph_disk_name,
                                                                   ceph_mon_ip,
                                                                   key_file)
            else:
                device_source = "rbd:%s:mon_host=%s" % (ceph_disk_name, ceph_mon_ip)
            disk_src_dict = {"attrs": {"protocol": "rbd",
                                       "name": ceph_disk_name},
                             "hosts":  [{"name": ceph_host_ip,
                                         "port": ceph_host_port}]}
        elif backend_storage_type == "nfs":
            pool_name = params.get("pool_name", "nfs_pool")
            pool_target = params.get("pool_target", "nfs_mount")
            pool_type = params.get("pool_type", "netfs")
            nfs_server_dir = params.get("nfs_server_dir", "nfs_server")
            emulated_image = params.get("emulated_image")
            image_name = params.get("nfs_image_name", "nfs.img")
            tmp_dir = data_dir.get_tmp_dir()
            pvt = libvirt.PoolVolumeTest(test, params)
            pvt.pre_pool(pool_name, pool_type, pool_target, emulated_image)
            nfs_mount_dir = os.path.join(tmp_dir, pool_target)
            device_source = nfs_mount_dir + image_name
            disk_src_dict = {'attrs': {'file': device_source,
                                       'type_name': 'file'}}
        else:
            test.cancel("Only iscsi/gluster/rbd/nfs can be tested for now.")
        logging.debug("device source is: %s", device_source)
        luks_sec_uuid = libvirt.create_secret(params)
        logging.debug("A secret created with uuid = '%s'", luks_sec_uuid)
        ret = virsh.secret_set_value(luks_sec_uuid, luks_secret_passwd,
                                     encode=True, debug=True)
        encrypt_dev(device_source, params)
        libvirt.check_exit_status(ret)
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        driver_dict = {"name": "qemu", "type": device_format}
        disk_xml.driver = driver_dict
        disk_source = disk_xml.new_disk_source(**disk_src_dict)
        if disk_auth_dict:
            logging.debug("disk auth dict is: %s" % disk_auth_dict)
            if auth_in_source:
                disk_source.auth = disk_xml.new_auth(**disk_auth_dict)
            else:
                disk_xml.auth = disk_xml.new_auth(**disk_auth_dict)
        disk_encryption_dict = {"encryption": "luks",
                                "secret": {"type": "passphrase",
                                           "uuid": luks_sec_uuid}}
        disk_encryption = disk_xml.new_encryption(**disk_encryption_dict)
        if encryption_in_source:
            disk_source.encryption = disk_encryption
        else:
            disk_xml.encryption = disk_encryption
        disk_xml.source = disk_source
        logging.debug("new disk xml is: %s", disk_xml)
        # Sync VM xml
        if not hotplug_disk:
            vmxml.add_device(disk_xml)
        vmxml.sync()
        try:
            vm.start()
            vm.wait_for_login()
        except virt_vm.VMStartError as details:
            # When a wrong password is used in the disk xml for cold plug cases,
            # the VM cannot be started.
            if status_error and not hotplug_disk:
                logging.info("VM failed to start as expected: %s" % str(details))
            else:
                test.fail("VM should start but failed: %s" % str(details))
        if hotplug_disk:
            result = virsh.attach_device(vm_name, disk_xml.xml,
                                         ignore_status=True, debug=True)
            libvirt.check_exit_status(result, status_error)
        if check_partitions and not status_error:
            if not check_in_vm(device_target, old_parts):
                test.fail("Check disk partitions in VM failed")
        check_dev_format(device_source)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        vmxml_backup.sync("--snapshots-metadata")

        # Clean up backend storage
        if backend_storage_type == "iscsi":
            libvirt.setup_or_cleanup_iscsi(is_setup=False)
        elif backend_storage_type == "gluster":
            libvirt.setup_or_cleanup_gluster(is_setup=False,
                                             vol_name=gluster_vol_name,
                                             pool_name=gluster_pool_name)
        elif backend_storage_type == "ceph":
            # Remove the ceph config file if created.
            if ceph_cfg:
                os.remove(ceph_cfg)
            cmd = ("rbd -m {0} {1} info {2} && rbd -m {0} {1} rm "
                   "{2}".format(ceph_mon_ip, key_opt, ceph_disk_name))
            cmd_result = process.run(cmd, ignore_status=True, shell=True)
            logging.debug("result of rbd removal: %s", cmd_result)
            if os.path.exists(key_file):
                os.remove(key_file)

        # Clean up secrets
        if auth_sec_uuid:
            virsh.secret_undefine(auth_sec_uuid)
        if luks_sec_uuid:
            virsh.secret_undefine(luks_sec_uuid)

        # Clean up pools
        if pvt:
            pvt.cleanup_pool(pool_name, pool_type, pool_target, emulated_image)
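The examples above create the luks secret through avocado-vt helpers (libvirt.create_secret plus virsh.secret_set_value). The same workflow can be reproduced with the plain virsh CLI; the sketch below assumes the usual "Secret <uuid> created" message from secret-define and uses a placeholder volume path and passphrase.

import base64
import subprocess
import tempfile


def define_passphrase_secret(volume_path, passphrase="redhat"):
    """Define a volume secret and set its value as base64 (sketch)."""
    sec_xml = ("<secret ephemeral='no' private='yes'>"
               "<usage type='volume'><volume>%s</volume></usage>"
               "</secret>" % volume_path)
    with tempfile.NamedTemporaryFile("w", suffix=".xml", delete=False) as xml_file:
        xml_file.write(sec_xml)
        xml_path = xml_file.name
    # "virsh secret-define" prints: Secret <uuid> created
    out = subprocess.run(["virsh", "secret-define", xml_path],
                         capture_output=True, text=True, check=True).stdout
    uuid = out.split()[1]
    # The secret value must be handed over base64-encoded.
    encoded = base64.b64encode(passphrase.encode()).decode()
    subprocess.run(["virsh", "secret-set-value", uuid, "--base64", encoded],
                   check=True)
    return uuid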
def run(test, params, env):
    """
    Test disk encryption option.

    1.Prepare test environment,destroy or suspend a VM.
    2.Prepare pool, volume.
    3.Edit disks xml and start the domain.
    4.Perform test operation.
    5.Recover test environment.
    6.Confirm the test result.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    def create_pool(p_name, p_type, p_target):
        """
        Define and start a pool.

        :param p_name. Pool name.
        :param p_type. Pool type.
        :param p_target. Pool target path.
        """
        p_xml = pool_xml.PoolXML(pool_type=p_type)
        p_xml.name = p_name
        p_xml.target_path = p_target

        if not os.path.exists(p_target):
            os.mkdir(p_target)
        p_xml.xmltreefile.write()
        ret = virsh.pool_define(p_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_build(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)
        ret = virsh.pool_start(p_name, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_vol(p_name, target_encrypt_params, vol_params):
        """
        Create volume.

        :param p_name. Pool name.
        :param target_encrypt_params encrypt parameters in dict.
        :param vol_params. Volume parameters dict.
        :return: True if create successfully.
        """
        volxml = vol_xml.VolXML()
        v_xml = volxml.new_vol(**vol_params)
        v_xml.encryption = volxml.new_encryption(**target_encrypt_params)
        v_xml.xmltreefile.write()

        ret = virsh.vol_create(p_name, v_xml.xml, **virsh_dargs)
        libvirt.check_exit_status(ret)

    def create_secret(vol_path):
        """
        Create secret.

        :param vol_path. volume path.
        :return: secret id if create successfully.
        """
        sec_xml = secret_xml.SecretXML("no", "yes")
        sec_xml.description = "volume secret"

        sec_xml.usage = 'volume'
        sec_xml.volume = vol_path
        sec_xml.xmltreefile.write()

        ret = virsh.secret_define(sec_xml.xml)
        libvirt.check_exit_status(ret)
        # Get secret uuid.
        try:
            encryption_uuid = re.findall(r".+\S+(\ +\S+)\ +.+\S+",
                                         ret.stdout.strip())[0].lstrip()
        except IndexError as e:
            test.error("Fail to get newly created secret uuid")
        logging.debug("Secret uuid %s", encryption_uuid)

        # Set secret value.
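        # virsh secret-set-value expects a base64-encoded value, so encode the
        # plain passphrase using the current locale's preferred encoding.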
        encoding = locale.getpreferredencoding()
        secret_string = base64.b64encode(secret_password_no_encoded.encode(encoding)).decode(encoding)
        ret = virsh.secret_set_value(encryption_uuid, secret_string,
                                     **virsh_dargs)
        libvirt.check_exit_status(ret)
        return encryption_uuid

    def check_in_vm(vm, target, old_parts):
        """
        Check mount/read/write disk in VM.
        :param vm. VM guest.
        :param target. Disk dev in VM.
        :param old_parts. Original disk partitions in VM.
        :return: True if the check succeeds.
        """
        try:
            session = vm.wait_for_login()
            rpm_stat = session.cmd_status("rpm -q parted || "
                                          "yum install -y parted", 300)
            if rpm_stat != 0:
                test.fail("Failed to query/install parted, make sure"
                          " that you have usable repo in guest")

            new_parts = libvirt.get_parts_list(session)
            added_parts = list(set(new_parts).difference(set(old_parts)))
            logging.info("Added parts:%s", added_parts)
            if len(added_parts) != 1:
                logging.error("The number of new partitions is invalid in VM")
                return False

            added_part = None
            if target.startswith("vd"):
                if added_parts[0].startswith("vd"):
                    added_part = added_parts[0]
            elif target.startswith("hd"):
                if added_parts[0].startswith("sd"):
                    added_part = added_parts[0]
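            # "hd*" targets are exposed through the guest's sd driver, which is
            # why the added partition is expected to start with "sd" above.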

            if not added_part:
                logging.error("Can't see added partition in VM")
                return False

            device_source = os.path.join(os.sep, 'dev', added_part)
            libvirt.mk_label(device_source, session=session)
            libvirt.mk_part(device_source, size="10M", session=session)
            # Run partprobe to make the change take effect.
            process.run("partprobe", ignore_status=True, shell=True)
            libvirt.mkfs("/dev/%s1" % added_part, "ext3", session=session)

            cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile"
                   " && cat /mnt/testfile && umount /mnt" % added_part)
            s, o = session.cmd_status_output(cmd)
            logging.info("Check disk operation in VM:\n%s", o)
            session.close()
            if s != 0:
                return False
            return True
        except (remote.LoginError, virt_vm.VMError, aexpect.ShellError) as e:
            logging.error(str(e))
            return False

    # Disk specific attributes.
    device = params.get("virt_disk_device", "disk")
    device_target = params.get("virt_disk_device_target", "vdd")
    device_type = params.get("virt_disk_device_type", "file")
    device_bus = params.get("virt_disk_device_bus", "virtio")
    encryption_in_source = "yes" == params.get("encryption_in_source")
    encryption_out_source = "yes" == params.get("encryption_out_source")
    if encryption_in_source and not libvirt_version.version_compare(3, 9, 0):
        test.cancel("Cannot put <encryption> inside disk <source> in "
                    "this libvirt version.")
    # Pool/Volume options.
    pool_name = params.get("pool_name")
    pool_type = params.get("pool_type")
    pool_target = params.get("pool_target")
    volume_name = params.get("vol_name")
    volume_alloc = params.get("vol_alloc")
    volume_cap_unit = params.get("vol_cap_unit")
    volume_cap = params.get("vol_cap")
    volume_target_path = params.get("target_path")
    volume_target_format = params.get("target_format")
    volume_target_encypt = params.get("target_encypt", "")
    volume_target_label = params.get("target_label")

    hotplug = "yes" == params.get("virt_disk_device_hotplug")
    status_error = "yes" == params.get("status_error")
    secret_type = params.get("secret_type", "passphrase")
    secret_password_no_encoded = params.get("secret_password_no_encoded", "redhat")
    virt_disk_qcow2_format = "yes" == params.get("virt_disk_qcow2_format")

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    # Start the VM and record its existing partitions so the newly
    # attached encrypted disk can be identified later.
    if vm.is_dead():
        vm.start()
    session = vm.wait_for_login()
    old_parts = libvirt.get_parts_list(session)
    session.close()
    vm.destroy(gracefully=False)

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    sec_encryption_uuid = None
    try:
        # Prepare the disk.
        sec_uuids = []
        create_pool(pool_name, pool_type, pool_target)
        vol_params = {"name": volume_name, "capacity": int(volume_cap),
                      "allocation": int(volume_alloc), "format":
                      volume_target_format, "path": volume_target_path,
                      "label": volume_target_label,
                      "capacity_unit": volume_cap_unit}
        vol_encryption_params = {}
        vol_encryption_params.update({"format": volume_target_encypt})
        # For any disk format other than qcow2, the secret has to be
        # created before the volume that references it.
        if not virt_disk_qcow2_format:
            # Create the secret used for volume encryption.
            sec_encryption_uuid = create_secret(volume_target_path)
            sec_uuids.append(sec_encryption_uuid)
            vol_encryption_params.update({"secret": {"type": secret_type, "uuid": sec_encryption_uuid}})
        try:
            # Creating a luks encrypted volume is not supported on libvirt
            # versions older than 2.5.0, so skip in that case.
            create_vol(pool_name, vol_encryption_params, vol_params)
        except AssertionError as info:
            err_msgs = ("create: invalid option")
            if str(info).count(err_msgs):
                test.error("Creating luks encryption volume "
                           "is not supported on this libvirt version")
            else:
                test.error("Failed to create volume."
                           "Error: %s" % str(info))
        # Add disk xml.
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)

        disk_xml = Disk(type_name=device_type)
        disk_xml.device = device
        if device_type == "file":
            dev_attrs = "file"
        elif device_type == "dir":
            dev_attrs = "dir"
        else:
            dev_attrs = "dev"
        disk_source = disk_xml.new_disk_source(
                **{"attrs": {dev_attrs: volume_target_path}})
        disk_xml.driver = {"name": "qemu", "type": volume_target_format,
                           "cache": "none"}
        disk_xml.target = {"dev": device_target, "bus": device_bus}
        v_xml = vol_xml.VolXML.new_from_vol_dumpxml(volume_name, pool_name)
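        # Also record the secret uuid referenced by the freshly created volume
        # so it is undefined during cleanup.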
        sec_uuids.append(v_xml.encryption.secret["uuid"])
        if not status_error:
            logging.debug("vol info -- format: %s, type: %s, uuid: %s",
                          v_xml.encryption.format,
                          v_xml.encryption.secret["type"],
                          v_xml.encryption.secret["uuid"])
            encryption_dict = {"encryption": v_xml.encryption.format,
                               "secret": {"type": v_xml.encryption.secret["type"],
                                          "uuid": v_xml.encryption.secret["uuid"]}}
            if encryption_in_source:
                disk_source.encryption = disk_xml.new_encryption(
                        **encryption_dict)
            if encryption_out_source:
                disk_xml.encryption = disk_xml.new_encryption(
                        **encryption_dict)
        disk_xml.source = disk_source
        logging.debug("disk xml is:\n%s" % disk_xml)
        if not hotplug:
            # Sync VM xml.
            vmxml.add_device(disk_xml)
            vmxml.sync()

        try:
            # Start the VM and do disk hotplug if required,
            # then check the disk status in the vm.
            # Note that a LUKS encrypted virtual disk without <encryption>
            # can be started or attached normally since qemu just treats it
            # as RAW, so LUKS is not tested with status_error=TRUE.
            vm.start()
            vm.wait_for_login()
            if status_error:
                if hotplug:
                    logging.debug("attaching disk, expecting error...")
                    result = virsh.attach_device(vm_name, disk_xml.xml)
                    libvirt.check_exit_status(result, status_error)
                else:
                    test.fail("VM started unexpectedly.")
            else:
                if hotplug:
                    result = virsh.attach_device(vm_name, disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
                    result = virsh.detach_device(vm_name, disk_xml.xml,
                                                 debug=True)
                    libvirt.check_exit_status(result)
                else:
                    if not check_in_vm(vm, device_target, old_parts):
                        test.fail("Check encryption disk in VM failed")
        except virt_vm.VMStartError as e:
            if status_error:
                if hotplug:
                    test.fail("In hotplug scenario, VM should "
                              "start successfully but not."
                              "Error: %s", str(e))
                else:
                    logging.debug("VM failed to start as expected."
                                  "Error: %s", str(e))
            else:
                # From libvirt 2.5.0 onward, AES-CBC encrypted qcow2 images
                # are no longer supported by system emulators.
                err_msgs = ("AES-CBC encrypted qcow2 images is"
                            " no longer supported in system emulators")
                if str(e).count(err_msgs):
                    test.cancel(err_msgs)
                else:
                    test.fail("VM failed to start."
                              "Error: %s" % str(e))
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()

        # Clean up secrets, volume and pool.
        for sec_uuid in set(sec_uuids):
            virsh.secret_undefine(sec_uuid, **virsh_dargs)
        virsh.vol_delete(volume_name, pool_name, **virsh_dargs)
        if pool_name in virsh.pool_state_dict():
            virsh.pool_destroy(pool_name, **virsh_dargs)
            virsh.pool_undefine(pool_name, **virsh_dargs)